Diffstat (limited to 'include')
602 files changed, 12188 insertions, 5542 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index ca88c4706f2b..3f7f01f03869 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -480,6 +480,8 @@ void acpi_initialize_hp_context(struct acpi_device *adev, /* acpi_device.dev.bus == &acpi_bus_type */ extern struct bus_type acpi_bus_type; +int acpi_bus_for_each_dev(int (*fn)(struct device *, void *), void *data); + /* * Events * ------ diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 69e89d572b9e..02c1fa16e638 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -507,8 +507,12 @@ typedef u64 acpi_integer; /* Pointer/Integer type conversions */ #define ACPI_TO_POINTER(i) ACPI_CAST_PTR (void, (acpi_size) (i)) +#ifndef ACPI_TO_INTEGER #define ACPI_TO_INTEGER(p) ACPI_PTR_DIFF (p, (void *) 0) +#endif +#ifndef ACPI_OFFSET #define ACPI_OFFSET(d, f) ACPI_PTR_DIFF (&(((d *) 0)->f), (void *) 0) +#endif #define ACPI_PTR_TO_PHYSADDR(i) ACPI_TO_INTEGER(i) /* Optimizations for 4-character (32-bit) acpi_name manipulation */ diff --git a/include/acpi/apei.h b/include/acpi/apei.h index ece0a8af2bae..afaca3a075e8 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -27,14 +27,16 @@ extern int hest_disable; extern int erst_disable; #ifdef CONFIG_ACPI_APEI_GHES extern bool ghes_disable; +void __init acpi_ghes_init(void); #else #define ghes_disable 1 +static inline void acpi_ghes_init(void) { } #endif #ifdef CONFIG_ACPI_APEI void __init acpi_hest_init(void); #else -static inline void acpi_hest_init(void) { return; } +static inline void acpi_hest_init(void) { } #endif int erst_write(const struct cper_record_header *record); diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index b3ffb9bbf664..cec41e004ecf 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -114,6 +114,11 @@ #define acpi_raw_spinlock raw_spinlock_t * #define acpi_cpu_flags unsigned long +#define acpi_uintptr_t uintptr_t + +#define ACPI_TO_INTEGER(p) ((uintptr_t)(p)) +#define ACPI_OFFSET(d, f) offsetof(d, f) + /* Use native linux version of acpi_os_allocate_zeroed */ #define USE_NATIVE_ALLOCATE_ZEROED diff --git a/include/asm-generic/access_ok.h b/include/asm-generic/access_ok.h new file mode 100644 index 000000000000..2866ae61b1cd --- /dev/null +++ b/include/asm-generic/access_ok.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_GENERIC_ACCESS_OK_H__ +#define __ASM_GENERIC_ACCESS_OK_H__ + +/* + * Checking whether a pointer is valid for user space access. + * These definitions work on most architectures, but overrides can + * be used where necessary. + */ + +/* + * architectures with compat tasks have a variable TASK_SIZE and should + * override this to a constant. + */ +#ifndef TASK_SIZE_MAX +#define TASK_SIZE_MAX TASK_SIZE +#endif + +#ifndef __access_ok +/* + * 'size' is a compile-time constant for most callers, so optimize for + * this case to turn the check into a single comparison against a constant + * limit and catch all possible overflows. + * On architectures with separate user address space (m68k, s390, parisc, + * sparc64) or those without an MMU, this should always return true. + * + * This version was originally contributed by Jonas Bonn for the + * OpenRISC architecture, and was found to be the most efficient + * for constant 'size' and 'limit' values. 
+ */ +static inline int __access_ok(const void __user *ptr, unsigned long size) +{ + unsigned long limit = TASK_SIZE_MAX; + unsigned long addr = (unsigned long)ptr; + + if (IS_ENABLED(CONFIG_ALTERNATE_USER_ADDRESS_SPACE) || + !IS_ENABLED(CONFIG_MMU)) + return true; + + return (size <= limit) && (addr <= (limit - size)); +} +#endif + +#ifndef access_ok +#define access_ok(addr, size) likely(__access_ok(addr, size)) +#endif + +#endif diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h index c90192b1c755..4225a8ca9c1a 100644 --- a/include/asm-generic/bitops/instrumented-atomic.h +++ b/include/asm-generic/bitops/instrumented-atomic.h @@ -23,7 +23,7 @@ * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void set_bit(long nr, volatile unsigned long *addr) +static __always_inline void set_bit(long nr, volatile unsigned long *addr) { instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_set_bit(nr, addr); @@ -36,7 +36,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr) * * This is a relaxed atomic operation (no implied memory barriers). */ -static inline void clear_bit(long nr, volatile unsigned long *addr) +static __always_inline void clear_bit(long nr, volatile unsigned long *addr) { instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit(nr, addr); @@ -52,7 +52,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr) * Note that @nr may be almost arbitrarily large; this function is not * restricted to acting on a single-word quantity. */ -static inline void change_bit(long nr, volatile unsigned long *addr) +static __always_inline void change_bit(long nr, volatile unsigned long *addr) { instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_change_bit(nr, addr); @@ -65,7 +65,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) * * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) +static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { kcsan_mb(); instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); @@ -79,7 +79,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) * * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) +static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { kcsan_mb(); instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); @@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) * * This is an atomic fully-ordered operation (implied full memory barrier). */ -static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) +static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { kcsan_mb(); instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h index 37363d570b9b..7ab1ecc37782 100644 --- a/include/asm-generic/bitops/instrumented-non-atomic.h +++ b/include/asm-generic/bitops/instrumented-non-atomic.h @@ -22,7 +22,7 @@ * region of memory concurrently, the effect may be that only one operation * succeeds. 
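As a usage sketch (not part of this patch; the flag index and word are hypothetical), the instrumented atomic wrappers above are called exactly like the arch primitives they now unconditionally inline:

.. code-block:: c

	#define EXAMPLE_READY	0		/* hypothetical bit index */

	static unsigned long example_flags;

	static void example_mark_ready(void)
	{
		/* Atomic RMW, safe against concurrent updates of other bits. */
		set_bit(EXAMPLE_READY, &example_flags);
	}

	static bool example_consume_ready(void)
	{
		/* Atomic test-and-clear, implies a full memory barrier. */
		return test_and_clear_bit(EXAMPLE_READY, &example_flags);
	}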
*/ -static inline void __set_bit(long nr, volatile unsigned long *addr) +static __always_inline void __set_bit(long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___set_bit(nr, addr); @@ -37,7 +37,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr) * region of memory concurrently, the effect may be that only one operation * succeeds. */ -static inline void __clear_bit(long nr, volatile unsigned long *addr) +static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit(nr, addr); @@ -52,13 +52,13 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr) * region of memory concurrently, the effect may be that only one operation * succeeds. */ -static inline void __change_bit(long nr, volatile unsigned long *addr) +static __always_inline void __change_bit(long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___change_bit(nr, addr); } -static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr) +static __always_inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr) { if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) { /* @@ -90,7 +90,7 @@ static inline void __instrument_read_write_bitop(long nr, volatile unsigned long * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ -static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) +static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_set_bit(nr, addr); @@ -104,7 +104,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ -static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) +static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_clear_bit(nr, addr); @@ -118,7 +118,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. 
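A companion sketch (hypothetical helper) of where the non-atomic __set_bit() family above is appropriate, namely while the bitmap is still private to a single thread:

.. code-block:: c

	#include <linux/bitmap.h>

	static void example_init_map(unsigned long *map, unsigned int nbits)
	{
		bitmap_zero(map, nbits);
		__set_bit(0, map);	/* no concurrent writers yet, so no atomics */
	}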
*/ -static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) +static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_change_bit(nr, addr); @@ -129,7 +129,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) * @nr: bit number to test * @addr: Address to start counting from */ -static inline bool test_bit(long nr, const volatile unsigned long *addr) +static __always_inline bool test_bit(long nr, const volatile unsigned long *addr) { instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); return arch_test_bit(nr, addr); diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h index 8f97c2927bee..fdce7a4cfc6f 100644 --- a/include/asm-generic/hyperv-tlfs.h +++ b/include/asm-generic/hyperv-tlfs.h @@ -183,11 +183,18 @@ enum HV_GENERIC_SET_FORMAT { #define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) #define HV_HYPERCALL_FAST_BIT BIT(16) #define HV_HYPERCALL_VARHEAD_OFFSET 17 +#define HV_HYPERCALL_VARHEAD_MASK GENMASK_ULL(26, 17) +#define HV_HYPERCALL_RSVD0_MASK GENMASK_ULL(31, 27) #define HV_HYPERCALL_REP_COMP_OFFSET 32 #define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32) #define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) +#define HV_HYPERCALL_RSVD1_MASK GENMASK_ULL(47, 44) #define HV_HYPERCALL_REP_START_OFFSET 48 #define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) +#define HV_HYPERCALL_RSVD2_MASK GENMASK_ULL(63, 60) +#define HV_HYPERCALL_RSVD_MASK (HV_HYPERCALL_RSVD0_MASK | \ + HV_HYPERCALL_RSVD1_MASK | \ + HV_HYPERCALL_RSVD2_MASK) /* hypercall status code */ #define HV_STATUS_SUCCESS 0 diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index 690f741764e1..d0f7bdd2fdf2 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -59,11 +59,24 @@ extern char __noinstr_text_start[], __noinstr_text_end[]; extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ -#ifndef dereference_function_descriptor +#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS +void *dereference_function_descriptor(void *ptr); +void *dereference_kernel_function_descriptor(void *ptr); +#else #define dereference_function_descriptor(p) ((void *)(p)) #define dereference_kernel_function_descriptor(p) ((void *)(p)) + +/* An address is simply the address of the function. */ +typedef struct { + unsigned long addr; +} func_desc_t; #endif +static inline bool have_function_descriptors(void) +{ + return IS_ENABLED(CONFIG_HAVE_FUNCTION_DESCRIPTORS); +} + /** * memory_contains - checks if an object is contained within a memory region * @begin: virtual address of the beginning of the memory region diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 2c68a545ffa7..fd7feb5c7894 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -180,7 +180,7 @@ struct mmu_table_batch { struct rcu_head rcu; #endif unsigned int nr; - void *tables[0]; + void *tables[]; }; #define MAX_TABLE_BATCH \ @@ -227,7 +227,7 @@ struct mmu_gather_batch { struct mmu_gather_batch *next; unsigned int nr; unsigned int max; - struct page *pages[0]; + struct page *pages[]; }; #define MAX_GATHER_BATCH \ diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h index 10ffa8b5c117..a5be9e61a2a2 100644 --- a/include/asm-generic/uaccess.h +++ b/include/asm-generic/uaccess.h @@ -8,6 +8,7 @@ * address space, e.g. all NOMMU machines. 
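For the HV_HYPERCALL_*_MASK definitions added above, a hedged sketch (hypothetical helper, assuming <linux/bitfield.h>) of composing a rep hypercall control word and checking that no reserved bits leak in:

.. code-block:: c

	#include <linux/bitfield.h>

	static u64 example_hv_control(u64 code, u16 varhead_size, u16 rep_count)
	{
		u64 control = code;

		control |= FIELD_PREP(HV_HYPERCALL_VARHEAD_MASK, varhead_size);
		control |= FIELD_PREP(HV_HYPERCALL_REP_COMP_MASK, rep_count);

		/* All three RSVD fields must stay zero. */
		WARN_ON_ONCE(control & HV_HYPERCALL_RSVD_MASK);
		return control;
	}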
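And for the consolidated access_ok() from the new <asm-generic/access_ok.h>, a minimal sketch (hypothetical helper) of the pattern it serves; copy_from_user() performs this check itself, only the __-prefixed accessors skip it:

.. code-block:: c

	static long example_fetch_int(const int __user *uptr, int *val)
	{
		/* Single range check, then the unchecked accessor. */
		if (!access_ok(uptr, sizeof(*uptr)))
			return -EFAULT;
		return __get_user(*val, uptr) ? -EFAULT : 0;
	}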
*/ #include <linux/string.h> +#include <asm-generic/access_ok.h> #ifdef CONFIG_UACCESS_MEMCPY #include <asm/unaligned.h> @@ -77,8 +78,6 @@ do { \ goto err_label; \ } while (0) -#define HAVE_GET_KERNEL_NOFAULT 1 - static inline __must_check unsigned long raw_copy_from_user(void *to, const void __user * from, unsigned long n) { @@ -96,49 +95,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) #define INLINE_COPY_TO_USER #endif /* CONFIG_UACCESS_MEMCPY */ -#ifdef CONFIG_SET_FS -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) - -#ifndef KERNEL_DS -#define KERNEL_DS MAKE_MM_SEG(~0UL) -#endif - -#ifndef USER_DS -#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) -#endif - -#ifndef get_fs -#define get_fs() (current_thread_info()->addr_limit) - -static inline void set_fs(mm_segment_t fs) -{ - current_thread_info()->addr_limit = fs; -} -#endif - -#ifndef uaccess_kernel -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) -#endif - -#ifndef user_addr_max -#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) -#endif - -#endif /* CONFIG_SET_FS */ - -#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size)) - -/* - * The architecture should really override this if possible, at least - * doing a check on the get_fs() - */ -#ifndef __access_ok -static inline int __access_ok(unsigned long addr, unsigned long size) -{ - return 1; -} -#endif - /* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 42f3866bca69..2a10db2f0bc5 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -321,16 +321,6 @@ #define THERMAL_TABLE(name) #endif -#ifdef CONFIG_DTPM -#define DTPM_TABLE() \ - . 
= ALIGN(8); \ - __dtpm_table = .; \ - KEEP(*(__dtpm_table)) \ - __dtpm_table_end = .; -#else -#define DTPM_TABLE() -#endif - #define KERNEL_DTB() \ STRUCT_ALIGN(); \ __dtb_start = .; \ @@ -723,7 +713,6 @@ ACPI_PROBE_TABLE(irqchip) \ ACPI_PROBE_TABLE(timer) \ THERMAL_TABLE(governor) \ - DTPM_TABLE() \ EARLYCON_TABLE() \ LSM_TABLE() \ EARLY_LSM_TABLE() \ diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h index b62a2a56a4d4..44509d48fca2 100644 --- a/include/asm-generic/xor.h +++ b/include/asm-generic/xor.h @@ -8,7 +8,8 @@ #include <linux/prefetch.h> static void -xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8; @@ -27,8 +28,9 @@ xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_8regs_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8; @@ -48,8 +50,10 @@ xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_8regs_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8; @@ -70,8 +74,11 @@ xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_8regs_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8; @@ -93,7 +100,8 @@ xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8; @@ -129,8 +137,9 @@ xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_32regs_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8; @@ -175,8 +184,10 @@ xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_32regs_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8; @@ -230,8 +241,11 @@ xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_32regs_5(unsigned long 
bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8; @@ -294,7 +308,8 @@ xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); @@ -320,8 +335,9 @@ xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_8regs_p_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8 - 1; prefetchw(p1); @@ -350,8 +366,10 @@ xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_8regs_p_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -384,8 +402,11 @@ xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_8regs_p_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -421,7 +442,8 @@ xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -466,8 +488,9 @@ xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) } static void -xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3) +xor_32regs_p_3(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -523,8 +546,10 @@ xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4) +xor_32regs_p_4(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const unsigned long * __restrict p4) { long lines = bytes / (sizeof (long)) / 8 - 1; @@ -591,8 +616,11 @@ xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, } static void -xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, - unsigned long *p3, unsigned long *p4, unsigned long *p5) +xor_32regs_p_5(unsigned long bytes, unsigned long * __restrict p1, + const unsigned long * __restrict p2, + const unsigned long * __restrict p3, + const 
unsigned long * __restrict p4, + const unsigned long * __restrict p5) { long lines = bytes / (sizeof (long)) / 8 - 1; diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h index e715bdb720d5..057c8964aefb 100644 --- a/include/clocksource/arm_arch_timer.h +++ b/include/clocksource/arm_arch_timer.h @@ -56,6 +56,7 @@ enum arch_timer_spi_nr { #define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT) #define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */ #define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */ +#define ARCH_TIMER_EVT_INTERVAL_SCALE (1 << 17) /* EVNTIS in the ARMv8 ARM */ #define ARCH_TIMER_EVT_STREAM_PERIOD_US 100 #define ARCH_TIMER_EVT_STREAM_FREQ \ diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index f76ec723ceae..f50c5d1725da 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -13,6 +13,8 @@ #include <linux/list.h> #include <linux/types.h> +#include <asm/unaligned.h> + /* * Maximum values for blocksize and alignmask, used to allocate * static buffers that are big enough for any combination of @@ -154,9 +156,11 @@ static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size) (size % sizeof(unsigned long)) == 0) { unsigned long *d = (unsigned long *)dst; unsigned long *s = (unsigned long *)src; + unsigned long l; while (size > 0) { - *d++ ^= *s++; + l = get_unaligned(d) ^ get_unaligned(s++); + put_unaligned(l, d++); size -= sizeof(unsigned long); } } else { @@ -173,9 +177,11 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, unsigned long *d = (unsigned long *)dst; unsigned long *s1 = (unsigned long *)src1; unsigned long *s2 = (unsigned long *)src2; + unsigned long l; while (size > 0) { - *d++ = *s1++ ^ *s2++; + l = get_unaligned(s1++) ^ get_unaligned(s2++); + put_unaligned(l, d++); size -= sizeof(unsigned long); } } else { diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h deleted file mode 100644 index 48198c36d6b9..000000000000 --- a/include/crypto/asym_tpm_subtype.h +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#ifndef _LINUX_ASYM_TPM_SUBTYPE_H -#define _LINUX_ASYM_TPM_SUBTYPE_H - -#include <linux/keyctl.h> - -struct tpm_key { - void *blob; - u32 blob_len; - uint16_t key_len; /* Size in bits of the key */ - const void *pub_key; /* pointer inside blob to the public key bytes */ - uint16_t pub_key_len; /* length of the public key */ -}; - -struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len); - -extern struct asymmetric_key_subtype asym_tpm_subtype; - -#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */ diff --git a/include/crypto/dh.h b/include/crypto/dh.h index d71e9858ab86..7b863e911cb4 100644 --- a/include/crypto/dh.h +++ b/include/crypto/dh.h @@ -24,21 +24,17 @@ * * @key: Private DH key * @p: Diffie-Hellman parameter P - * @q: Diffie-Hellman parameter Q * @g: Diffie-Hellman generator G * @key_size: Size of the private DH key * @p_size: Size of DH parameter P - * @q_size: Size of DH parameter Q * @g_size: Size of DH generator G */ struct dh { - void *key; - void *p; - void *q; - void *g; + const void *key; + const void *p; + const void *g; unsigned int key_size; unsigned int p_size; - unsigned int q_size; unsigned int g_size; }; @@ -83,4 +79,20 @@ int crypto_dh_encode_key(char *buf, unsigned int len, const struct dh *params); */ int crypto_dh_decode_key(const char *buf, unsigned int len, struct dh *params); +/** + * __crypto_dh_decode_key() - decode a 
private key without parameter checks + * @buf: Buffer holding a packed key that should be decoded + * @len: Length of the packed private key buffer + * @params: Buffer allocated by the caller that is filled with the + * unpacked DH private key. + * + * Internal function providing the same services as the exported + * crypto_dh_decode_key(), but without any of those basic parameter + * checks conducted by the latter. + * + * Return: -EINVAL if buffer has insufficient size, 0 on success + */ +int __crypto_dh_decode_key(const char *buf, unsigned int len, + struct dh *params); + #endif diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h index 659b642efada..9cb0662ebe87 100644 --- a/include/crypto/internal/kpp.h +++ b/include/crypto/internal/kpp.h @@ -10,6 +10,38 @@ #include <crypto/kpp.h> #include <crypto/algapi.h> +/** + * struct kpp_instance - KPP template instance + * @free: Callback getting invoked upon instance destruction. Must be set. + * @s: Internal. Generic crypto core instance state properly laid out + * to alias with @alg as needed. + * @alg: The &struct kpp_alg implementation provided by the instance. + */ +struct kpp_instance { + void (*free)(struct kpp_instance *inst); + union { + struct { + char head[offsetof(struct kpp_alg, base)]; + struct crypto_instance base; + } s; + struct kpp_alg alg; + }; +}; + +/** + * struct crypto_kpp_spawn - KPP algorithm spawn + * @base: Internal. Generic crypto core spawn state. + * + * Template instances can get a hold on some inner KPP algorithm by + * binding a &struct crypto_kpp_spawn via + * crypto_grab_kpp(). Transforms may subsequently get instantiated + * from the referenced inner &struct kpp_alg by means of + * crypto_spawn_kpp(). + */ +struct crypto_kpp_spawn { + struct crypto_spawn base; +}; + /* * Transform internal helpers. */ @@ -33,6 +65,62 @@ static inline const char *kpp_alg_name(struct crypto_kpp *tfm) { return crypto_kpp_tfm(tfm)->__crt_alg->cra_name; } +/* + * Template instance internal helpers. + */ +/** + * kpp_crypto_instance() - Cast a &struct kpp_instance to the corresponding + * generic &struct crypto_instance. + * @inst: Pointer to the &struct kpp_instance to be cast. + * Return: A pointer to the &struct crypto_instance embedded in @inst. + */ +static inline struct crypto_instance *kpp_crypto_instance( + struct kpp_instance *inst) +{ + return &inst->s.base; +} + +/** + * kpp_instance() - Cast a generic &struct crypto_instance to the corresponding + * &struct kpp_instance. + * @inst: Pointer to the &struct crypto_instance to be cast. + * Return: A pointer to the &struct kpp_instance @inst is embedded in. + */ +static inline struct kpp_instance *kpp_instance(struct crypto_instance *inst) +{ + return container_of(inst, struct kpp_instance, s.base); +} + +/** + * kpp_alg_instance() - Get the &struct kpp_instance a given KPP transform has + * been instantiated from. + * @kpp: The KPP transform instantiated from some &struct kpp_instance. + * Return: The &struct kpp_instance associated with @kpp. + */ +static inline struct kpp_instance *kpp_alg_instance(struct crypto_kpp *kpp) +{ + return kpp_instance(crypto_tfm_alg_instance(&kpp->base)); +} + +/** + * kpp_instance_ctx() - Get a pointer to a &struct kpp_instance's implementation + * specific context data. + * @inst: The &struct kpp_instance whose context data to access. + * + * A KPP template implementation may allocate extra memory beyond the + * end of a &struct kpp_instance instantiated from &crypto_template.create().
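To see how the new KPP template plumbing fits together, a rough sketch of a template's create() callback under the usual crypto API conventions (all example_* names are hypothetical, algorithm setup elided):

.. code-block:: c

	#include <crypto/internal/kpp.h>
	#include <linux/slab.h>

	struct example_kpp_ctx {		/* hypothetical instance context */
		struct crypto_kpp_spawn spawn;
	};

	static void example_kpp_free(struct kpp_instance *inst)
	{
		struct example_kpp_ctx *ctx = kpp_instance_ctx(inst);

		crypto_drop_kpp(&ctx->spawn);
		kfree(inst);
	}

	static int example_kpp_create(struct crypto_template *tmpl,
				      struct rtattr **tb)
	{
		struct kpp_instance *inst;
		struct example_kpp_ctx *ctx;
		int err;

		inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
		if (!inst)
			return -ENOMEM;
		ctx = kpp_instance_ctx(inst);

		err = crypto_grab_kpp(&ctx->spawn, kpp_crypto_instance(inst),
				      crypto_attr_alg_name(tb[1]), 0, 0);
		if (err) {
			kfree(inst);
			return err;
		}

		/* ... fill in inst->alg from crypto_spawn_kpp_alg(&ctx->spawn) ... */
		inst->free = example_kpp_free;

		err = kpp_register_instance(tmpl, inst);
		if (err)
			example_kpp_free(inst);
		return err;
	}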
+ * This function provides a means to obtain a pointer to this area. + * + * Return: A pointer to the implementation specific context data. + */ +static inline void *kpp_instance_ctx(struct kpp_instance *inst) +{ + return crypto_instance_ctx(kpp_crypto_instance(inst)); +} + +/* + * KPP algorithm (un)registration functions. + */ /** * crypto_register_kpp() -- Register key-agreement protocol primitives algorithm * @@ -56,4 +144,74 @@ int crypto_register_kpp(struct kpp_alg *alg); */ void crypto_unregister_kpp(struct kpp_alg *alg); +/** + * kpp_register_instance() - Register a KPP template instance. + * @tmpl: The instantiating template. + * @inst: The KPP template instance to be registered. + * Return: %0 on success, negative error code otherwise. + */ +int kpp_register_instance(struct crypto_template *tmpl, + struct kpp_instance *inst); + +/* + * KPP spawn related functions. + */ +/** + * crypto_grab_kpp() - Look up a KPP algorithm and bind a spawn to it. + * @spawn: The KPP spawn to bind. + * @inst: The template instance owning @spawn. + * @name: The KPP algorithm name to look up. + * @type: The type bitset to pass on to the lookup. + * @mask: The mask bitmask to pass on to the lookup. + * Return: %0 on success, a negative error code otherwise. + */ +int crypto_grab_kpp(struct crypto_kpp_spawn *spawn, + struct crypto_instance *inst, + const char *name, u32 type, u32 mask); + +/** + * crypto_drop_kpp() - Release a spawn previously bound via crypto_grab_kpp(). + * @spawn: The spawn to release. + */ +static inline void crypto_drop_kpp(struct crypto_kpp_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +/** + * crypto_spawn_kpp_alg() - Get the algorithm a KPP spawn has been bound to. + * @spawn: The spawn to get the referenced &struct kpp_alg for. + * + * This function as well as the returned result are safe to use only + * after @spawn has been successfully bound via crypto_grab_kpp() and + * up until the template instance owning @spawn has either been + * registered successfully or the spawn has been released again via + * crypto_drop_spawn(). + * + * Return: A pointer to the &struct kpp_alg referenced from the spawn. + */ +static inline struct kpp_alg *crypto_spawn_kpp_alg( + struct crypto_kpp_spawn *spawn) +{ + return container_of(spawn->base.alg, struct kpp_alg, base); +} + +/** + * crypto_spawn_kpp() - Create a transform from a KPP spawn. + * @spawn: The spawn previously bound to some &struct kpp_alg via + * crypto_grab_kpp(). + * + * Once a &struct crypto_kpp_spawn has been successfully bound to a + * &struct kpp_alg via crypto_grab_kpp(), transforms for the latter + * may get instantiated from the former by means of this function. + * + * Return: A pointer to the freshly created KPP transform on success + * or an ``ERR_PTR()`` otherwise. + */ +static inline struct crypto_kpp *crypto_spawn_kpp( + struct crypto_kpp_spawn *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + #endif diff --git a/include/crypto/sm3.h b/include/crypto/sm3.h index 42ea21289ba9..1f021ad0533f 100644 --- a/include/crypto/sm3.h +++ b/include/crypto/sm3.h @@ -1,5 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Common values for SM3 algorithm + * + * Copyright (C) 2017 ARM Limited or its affiliates.
+ * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com> + * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> */ #ifndef _CRYPTO_SM3_H @@ -30,13 +35,30 @@ struct sm3_state { u8 buffer[SM3_BLOCK_SIZE]; }; -struct shash_desc; +/* + * Stand-alone implementation of the SM3 algorithm. It is designed to + * have as little dependencies as possible so it can be used in the + * kexec_file purgatory. In other cases you should generally use the + * hash APIs from include/crypto/hash.h. Especially when hashing large + * amounts of data as those APIs may be hw-accelerated. + * + * For details see lib/crypto/sm3.c + */ -extern int crypto_sm3_update(struct shash_desc *desc, const u8 *data, - unsigned int len); +static inline void sm3_init(struct sm3_state *sctx) +{ + sctx->state[0] = SM3_IVA; + sctx->state[1] = SM3_IVB; + sctx->state[2] = SM3_IVC; + sctx->state[3] = SM3_IVD; + sctx->state[4] = SM3_IVE; + sctx->state[5] = SM3_IVF; + sctx->state[6] = SM3_IVG; + sctx->state[7] = SM3_IVH; + sctx->count = 0; +} -extern int crypto_sm3_final(struct shash_desc *desc, u8 *out); +void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len); +void sm3_final(struct sm3_state *sctx, u8 *out); -extern int crypto_sm3_finup(struct shash_desc *desc, const u8 *data, - unsigned int len, u8 *hash); #endif diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h index bda8aa7c2280..5286a53a1875 100644 --- a/include/drm/bridge/dw_mipi_dsi.h +++ b/include/drm/bridge/dw_mipi_dsi.h @@ -51,7 +51,9 @@ struct dw_mipi_dsi_plat_data { unsigned int max_data_lanes; enum drm_mode_status (*mode_valid)(void *priv_data, - const struct drm_display_mode *mode); + const struct drm_display_mode *mode, + unsigned long mode_flags, + u32 lanes, u32 format); const struct dw_mipi_dsi_phy_ops *phy_ops; const struct dw_mipi_dsi_host_ops *host_ops; diff --git a/include/drm/drm_dp_aux_bus.h b/include/drm/dp/drm_dp_aux_bus.h index 4f19b20b1dd6..4f19b20b1dd6 100644 --- a/include/drm/drm_dp_aux_bus.h +++ b/include/drm/dp/drm_dp_aux_bus.h diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/dp/drm_dp_dual_mode_helper.h index 7ee482265087..7ee482265087 100644 --- a/include/drm/drm_dp_dual_mode_helper.h +++ b/include/drm/dp/drm_dp_dual_mode_helper.h diff --git a/include/drm/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h index 30359e434c3f..51e02cf75277 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/dp/drm_dp_helper.h @@ -456,7 +456,7 @@ struct drm_panel; #define DP_FEC_CAPABILITY_1 0x091 /* 2.0 */ /* DP-HDMI2.1 PCON DSC ENCODER SUPPORT */ -#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xC /* 0x9E - 0x92 */ +#define DP_PCON_DSC_ENCODER_CAP_SIZE 0xD /* 0x92 through 0x9E */ #define DP_PCON_DSC_ENCODER 0x092 # define DP_PCON_DSC_ENCODER_SUPPORTED (1 << 0) # define DP_PCON_DSC_PPS_ENC_OVERRIDE (1 << 1) @@ -560,6 +560,7 @@ struct drm_panel; # define DP_TRAINING_PATTERN_DISABLE 0 # define DP_TRAINING_PATTERN_1 1 # define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_2_CDS 3 /* 2.0 E11 */ # define DP_TRAINING_PATTERN_3 3 /* 1.2 */ # define DP_TRAINING_PATTERN_4 7 /* 1.4 */ # define DP_TRAINING_PATTERN_MASK 0x3 @@ -738,11 +739,13 @@ struct drm_panel; DP_LANE_CHANNEL_EQ_DONE | \ DP_LANE_SYMBOL_LOCKED) -#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 - -#define DP_INTERLANE_ALIGN_DONE (1 << 0) -#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) -#define DP_LINK_STATUS_UPDATED (1 << 7) +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define 
DP_128B132B_DPRX_EQ_INTERLANE_ALIGN_DONE (1 << 2) /* 2.0 E11 */ +#define DP_128B132B_DPRX_CDS_INTERLANE_ALIGN_DONE (1 << 3) /* 2.0 E11 */ +#define DP_128B132B_LT_FAILED (1 << 4) /* 2.0 E11 */ +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) #define DP_SINK_STATUS 0x205 # define DP_RECEIVE_PORT_0_STATUS (1 << 0) @@ -1038,11 +1041,8 @@ struct drm_panel; #define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ /* DPRX Event Status Indicator */ -#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */ -/* 0-5 sink count */ -# define DP_SINK_COUNT_CP_READY (1 << 6) - -#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */ +#define DP_SINK_COUNT_ESI 0x2002 /* same as 0x200 */ +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* same as 0x201 */ #define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ # define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE (1 << 0) @@ -1115,6 +1115,7 @@ struct drm_panel; # define DP_UHBR13_5 (1 << 2) #define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */ +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_1MS_UNIT (1 << 7) # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US 0x00 # define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS 0x01 @@ -1350,6 +1351,7 @@ struct drm_panel; # define DP_PHY_REPEATER_128B132B_SUPPORTED (1 << 0) /* See DP_128B132B_SUPPORTED_LINK_RATES for values */ #define DP_PHY_REPEATER_128B132B_RATES 0xf0007 /* 2.0 */ +#define DP_PHY_REPEATER_EQ_DONE 0xf0008 /* 2.0 E11 */ enum drm_dp_phy { DP_PHY_DPRX, @@ -1528,8 +1530,6 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI int lane); u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE], int lane); -u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE], - unsigned int lane); #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf @@ -1552,6 +1552,15 @@ void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux, void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux, const u8 caps[DP_LTTPR_PHY_CAP_SIZE]); +int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux); +bool drm_dp_128b132b_lane_channel_eq_done(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_lane_symbol_locked(const u8 link_status[DP_LINK_STATUS_SIZE], + int lane_count); +bool drm_dp_128b132b_eq_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_cds_interlane_align_done(const u8 link_status[DP_LINK_STATUS_SIZE]); +bool drm_dp_128b132b_link_training_failed(const u8 link_status[DP_LINK_STATUS_SIZE]); + u8 drm_dp_link_rate_to_bw_code(int link_rate); int drm_dp_bw_code_to_link_rate(u8 link_bw); diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/dp/drm_dp_mst_helper.h index 78044ac5b59b..08276eb8c187 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/dp/drm_dp_mst_helper.h @@ -23,7 +23,7 @@ #define _DRM_DP_MST_HELPER_H_ #include <linux/types.h> -#include <drm/drm_dp_helper.h> +#include <drm/dp/drm_dp_helper.h> #include <drm/drm_atomic.h> #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 061d87313fac..f27b4060faa2 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -649,6 +649,13 @@ struct drm_bridge_funcs { * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops. 
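As a usage sketch for the drm_dp_128b132b_*() status queries declared above (hypothetical helper; real link training also honors the timeout from DP_128B132B_TRAINING_AUX_RD_INTERVAL):

.. code-block:: c

	static bool example_128b132b_eq_done(struct drm_dp_aux *aux, int lanes)
	{
		u8 link_status[DP_LINK_STATUS_SIZE];

		if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
			return false;

		return drm_dp_128b132b_lane_channel_eq_done(link_status, lanes) &&
		       drm_dp_128b132b_eq_interlane_align_done(link_status);
	}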
*/ void (*hpd_disable)(struct drm_bridge *bridge); + + /** + * @debugfs_init: + * + * Allows bridges to create bridge-specific debugfs files. + */ + void (*debugfs_init)(struct drm_bridge *bridge, struct dentry *root); }; /** diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h new file mode 100644 index 000000000000..572077ff8ae7 --- /dev/null +++ b/include/drm/drm_buddy.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __DRM_BUDDY_H__ +#define __DRM_BUDDY_H__ + +#include <linux/bitops.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/sched.h> + +#include <drm/drm_print.h> + +#define range_overflows(start, size, max) ({ \ + typeof(start) start__ = (start); \ + typeof(size) size__ = (size); \ + typeof(max) max__ = (max); \ + (void)(&start__ == &size__); \ + (void)(&start__ == &max__); \ + start__ >= max__ || size__ > max__ - start__; \ +}) + +#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0) +#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1) + +struct drm_buddy_block { +#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) +#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) +#define DRM_BUDDY_ALLOCATED (1 << 10) +#define DRM_BUDDY_FREE (2 << 10) +#define DRM_BUDDY_SPLIT (3 << 10) +/* Free to be used, if needed in the future */ +#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6) +#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) + u64 header; + + struct drm_buddy_block *left; + struct drm_buddy_block *right; + struct drm_buddy_block *parent; + + void *private; /* owned by creator */ + + /* + * While the block is allocated by the user through drm_buddy_alloc*, + * the user has ownership of the link, for example to maintain within + * a list, if so desired. As soon as the block is freed with + * drm_buddy_free* ownership is given back to the mm. + */ + struct list_head link; + struct list_head tmp_link; +}; + +/* Order-zero must be at least PAGE_SIZE */ +#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT) + +/* + * Binary Buddy System. + * + * Locking should be handled by the user, a simple mutex around + * drm_buddy_alloc* and drm_buddy_free* should suffice. + */ +struct drm_buddy { + /* Maintain a free list for each order. */ + struct list_head *free_list; + + /* + * Maintain explicit binary tree(s) to track the allocation of the + * address space. This gives us a simple way of finding a buddy block + * and performing the potentially recursive merge step when freeing a + * block. Nodes are either allocated or free, in which case they will + * also exist on the respective free list. + */ + struct drm_buddy_block **roots; + + /* + * Anything from here is public, and remains static for the lifetime of + * the mm. Everything above is considered do-not-touch. 
+ */ + unsigned int n_roots; + unsigned int max_order; + + /* Must be at least PAGE_SIZE */ + u64 chunk_size; + u64 size; + u64 avail; +}; + +static inline u64 +drm_buddy_block_offset(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_OFFSET; +} + +static inline unsigned int +drm_buddy_block_order(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_ORDER; +} + +static inline unsigned int +drm_buddy_block_state(struct drm_buddy_block *block) +{ + return block->header & DRM_BUDDY_HEADER_STATE; +} + +static inline bool +drm_buddy_block_is_allocated(struct drm_buddy_block *block) +{ + return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED; +} + +static inline bool +drm_buddy_block_is_free(struct drm_buddy_block *block) +{ + return drm_buddy_block_state(block) == DRM_BUDDY_FREE; +} + +static inline bool +drm_buddy_block_is_split(struct drm_buddy_block *block) +{ + return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT; +} + +static inline u64 +drm_buddy_block_size(struct drm_buddy *mm, + struct drm_buddy_block *block) +{ + return mm->chunk_size << drm_buddy_block_order(block); +} + +int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size); + +void drm_buddy_fini(struct drm_buddy *mm); + +struct drm_buddy_block * +drm_get_buddy(struct drm_buddy_block *block); + +int drm_buddy_alloc_blocks(struct drm_buddy *mm, + u64 start, u64 end, u64 size, + u64 min_page_size, + struct list_head *blocks, + unsigned long flags); + +int drm_buddy_block_trim(struct drm_buddy *mm, + u64 new_size, + struct list_head *blocks); + +void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); + +void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects); + +void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); +void drm_buddy_block_print(struct drm_buddy *mm, + struct drm_buddy_block *block, + struct drm_printer *p); + +#endif diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index cc9de1632dd3..22deb216b59c 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -35,7 +35,7 @@ #include <linux/scatterlist.h> -struct dma_buf_map; +struct iosys_map; void drm_clflush_pages(struct page *pages[], unsigned long num_pages); void drm_clflush_sg(struct sg_table *st); @@ -74,7 +74,7 @@ static inline bool drm_arch_can_wc_memory(void) void drm_memcpy_init_early(void); -void drm_memcpy_from_wc(struct dma_buf_map *dst, - const struct dma_buf_map *src, +void drm_memcpy_from_wc(struct iosys_map *dst, + const struct iosys_map *src, unsigned long len); #endif diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index f07f2fb02e75..4fc8018eddda 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -3,7 +3,7 @@ #ifndef _DRM_CLIENT_H_ #define _DRM_CLIENT_H_ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/lockdep.h> #include <linux/mutex.h> #include <linux/types.h> @@ -144,7 +144,7 @@ struct drm_client_buffer { /** * @map: Virtual address for the buffer */ - struct dma_buf_map map; + struct iosys_map map; /** * @fb: DRM framebuffer @@ -156,7 +156,8 @@ struct drm_client_buffer * drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format); void drm_client_framebuffer_delete(struct drm_client_buffer *buffer); int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect); -int drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map); +int 
drm_client_buffer_vmap(struct drm_client_buffer *buffer, + struct iosys_map *map); void drm_client_buffer_vunmap(struct drm_client_buffer *buffer); int drm_client_modeset_create(struct drm_client_dev *client); diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index b501d0badaea..5166186146f4 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -522,9 +522,9 @@ struct drm_display_info { enum subpixel_order subpixel_order; #define DRM_COLOR_FORMAT_RGB444 (1<<0) -#define DRM_COLOR_FORMAT_YCRCB444 (1<<1) -#define DRM_COLOR_FORMAT_YCRCB422 (1<<2) -#define DRM_COLOR_FORMAT_YCRCB420 (1<<3) +#define DRM_COLOR_FORMAT_YCBCR444 (1<<1) +#define DRM_COLOR_FORMAT_YCBCR422 (1<<2) +#define DRM_COLOR_FORMAT_YCBCR420 (1<<3) /** * @panel_orientation: Read only connector property for built-in panels, @@ -592,10 +592,16 @@ struct drm_display_info { bool rgb_quant_range_selectable; /** - * @edid_hdmi_dc_modes: Mask of supported hdmi deep color modes. Even - * more stuff redundant with @bus_formats. + * @edid_hdmi_rgb444_dc_modes: Mask of supported hdmi deep color modes + * in RGB 4:4:4. Even more stuff redundant with @bus_formats. */ - u8 edid_hdmi_dc_modes; + u8 edid_hdmi_rgb444_dc_modes; + + /** + * @edid_hdmi_ycbcr444_dc_modes: Mask of supported hdmi deep color + * modes in YCbCr 4:4:4. Even more stuff redundant with @bus_formats. + */ + u8 edid_hdmi_ycbcr444_dc_modes; /** * @cea_rev: CEA revision of the HDMI sink. @@ -1136,6 +1142,13 @@ struct drm_connector_funcs { * has been received from a source outside the display driver / device. */ void (*oob_hotplug_event)(struct drm_connector *connector); + + /** + * @debugfs_init: + * + * Allows connectors to create connector-specific debugfs files. + */ + void (*debugfs_init)(struct drm_connector *connector, struct dentry *root); }; /** diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 13eeba2a750a..a70baea0636c 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -285,6 +285,10 @@ struct drm_crtc_state { * Lookup table for converting pixel data after the color conversion * matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not * NULL) is an array of &struct drm_color_lut. + * + * Note that for mostly historical reasons stemming from Xorg heritage, + * this is also used to store the color map (also sometimes color lut, + * CLUT or color palette) for indexed formats like DRM_FORMAT_C8. */ struct drm_property_blob *gamma_lut; @@ -1075,12 +1079,18 @@ struct drm_crtc { /** * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up * by calling drm_mode_crtc_set_gamma_size(). + * + * Note that atomic drivers need to instead use + * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt(). */ uint32_t gamma_size; /** * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and * GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size(). + * + * Note that atomic drivers need to instead use + * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt(). */ uint16_t *gamma_store; @@ -1135,14 +1145,12 @@ struct drm_crtc { */ spinlock_t commit_lock; -#ifdef CONFIG_DEBUG_FS /** * @debugfs_entry: * * Debugfs directory for this CRTC. 
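A minimal sketch of the new &drm_connector_funcs.debugfs_init hook documented above (the file name and counter are hypothetical):

.. code-block:: c

	#include <linux/debugfs.h>

	static u32 example_counter;	/* hypothetical driver statistic */

	static void example_connector_debugfs_init(struct drm_connector *connector,
						   struct dentry *root)
	{
		/* root is the connector's debugfs directory. */
		debugfs_create_u32("example_counter", 0444, root, &example_counter);
	}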
*/ struct dentry *debugfs_entry; -#endif /** * @crc: diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h index cf43561e60fa..ca022e960dcc 100644 --- a/include/drm/drm_dsc.h +++ b/include/drm/drm_dsc.h @@ -8,7 +8,7 @@ #ifndef DRM_DSC_H_ #define DRM_DSC_H_ -#include <drm/drm_dp_helper.h> +#include <drm/dp/drm_dp_helper.h> /* VESA Display Stream Compression DSC 1.2 constants */ #define DSC_NUM_BUF_RANGES 15 diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 18f6c700f6d0..144c495b99c4 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -401,8 +401,8 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, const struct drm_display_mode *mode); void -drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame, - const struct drm_connector_state *conn_state); +drm_hdmi_avi_infoframe_colorimetry(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state); void drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h index b30ed5de0a33..0b0937c0b2f6 100644 --- a/include/drm/drm_format_helper.h +++ b/include/drm/drm_format_helper.h @@ -43,4 +43,8 @@ int drm_fb_blit_toio(void __iomem *dst, unsigned int dst_pitch, uint32_t dst_for const void *vmap, const struct drm_framebuffer *fb, const struct drm_rect *rect); +void drm_fb_xrgb8888_to_mono_reversed(void *dst, unsigned int dst_pitch, const void *src, + const struct drm_framebuffer *fb, + const struct drm_rect *clip); + #endif /* __LINUX_DRM_FORMAT_HELPER_H */ diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 35e7f44c2a75..e2941cee14b6 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -39,7 +39,7 @@ #include <drm/drm_vma_manager.h> -struct dma_buf_map; +struct iosys_map; struct drm_gem_object; /** @@ -139,7 +139,7 @@ struct drm_gem_object_funcs { * * This callback is optional. */ - int (*vmap)(struct drm_gem_object *obj, struct dma_buf_map *map); + int (*vmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @vunmap: @@ -149,7 +149,7 @@ struct drm_gem_object_funcs { * * This callback is optional. */ - void (*vunmap)(struct drm_gem_object *obj, struct dma_buf_map *map); + void (*vunmap)(struct drm_gem_object *obj, struct iosys_map *map); /** * @mmap: diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h index 0b1e2dd2ac3f..6e3319e9001a 100644 --- a/include/drm/drm_gem_atomic_helper.h +++ b/include/drm/drm_gem_atomic_helper.h @@ -3,7 +3,7 @@ #ifndef __DRM_GEM_ATOMIC_HELPER_H__ #define __DRM_GEM_ATOMIC_HELPER_H__ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> @@ -59,7 +59,7 @@ struct drm_shadow_plane_state { * The memory mappings stored in map should be established in the plane's * prepare_fb callback and removed in the cleanup_fb callback. */ - struct dma_buf_map map[DRM_FORMAT_MAX_PLANES]; + struct iosys_map map[DRM_FORMAT_MAX_PLANES]; /** * @data: Address of each framebuffer BO's data @@ -67,7 +67,7 @@ struct drm_shadow_plane_state { * The address of the data stored in each mapping. This is different * for framebuffers with non-zero offset fields. 
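The dma_buf_map to iosys_map rename is mechanical; a short sketch (hypothetical helper) of the vmap/vunmap round trip with the renamed type:

.. code-block:: c

	static int example_fill_bo(struct drm_gem_shmem_object *shmem,
				   const void *data, size_t len)
	{
		struct iosys_map map;
		int ret;

		ret = drm_gem_shmem_vmap(shmem, &map);
		if (ret)
			return ret;

		if (!map.is_iomem)	/* shmem buffers map to system memory */
			memcpy(map.vaddr, data, len);

		drm_gem_shmem_vunmap(shmem, &map);
		return 0;
	}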
*/ - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]; + struct iosys_map data[DRM_FORMAT_MAX_PLANES]; }; /** diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h index adb507a9dbf0..fbda4ce5d5fb 100644 --- a/include/drm/drm_gem_cma_helper.h +++ b/include/drm/drm_gem_cma_helper.h @@ -38,7 +38,8 @@ void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj); void drm_gem_cma_print_info(const struct drm_gem_cma_object *cma_obj, struct drm_printer *p, unsigned int indent); struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_cma_object *cma_obj); -int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, struct dma_buf_map *map); +int drm_gem_cma_vmap(struct drm_gem_cma_object *cma_obj, + struct iosys_map *map); int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *vma); extern const struct vm_operations_struct drm_gem_cma_vm_ops; @@ -106,7 +107,8 @@ static inline struct sg_table *drm_gem_cma_object_get_sg_table(struct drm_gem_ob * Returns: * 0 on success or a negative error code on failure. */ -static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline int drm_gem_cma_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 905727719ead..1091e4fa08cb 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -2,7 +2,7 @@ #define __DRM_GEM_FB_HELPER_H__ #include <linux/dma-buf.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <drm/drm_fourcc.h> @@ -40,10 +40,10 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_gem_fb_vmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES], - struct dma_buf_map data[DRM_FORMAT_MAX_PLANES]); + struct iosys_map map[static DRM_FORMAT_MAX_PLANES], + struct iosys_map data[DRM_FORMAT_MAX_PLANES]); void drm_gem_fb_vunmap(struct drm_framebuffer *fb, - struct dma_buf_map map[static DRM_FORMAT_MAX_PLANES]); + struct iosys_map map[static DRM_FORMAT_MAX_PLANES]); int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 311d66c9cf4b..d0a57853c188 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -113,8 +113,10 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct dma_buf_map *map); +int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); @@ -135,6 +137,8 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem) void 
drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, struct drm_printer *p, unsigned int indent); +extern const struct vm_operations_struct drm_gem_shmem_vm_ops; + /* * GEM object functions */ @@ -226,7 +230,8 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_ * Returns: * 0 on success or a negative error code on failure. */ -static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); @@ -241,7 +246,8 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, struct d * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should * use it as their &drm_gem_object_funcs.vunmap handler. */ -static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map) +static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h index 78040f6cc6f3..4c003b4f173e 100644 --- a/include/drm/drm_gem_ttm_helper.h +++ b/include/drm/drm_gem_ttm_helper.h @@ -10,7 +10,7 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> -struct dma_buf_map; +struct iosys_map; #define drm_gem_ttm_of_gem(gem_obj) \ container_of(gem_obj, struct ttm_buffer_object, base) @@ -18,9 +18,9 @@ struct dma_buf_map; void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, const struct drm_gem_object *gem); int drm_gem_ttm_vmap(struct drm_gem_object *gem, - struct dma_buf_map *map); + struct iosys_map *map); void drm_gem_ttm_vunmap(struct drm_gem_object *gem, - struct dma_buf_map *map); + struct iosys_map *map); int drm_gem_ttm_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma); diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index b4ce27a72773..c083a1d71cf4 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -12,7 +12,7 @@ #include <drm/ttm/ttm_bo_driver.h> #include <linux/container_of.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> struct drm_mode_create_dumb; struct drm_plane; @@ -51,7 +51,7 @@ struct vm_area_struct; */ struct drm_gem_vram_object { struct ttm_buffer_object bo; - struct dma_buf_map map; + struct iosys_map map; /** * @vmap_use_count: @@ -97,8 +97,9 @@ void drm_gem_vram_put(struct drm_gem_vram_object *gbo); s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo); int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag); int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo); -int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); -void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, struct dma_buf_map *map); +int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map); +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, + struct iosys_map *map); int drm_gem_vram_fill_create_dumb(struct drm_file *file, struct drm_device *dev, diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h index 05e194958265..dad2f187b64b 100644 --- a/include/drm/drm_mipi_dbi.h +++ b/include/drm/drm_mipi_dbi.h @@ -130,6 +130,14 @@ struct mipi_dbi_dev { * @dbi: MIPI DBI interface */ struct mipi_dbi dbi; + + /** + * @driver_private: 
Driver private data. + * Necessary for drivers with private data since devm_drm_dev_alloc() + * can't allocate structures that embed a structure which in turn + * embeds drm_device. + */ + void *driver_private; }; static inline struct mipi_dbi_dev *drm_to_mipi_dbi_dev(struct drm_device *drm) @@ -194,7 +202,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb, #ifdef CONFIG_DEBUG_FS void mipi_dbi_debugfs_init(struct drm_minor *minor); #else -#define mipi_dbi_debugfs_init NULL +static inline void mipi_dbi_debugfs_init(struct drm_minor *minor) {} #endif #endif /* __LINUX_MIPI_DBI_H */ diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 91ca575a78de..6b5e01295348 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -918,20 +918,14 @@ struct drm_mode_config { bool async_page_flip; /** - * @allow_fb_modifiers: + * @fb_modifiers_not_supported: * - * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. - * Note that drivers should not set this directly, it is automatically - * set in drm_universal_plane_init(). - * - * IMPORTANT: - * - * If this is set the driver must fill out the full implicit modifier - * information in their &drm_mode_config_funcs.fb_create hook for legacy - * userspace which does not set modifiers. Otherwise the GETFB2 ioctl is - * broken for modifier aware userspace. + * When this flag is set, the DRM device will not expose modifier + * support to userspace. This is only used by legacy drivers that infer + * the buffer layout through heuristics without using modifiers. New + * drivers shall not set this flag. */ - bool allow_fb_modifiers; + bool fb_modifiers_not_supported; /** * @normalize_zpos: diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h index c34a3e8030e1..912f1e415685 100644 --- a/include/drm/drm_mode_object.h +++ b/include/drm/drm_mode_object.h @@ -98,6 +98,10 @@ struct drm_object_properties { * Hence atomic drivers should not use drm_object_property_set_value() * and drm_object_property_get_value() on mutable objects, i.e. those * without the DRM_MODE_PROP_IMMUTABLE flag set. + * + * For atomic drivers the default value of properties is stored in this + * array, so drm_object_property_get_default_value() can be used to + * retrieve it. 
*/ uint64_t values[DRM_OBJECT_MAX_PROPERTY]; }; @@ -126,6 +130,9 @@ int drm_object_property_set_value(struct drm_mode_object *obj, int drm_object_property_get_value(struct drm_mode_object *obj, struct drm_property *property, uint64_t *value); +int drm_object_property_get_default_value(struct drm_mode_object *obj, + struct drm_property *property, + uint64_t *val); void drm_object_attach_property(struct drm_mode_object *obj, struct drm_property *property, diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index 29ba4adf0c53..2fa6b2c33b3f 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -466,6 +466,8 @@ void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags); int of_get_drm_display_mode(struct device_node *np, struct drm_display_mode *dmode, u32 *bus_flags, int index); +int of_get_drm_panel_display_mode(struct device_node *np, + struct drm_display_mode *dmode, u32 *bus_flags); #else static inline int of_get_drm_display_mode(struct device_node *np, struct drm_display_mode *dmode, @@ -473,6 +475,12 @@ static inline int of_get_drm_display_mode(struct device_node *np, { return -EINVAL; } + +static inline int of_get_drm_panel_display_mode(struct device_node *np, + struct drm_display_mode *dmode, u32 *bus_flags) +{ + return -EINVAL; +} #endif void drm_mode_set_name(struct drm_display_mode *mode); diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h index b84693fbd2b5..ec4f543c3d95 100644 --- a/include/drm/drm_modeset_lock.h +++ b/include/drm/drm_modeset_lock.h @@ -34,6 +34,7 @@ struct drm_modeset_lock; * struct drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) * @ww_ctx: base acquire ctx * @contended: used internally for -EDEADLK handling + * @stack_depot: used internally for contention debugging * @locked: list of held locks * @trylock_only: trylock mode used in atomic contexts/panic notifiers * @interruptible: whether interruptible locking should be used. diff --git a/include/drm/drm_module.h b/include/drm/drm_module.h new file mode 100644 index 000000000000..4db1ae03d9a5 --- /dev/null +++ b/include/drm/drm_module.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_MODULE_H +#define DRM_MODULE_H + +#include <linux/pci.h> +#include <linux/platform_device.h> + +#include <drm/drm_drv.h> + +/** + * DOC: overview + * + * This library provides helpers for registering DRM drivers during module + * initialization and shutdown. The provided helpers act like bus-specific + * module helpers, such as module_pci_driver(), but respect additional + * parameters that control DRM driver registration. + * + * Below is an example of initializing a DRM driver for a device on the + * PCI bus. + * + * .. code-block:: c + * + * struct pci_driver my_pci_drv = { + * }; + * + * drm_module_pci_driver(my_pci_drv); + * + * The generated code will test if DRM drivers are enabled and register + * the PCI driver my_pci_drv. For more complex module initialization, you + * can still use module_init() and module_exit() in your driver. + */ + +/* + * PCI drivers + */ + +static inline int __init drm_pci_register_driver(struct pci_driver *pci_drv) +{ + if (drm_firmware_drivers_only()) + return -ENODEV; + + return pci_register_driver(pci_drv); +} + +/** + * drm_module_pci_driver - Register a DRM driver for PCI-based devices + * @__pci_drv: the PCI driver structure + * + * Registers a DRM driver for devices on the PCI bus. The helper + * macro behaves like module_pci_driver() but tests the state of + * drm_firmware_drivers_only(). 
For more complex module initialization, + * use module_init() and module_exit() directly. + * + * Each module may only use this macro once. Calling it replaces + * module_init() and module_exit(). + */ +#define drm_module_pci_driver(__pci_drv) \ + module_driver(__pci_drv, drm_pci_register_driver, pci_unregister_driver) + +static inline int __init +drm_pci_register_driver_if_modeset(struct pci_driver *pci_drv, int modeset) +{ + if (drm_firmware_drivers_only() && modeset == -1) + return -ENODEV; + if (modeset == 0) + return -ENODEV; + + return pci_register_driver(pci_drv); +} + +static inline void __exit +drm_pci_unregister_driver_if_modeset(struct pci_driver *pci_drv, int modeset) +{ + pci_unregister_driver(pci_drv); +} + +/** + * drm_module_pci_driver_if_modeset - Register a DRM driver for PCI-based devices + * @__pci_drv: the PCI driver structure + * @__modeset: an additional parameter that can disable the driver + * + * This macro is deprecated and only provided for existing drivers. For + * new drivers, use drm_module_pci_driver(). + * + * Registers a DRM driver for devices on the PCI bus. The helper macro + * behaves like drm_module_pci_driver() with an additional driver-specific + * flag. If __modeset is 0, the driver has been disabled; if __modeset is + * -1, the driver state depends on the global DRM state. For all other + * values, the PCI driver has been enabled. The default should be -1. + */ +#define drm_module_pci_driver_if_modeset(__pci_drv, __modeset) \ + module_driver(__pci_drv, drm_pci_register_driver_if_modeset, \ + drm_pci_unregister_driver_if_modeset, __modeset) + +/* + * Platform drivers + */ + +static inline int __init +drm_platform_driver_register(struct platform_driver *platform_drv) +{ + if (drm_firmware_drivers_only()) + return -ENODEV; + + return platform_driver_register(platform_drv); +} + +/** + * drm_module_platform_driver - Register a DRM driver for platform devices + * @__platform_drv: the platform driver structure + * + * Registers a DRM driver for devices on the platform bus. The helper + * macro behaves like module_platform_driver() but tests the state of + * drm_firmware_drivers_only(). For more complex module initialization, + * use module_init() and module_exit() directly. + * + * Each module may only use this macro once. Calling it replaces + * module_init() and module_exit(). + */ +#define drm_module_platform_driver(__platform_drv) \ + module_driver(__platform_drv, drm_platform_driver_register, \ + platform_driver_unregister) + +#endif diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index 4602f833eb51..1ba2d424a53f 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -29,6 +29,7 @@ #include <linux/list.h> struct backlight_device; +struct dentry; struct device_node; struct drm_connector; struct drm_device; @@ -125,6 +126,13 @@ struct drm_panel_funcs { */ int (*get_timings)(struct drm_panel *panel, unsigned int num_timings, struct display_timing *timings); + + /** + * @debugfs_init: + * + * Allows panels to create panel-specific debugfs files. + */ + void (*debugfs_init)(struct drm_panel *panel, struct dentry *root); }; /** diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 0c1102dc4d88..2628c7cde2da 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -516,7 +516,7 @@ struct drm_plane_funcs { * This optional hook is used for the DRM to determine if the given * format/modifier combination is valid for the plane. 
This allows the * DRM to generate the correct format bitmask (which formats apply to - * which modifier), and to valdiate modifiers at atomic_check time. + * which modifier), and to validate modifiers at atomic_check time. * * If not present, then any modifier in the plane's modifier * list is allowed with any of the plane's formats. @@ -803,6 +803,9 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev, * * The @drm_plane_funcs.destroy hook must be NULL. * + * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier may set + * @format_modifiers to NULL. The plane will advertise the linear modifier. + * * Returns: * Pointer to new plane, or ERR_PTR on failure. */ diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h index 54f2c58305d2..2a1d01e5b56b 100644 --- a/include/drm/drm_prime.h +++ b/include/drm/drm_prime.h @@ -54,7 +54,7 @@ struct device; struct dma_buf_export_info; struct dma_buf; struct dma_buf_attachment; -struct dma_buf_map; +struct iosys_map; enum dma_data_direction; @@ -83,8 +83,8 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir); -int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map); -void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map); +int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map); +void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map); int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma); diff --git a/include/drm/drm_privacy_screen_driver.h b/include/drm/drm_privacy_screen_driver.h index 24591b607675..4ef246d5706f 100644 --- a/include/drm/drm_privacy_screen_driver.h +++ b/include/drm/drm_privacy_screen_driver.h @@ -73,10 +73,21 @@ struct drm_privacy_screen { * for more info. 
*/ enum drm_privacy_screen_status hw_state; + /** + * @drvdata: Private data owned by the privacy screen provider + */ + void *drvdata; }; +static inline +void *drm_privacy_screen_get_drvdata(struct drm_privacy_screen *priv) +{ + return priv->drvdata; +} + struct drm_privacy_screen *drm_privacy_screen_register( - struct device *parent, const struct drm_privacy_screen_ops *ops); + struct device *parent, const struct drm_privacy_screen_ops *ops, + void *data); void drm_privacy_screen_unregister(struct drm_privacy_screen *priv); void drm_privacy_screen_call_notifier_chain(struct drm_privacy_screen *priv); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index bbc22fad8d80..944f83ef9f2e 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -457,13 +457,14 @@ struct drm_gpu_scheduler { atomic_t _score; bool ready; bool free_guilty; + struct device *dev; }; int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_backend_ops *ops, uint32_t hw_submission, unsigned hang_limit, long timeout, struct workqueue_struct *timeout_wq, - atomic_t *score, const char *name); + atomic_t *score, const char *name, struct device *dev); void drm_sched_fini(struct drm_gpu_scheduler *sched); int drm_sched_job_init(struct drm_sched_job *job, diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index baf3d1d3d566..533890dc9da1 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -666,6 +666,12 @@ INTEL_VGA_DEVICE(0x46C2, info), \ INTEL_VGA_DEVICE(0x46C3, info) +/* ADL-N */ +#define INTEL_ADLN_IDS(info) \ + INTEL_VGA_DEVICE(0x46D0, info), \ + INTEL_VGA_DEVICE(0x46D1, info), \ + INTEL_VGA_DEVICE(0x46D2, info) + /* RPL-S */ #define INTEL_RPLS_IDS(info) \ INTEL_VGA_DEVICE(0xA780, info), \ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c17b2df9178b..155b19ee12fb 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -47,7 +47,7 @@ struct ttm_global; struct ttm_device; -struct dma_buf_map; +struct iosys_map; struct drm_mm_node; @@ -481,17 +481,17 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); * ttm_bo_vmap * * @bo: The buffer object. - * @map: pointer to a struct dma_buf_map representing the map. + * @map: pointer to a struct iosys_map representing the map. * * Sets up a kernel virtual mapping, using ioremap or vmap to the * data in the buffer object. The parameter @map returns the virtual - * address as struct dma_buf_map. Unmap the buffer with ttm_bo_vunmap(). + * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap(). * * Returns * -ENOMEM: Out of memory. * -EINVAL: Invalid range. */ -int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); +int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map); /** * ttm_bo_vunmap @@ -501,7 +501,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); * * Unmaps a kernel map set up by ttm_bo_vmap(). */ -void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map); +void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map); /** * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object. 
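The dma_buf_map to iosys_map conversion that runs through the DRM and TTM headers above is a pure type rename: the mapping helpers keep their calling convention, and the map still describes either a system-memory or an I/O-memory mapping. As a minimal sketch of the pattern (the helper name is hypothetical, and it assumes the buffer object is already reserved and pinned), an upload through ttm_bo_vmap() would look roughly like this:

    #include <linux/iosys-map.h>
    #include <drm/ttm/ttm_bo_api.h>

    /*
     * Hypothetical helper: copy a CPU buffer into a TTM buffer object.
     * Assumes the caller holds the BO's reservation and has pinned it,
     * so the backing storage cannot move while it is mapped.
     */
    static int example_upload_to_bo(struct ttm_buffer_object *bo,
                                    const void *src, size_t len)
    {
            struct iosys_map map;   /* was: struct dma_buf_map */
            int ret;

            /* Sets up ioremap or vmap, depending on the BO's placement. */
            ret = ttm_bo_vmap(bo, &map);
            if (ret)
                    return ret;

            /* The accessor hides whether the map is I/O or system memory. */
            iosys_map_memcpy_to(&map, src, len);

            ttm_bo_vunmap(bo, &map);
            return 0;
    }

The renamed GEM helpers (drm_gem_shmem_vmap(), drm_gem_vram_vmap(), drm_gem_dmabuf_vmap() and friends) follow the same pattern; only their parameter type changed.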
diff --git a/include/drm/ttm/ttm_kmap_iter.h b/include/drm/ttm/ttm_kmap_iter.h index 8bb00fd39d6c..cc5c09a211b4 100644 --- a/include/drm/ttm/ttm_kmap_iter.h +++ b/include/drm/ttm/ttm_kmap_iter.h @@ -8,7 +8,7 @@ #include <linux/types.h> struct ttm_kmap_iter; -struct dma_buf_map; +struct iosys_map; /** * struct ttm_kmap_iter_ops - Ops structure for a struct @@ -24,22 +24,22 @@ struct ttm_kmap_iter_ops { * kmap_local semantics. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. - * @dmap: The struct dma_buf_map holding the virtual address after + * @dmap: The struct iosys_map holding the virtual address after * the operation. * @i: The location within the resource to map. PAGE_SIZE granularity. */ void (*map_local)(struct ttm_kmap_iter *res_iter, - struct dma_buf_map *dmap, pgoff_t i); + struct iosys_map *dmap, pgoff_t i); /** * unmap_local() - Unmap a PAGE_SIZE part of the resource previously * mapped using kmap_local. * @res_iter: Pointer to the struct ttm_kmap_iter representing * the resource. - * @dmap: The struct dma_buf_map holding the virtual address after + * @dmap: The struct iosys_map holding the virtual address after * the operation. */ void (*unmap_local)(struct ttm_kmap_iter *res_iter, - struct dma_buf_map *dmap); + struct iosys_map *dmap); bool maps_tt; }; diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 5952051091cd..0f1d55262c68 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -27,7 +27,7 @@ #include <linux/types.h> #include <linux/mutex.h> -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/dma-fence.h> #include <drm/drm_print.h> #include <drm/ttm/ttm_caching.h> @@ -41,7 +41,7 @@ struct ttm_resource; struct ttm_place; struct ttm_buffer_object; struct ttm_placement; -struct dma_buf_map; +struct iosys_map; struct io_mapping; struct sg_table; struct scatterlist; @@ -105,11 +105,11 @@ struct ttm_resource_manager_func { * @use_type: The memory type is enabled. * @use_tt: If a TT object should be used for the backing store. * @size: Size of the managed region. + * @bdev: ttm device this manager belongs to * @func: structure pointer implementing the range manager. See above * @move_lock: lock for move fence - * static information. bdev::driver::io_mem_free is never used. - * @lru: The lru list for this memory type. * @move: The fence of the last pipelined move operation. + * @lru: The lru list for this memory type. * * This structure is used to identify and manage memory types for a device. */ @@ -119,20 +119,26 @@ struct ttm_resource_manager { */ bool use_type; bool use_tt; + struct ttm_device *bdev; uint64_t size; const struct ttm_resource_manager_func *func; spinlock_t move_lock; /* - * Protected by the global->lru_lock. + * Protected by @move_lock. */ + struct dma_fence *move; + /* + * Protected by the bdev->lru_lock. + */ struct list_head lru[TTM_MAX_BO_PRIORITY]; - /* - * Protected by @move_lock. + /** + * @usage: How much of the resources are used, protected by the + * bdev->lru_lock. */ - struct dma_fence *move; + uint64_t usage; }; /** @@ -160,6 +166,7 @@ struct ttm_bus_placement { * @mem_type: Resource type of the allocation. * @placement: Placement flags. * @bus: Placement on io bus accessible to the CPU + * @bo: weak reference to the BO, protected by ttm_device::lru_lock * * Structure indicating the placement and space resources used by a * buffer object. 
@@ -170,6 +177,7 @@ struct ttm_resource { uint32_t mem_type; uint32_t placement; struct ttm_bus_placement bus; + struct ttm_buffer_object *bo; }; /** @@ -207,7 +215,7 @@ struct ttm_kmap_iter_iomap { */ struct ttm_kmap_iter_linear_io { struct ttm_kmap_iter base; - struct dma_buf_map dmap; + struct iosys_map dmap; bool needs_unmap; }; @@ -261,19 +269,26 @@ ttm_resource_manager_cleanup(struct ttm_resource_manager *man) void ttm_resource_init(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource *res); +void ttm_resource_fini(struct ttm_resource_manager *man, + struct ttm_resource *res); + int ttm_resource_alloc(struct ttm_buffer_object *bo, const struct ttm_place *place, struct ttm_resource **res); void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res); bool ttm_resource_compat(struct ttm_resource *res, struct ttm_placement *placement); +void ttm_resource_set_bo(struct ttm_resource *res, + struct ttm_buffer_object *bo); void ttm_resource_manager_init(struct ttm_resource_manager *man, - unsigned long p_size); + struct ttm_device *bdev, + uint64_t size); int ttm_resource_manager_evict_all(struct ttm_device *bdev, struct ttm_resource_manager *man); +uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man); void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h index d3871c63308b..f53f8b16883d 100644 --- a/include/dt-bindings/clock/alphascale,asm9260.h +++ b/include/dt-bindings/clock/alphascale,asm9260.h @@ -55,7 +55,7 @@ #define CLKID_AHB_I2S1 45 #define CLKID_AHB_MAC1 46 -/* devider */ +/* divider */ #define CLKID_SYS_CPU 47 #define CLKID_SYS_AHB 48 #define CLKID_SYS_I2S0M 49 diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index 98e1b2ab6403..8498c0cd95fe 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -35,6 +35,7 @@ #define PMC_AUDIOIOPLL (PMC_MAIN + 7) #define PMC_ETHPLL (PMC_MAIN + 8) #define PMC_CPU (PMC_MAIN + 9) +#define PMC_MCK1 (PMC_MAIN + 10) #ifndef AT91_PMC_MOSCS #define AT91_PMC_MOSCS 0 /* MOSCS Flag */ diff --git a/include/dt-bindings/clock/axis,artpec6-clkctrl.h b/include/dt-bindings/clock/axis,artpec6-clkctrl.h index b1f4971642e6..14e424a7c08c 100644 --- a/include/dt-bindings/clock/axis,artpec6-clkctrl.h +++ b/include/dt-bindings/clock/axis,artpec6-clkctrl.h @@ -2,7 +2,7 @@ /* * ARTPEC-6 clock controller indexes * - * Copyright 2016 Axis Comunications AB. + * Copyright 2016 Axis Communications AB. */ #ifndef DT_BINDINGS_CLK_ARTPEC6_CLKCTRL_H diff --git a/include/dt-bindings/clock/boston-clock.h b/include/dt-bindings/clock/boston-clock.h index a6f009821137..38140fa87b09 100644 --- a/include/dt-bindings/clock/boston-clock.h +++ b/include/dt-bindings/clock/boston-clock.h @@ -1,7 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2016 Imagination Technologies - * - * SPDX-License-Identifier: GPL-2.0 */ #ifndef __DT_BINDINGS_CLOCK_BOSTON_CLOCK_H__ diff --git a/include/dt-bindings/clock/fsd-clk.h b/include/dt-bindings/clock/fsd-clk.h new file mode 100644 index 000000000000..c8a2af1dd1ad --- /dev/null +++ b/include/dt-bindings/clock/fsd-clk.h @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2017 - 2022: Samsung Electronics Co., Ltd. + * https://www.samsung.com + * Copyright (c) 2017-2022 Tesla, Inc. 
+ * https://www.tesla.com + * + * The constants defined in this header are used in the dts files + * and the fsd platform driver. + */ + +#ifndef _DT_BINDINGS_CLOCK_FSD_H +#define _DT_BINDINGS_CLOCK_FSD_H + +/* CMU */ +#define DOUT_CMU_PLL_SHARED0_DIV4 1 +#define DOUT_CMU_PERIC_SHARED1DIV36 2 +#define DOUT_CMU_PERIC_SHARED0DIV3_TBUCLK 3 +#define DOUT_CMU_PERIC_SHARED0DIV20 4 +#define DOUT_CMU_PERIC_SHARED1DIV4_DMACLK 5 +#define DOUT_CMU_PLL_SHARED0_DIV6 6 +#define DOUT_CMU_FSYS0_SHARED1DIV4 7 +#define DOUT_CMU_FSYS0_SHARED0DIV4 8 +#define DOUT_CMU_FSYS1_SHARED0DIV8 9 +#define DOUT_CMU_FSYS1_SHARED0DIV4 10 +#define CMU_CPUCL_SWITCH_GATE 11 +#define DOUT_CMU_IMEM_TCUCLK 12 +#define DOUT_CMU_IMEM_ACLK 13 +#define DOUT_CMU_IMEM_DMACLK 14 +#define GAT_CMU_FSYS0_SHARED0DIV4 15 +#define CMU_NR_CLK 16 + +/* PERIC */ +#define PERIC_SCLK_UART0 1 +#define PERIC_PCLK_UART0 2 +#define PERIC_SCLK_UART1 3 +#define PERIC_PCLK_UART1 4 +#define PERIC_DMA0_IPCLKPORT_ACLK 5 +#define PERIC_DMA1_IPCLKPORT_ACLK 6 +#define PERIC_PWM0_IPCLKPORT_I_PCLK_S0 7 +#define PERIC_PWM1_IPCLKPORT_I_PCLK_S0 8 +#define PERIC_PCLK_SPI0 9 +#define PERIC_SCLK_SPI0 10 +#define PERIC_PCLK_SPI1 11 +#define PERIC_SCLK_SPI1 12 +#define PERIC_PCLK_SPI2 13 +#define PERIC_SCLK_SPI2 14 +#define PERIC_PCLK_TDM0 15 +#define PERIC_PCLK_HSI2C0 16 +#define PERIC_PCLK_HSI2C1 17 +#define PERIC_PCLK_HSI2C2 18 +#define PERIC_PCLK_HSI2C3 19 +#define PERIC_PCLK_HSI2C4 20 +#define PERIC_PCLK_HSI2C5 21 +#define PERIC_PCLK_HSI2C6 22 +#define PERIC_PCLK_HSI2C7 23 +#define PERIC_MCAN0_IPCLKPORT_CCLK 24 +#define PERIC_MCAN0_IPCLKPORT_PCLK 25 +#define PERIC_MCAN1_IPCLKPORT_CCLK 26 +#define PERIC_MCAN1_IPCLKPORT_PCLK 27 +#define PERIC_MCAN2_IPCLKPORT_CCLK 28 +#define PERIC_MCAN2_IPCLKPORT_PCLK 29 +#define PERIC_MCAN3_IPCLKPORT_CCLK 30 +#define PERIC_MCAN3_IPCLKPORT_PCLK 31 +#define PERIC_PCLK_ADCIF 32 +#define PERIC_EQOS_TOP_IPCLKPORT_CLK_PTP_REF_I 33 +#define PERIC_EQOS_TOP_IPCLKPORT_ACLK_I 34 +#define PERIC_EQOS_TOP_IPCLKPORT_HCLK_I 35 +#define PERIC_EQOS_TOP_IPCLKPORT_RGMII_CLK_I 36 +#define PERIC_EQOS_TOP_IPCLKPORT_CLK_RX_I 37 +#define PERIC_BUS_D_PERIC_IPCLKPORT_EQOSCLK 38 +#define PERIC_BUS_P_PERIC_IPCLKPORT_EQOSCLK 39 +#define PERIC_HCLK_TDM0 40 +#define PERIC_PCLK_TDM1 41 +#define PERIC_HCLK_TDM1 42 +#define PERIC_EQOS_PHYRXCLK_MUX 43 +#define PERIC_EQOS_PHYRXCLK 44 +#define PERIC_DOUT_RGMII_CLK 45 +#define PERIC_NR_CLK 46 + +/* FSYS0 */ +#define UFS0_MPHY_REFCLK_IXTAL24 1 +#define UFS0_MPHY_REFCLK_IXTAL26 2 +#define UFS1_MPHY_REFCLK_IXTAL24 3 +#define UFS1_MPHY_REFCLK_IXTAL26 4 +#define UFS0_TOP0_HCLK_BUS 5 +#define UFS0_TOP0_ACLK 6 +#define UFS0_TOP0_CLK_UNIPRO 7 +#define UFS0_TOP0_FMP_CLK 8 +#define UFS1_TOP1_HCLK_BUS 9 +#define UFS1_TOP1_ACLK 10 +#define UFS1_TOP1_CLK_UNIPRO 11 +#define UFS1_TOP1_FMP_CLK 12 +#define PCIE_SUBCTRL_INST0_DBI_ACLK_SOC 13 +#define PCIE_SUBCTRL_INST0_AUX_CLK_SOC 14 +#define PCIE_SUBCTRL_INST0_MSTR_ACLK_SOC 15 +#define PCIE_SUBCTRL_INST0_SLV_ACLK_SOC 16 +#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_PTP_REF_I 17 +#define FSYS0_EQOS_TOP0_IPCLKPORT_ACLK_I 18 +#define FSYS0_EQOS_TOP0_IPCLKPORT_HCLK_I 19 +#define FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 20 +#define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 21 +#define FSYS0_DOUT_FSYS0_PERIBUS_GRP 22 +#define FSYS0_NR_CLK 23 + +/* FSYS1 */ +#define PCIE_LINK0_IPCLKPORT_DBI_ACLK 1 +#define PCIE_LINK0_IPCLKPORT_AUX_ACLK 2 +#define PCIE_LINK0_IPCLKPORT_MSTR_ACLK 3 +#define PCIE_LINK0_IPCLKPORT_SLV_ACLK 4 +#define PCIE_LINK1_IPCLKPORT_DBI_ACLK 5 +#define PCIE_LINK1_IPCLKPORT_AUX_ACLK 6 +#define 
PCIE_LINK1_IPCLKPORT_MSTR_ACLK 7 +#define PCIE_LINK1_IPCLKPORT_SLV_ACLK 8 +#define FSYS1_NR_CLK 9 + +/* IMEM */ +#define IMEM_DMA0_IPCLKPORT_ACLK 1 +#define IMEM_DMA1_IPCLKPORT_ACLK 2 +#define IMEM_WDT0_IPCLKPORT_PCLK 3 +#define IMEM_WDT1_IPCLKPORT_PCLK 4 +#define IMEM_WDT2_IPCLKPORT_PCLK 5 +#define IMEM_MCT_PCLK 6 +#define IMEM_TMU_CPU0_IPCLKPORT_I_CLK_TS 7 +#define IMEM_TMU_CPU2_IPCLKPORT_I_CLK_TS 8 +#define IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 9 +#define IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 10 +#define IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 11 +#define IMEM_NR_CLK 12 + +/* MFC */ +#define MFC_MFC_IPCLKPORT_ACLK 1 +#define MFC_NR_CLK 2 + +/* CAM_CSI */ +#define CAM_CSI0_0_IPCLKPORT_I_ACLK 1 +#define CAM_CSI0_1_IPCLKPORT_I_ACLK 2 +#define CAM_CSI0_2_IPCLKPORT_I_ACLK 3 +#define CAM_CSI0_3_IPCLKPORT_I_ACLK 4 +#define CAM_CSI1_0_IPCLKPORT_I_ACLK 5 +#define CAM_CSI1_1_IPCLKPORT_I_ACLK 6 +#define CAM_CSI1_2_IPCLKPORT_I_ACLK 7 +#define CAM_CSI1_3_IPCLKPORT_I_ACLK 8 +#define CAM_CSI2_0_IPCLKPORT_I_ACLK 9 +#define CAM_CSI2_1_IPCLKPORT_I_ACLK 10 +#define CAM_CSI2_2_IPCLKPORT_I_ACLK 11 +#define CAM_CSI2_3_IPCLKPORT_I_ACLK 12 +#define CAM_CSI_NR_CLK 13 + +#endif /*_DT_BINDINGS_CLOCK_FSD_H */ diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 87f5ad5df72f..f0819d66b230 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -32,7 +32,7 @@ #define MMP2_CLK_I2S0 31 #define MMP2_CLK_I2S1 32 -/* apb periphrals */ +/* apb peripherals */ #define MMP2_CLK_TWSI0 60 #define MMP2_CLK_TWSI1 61 #define MMP2_CLK_TWSI2 62 @@ -60,7 +60,7 @@ #define MMP3_CLK_THERMAL2 84 #define MMP3_CLK_THERMAL3 85 -/* axi periphrals */ +/* axi peripherals */ #define MMP2_CLK_SDH0 101 #define MMP2_CLK_SDH1 102 #define MMP2_CLK_SDH2 103 diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h index caf90436b848..db2b41f1b127 100644 --- a/include/dt-bindings/clock/marvell,pxa168.h +++ b/include/dt-bindings/clock/marvell,pxa168.h @@ -23,7 +23,7 @@ #define PXA168_CLK_UART_PLL 27 #define PXA168_CLK_USB_PLL 28 -/* apb periphrals */ +/* apb peripherals */ #define PXA168_CLK_TWSI0 60 #define PXA168_CLK_TWSI1 61 #define PXA168_CLK_TWSI2 62 @@ -45,7 +45,7 @@ #define PXA168_CLK_SSP4 78 #define PXA168_CLK_TIMER 79 -/* axi periphrals */ +/* axi peripherals */ #define PXA168_CLK_DFC 100 #define PXA168_CLK_SDH0 101 #define PXA168_CLK_SDH1 102 diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h index 7bf46238946e..c9018ab354d0 100644 --- a/include/dt-bindings/clock/marvell,pxa910.h +++ b/include/dt-bindings/clock/marvell,pxa910.h @@ -23,7 +23,7 @@ #define PXA910_CLK_UART_PLL 27 #define PXA910_CLK_USB_PLL 28 -/* apb periphrals */ +/* apb peripherals */ #define PXA910_CLK_TWSI0 60 #define PXA910_CLK_TWSI1 61 #define PXA910_CLK_TWSI2 62 @@ -43,7 +43,7 @@ #define PXA910_CLK_TIMER0 76 #define PXA910_CLK_TIMER1 77 -/* axi periphrals */ +/* axi peripherals */ #define PXA910_CLK_DFC 100 #define PXA910_CLK_SDH0 101 #define PXA910_CLK_SDH1 102 diff --git a/include/dt-bindings/clock/microchip,mpfs-clock.h b/include/dt-bindings/clock/microchip,mpfs-clock.h new file mode 100644 index 000000000000..73f2a9324857 --- /dev/null +++ b/include/dt-bindings/clock/microchip,mpfs-clock.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Daire McNamara,<daire.mcnamara@microchip.com> + * Copyright (C) 2020 Microchip Technology Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ +#define _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ + +#define CLK_CPU 0 +#define CLK_AXI 1 +#define CLK_AHB 2 + +#define CLK_ENVM 3 +#define CLK_MAC0 4 +#define CLK_MAC1 5 +#define CLK_MMC 6 +#define CLK_TIMER 7 +#define CLK_MMUART0 8 +#define CLK_MMUART1 9 +#define CLK_MMUART2 10 +#define CLK_MMUART3 11 +#define CLK_MMUART4 12 +#define CLK_SPI0 13 +#define CLK_SPI1 14 +#define CLK_I2C0 15 +#define CLK_I2C1 16 +#define CLK_CAN0 17 +#define CLK_CAN1 18 +#define CLK_USB 19 +#define CLK_RESERVED 20 +#define CLK_RTC 21 +#define CLK_QSPI 22 +#define CLK_GPIO0 23 +#define CLK_GPIO1 24 +#define CLK_GPIO2 25 +#define CLK_DDRC 26 +#define CLK_FIC0 27 +#define CLK_FIC1 28 +#define CLK_FIC2 29 +#define CLK_FIC3 30 +#define CLK_ATHENA 31 +#define CLK_CFM 32 + +#endif /* _DT_BINDINGS_CLK_MICROCHIP_MPFS_H_ */ diff --git a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h index f21522605b94..3e0a9b68933d 100644 --- a/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h +++ b/include/dt-bindings/clock/nuvoton,npcm7xx-clock.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Nuvoton NPCM7xx Clock Generator binding - * clock binding number for all clocks supportted by nuvoton,npcm7xx-clk + * clock binding number for all clocks supported by nuvoton,npcm7xx-clk * * Copyright (C) 2018 Nuvoton Technologies tali.perry@nuvoton.com * diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h index 90c0f3dc1ba1..d9d7b8b4f426 100644 --- a/include/dt-bindings/clock/r9a06g032-sysctrl.h +++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h @@ -74,6 +74,7 @@ #define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */ #define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */ #define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */ +#define R9A06G032_CLK_WATCHDOG 82 /* AKA CLK_REF_SYNC_D8 */ #define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */ #define R9A06G032_HCLK_CAN0 85 #define R9A06G032_HCLK_CAN1 86 diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h new file mode 100644 index 000000000000..43f4dbda872c --- /dev/null +++ b/include/dt-bindings/clock/r9a07g054-cpg.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2022 Renesas Electronics Corp. 
+ */ +#ifndef __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ +#define __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R9A07G054 CPG Core Clocks */ +#define R9A07G054_CLK_I 0 +#define R9A07G054_CLK_I2 1 +#define R9A07G054_CLK_G 2 +#define R9A07G054_CLK_S0 3 +#define R9A07G054_CLK_S1 4 +#define R9A07G054_CLK_SPI0 5 +#define R9A07G054_CLK_SPI1 6 +#define R9A07G054_CLK_SD0 7 +#define R9A07G054_CLK_SD1 8 +#define R9A07G054_CLK_M0 9 +#define R9A07G054_CLK_M1 10 +#define R9A07G054_CLK_M2 11 +#define R9A07G054_CLK_M3 12 +#define R9A07G054_CLK_M4 13 +#define R9A07G054_CLK_HP 14 +#define R9A07G054_CLK_TSU 15 +#define R9A07G054_CLK_ZT 16 +#define R9A07G054_CLK_P0 17 +#define R9A07G054_CLK_P1 18 +#define R9A07G054_CLK_P2 19 +#define R9A07G054_CLK_AT 20 +#define R9A07G054_OSCCLK 21 +#define R9A07G054_CLK_P0_DIV2 22 +#define R9A07G054_CLK_DRP_M 23 +#define R9A07G054_CLK_DRP_D 24 +#define R9A07G054_CLK_DRP_A 25 + +/* R9A07G054 Module Clocks */ +#define R9A07G054_CA55_SCLK 0 +#define R9A07G054_CA55_PCLK 1 +#define R9A07G054_CA55_ATCLK 2 +#define R9A07G054_CA55_GICCLK 3 +#define R9A07G054_CA55_PERICLK 4 +#define R9A07G054_CA55_ACLK 5 +#define R9A07G054_CA55_TSCLK 6 +#define R9A07G054_GIC600_GICCLK 7 +#define R9A07G054_IA55_CLK 8 +#define R9A07G054_IA55_PCLK 9 +#define R9A07G054_MHU_PCLK 10 +#define R9A07G054_SYC_CNT_CLK 11 +#define R9A07G054_DMAC_ACLK 12 +#define R9A07G054_DMAC_PCLK 13 +#define R9A07G054_OSTM0_PCLK 14 +#define R9A07G054_OSTM1_PCLK 15 +#define R9A07G054_OSTM2_PCLK 16 +#define R9A07G054_MTU_X_MCK_MTU3 17 +#define R9A07G054_POE3_CLKM_POE 18 +#define R9A07G054_GPT_PCLK 19 +#define R9A07G054_POEG_A_CLKP 20 +#define R9A07G054_POEG_B_CLKP 21 +#define R9A07G054_POEG_C_CLKP 22 +#define R9A07G054_POEG_D_CLKP 23 +#define R9A07G054_WDT0_PCLK 24 +#define R9A07G054_WDT0_CLK 25 +#define R9A07G054_WDT1_PCLK 26 +#define R9A07G054_WDT1_CLK 27 +#define R9A07G054_WDT2_PCLK 28 +#define R9A07G054_WDT2_CLK 29 +#define R9A07G054_SPI_CLK2 30 +#define R9A07G054_SPI_CLK 31 +#define R9A07G054_SDHI0_IMCLK 32 +#define R9A07G054_SDHI0_IMCLK2 33 +#define R9A07G054_SDHI0_CLK_HS 34 +#define R9A07G054_SDHI0_ACLK 35 +#define R9A07G054_SDHI1_IMCLK 36 +#define R9A07G054_SDHI1_IMCLK2 37 +#define R9A07G054_SDHI1_CLK_HS 38 +#define R9A07G054_SDHI1_ACLK 39 +#define R9A07G054_GPU_CLK 40 +#define R9A07G054_GPU_AXI_CLK 41 +#define R9A07G054_GPU_ACE_CLK 42 +#define R9A07G054_ISU_ACLK 43 +#define R9A07G054_ISU_PCLK 44 +#define R9A07G054_H264_CLK_A 45 +#define R9A07G054_H264_CLK_P 46 +#define R9A07G054_CRU_SYSCLK 47 +#define R9A07G054_CRU_VCLK 48 +#define R9A07G054_CRU_PCLK 49 +#define R9A07G054_CRU_ACLK 50 +#define R9A07G054_MIPI_DSI_PLLCLK 51 +#define R9A07G054_MIPI_DSI_SYSCLK 52 +#define R9A07G054_MIPI_DSI_ACLK 53 +#define R9A07G054_MIPI_DSI_PCLK 54 +#define R9A07G054_MIPI_DSI_VCLK 55 +#define R9A07G054_MIPI_DSI_LPCLK 56 +#define R9A07G054_LCDC_CLK_A 57 +#define R9A07G054_LCDC_CLK_P 58 +#define R9A07G054_LCDC_CLK_D 59 +#define R9A07G054_SSI0_PCLK2 60 +#define R9A07G054_SSI0_PCLK_SFR 61 +#define R9A07G054_SSI1_PCLK2 62 +#define R9A07G054_SSI1_PCLK_SFR 63 +#define R9A07G054_SSI2_PCLK2 64 +#define R9A07G054_SSI2_PCLK_SFR 65 +#define R9A07G054_SSI3_PCLK2 66 +#define R9A07G054_SSI3_PCLK_SFR 67 +#define R9A07G054_SRC_CLKP 68 +#define R9A07G054_USB_U2H0_HCLK 69 +#define R9A07G054_USB_U2H1_HCLK 70 +#define R9A07G054_USB_U2P_EXR_CPUCLK 71 +#define R9A07G054_USB_PCLK 72 +#define R9A07G054_ETH0_CLK_AXI 73 +#define R9A07G054_ETH0_CLK_CHI 74 +#define R9A07G054_ETH1_CLK_AXI 75 +#define R9A07G054_ETH1_CLK_CHI 
76 +#define R9A07G054_I2C0_PCLK 77 +#define R9A07G054_I2C1_PCLK 78 +#define R9A07G054_I2C2_PCLK 79 +#define R9A07G054_I2C3_PCLK 80 +#define R9A07G054_SCIF0_CLK_PCK 81 +#define R9A07G054_SCIF1_CLK_PCK 82 +#define R9A07G054_SCIF2_CLK_PCK 83 +#define R9A07G054_SCIF3_CLK_PCK 84 +#define R9A07G054_SCIF4_CLK_PCK 85 +#define R9A07G054_SCI0_CLKP 86 +#define R9A07G054_SCI1_CLKP 87 +#define R9A07G054_IRDA_CLKP 88 +#define R9A07G054_RSPI0_CLKB 89 +#define R9A07G054_RSPI1_CLKB 90 +#define R9A07G054_RSPI2_CLKB 91 +#define R9A07G054_CANFD_PCLK 92 +#define R9A07G054_GPIO_HCLK 93 +#define R9A07G054_ADC_ADCLK 94 +#define R9A07G054_ADC_PCLK 95 +#define R9A07G054_TSU_PCLK 96 +#define R9A07G054_STPAI_INITCLK 97 +#define R9A07G054_STPAI_ACLK 98 +#define R9A07G054_STPAI_MCLK 99 +#define R9A07G054_STPAI_DCLKIN 100 +#define R9A07G054_STPAI_ACLK_DRP 101 + +/* R9A07G054 Resets */ +#define R9A07G054_CA55_RST_1_0 0 +#define R9A07G054_CA55_RST_1_1 1 +#define R9A07G054_CA55_RST_3_0 2 +#define R9A07G054_CA55_RST_3_1 3 +#define R9A07G054_CA55_RST_4 4 +#define R9A07G054_CA55_RST_5 5 +#define R9A07G054_CA55_RST_6 6 +#define R9A07G054_CA55_RST_7 7 +#define R9A07G054_CA55_RST_8 8 +#define R9A07G054_CA55_RST_9 9 +#define R9A07G054_CA55_RST_10 10 +#define R9A07G054_CA55_RST_11 11 +#define R9A07G054_CA55_RST_12 12 +#define R9A07G054_GIC600_GICRESET_N 13 +#define R9A07G054_GIC600_DBG_GICRESET_N 14 +#define R9A07G054_IA55_RESETN 15 +#define R9A07G054_MHU_RESETN 16 +#define R9A07G054_DMAC_ARESETN 17 +#define R9A07G054_DMAC_RST_ASYNC 18 +#define R9A07G054_SYC_RESETN 19 +#define R9A07G054_OSTM0_PRESETZ 20 +#define R9A07G054_OSTM1_PRESETZ 21 +#define R9A07G054_OSTM2_PRESETZ 22 +#define R9A07G054_MTU_X_PRESET_MTU3 23 +#define R9A07G054_POE3_RST_M_REG 24 +#define R9A07G054_GPT_RST_C 25 +#define R9A07G054_POEG_A_RST 26 +#define R9A07G054_POEG_B_RST 27 +#define R9A07G054_POEG_C_RST 28 +#define R9A07G054_POEG_D_RST 29 +#define R9A07G054_WDT0_PRESETN 30 +#define R9A07G054_WDT1_PRESETN 31 +#define R9A07G054_WDT2_PRESETN 32 +#define R9A07G054_SPI_RST 33 +#define R9A07G054_SDHI0_IXRST 34 +#define R9A07G054_SDHI1_IXRST 35 +#define R9A07G054_GPU_RESETN 36 +#define R9A07G054_GPU_AXI_RESETN 37 +#define R9A07G054_GPU_ACE_RESETN 38 +#define R9A07G054_ISU_ARESETN 39 +#define R9A07G054_ISU_PRESETN 40 +#define R9A07G054_H264_X_RESET_VCP 41 +#define R9A07G054_H264_CP_PRESET_P 42 +#define R9A07G054_CRU_CMN_RSTB 43 +#define R9A07G054_CRU_PRESETN 44 +#define R9A07G054_CRU_ARESETN 45 +#define R9A07G054_MIPI_DSI_CMN_RSTB 46 +#define R9A07G054_MIPI_DSI_ARESET_N 47 +#define R9A07G054_MIPI_DSI_PRESET_N 48 +#define R9A07G054_LCDC_RESET_N 49 +#define R9A07G054_SSI0_RST_M2_REG 50 +#define R9A07G054_SSI1_RST_M2_REG 51 +#define R9A07G054_SSI2_RST_M2_REG 52 +#define R9A07G054_SSI3_RST_M2_REG 53 +#define R9A07G054_SRC_RST 54 +#define R9A07G054_USB_U2H0_HRESETN 55 +#define R9A07G054_USB_U2H1_HRESETN 56 +#define R9A07G054_USB_U2P_EXL_SYSRST 57 +#define R9A07G054_USB_PRESETN 58 +#define R9A07G054_ETH0_RST_HW_N 59 +#define R9A07G054_ETH1_RST_HW_N 60 +#define R9A07G054_I2C0_MRST 61 +#define R9A07G054_I2C1_MRST 62 +#define R9A07G054_I2C2_MRST 63 +#define R9A07G054_I2C3_MRST 64 +#define R9A07G054_SCIF0_RST_SYSTEM_N 65 +#define R9A07G054_SCIF1_RST_SYSTEM_N 66 +#define R9A07G054_SCIF2_RST_SYSTEM_N 67 +#define R9A07G054_SCIF3_RST_SYSTEM_N 68 +#define R9A07G054_SCIF4_RST_SYSTEM_N 69 +#define R9A07G054_SCI0_RST 70 +#define R9A07G054_SCI1_RST 71 +#define R9A07G054_IRDA_RST 72 +#define R9A07G054_RSPI0_RST 73 +#define R9A07G054_RSPI1_RST 74 +#define R9A07G054_RSPI2_RST 75 +#define 
R9A07G054_CANFD_RSTP_N 76 +#define R9A07G054_CANFD_RSTC_N 77 +#define R9A07G054_GPIO_RSTN 78 +#define R9A07G054_GPIO_PORT_RESETN 79 +#define R9A07G054_GPIO_SPARE_RESETN 80 +#define R9A07G054_ADC_PRESETN 81 +#define R9A07G054_ADC_ADRST_N 82 +#define R9A07G054_TSU_PRESETN 83 +#define R9A07G054_STPAI_ARESETN 84 + +#endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */ diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h index 1cc89c548578..e5dad050d518 100644 --- a/include/dt-bindings/clock/stm32fx-clock.h +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -7,10 +7,10 @@ */ /* - * List of clocks wich are not derived from system clock (SYSCLOCK) + * List of clocks which are not derived from system clock (SYSCLOCK) * * The index of these clocks is the secondary index of DT bindings - * (see Documentatoin/devicetree/bindings/clock/st,stm32-rcc.txt) + * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt) * * e.g: <assigned-clocks = <&rcc 1 CLK_LSE>; diff --git a/include/dt-bindings/clock/stratix10-clock.h b/include/dt-bindings/clock/stratix10-clock.h index 08b98e20b7cc..636498f9e08e 100644 --- a/include/dt-bindings/clock/stratix10-clock.h +++ b/include/dt-bindings/clock/stratix10-clock.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2017, Intel Corporation */ diff --git a/include/dt-bindings/clock/tegra234-clock.h b/include/dt-bindings/clock/tegra234-clock.h index 8d7e66e1b6ef..8cae969e8cba 100644 --- a/include/dt-bindings/clock/tegra234-clock.h +++ b/include/dt-bindings/clock/tegra234-clock.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. */ +/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */ #ifndef DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H #define DT_BINDINGS_CLOCK_TEGRA234_CLOCK_H @@ -9,6 +9,26 @@ * @defgroup bpmp_clock_ids Clock ID's * @{ */ +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AHUB */ +#define TEGRA234_CLK_AHUB 4U +/** @brief output of gate CLK_ENB_APB2APE */ +#define TEGRA234_CLK_APB2APE 5U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_APE */ +#define TEGRA234_CLK_APE 6U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_AUD_MCLK */ +#define TEGRA234_CLK_AUD_MCLK 7U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC1 */ +#define TEGRA234_CLK_DMIC1 15U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC2 */ +#define TEGRA234_CLK_DMIC2 16U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC3 */ +#define TEGRA234_CLK_DMIC3 17U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DMIC4 */ +#define TEGRA234_CLK_DMIC4 18U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK1 */ +#define TEGRA234_CLK_DSPK1 29U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_DSPK2 */ +#define TEGRA234_CLK_DSPK2 30U /** * @brief controls the EMC clock frequency. 
* @details Doing a clk_set_rate on this clock will select the @@ -20,15 +40,126 @@ #define TEGRA234_CLK_EMC 31U /** @brief output of gate CLK_ENB_FUSE */ #define TEGRA234_CLK_FUSE 40U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C1 */ +#define TEGRA234_CLK_I2C1 48U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C2 */ +#define TEGRA234_CLK_I2C2 49U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C3 */ +#define TEGRA234_CLK_I2C3 50U +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C4 */ +#define TEGRA234_CLK_I2C4 51U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C6 */ +#define TEGRA234_CLK_I2C6 52U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C7 */ +#define TEGRA234_CLK_I2C7 53U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C8 */ +#define TEGRA234_CLK_I2C8 54U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2C9 */ +#define TEGRA234_CLK_I2C9 55U +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S1 */ +#define TEGRA234_CLK_I2S1 56U +/** @brief clock recovered from I2S1 input */ +#define TEGRA234_CLK_I2S1_SYNC_INPUT 57U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S2 */ +#define TEGRA234_CLK_I2S2 58U +/** @brief clock recovered from I2S2 input */ +#define TEGRA234_CLK_I2S2_SYNC_INPUT 59U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S3 */ +#define TEGRA234_CLK_I2S3 60U +/** @brief clock recovered from I2S3 input */ +#define TEGRA234_CLK_I2S3_SYNC_INPUT 61U +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S4 */ +#define TEGRA234_CLK_I2S4 62U +/** @brief clock recovered from I2S4 input */ +#define TEGRA234_CLK_I2S4_SYNC_INPUT 63U +/** output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S5 */ +#define TEGRA234_CLK_I2S5 64U +/** @brief clock recovered from I2S5 input */ +#define TEGRA234_CLK_I2S5_SYNC_INPUT 65U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_I2S6 */ +#define TEGRA234_CLK_I2S6 66U +/** @brief clock recovered from I2S6 input */ +#define TEGRA234_CLK_I2S6_SYNC_INPUT 67U +/** PLL controlled by CLK_RST_CONTROLLER_PLLA_BASE for use by audio clocks */ +#define TEGRA234_CLK_PLLA 93U +/** @brief PLLP clk output */ +#define TEGRA234_CLK_PLLP_OUT0 102U +/** @brief output of the divider CLK_RST_CONTROLLER_PLLA_OUT */ +#define TEGRA234_CLK_PLLA_OUT0 104U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM1 */ +#define TEGRA234_CLK_PWM1 105U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM2 */ +#define TEGRA234_CLK_PWM2 106U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM3 */ +#define TEGRA234_CLK_PWM3 107U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM4 */ +#define TEGRA234_CLK_PWM4 108U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM5 */ +#define TEGRA234_CLK_PWM5 109U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM6 */ +#define TEGRA234_CLK_PWM6 110U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM7 */ +#define TEGRA234_CLK_PWM7 111U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_PWM8 */ +#define TEGRA234_CLK_PWM8 112U /** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC4 */ #define TEGRA234_CLK_SDMMC4 123U +/** @brief output of mux controlled by 
CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC1 */ +#define TEGRA234_CLK_SYNC_DMIC1 139U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC2 */ +#define TEGRA234_CLK_SYNC_DMIC2 140U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC3 */ +#define TEGRA234_CLK_SYNC_DMIC3 141U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DMIC4 */ +#define TEGRA234_CLK_SYNC_DMIC4 142U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK1 */ +#define TEGRA234_CLK_SYNC_DSPK1 143U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_DSPK2 */ +#define TEGRA234_CLK_SYNC_DSPK2 144U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S1 */ +#define TEGRA234_CLK_SYNC_I2S1 145U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S2 */ +#define TEGRA234_CLK_SYNC_I2S2 146U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S3 */ +#define TEGRA234_CLK_SYNC_I2S3 147U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S4 */ +#define TEGRA234_CLK_SYNC_I2S4 148U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S5 */ +#define TEGRA234_CLK_SYNC_I2S5 149U +/** @brief output of mux controlled by CLK_RST_CONTROLLER_AUDIO_SYNC_CLK_I2S6 */ +#define TEGRA234_CLK_SYNC_I2S6 150U /** @brief output of mux controlled by CLK_RST_CONTROLLER_CLK_SOURCE_UARTA */ #define TEGRA234_CLK_UARTA 155U +/** @brief output of gate CLK_ENB_PEX1_CORE_6 */ +#define TEGRA234_CLK_PEX1_C6_CORE 161U +/** @brief output of gate CLK_ENB_PEX2_CORE_7 */ +#define TEGRA234_CLK_PEX2_C7_CORE 171U +/** @brief output of gate CLK_ENB_PEX2_CORE_8 */ +#define TEGRA234_CLK_PEX2_C8_CORE 172U +/** @brief output of gate CLK_ENB_PEX2_CORE_9 */ +#define TEGRA234_CLK_PEX2_C9_CORE 173U +/** @brief output of gate CLK_ENB_PEX2_CORE_10 */ +#define TEGRA234_CLK_PEX2_C10_CORE 187U /** @brief CLK_RST_CONTROLLER_CLK_SOURCE_SDMMC_LEGACY_TM switch divider output */ #define TEGRA234_CLK_SDMMC_LEGACY_TM 219U +/** @brief output of gate CLK_ENB_PEX0_CORE_0 */ +#define TEGRA234_CLK_PEX0_C0_CORE 220U +/** @brief output of gate CLK_ENB_PEX0_CORE_1 */ +#define TEGRA234_CLK_PEX0_C1_CORE 221U +/** @brief output of gate CLK_ENB_PEX0_CORE_2 */ +#define TEGRA234_CLK_PEX0_C2_CORE 222U +/** @brief output of gate CLK_ENB_PEX0_CORE_3 */ +#define TEGRA234_CLK_PEX0_C3_CORE 223U +/** @brief output of gate CLK_ENB_PEX0_CORE_4 */ +#define TEGRA234_CLK_PEX0_C4_CORE 224U +/** @brief output of gate CLK_ENB_PEX1_CORE_5 */ +#define TEGRA234_CLK_PEX1_C5_CORE 225U /** @brief PLL controlled by CLK_RST_CONTROLLER_PLLC4_BASE */ #define TEGRA234_CLK_PLLC4 237U /** @brief 32K input clock provided by PMIC */ #define TEGRA234_CLK_CLK_32K 289U - +/** @brief CLK_RST_CONTROLLER_AZA2XBITCLK_OUT_SWITCH_DIVIDER switch divider output (aza_2xbitclk) */ +#define TEGRA234_CLK_AZA_2XBIT 457U +/** @brief aza_2xbitclk / 2 (aza_bitclk) */ +#define TEGRA234_CLK_AZA_BIT 458U #endif diff --git a/include/dt-bindings/interrupt-controller/apple-aic.h b/include/dt-bindings/interrupt-controller/apple-aic.h index 604f2bb30ac0..bf3aac0e5491 100644 --- a/include/dt-bindings/interrupt-controller/apple-aic.h +++ b/include/dt-bindings/interrupt-controller/apple-aic.h @@ -11,5 +11,7 @@ #define AIC_TMR_HV_VIRT 1 #define AIC_TMR_GUEST_PHYS 2 #define AIC_TMR_GUEST_VIRT 3 +#define AIC_CPU_PMU_E 4 +#define AIC_CPU_PMU_P 5 #endif diff --git a/include/dt-bindings/memory/tegra234-mc.h 
b/include/dt-bindings/memory/tegra234-mc.h index 2662f70c15c6..e3b0e9da295d 100644 --- a/include/dt-bindings/memory/tegra234-mc.h +++ b/include/dt-bindings/memory/tegra234-mc.h @@ -1,4 +1,5 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */ #ifndef DT_BINDINGS_MEMORY_TEGRA234_MC_H #define DT_BINDINGS_MEMORY_TEGRA234_MC_H @@ -7,15 +8,59 @@ #define TEGRA234_SID_INVALID 0x00 #define TEGRA234_SID_PASSTHROUGH 0x7f +/* NISO0 stream IDs */ +#define TEGRA234_SID_APE 0x02 +#define TEGRA234_SID_HDA 0x03 +#define TEGRA234_SID_PCIE0 0x12 +#define TEGRA234_SID_PCIE4 0x13 +#define TEGRA234_SID_PCIE5 0x14 +#define TEGRA234_SID_PCIE6 0x15 +#define TEGRA234_SID_PCIE9 0x1f /* NISO1 stream IDs */ #define TEGRA234_SID_SDMMC4 0x02 +#define TEGRA234_SID_PCIE1 0x05 +#define TEGRA234_SID_PCIE2 0x06 +#define TEGRA234_SID_PCIE3 0x07 +#define TEGRA234_SID_PCIE7 0x08 +#define TEGRA234_SID_PCIE8 0x09 +#define TEGRA234_SID_PCIE10 0x0b #define TEGRA234_SID_BPMP 0x10 /* * memory client IDs */ +/* High-definition audio (HDA) read clients */ +#define TEGRA234_MEMORY_CLIENT_HDAR 0x15 +/* PCIE6 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE6AR 0x28 +/* PCIE6 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE6AW 0x29 +/* PCIE7 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE7AR 0x2a +/* PCIE7 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE7AW 0x30 +/* PCIE8 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE8AR 0x32 +/* High-definition audio (HDA) write clients */ +#define TEGRA234_MEMORY_CLIENT_HDAW 0x35 +/* PCIE8 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE8AW 0x3b +/* PCIE9 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE9AR 0x3c +/* PCIE6r1 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE6AR1 0x3d +/* PCIE9 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE9AW 0x3e +/* PCIE10 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE10AR 0x3f +/* PCIE10 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE10AW 0x40 +/* PCIE10r1 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE10AR1 0x48 +/* PCIE7r1 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE7AR1 0x49 /* sdmmcd memory read client */ #define TEGRA234_MEMORY_CLIENT_SDMMCRAB 0x63 /* sdmmcd memory write client */ @@ -28,5 +73,35 @@ #define TEGRA234_MEMORY_CLIENT_BPMPDMAR 0x95 /* BPMPDMA write client */ #define TEGRA234_MEMORY_CLIENT_BPMPDMAW 0x96 +/* APEDMA read client */ +#define TEGRA234_MEMORY_CLIENT_APEDMAR 0x9f +/* APEDMA write client */ +#define TEGRA234_MEMORY_CLIENT_APEDMAW 0xa0 +/* PCIE0 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE0R 0xd8 +/* PCIE0 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE0W 0xd9 +/* PCIE1 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE1R 0xda +/* PCIE1 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE1W 0xdb +/* PCIE2 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE2AR 0xdc +/* PCIE2 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE2AW 0xdd +/* PCIE3 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE3R 0xde +/* PCIE3 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE3W 0xdf +/* PCIE4 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE4R 0xe0 +/* PCIE4 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE4W 0xe1 +/* PCIE5 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE5R 0xe2 +/* PCIE5 write clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE5W 0xe3 +/* PCIE5r1 read clients */ +#define TEGRA234_MEMORY_CLIENT_PCIE5R1 0xef #endif diff --git a/include/dt-bindings/pinctrl/k3.h 
b/include/dt-bindings/pinctrl/k3.h index 63e038e36ca3..a5204ab91d3e 100644 --- a/include/dt-bindings/pinctrl/k3.h +++ b/include/dt-bindings/pinctrl/k3.h @@ -41,4 +41,7 @@ #define J721S2_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) #define J721S2_WKUP_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define AM62X_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) +#define AM62X_MCU_IOPAD(pa, val, muxmode) (((pa) & 0x1fff)) ((val) | (muxmode)) + +#endif diff --git a/include/dt-bindings/power/imx8mp-power.h b/include/dt-bindings/power/imx8mp-power.h new file mode 100644 index 000000000000..9f90c40a2c6c --- /dev/null +++ b/include/dt-bindings/power/imx8mp-power.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2020 Pengutronix, Sascha Hauer <kernel@pengutronix.de> + */ + +#ifndef __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__ +#define __DT_BINDINGS_IMX8MP_POWER_DOMAIN_POWER_H__ + +#define IMX8MP_POWER_DOMAIN_MIPI_PHY1 0 +#define IMX8MP_POWER_DOMAIN_PCIE_PHY 1 +#define IMX8MP_POWER_DOMAIN_USB1_PHY 2 +#define IMX8MP_POWER_DOMAIN_USB2_PHY 3 +#define IMX8MP_POWER_DOMAIN_MLMIX 4 +#define IMX8MP_POWER_DOMAIN_AUDIOMIX 5 +#define IMX8MP_POWER_DOMAIN_GPU2D 6 +#define IMX8MP_POWER_DOMAIN_GPUMIX 7 +#define IMX8MP_POWER_DOMAIN_VPUMIX 8 +#define IMX8MP_POWER_DOMAIN_GPU3D 9 +#define IMX8MP_POWER_DOMAIN_MEDIAMIX 10 +#define IMX8MP_POWER_DOMAIN_VPU_G1 11 +#define IMX8MP_POWER_DOMAIN_VPU_G2 12 +#define IMX8MP_POWER_DOMAIN_VPU_VC8000E 13 +#define IMX8MP_POWER_DOMAIN_HDMIMIX 14 +#define IMX8MP_POWER_DOMAIN_HDMI_PHY 15 +#define IMX8MP_POWER_DOMAIN_MIPI_PHY2 16 +#define IMX8MP_POWER_DOMAIN_HSIOMIX 17 +#define IMX8MP_POWER_DOMAIN_MEDIAMIX_ISPDWP 18 + +#define IMX8MP_HSIOBLK_PD_USB 0 +#define IMX8MP_HSIOBLK_PD_USB_PHY1 1 +#define IMX8MP_HSIOBLK_PD_USB_PHY2 2 +#define IMX8MP_HSIOBLK_PD_PCIE 3 +#define IMX8MP_HSIOBLK_PD_PCIE_PHY 4 + +#endif diff --git a/include/dt-bindings/power/imx8mq-power.h b/include/dt-bindings/power/imx8mq-power.h index 8a513bd9166e..9f7d0f1e7c32 100644 --- a/include/dt-bindings/power/imx8mq-power.h +++ b/include/dt-bindings/power/imx8mq-power.h @@ -18,4 +18,7 @@ #define IMX8M_POWER_DOMAIN_MIPI_CSI2 9 #define IMX8M_POWER_DOMAIN_PCIE2 10 +#define IMX8MQ_VPUBLK_PD_G1 0 +#define IMX8MQ_VPUBLK_PD_G2 1 + #endif diff --git a/include/dt-bindings/power/meson-s4-power.h b/include/dt-bindings/power/meson-s4-power.h new file mode 100644 index 000000000000..462dd2cb938b --- /dev/null +++ b/include/dt-bindings/power/meson-s4-power.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */ +/* + * Copyright (c) 2021 Amlogic, Inc. + * Author: Shunzhou Jiang <shunzhou.jiang@amlogic.com> + */ + +#ifndef _DT_BINDINGS_MESON_S4_POWER_H +#define _DT_BINDINGS_MESON_S4_POWER_H + +#define PWRC_S4_DOS_HEVC_ID 0 +#define PWRC_S4_DOS_VDEC_ID 1 +#define PWRC_S4_VPU_HDMI_ID 2 +#define PWRC_S4_USB_COMB_ID 3 +#define PWRC_S4_GE2D_ID 4 +#define PWRC_S4_ETH_ID 5 +#define PWRC_S4_DEMOD_ID 6 +#define PWRC_S4_AUDIO_ID 7 + +#endif diff --git a/include/dt-bindings/power/mt8186-power.h b/include/dt-bindings/power/mt8186-power.h new file mode 100644 index 000000000000..429f7197f6b6 --- /dev/null +++ b/include/dt-bindings/power/mt8186-power.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* + * Copyright (c) 2022 MediaTek Inc. 
+ * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + */ + +#ifndef _DT_BINDINGS_POWER_MT8186_POWER_H +#define _DT_BINDINGS_POWER_MT8186_POWER_H + +#define MT8186_POWER_DOMAIN_MFG0 0 +#define MT8186_POWER_DOMAIN_MFG1 1 +#define MT8186_POWER_DOMAIN_MFG2 2 +#define MT8186_POWER_DOMAIN_MFG3 3 +#define MT8186_POWER_DOMAIN_SSUSB 4 +#define MT8186_POWER_DOMAIN_SSUSB_P1 5 +#define MT8186_POWER_DOMAIN_DIS 6 +#define MT8186_POWER_DOMAIN_IMG 7 +#define MT8186_POWER_DOMAIN_IMG2 8 +#define MT8186_POWER_DOMAIN_IPE 9 +#define MT8186_POWER_DOMAIN_CAM 10 +#define MT8186_POWER_DOMAIN_CAM_RAWA 11 +#define MT8186_POWER_DOMAIN_CAM_RAWB 12 +#define MT8186_POWER_DOMAIN_VENC 13 +#define MT8186_POWER_DOMAIN_VDEC 14 +#define MT8186_POWER_DOMAIN_WPE 15 +#define MT8186_POWER_DOMAIN_CONN_ON 16 +#define MT8186_POWER_DOMAIN_CSIRX_TOP 17 +#define MT8186_POWER_DOMAIN_ADSP_AO 18 +#define MT8186_POWER_DOMAIN_ADSP_INFRA 19 +#define MT8186_POWER_DOMAIN_ADSP_TOP 20 + +#endif /* _DT_BINDINGS_POWER_MT8186_POWER_H */ diff --git a/include/dt-bindings/power/mt8195-power.h b/include/dt-bindings/power/mt8195-power.h new file mode 100644 index 000000000000..b20ca4b3e3a8 --- /dev/null +++ b/include/dt-bindings/power/mt8195-power.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (c) 2021 MediaTek Inc. + * Author: Chun-Jie Chen <chun-jie.chen@mediatek.com> + */ + +#ifndef _DT_BINDINGS_POWER_MT8195_POWER_H +#define _DT_BINDINGS_POWER_MT8195_POWER_H + +#define MT8195_POWER_DOMAIN_PCIE_MAC_P0 0 +#define MT8195_POWER_DOMAIN_PCIE_MAC_P1 1 +#define MT8195_POWER_DOMAIN_PCIE_PHY 2 +#define MT8195_POWER_DOMAIN_SSUSB_PCIE_PHY 3 +#define MT8195_POWER_DOMAIN_CSI_RX_TOP 4 +#define MT8195_POWER_DOMAIN_ETHER 5 +#define MT8195_POWER_DOMAIN_ADSP 6 +#define MT8195_POWER_DOMAIN_AUDIO 7 +#define MT8195_POWER_DOMAIN_MFG0 8 +#define MT8195_POWER_DOMAIN_MFG1 9 +#define MT8195_POWER_DOMAIN_MFG2 10 +#define MT8195_POWER_DOMAIN_MFG3 11 +#define MT8195_POWER_DOMAIN_MFG4 12 +#define MT8195_POWER_DOMAIN_MFG5 13 +#define MT8195_POWER_DOMAIN_MFG6 14 +#define MT8195_POWER_DOMAIN_VPPSYS0 15 +#define MT8195_POWER_DOMAIN_VDOSYS0 16 +#define MT8195_POWER_DOMAIN_VPPSYS1 17 +#define MT8195_POWER_DOMAIN_VDOSYS1 18 +#define MT8195_POWER_DOMAIN_DP_TX 19 +#define MT8195_POWER_DOMAIN_EPD_TX 20 +#define MT8195_POWER_DOMAIN_HDMI_TX 21 +#define MT8195_POWER_DOMAIN_WPESYS 22 +#define MT8195_POWER_DOMAIN_VDEC0 23 +#define MT8195_POWER_DOMAIN_VDEC1 24 +#define MT8195_POWER_DOMAIN_VDEC2 25 +#define MT8195_POWER_DOMAIN_VENC 26 +#define MT8195_POWER_DOMAIN_VENC_CORE1 27 +#define MT8195_POWER_DOMAIN_IMG 28 +#define MT8195_POWER_DOMAIN_DIP 29 +#define MT8195_POWER_DOMAIN_IPE 30 +#define MT8195_POWER_DOMAIN_CAM 31 +#define MT8195_POWER_DOMAIN_CAM_RAWA 32 +#define MT8195_POWER_DOMAIN_CAM_RAWB 33 +#define MT8195_POWER_DOMAIN_CAM_MRAW 34 + +#endif /* _DT_BINDINGS_POWER_MT8195_POWER_H */ diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index edfc1ff2acb3..c002cc6ddf55 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -139,6 +139,11 @@ #define MDM9607_VDDMX_AO 4 #define MDM9607_VDDMX_VFL 5 +/* MSM8226 Power Domain Indexes */ +#define MSM8226_VDDCX 0 +#define MSM8226_VDDCX_AO 1 +#define MSM8226_VDDCX_VFC 2 + /* MSM8939 Power Domains */ #define MSM8939_VDDMDCX 0 #define MSM8939_VDDMDCX_AO 1 diff --git a/include/dt-bindings/power/tegra234-powergate.h b/include/dt-bindings/power/tegra234-powergate.h new file mode 100644 index 000000000000..f610eee9bce8 --- /dev/null 
+++ b/include/dt-bindings/power/tegra234-powergate.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. */ + +#ifndef __ABI_MACH_T234_POWERGATE_T234_H_ +#define __ABI_MACH_T234_POWERGATE_T234_H_ + +#define TEGRA234_POWER_DOMAIN_AUD 2U +#define TEGRA234_POWER_DOMAIN_DISP 3U +#define TEGRA234_POWER_DOMAIN_PCIEX8A 5U +#define TEGRA234_POWER_DOMAIN_PCIEX4A 6U +#define TEGRA234_POWER_DOMAIN_PCIEX4BA 7U +#define TEGRA234_POWER_DOMAIN_PCIEX4BB 8U +#define TEGRA234_POWER_DOMAIN_PCIEX1A 9U +#define TEGRA234_POWER_DOMAIN_PCIEX4CA 13U +#define TEGRA234_POWER_DOMAIN_PCIEX4CB 14U +#define TEGRA234_POWER_DOMAIN_PCIEX4CC 15U +#define TEGRA234_POWER_DOMAIN_PCIEX8B 16U +#define TEGRA234_POWER_DOMAIN_MGBEA 17U +#define TEGRA234_POWER_DOMAIN_MGBEB 18U +#define TEGRA234_POWER_DOMAIN_MGBEC 19U + +#endif diff --git a/include/dt-bindings/regulator/richtek,rt5190a-regulator.h b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h new file mode 100644 index 000000000000..63f99d4c1cb3 --- /dev/null +++ b/include/dt-bindings/regulator/richtek,rt5190a-regulator.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__ +#define __DT_BINDINGS_RICHTEK_RT5190A_REGULATOR_H__ + +/* + * BUCK/LDO mode constants which may be used in devicetree properties + * (eg. regulator-allowed-modes). + * See the manufacturer's datasheet for more information on these modes. + */ + +#define RT5190A_OPMODE_AUTO 0 +#define RT5190A_OPMODE_FPWM 1 + +#endif diff --git a/include/dt-bindings/regulator/ti,tps62864.h b/include/dt-bindings/regulator/ti,tps62864.h new file mode 100644 index 000000000000..8db31f23d956 --- /dev/null +++ b/include/dt-bindings/regulator/ti,tps62864.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_REGULATOR_TI_TPS62864_H +#define _DT_BINDINGS_REGULATOR_TI_TPS62864_H + +#define TPS62864_MODE_NORMAL 0 +#define TPS62864_MODE_FPWM 1 + +#endif diff --git a/include/dt-bindings/reset/tegra234-reset.h b/include/dt-bindings/reset/tegra234-reset.h index 50e13bced642..1362cd5e03f0 100644 --- a/include/dt-bindings/reset/tegra234-reset.h +++ b/include/dt-bindings/reset/tegra234-reset.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. */ +/* Copyright (c) 2018-2022, NVIDIA CORPORATION. All rights reserved. 
*/ #ifndef DT_BINDINGS_RESET_TEGRA234_RESET_H #define DT_BINDINGS_RESET_TEGRA234_RESET_H @@ -10,8 +10,51 @@ * @brief Identifiers for Resets controllable by firmware * @{ */ +#define TEGRA234_RESET_PEX1_CORE_6 11U +#define TEGRA234_RESET_PEX1_CORE_6_APB 12U +#define TEGRA234_RESET_PEX1_COMMON_APB 13U +#define TEGRA234_RESET_PEX2_CORE_7 14U +#define TEGRA234_RESET_PEX2_CORE_7_APB 15U +#define TEGRA234_RESET_HDA 20U +#define TEGRA234_RESET_HDACODEC 21U +#define TEGRA234_RESET_I2C1 24U +#define TEGRA234_RESET_PEX2_CORE_8 25U +#define TEGRA234_RESET_PEX2_CORE_8_APB 26U +#define TEGRA234_RESET_PEX2_CORE_9 27U +#define TEGRA234_RESET_PEX2_CORE_9_APB 28U +#define TEGRA234_RESET_I2C2 29U +#define TEGRA234_RESET_I2C3 30U +#define TEGRA234_RESET_I2C4 31U +#define TEGRA234_RESET_I2C6 32U +#define TEGRA234_RESET_I2C7 33U +#define TEGRA234_RESET_I2C8 34U +#define TEGRA234_RESET_I2C9 35U +#define TEGRA234_RESET_PEX2_CORE_10 56U +#define TEGRA234_RESET_PEX2_CORE_10_APB 57U +#define TEGRA234_RESET_PEX2_COMMON_APB 58U +#define TEGRA234_RESET_PWM1 68U +#define TEGRA234_RESET_PWM2 69U +#define TEGRA234_RESET_PWM3 70U +#define TEGRA234_RESET_PWM4 71U +#define TEGRA234_RESET_PWM5 72U +#define TEGRA234_RESET_PWM6 73U +#define TEGRA234_RESET_PWM7 74U +#define TEGRA234_RESET_PWM8 75U #define TEGRA234_RESET_SDMMC4 85U #define TEGRA234_RESET_UARTA 100U +#define TEGRA234_RESET_PEX0_CORE_0 116U +#define TEGRA234_RESET_PEX0_CORE_1 117U +#define TEGRA234_RESET_PEX0_CORE_2 118U +#define TEGRA234_RESET_PEX0_CORE_3 119U +#define TEGRA234_RESET_PEX0_CORE_4 120U +#define TEGRA234_RESET_PEX0_CORE_0_APB 121U +#define TEGRA234_RESET_PEX0_CORE_1_APB 122U +#define TEGRA234_RESET_PEX0_CORE_2_APB 123U +#define TEGRA234_RESET_PEX0_CORE_3_APB 124U +#define TEGRA234_RESET_PEX0_CORE_4_APB 125U +#define TEGRA234_RESET_PEX0_COMMON_APB 126U +#define TEGRA234_RESET_PEX1_CORE_5 129U +#define TEGRA234_RESET_PEX1_CORE_5_APB 130U /** @} */ diff --git a/include/dt-bindings/sound/microchip,pdmc.h b/include/dt-bindings/sound/microchip,pdmc.h new file mode 100644 index 000000000000..96cde94ce74f --- /dev/null +++ b/include/dt-bindings/sound/microchip,pdmc.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_BINDINGS_MICROCHIP_PDMC_H__ +#define __DT_BINDINGS_MICROCHIP_PDMC_H__ + +/* PDM microphone's pin placement */ +#define MCHP_PDMC_DS0 0 +#define MCHP_PDMC_DS1 1 + +/* PDM microphone clock edge sampling */ +#define MCHP_PDMC_CLK_POSITIVE 0 +#define MCHP_PDMC_CLK_NEGATIVE 1 + +#endif /* __DT_BINDINGS_MICROCHIP_PDMC_H__ */ diff --git a/include/dt-bindings/sound/tlv320aic31xx-micbias.h b/include/dt-bindings/sound/tlv320aic31xx-micbias.h deleted file mode 100644 index c6895a18a455..000000000000 --- a/include/dt-bindings/sound/tlv320aic31xx-micbias.h +++ /dev/null @@ -1,9 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __DT_TLV320AIC31XX_MICBIAS_H -#define __DT_TLV320AIC31XX_MICBIAS_H - -#define MICBIAS_2_0V 1 -#define MICBIAS_2_5V 2 -#define MICBIAS_AVDDV 3 - -#endif /* __DT_TLV320AIC31XX_MICBIAS_H */ diff --git a/include/dt-bindings/sound/tlv320aic31xx.h b/include/dt-bindings/sound/tlv320aic31xx.h new file mode 100644 index 000000000000..4a80238ab250 --- /dev/null +++ b/include/dt-bindings/sound/tlv320aic31xx.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __DT_TLV320AIC31XX_H +#define __DT_TLV320AIC31XX_H + +#define MICBIAS_2_0V 1 +#define MICBIAS_2_5V 2 +#define MICBIAS_AVDDV 3 + +#define PLL_CLKIN_MCLK 0x00 +#define PLL_CLKIN_BCLK 0x01 +#define PLL_CLKIN_GPIO1 0x02 +#define PLL_CLKIN_DIN 0x03 + 
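/*
 * Usage sketch, not taken from this diff: a board devicetree that includes
 * this header can feed these constants to the codec node. The node below is
 * hypothetical; see the ti,tlv320aic31xx binding for the authoritative
 * property list.
 *
 *	codec: tlv320aic31xx@18 {
 *		compatible = "ti,tlv320aic31xx";
 *		reg = <0x18>;
 *		ai31xx-micbias-vg = <MICBIAS_2_0V>;
 *	};
 */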
+#endif /* __DT_TLV320AIC31XX_H */ diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h index 6acd3cf13a18..2419a735420f 100644 --- a/include/keys/system_keyring.h +++ b/include/keys/system_keyring.h @@ -38,6 +38,20 @@ extern int restrict_link_by_builtin_and_secondary_trusted( #define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted #endif +#ifdef CONFIG_INTEGRITY_MACHINE_KEYRING +extern int restrict_link_by_builtin_secondary_and_machine( + struct key *dest_keyring, + const struct key_type *type, + const union key_payload *payload, + struct key *restrict_key); +extern void __init set_machine_trusted_keys(struct key *keyring); +#else +#define restrict_link_by_builtin_secondary_and_machine restrict_link_by_builtin_trusted +static inline void __init set_machine_trusted_keys(struct key *keyring) +{ +} +#endif + extern struct pkcs7_message *pkcs7; #ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING extern int mark_hash_blacklisted(const char *hash); diff --git a/include/kunit/assert.h b/include/kunit/assert.h index ccbc36c0b02f..4b52e12c2ae8 100644 --- a/include/kunit/assert.h +++ b/include/kunit/assert.h @@ -29,57 +29,33 @@ enum kunit_assert_type { }; /** + * struct kunit_loc - Identifies the source location of a line of code. + * @line: the line number in the file. + * @file: the file name. + */ +struct kunit_loc { + int line; + const char *file; +}; + +#define KUNIT_CURRENT_LOC { .file = __FILE__, .line = __LINE__ } + +/** * struct kunit_assert - Data for printing a failed assertion or expectation. - * @test: the test case this expectation/assertion is associated with. - * @type: the type (either an expectation or an assertion) of this kunit_assert. - * @line: the source code line number that the expectation/assertion is at. - * @file: the file path of the source file that the expectation/assertion is in. - * @message: an optional message to provide additional context. * @format: a function which formats the data in this kunit_assert to a string. * * Represents a failed expectation/assertion. Contains all the data necessary to * format a string to a user reporting the failure. */ struct kunit_assert { - struct kunit *test; - enum kunit_assert_type type; - int line; - const char *file; - struct va_format message; void (*format)(const struct kunit_assert *assert, + const struct va_format *message, struct string_stream *stream); }; -/** - * KUNIT_INIT_VA_FMT_NULL - Default initializer for struct va_format. - * - * Used inside a struct initialization block to initialize struct va_format to - * default values where fmt and va are null. - */ -#define KUNIT_INIT_VA_FMT_NULL { .fmt = NULL, .va = NULL } - -/** - * KUNIT_INIT_ASSERT_STRUCT() - Initializer for a &struct kunit_assert. - * @kunit: The test case that this expectation/assertion is associated with. - * @assert_type: The type (assertion or expectation) of this kunit_assert. - * @fmt: The formatting function which builds a string out of this kunit_assert. - * - * The base initializer for a &struct kunit_assert. 
- */ -#define KUNIT_INIT_ASSERT_STRUCT(kunit, assert_type, fmt) { \ - .test = kunit, \ - .type = assert_type, \ - .file = __FILE__, \ - .line = __LINE__, \ - .message = KUNIT_INIT_VA_FMT_NULL, \ - .format = fmt \ -} - -void kunit_base_assert_format(const struct kunit_assert *assert, - struct string_stream *stream); - -void kunit_assert_print_msg(const struct kunit_assert *assert, - struct string_stream *stream); +void kunit_assert_prologue(const struct kunit_loc *loc, + enum kunit_assert_type type, + struct string_stream *stream); /** * struct kunit_fail_assert - Represents a plain fail expectation/assertion. @@ -92,20 +68,17 @@ struct kunit_fail_assert { }; void kunit_fail_assert_format(const struct kunit_assert *assert, + const struct va_format *message, struct string_stream *stream); /** - * KUNIT_INIT_FAIL_ASSERT_STRUCT() - Initializer for &struct kunit_fail_assert. - * @test: The test case that this expectation/assertion is associated with. - * @type: The type (assertion or expectation) of this kunit_assert. + * KUNIT_INIT_FAIL_ASSERT_STRUCT - Initializer for &struct kunit_fail_assert. * * Initializes a &struct kunit_fail_assert. Intended to be used in * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. */ -#define KUNIT_INIT_FAIL_ASSERT_STRUCT(test, type) { \ - .assert = KUNIT_INIT_ASSERT_STRUCT(test, \ - type, \ - kunit_fail_assert_format) \ +#define KUNIT_INIT_FAIL_ASSERT_STRUCT { \ + .assert = { .format = kunit_fail_assert_format }, \ } /** @@ -125,22 +98,19 @@ struct kunit_unary_assert { }; void kunit_unary_assert_format(const struct kunit_assert *assert, + const struct va_format *message, struct string_stream *stream); /** * KUNIT_INIT_UNARY_ASSERT_STRUCT() - Initializes &struct kunit_unary_assert. - * @test: The test case that this expectation/assertion is associated with. - * @type: The type (assertion or expectation) of this kunit_assert. * @cond: A string representation of the expression asserted true or false. * @expect_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise. * * Initializes a &struct kunit_unary_assert. Intended to be used in * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. */ -#define KUNIT_INIT_UNARY_ASSERT_STRUCT(test, type, cond, expect_true) { \ - .assert = KUNIT_INIT_ASSERT_STRUCT(test, \ - type, \ - kunit_unary_assert_format), \ +#define KUNIT_INIT_UNARY_ASSERT_STRUCT(cond, expect_true) { \ + .assert = { .format = kunit_unary_assert_format }, \ .condition = cond, \ .expected_true = expect_true \ } @@ -162,35 +132,43 @@ struct kunit_ptr_not_err_assert { }; void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert, + const struct va_format *message, struct string_stream *stream); /** * KUNIT_INIT_PTR_NOT_ERR_ASSERT_STRUCT() - Initializes a * &struct kunit_ptr_not_err_assert. - * @test: The test case that this expectation/assertion is associated with. - * @type: The type (assertion or expectation) of this kunit_assert. * @txt: A string representation of the expression passed to the expectation. * @val: The actual evaluated pointer value of the expression. * * Initializes a &struct kunit_ptr_not_err_assert. Intended to be used in * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. 
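 *
 * A minimal sketch of the new two-argument form (variable names here are
 * illustrative, not from this patch):
 *
 *	struct kunit_ptr_not_err_assert __assertion =
 *		KUNIT_INIT_PTR_NOT_ERR_STRUCT("some_ptr_expr", some_ptr);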
*/ -#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, type, txt, val) { \ - .assert = KUNIT_INIT_ASSERT_STRUCT(test, \ - type, \ - kunit_ptr_not_err_assert_format), \ +#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(txt, val) { \ + .assert = { .format = kunit_ptr_not_err_assert_format }, \ .text = txt, \ .value = val \ } /** + * struct kunit_binary_assert_text - holds strings for &struct + * kunit_binary_assert and friends to try and make the structs smaller. + * @operation: A string representation of the comparison operator (e.g. "=="). + * @left_text: A string representation of the left expression (e.g. "2+2"). + * @right_text: A string representation of the right expression (e.g. "2+2"). + */ +struct kunit_binary_assert_text { + const char *operation; + const char *left_text; + const char *right_text; +}; + +/** * struct kunit_binary_assert - An expectation/assertion that compares two * non-pointer values (for example, KUNIT_EXPECT_EQ(test, 1 + 1, 2)). * @assert: The parent of this type. - * @operation: A string representation of the comparison operator (e.g. "=="). - * @left_text: A string representation of the expression in the left slot. + * @text: Holds the textual representations of the operands and op (e.g. "=="). * @left_value: The actual evaluated value of the expression in the left slot. - * @right_text: A string representation of the expression in the right slot. * @right_value: The actual evaluated value of the expression in the right slot. * * Represents an expectation/assertion that compares two non-pointer values. For @@ -199,44 +177,36 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert, */ struct kunit_binary_assert { struct kunit_assert assert; - const char *operation; - const char *left_text; + const struct kunit_binary_assert_text *text; long long left_value; - const char *right_text; long long right_value; }; void kunit_binary_assert_format(const struct kunit_assert *assert, + const struct va_format *message, struct string_stream *stream); /** - * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a - * &struct kunit_binary_assert. - * @test: The test case that this expectation/assertion is associated with. - * @type: The type (assertion or expectation) of this kunit_assert. - * @op_str: A string representation of the comparison operator (e.g. "=="). - * @left_str: A string representation of the expression in the left slot. + * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a binary assert like + * kunit_binary_assert, kunit_binary_ptr_assert, etc. + * + * @format_func: a function which formats the assert to a string. + * @text_: Pointer to a kunit_binary_assert_text. * @left_val: The actual evaluated value of the expression in the left slot. - * @right_str: A string representation of the expression in the right slot. * @right_val: The actual evaluated value of the expression in the right slot. * - * Initializes a &struct kunit_binary_assert. Intended to be used in - * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. + * Initializes a binary assert like kunit_binary_assert, + * kunit_binary_ptr_assert, etc. This relies on these structs having the same + * fields but with different types for left_val/right_val. + * This is ultimately used by binary assertion macros like KUNIT_EXPECT_EQ, etc. 
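 *
 * Roughly, KUNIT_EXPECT_EQ(test, a, b) ends up building (names illustrative
 * only):
 *
 *	static const struct kunit_binary_assert_text __text = {
 *		.operation = "==", .left_text = "a", .right_text = "b",
 *	};
 *	struct kunit_binary_assert __assertion =
 *		KUNIT_INIT_BINARY_ASSERT_STRUCT(kunit_binary_assert_format,
 *						&__text, __left, __right);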
*/ -#define KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \ - type, \ - op_str, \ - left_str, \ +#define KUNIT_INIT_BINARY_ASSERT_STRUCT(format_func, \ + text_, \ left_val, \ - right_str, \ right_val) { \ - .assert = KUNIT_INIT_ASSERT_STRUCT(test, \ - type, \ - kunit_binary_assert_format), \ - .operation = op_str, \ - .left_text = left_str, \ + .assert = { .format = format_func }, \ + .text = text_, \ .left_value = left_val, \ - .right_text = right_str, \ .right_value = right_val \ } @@ -244,10 +214,8 @@ void kunit_binary_assert_format(const struct kunit_assert *assert, * struct kunit_binary_ptr_assert - An expectation/assertion that compares two * pointer values (for example, KUNIT_EXPECT_PTR_EQ(test, foo, bar)). * @assert: The parent of this type. - * @operation: A string representation of the comparison operator (e.g. "=="). - * @left_text: A string representation of the expression in the left slot. + * @text: Holds the textual representations of the operands and op (e.g. "=="). * @left_value: The actual evaluated value of the expression in the left slot. - * @right_text: A string representation of the expression in the right slot. * @right_value: The actual evaluated value of the expression in the right slot. * * Represents an expectation/assertion that compares two pointer values. For @@ -256,55 +224,21 @@ void kunit_binary_assert_format(const struct kunit_assert *assert, */ struct kunit_binary_ptr_assert { struct kunit_assert assert; - const char *operation; - const char *left_text; + const struct kunit_binary_assert_text *text; const void *left_value; - const char *right_text; const void *right_value; }; void kunit_binary_ptr_assert_format(const struct kunit_assert *assert, + const struct va_format *message, struct string_stream *stream); /** - * KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT() - Initializes a - * &struct kunit_binary_ptr_assert. - * @test: The test case that this expectation/assertion is associated with. - * @type: The type (assertion or expectation) of this kunit_assert. - * @op_str: A string representation of the comparison operator (e.g. "=="). - * @left_str: A string representation of the expression in the left slot. - * @left_val: The actual evaluated value of the expression in the left slot. - * @right_str: A string representation of the expression in the right slot. - * @right_val: The actual evaluated value of the expression in the right slot. - * - * Initializes a &struct kunit_binary_ptr_assert. Intended to be used in - * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. - */ -#define KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT(test, \ - type, \ - op_str, \ - left_str, \ - left_val, \ - right_str, \ - right_val) { \ - .assert = KUNIT_INIT_ASSERT_STRUCT(test, \ - type, \ - kunit_binary_ptr_assert_format), \ - .operation = op_str, \ - .left_text = left_str, \ - .left_value = left_val, \ - .right_text = right_str, \ - .right_value = right_val \ -} - -/** * struct kunit_binary_str_assert - An expectation/assertion that compares two * string values (for example, KUNIT_EXPECT_STREQ(test, foo, "bar")). * @assert: The parent of this type. - * @operation: A string representation of the comparison operator (e.g. "=="). - * @left_text: A string representation of the expression in the left slot. + * @text: Holds the textual representations of the operands and comparator. * @left_value: The actual evaluated value of the expression in the left slot. - * @right_text: A string representation of the expression in the right slot. 
* @right_value: The actual evaluated value of the expression in the right slot. * * Represents an expectation/assertion that compares two string values. For @@ -313,45 +247,13 @@ void kunit_binary_ptr_assert_format(const struct kunit_assert *assert, */ struct kunit_binary_str_assert { struct kunit_assert assert; - const char *operation; - const char *left_text; + const struct kunit_binary_assert_text *text; const char *left_value; - const char *right_text; const char *right_value; }; void kunit_binary_str_assert_format(const struct kunit_assert *assert, + const struct va_format *message, struct string_stream *stream); -/** - * KUNIT_INIT_BINARY_STR_ASSERT_STRUCT() - Initializes a - * &struct kunit_binary_str_assert. - * @test: The test case that this expectation/assertion is associated with. - * @type: The type (assertion or expectation) of this kunit_assert. - * @op_str: A string representation of the comparison operator (e.g. "=="). - * @left_str: A string representation of the expression in the left slot. - * @left_val: The actual evaluated value of the expression in the left slot. - * @right_str: A string representation of the expression in the right slot. - * @right_val: The actual evaluated value of the expression in the right slot. - * - * Initializes a &struct kunit_binary_str_assert. Intended to be used in - * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros. - */ -#define KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \ - type, \ - op_str, \ - left_str, \ - left_val, \ - right_str, \ - right_val) { \ - .assert = KUNIT_INIT_ASSERT_STRUCT(test, \ - type, \ - kunit_binary_str_assert_format), \ - .operation = op_str, \ - .left_text = left_str, \ - .left_value = left_val, \ - .right_text = right_str, \ - .right_value = right_val \ -} - #endif /* _KUNIT_ASSERT_H */ diff --git a/include/kunit/test.h b/include/kunit/test.h index b26400731c02..00b9ff7783ab 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -12,6 +12,7 @@ #include <kunit/assert.h> #include <kunit/try-catch.h> +#include <linux/compiler.h> #include <linux/container_of.h> #include <linux/err.h> #include <linux/init.h> @@ -770,26 +771,32 @@ void __printf(2, 3) kunit_log_append(char *log, const char *fmt, ...); */ #define KUNIT_SUCCEED(test) do {} while (0) -void kunit_do_assertion(struct kunit *test, - struct kunit_assert *assert, - bool pass, - const char *fmt, ...); - -#define KUNIT_ASSERTION(test, pass, assert_class, INITIALIZER, fmt, ...) do { \ - struct assert_class __assertion = INITIALIZER; \ - kunit_do_assertion(test, \ - &__assertion.assert, \ - pass, \ - fmt, \ - ##__VA_ARGS__); \ +void kunit_do_failed_assertion(struct kunit *test, + const struct kunit_loc *loc, + enum kunit_assert_type type, + struct kunit_assert *assert, + const char *fmt, ...); + +#define KUNIT_ASSERTION(test, assert_type, pass, assert_class, INITIALIZER, fmt, ...) do { \ + if (unlikely(!(pass))) { \ + static const struct kunit_loc __loc = KUNIT_CURRENT_LOC; \ + struct assert_class __assertion = INITIALIZER; \ + kunit_do_failed_assertion(test, \ + &__loc, \ + assert_type, \ + &__assertion.assert, \ + fmt, \ + ##__VA_ARGS__); \ + } \ } while (0) #define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \ KUNIT_ASSERTION(test, \ + assert_type, \ false, \ kunit_fail_assert, \ - KUNIT_INIT_FAIL_ASSERT_STRUCT(test, assert_type), \ + KUNIT_INIT_FAIL_ASSERT_STRUCT, \ fmt, \ ##__VA_ARGS__) @@ -817,11 +824,10 @@ void kunit_do_assertion(struct kunit *test, fmt, \ ...) 
\ KUNIT_ASSERTION(test, \ + assert_type, \ !!(condition) == !!expected_true, \ kunit_unary_assert, \ - KUNIT_INIT_UNARY_ASSERT_STRUCT(test, \ - assert_type, \ - #condition, \ + KUNIT_INIT_UNARY_ASSERT_STRUCT(#condition, \ expected_true), \ fmt, \ ##__VA_ARGS__) @@ -834,9 +840,6 @@ void kunit_do_assertion(struct kunit *test, fmt, \ ##__VA_ARGS__) -#define KUNIT_TRUE_ASSERTION(test, assert_type, condition) \ - KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, NULL) - #define KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \ KUNIT_UNARY_ASSERTION(test, \ assert_type, \ @@ -845,9 +848,6 @@ void kunit_do_assertion(struct kunit *test, fmt, \ ##__VA_ARGS__) -#define KUNIT_FALSE_ASSERTION(test, assert_type, condition) \ - KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, NULL) - /* * A factory macro for defining the assertions and expectations for the basic * comparisons defined for the built in types. @@ -864,7 +864,7 @@ void kunit_do_assertion(struct kunit *test, */ #define KUNIT_BASE_BINARY_ASSERTION(test, \ assert_class, \ - ASSERT_CLASS_INIT, \ + format_func, \ assert_type, \ left, \ op, \ @@ -872,353 +872,56 @@ void kunit_do_assertion(struct kunit *test, fmt, \ ...) \ do { \ - typeof(left) __left = (left); \ - typeof(right) __right = (right); \ + const typeof(left) __left = (left); \ + const typeof(right) __right = (right); \ + static const struct kunit_binary_assert_text __text = { \ + .operation = #op, \ + .left_text = #left, \ + .right_text = #right, \ + }; \ \ KUNIT_ASSERTION(test, \ + assert_type, \ __left op __right, \ assert_class, \ - ASSERT_CLASS_INIT(test, \ - assert_type, \ - #op, \ - #left, \ - __left, \ - #right, \ - __right), \ + KUNIT_INIT_BINARY_ASSERT_STRUCT(format_func, \ + &__text, \ + __left, \ + __right), \ fmt, \ ##__VA_ARGS__); \ } while (0) -#define KUNIT_BASE_EQ_MSG_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_BINARY_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, ==, right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BASE_NE_MSG_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_BINARY_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, !=, right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BASE_LT_MSG_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_BINARY_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, <, right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BASE_LE_MSG_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_BINARY_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, <=, right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BASE_GT_MSG_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, \ - right, \ - fmt, \ +#define KUNIT_BINARY_INT_ASSERTION(test, \ + assert_type, \ + left, \ + op, \ + right, \ + fmt, \ ...) 
\ KUNIT_BASE_BINARY_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ + kunit_binary_assert, \ + kunit_binary_assert_format, \ assert_type, \ - left, >, right, \ + left, op, right, \ fmt, \ ##__VA_ARGS__) -#define KUNIT_BASE_GE_MSG_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, \ - right, \ - fmt, \ +#define KUNIT_BINARY_PTR_ASSERTION(test, \ + assert_type, \ + left, \ + op, \ + right, \ + fmt, \ ...) \ KUNIT_BASE_BINARY_ASSERTION(test, \ - assert_class, \ - ASSERT_CLASS_INIT, \ - assert_type, \ - left, >=, right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_EQ_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\ - KUNIT_BASE_EQ_MSG_ASSERTION(test, \ - kunit_binary_assert, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_EQ_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_EQ_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_EQ_MSG_ASSERTION(test, \ - kunit_binary_ptr_assert, \ - KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_PTR_EQ_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_NE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\ - KUNIT_BASE_NE_MSG_ASSERTION(test, \ - kunit_binary_assert, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_NE_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_NE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_NE_MSG_ASSERTION(test, \ kunit_binary_ptr_assert, \ - KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_PTR_NE_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_LT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\ - KUNIT_BASE_LT_MSG_ASSERTION(test, \ - kunit_binary_assert, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT, \ + kunit_binary_ptr_assert_format, \ assert_type, \ - left, \ - right, \ + left, op, right, \ fmt, \ ##__VA_ARGS__) -#define KUNIT_BINARY_LT_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_LT_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) 
\ - KUNIT_BASE_LT_MSG_ASSERTION(test, \ - kunit_binary_ptr_assert, \ - KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_PTR_LT_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_LE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\ - KUNIT_BASE_LE_MSG_ASSERTION(test, \ - kunit_binary_assert, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_LE_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_LE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_LE_MSG_ASSERTION(test, \ - kunit_binary_ptr_assert, \ - KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_PTR_LE_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_GT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\ - KUNIT_BASE_GT_MSG_ASSERTION(test, \ - kunit_binary_assert, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_GT_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_GT_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_GT_MSG_ASSERTION(test, \ - kunit_binary_ptr_assert, \ - KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_PTR_GT_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_GE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\ - KUNIT_BASE_GE_MSG_ASSERTION(test, \ - kunit_binary_assert, \ - KUNIT_INIT_BINARY_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_GE_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_GE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BASE_GE_MSG_ASSERTION(test, \ - kunit_binary_ptr_assert, \ - KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_PTR_GE_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - #define KUNIT_BINARY_STR_ASSERTION(test, \ assert_type, \ left, \ @@ -1228,85 +931,43 @@ do { \ ...) 
\ do { \ const char *__left = (left); \ - const char *__right = (right); \ + const char *__right = (right); \ + static const struct kunit_binary_assert_text __text = { \ + .operation = #op, \ + .left_text = #left, \ + .right_text = #right, \ + }; \ \ KUNIT_ASSERTION(test, \ + assert_type, \ strcmp(__left, __right) op 0, \ kunit_binary_str_assert, \ - KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \ - assert_type, \ - #op, \ - #left, \ + KUNIT_INIT_BINARY_ASSERT_STRUCT(kunit_binary_str_assert_format,\ + &__text, \ __left, \ - #right, \ __right), \ fmt, \ ##__VA_ARGS__); \ } while (0) -#define KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BINARY_STR_ASSERTION(test, \ - assert_type, \ - left, ==, right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_STR_EQ_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - -#define KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - fmt, \ - ...) \ - KUNIT_BINARY_STR_ASSERTION(test, \ - assert_type, \ - left, !=, right, \ - fmt, \ - ##__VA_ARGS__) - -#define KUNIT_BINARY_STR_NE_ASSERTION(test, assert_type, left, right) \ - KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \ - assert_type, \ - left, \ - right, \ - NULL) - #define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \ assert_type, \ ptr, \ fmt, \ ...) \ do { \ - typeof(ptr) __ptr = (ptr); \ + const typeof(ptr) __ptr = (ptr); \ \ KUNIT_ASSERTION(test, \ + assert_type, \ !IS_ERR_OR_NULL(__ptr), \ kunit_ptr_not_err_assert, \ - KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, \ - assert_type, \ - #ptr, \ + KUNIT_INIT_PTR_NOT_ERR_STRUCT(#ptr, \ __ptr), \ fmt, \ ##__VA_ARGS__); \ } while (0) -#define KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, assert_type, ptr) \ - KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \ - assert_type, \ - ptr, \ - NULL) - /** * KUNIT_EXPECT_TRUE() - Causes a test failure when the expression is not true. * @test: The test context object. @@ -1319,7 +980,7 @@ do { \ * *expectation failure*. */ #define KUNIT_EXPECT_TRUE(test, condition) \ - KUNIT_TRUE_ASSERTION(test, KUNIT_EXPECTATION, condition) + KUNIT_EXPECT_TRUE_MSG(test, condition, NULL) #define KUNIT_EXPECT_TRUE_MSG(test, condition, fmt, ...) \ KUNIT_TRUE_MSG_ASSERTION(test, \ @@ -1338,7 +999,7 @@ do { \ * KUNIT_EXPECT_TRUE() for more information. */ #define KUNIT_EXPECT_FALSE(test, condition) \ - KUNIT_FALSE_ASSERTION(test, KUNIT_EXPECTATION, condition) + KUNIT_EXPECT_FALSE_MSG(test, condition, NULL) #define KUNIT_EXPECT_FALSE_MSG(test, condition, fmt, ...) \ KUNIT_FALSE_MSG_ASSERTION(test, \ @@ -1359,15 +1020,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_EQ(test, left, right) \ - KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_EQ_MSG(test, left, right, NULL) #define KUNIT_EXPECT_EQ_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_EQ_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, ==, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_PTR_EQ() - Expects that pointers @left and @right are equal. @@ -1381,18 +1041,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_PTR_EQ(test, left, right) \ - KUNIT_BINARY_PTR_EQ_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right) + KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, NULL) #define KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, fmt, ...) 
\ - KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_PTR_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, ==, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_NE() - An expectation that @left and @right are not equal. @@ -1406,15 +1062,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_NE(test, left, right) \ - KUNIT_BINARY_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_NE_MSG(test, left, right, NULL) #define KUNIT_EXPECT_NE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_NE_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, !=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_PTR_NE() - Expects that pointers @left and @right are not equal. @@ -1428,18 +1083,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_PTR_NE(test, left, right) \ - KUNIT_BINARY_PTR_NE_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right) + KUNIT_EXPECT_PTR_NE_MSG(test, left, right, NULL) #define KUNIT_EXPECT_PTR_NE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_PTR_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, !=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_LT() - An expectation that @left is less than @right. @@ -1453,15 +1104,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_LT(test, left, right) \ - KUNIT_BINARY_LT_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_LT_MSG(test, left, right, NULL) #define KUNIT_EXPECT_LT_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_LT_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, <, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_LE() - Expects that @left is less than or equal to @right. @@ -1475,15 +1125,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_LE(test, left, right) \ - KUNIT_BINARY_LE_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_LE_MSG(test, left, right, NULL) #define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_LE_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, <=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_GT() - An expectation that @left is greater than @right. @@ -1497,15 +1146,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_GT(test, left, right) \ - KUNIT_BINARY_GT_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_GT_MSG(test, left, right, NULL) #define KUNIT_EXPECT_GT_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_GT_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, >, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_GE() - Expects that @left is greater than or equal to @right. @@ -1519,15 +1167,14 @@ do { \ * more information. */ #define KUNIT_EXPECT_GE(test, left, right) \ - KUNIT_BINARY_GE_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_GE_MSG(test, left, right, NULL) #define KUNIT_EXPECT_GE_MSG(test, left, right, fmt, ...) 
\ - KUNIT_BINARY_GE_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, >=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_STREQ() - Expects that strings @left and @right are equal. @@ -1541,15 +1188,14 @@ do { \ * for more information. */ #define KUNIT_EXPECT_STREQ(test, left, right) \ - KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_STREQ_MSG(test, left, right, NULL) #define KUNIT_EXPECT_STREQ_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_STR_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, ==, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_STRNEQ() - Expects that strings @left and @right are not equal. @@ -1563,15 +1209,14 @@ do { \ * for more information. */ #define KUNIT_EXPECT_STRNEQ(test, left, right) \ - KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right) + KUNIT_EXPECT_STRNEQ_MSG(test, left, right, NULL) #define KUNIT_EXPECT_STRNEQ_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \ - KUNIT_EXPECTATION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_STR_ASSERTION(test, \ + KUNIT_EXPECTATION, \ + left, !=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_EXPECT_NOT_ERR_OR_NULL() - Expects that @ptr is not null and not err. @@ -1584,7 +1229,7 @@ do { \ * more information. */ #define KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr) \ - KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_EXPECTATION, ptr) + KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL) #define KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \ @@ -1608,7 +1253,7 @@ do { \ * this is otherwise known as an *assertion failure*. */ #define KUNIT_ASSERT_TRUE(test, condition) \ - KUNIT_TRUE_ASSERTION(test, KUNIT_ASSERTION, condition) + KUNIT_ASSERT_TRUE_MSG(test, condition, NULL) #define KUNIT_ASSERT_TRUE_MSG(test, condition, fmt, ...) \ KUNIT_TRUE_MSG_ASSERTION(test, \ @@ -1627,7 +1272,7 @@ do { \ * (see KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_FALSE(test, condition) \ - KUNIT_FALSE_ASSERTION(test, KUNIT_ASSERTION, condition) + KUNIT_ASSERT_FALSE_MSG(test, condition, NULL) #define KUNIT_ASSERT_FALSE_MSG(test, condition, fmt, ...) \ KUNIT_FALSE_MSG_ASSERTION(test, \ @@ -1647,15 +1292,14 @@ do { \ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_EQ(test, left, right) \ - KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_EQ_MSG(test, left, right, NULL) #define KUNIT_ASSERT_EQ_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_EQ_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, ==, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_PTR_EQ() - Asserts that pointers @left and @right are equal. @@ -1668,15 +1312,14 @@ do { \ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_PTR_EQ(test, left, right) \ - KUNIT_BINARY_PTR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, NULL) #define KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, fmt, ...) 
\ - KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_PTR_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, ==, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_NE() - An assertion that @left and @right are not equal. @@ -1689,15 +1332,14 @@ do { \ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_NE(test, left, right) \ - KUNIT_BINARY_NE_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_NE_MSG(test, left, right, NULL) #define KUNIT_ASSERT_NE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_NE_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, !=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_PTR_NE() - Asserts that pointers @left and @right are not equal. * @@ -1711,15 +1353,14 @@ do { \ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_PTR_NE(test, left, right) \ - KUNIT_BINARY_PTR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_PTR_NE_MSG(test, left, right, NULL) #define KUNIT_ASSERT_PTR_NE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_PTR_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, !=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_LT() - An assertion that @left is less than @right. * @test: The test context object. @@ -1732,15 +1373,14 @@ do { \ * is not met. */ #define KUNIT_ASSERT_LT(test, left, right) \ - KUNIT_BINARY_LT_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_LT_MSG(test, left, right, NULL) #define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_LT_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, <, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_LE() - An assertion that @left is less than or equal to @right. * @test: The test context object. @@ -1753,15 +1393,14 @@ do { \ * KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_LE(test, left, right) \ - KUNIT_BINARY_LE_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_LE_MSG(test, left, right, NULL) #define KUNIT_ASSERT_LE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_LE_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, <=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_GT() - An assertion that @left is greater than @right. @@ -1775,15 +1414,14 @@ do { \ * is not met. */ #define KUNIT_ASSERT_GT(test, left, right) \ - KUNIT_BINARY_GT_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_GT_MSG(test, left, right, NULL) #define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_GT_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, >, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_GE() - Assertion that @left is greater than or equal to @right. @@ -1797,15 +1435,14 @@ do { \ * is not met. 
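 *
 * Example (illustrative only): KUNIT_ASSERT_GE(test, len, sizeof(*hdr));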
*/ #define KUNIT_ASSERT_GE(test, left, right) \ - KUNIT_BINARY_GE_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_GE_MSG(test, left, right, NULL) #define KUNIT_ASSERT_GE_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_GE_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_INT_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, >=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_STREQ() - An assertion that strings @left and @right are equal. @@ -1818,15 +1455,14 @@ do { \ * assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_STREQ(test, left, right) \ - KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_STREQ_MSG(test, left, right, NULL) #define KUNIT_ASSERT_STREQ_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_STR_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, ==, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_STRNEQ() - Expects that strings @left and @right are not equal. @@ -1840,15 +1476,14 @@ do { \ * for more information. */ #define KUNIT_ASSERT_STRNEQ(test, left, right) \ - KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right) + KUNIT_ASSERT_STRNEQ_MSG(test, left, right, NULL) #define KUNIT_ASSERT_STRNEQ_MSG(test, left, right, fmt, ...) \ - KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \ - KUNIT_ASSERTION, \ - left, \ - right, \ - fmt, \ - ##__VA_ARGS__) + KUNIT_BINARY_STR_ASSERTION(test, \ + KUNIT_ASSERTION, \ + left, !=, right, \ + fmt, \ + ##__VA_ARGS__) /** * KUNIT_ASSERT_NOT_ERR_OR_NULL() - Assertion that @ptr is not null and not err. @@ -1861,7 +1496,7 @@ do { \ * KUNIT_ASSERT_TRUE()) when the assertion is not met. */ #define KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr) \ - KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_ASSERTION, ptr) + KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, NULL) #define KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \ diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index f9ed4c171d7b..20193416d214 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -29,6 +29,11 @@ struct kvm_pmu { struct irq_work overflow_work; }; +struct arm_pmu_entry { + struct list_head entry; + struct arm_pmu *arm_pmu; +}; + DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available); static __always_inline bool kvm_arm_support_pmu_v3(void) diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 5b58bd2fe088..68b96c3826c3 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -13,14 +13,11 @@ #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) #define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) +#define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1) -#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_1 -/* - * We need the KVM pointer independently from the vcpu as we can call - * this from HYP, and need to apply kern_hyp_va on it... 
- */ -static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) +static inline int kvm_psci_version(struct kvm_vcpu *vcpu) { /* * Our PSCI implementation stays the same across versions from diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 6274758648e3..d7136d13aa44 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -526,7 +526,7 @@ acpi_status acpi_release_memory(acpi_handle handle, struct resource *res, int acpi_resources_are_enforced(void); #ifdef CONFIG_HIBERNATION -void __init acpi_check_s4_hw_signature(int check); +extern int acpi_check_s4_hw_signature; #endif #ifdef CONFIG_PM_SLEEP @@ -580,6 +580,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); extern bool osc_sb_apei_support_acked; extern bool osc_pc_lpi_support_confirmed; extern bool osc_sb_native_usb4_support_confirmed; +extern bool osc_sb_cppc_not_supported; /* USB4 Capabilities */ #define OSC_USB_USB3_TUNNELING 0x00000001 @@ -691,7 +692,7 @@ int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); int acpi_device_modalias(struct device *, char *, int); struct platform_device *acpi_create_platform_device(struct acpi_device *, - struct property_entry *); + const struct property_entry *); #define ACPI_PTR(_ptr) (_ptr) static inline void acpi_device_set_enumerated(struct acpi_device *adev) @@ -930,7 +931,7 @@ static inline int acpi_device_modalias(struct device *dev, static inline struct platform_device * acpi_create_platform_device(struct acpi_device *adev, - struct property_entry *properties) + const struct property_entry *properties) { return NULL; } @@ -1023,7 +1024,15 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); - +#ifdef CONFIG_X86 +struct acpi_s2idle_dev_ops { + struct list_head list_node; + void (*prepare)(void); + void (*restore)(void); +}; +int acpi_register_lps0_dev(struct acpi_s2idle_dev_ops *arg); +void acpi_unregister_lps0_dev(struct acpi_s2idle_dev_ops *arg); +#endif /* CONFIG_X86 */ #ifndef CONFIG_IA64 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else diff --git a/include/linux/acpi_agdi.h b/include/linux/acpi_agdi.h new file mode 100644 index 000000000000..f477f0b452fa --- /dev/null +++ b/include/linux/acpi_agdi.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ACPI_AGDI_H__ +#define __ACPI_AGDI_H__ + +#include <linux/acpi.h> + +#ifdef CONFIG_ACPI_AGDI +void __init acpi_agdi_init(void); +#else +static inline void acpi_agdi_init(void) {} +#endif +#endif /* __ACPI_AGDI_H__ */ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 6c7f47846971..6562f543c3e0 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -117,30 +117,9 @@ void amba_device_put(struct amba_device *); int amba_device_add(struct amba_device *, struct resource *); int amba_device_register(struct amba_device *, struct resource *); void amba_device_unregister(struct amba_device *); -struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); int amba_request_regions(struct amba_device *, const char *); void amba_release_regions(struct amba_device *); -static inline int amba_pclk_enable(struct amba_device *dev) -{ - return clk_enable(dev->pclk); -} - -static inline void amba_pclk_disable(struct amba_device *dev) -{ - clk_disable(dev->pclk); -} - -static inline int amba_pclk_prepare(struct amba_device *dev) -{ - return 
clk_prepare(dev->pclk); -} - -static inline void amba_pclk_unprepare(struct amba_device *dev) -{ - clk_unprepare(dev->pclk); -} - /* Some drivers don't use the struct amba_device */ #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index cce6136b300a..58cbe18d825c 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -11,6 +11,10 @@ void topology_normalize_cpu_scale(void); int topology_update_cpu_topology(void); +#ifdef CONFIG_ACPI_CPPC_LIB +void topology_init_cpu_capacity_cppc(void); +#endif + struct device_node; bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu); diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 63ccb5252190..220c8c60e021 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -92,6 +92,11 @@ ARM_SMCCC_SMC_32, \ 0, 0x7fff) +#define ARM_SMCCC_ARCH_WORKAROUND_3 \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_32, \ + 0, 0x3fff) + #define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ ARM_SMCCC_SMC_32, \ diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 0a241c5c911d..14dc461b0e82 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -46,9 +46,11 @@ int sdei_unregister_ghes(struct ghes *ghes); /* For use by arch code when CPU hotplug notifiers are not appropriate. */ int sdei_mask_local_cpu(void); int sdei_unmask_local_cpu(void); +void __init sdei_init(void); #else static inline int sdei_mask_local_cpu(void) { return 0; } static inline int sdei_unmask_local_cpu(void) { return 0; } +static inline void sdei_init(void) { } #endif /* CONFIG_ARM_SDE_INTERFACE */ diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h index a3dba31df01e..6db58d180866 100644 --- a/include/linux/atomic/atomic-arch-fallback.h +++ b/include/linux/atomic/atomic-arch-fallback.h @@ -151,7 +151,16 @@ static __always_inline int arch_atomic_read_acquire(const atomic_t *v) { - return smp_load_acquire(&(v)->counter); + int ret; + + if (__native_word(atomic_t)) { + ret = smp_load_acquire(&(v)->counter); + } else { + ret = arch_atomic_read(v); + __atomic_acquire_fence(); + } + + return ret; } #define arch_atomic_read_acquire arch_atomic_read_acquire #endif @@ -160,7 +169,12 @@ arch_atomic_read_acquire(const atomic_t *v) static __always_inline void arch_atomic_set_release(atomic_t *v, int i) { - smp_store_release(&(v)->counter, i); + if (__native_word(atomic_t)) { + smp_store_release(&(v)->counter, i); + } else { + __atomic_release_fence(); + arch_atomic_set(v, i); + } } #define arch_atomic_set_release arch_atomic_set_release #endif @@ -1258,7 +1272,16 @@ arch_atomic_dec_if_positive(atomic_t *v) static __always_inline s64 arch_atomic64_read_acquire(const atomic64_t *v) { - return smp_load_acquire(&(v)->counter); + s64 ret; + + if (__native_word(atomic64_t)) { + ret = smp_load_acquire(&(v)->counter); + } else { + ret = arch_atomic64_read(v); + __atomic_acquire_fence(); + } + + return ret; } #define arch_atomic64_read_acquire arch_atomic64_read_acquire #endif @@ -1267,7 +1290,12 @@ arch_atomic64_read_acquire(const atomic64_t *v) static __always_inline void arch_atomic64_set_release(atomic64_t *v, s64 i) { - smp_store_release(&(v)->counter, i); + if (__native_word(atomic64_t)) { + smp_store_release(&(v)->counter, i); + } else { + __atomic_release_fence(); + arch_atomic64_set(v, 
i); + } } #define arch_atomic64_set_release arch_atomic64_set_release #endif @@ -2358,4 +2386,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v) #endif #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// cca554917d7ea73d5e3e7397dd70c484cad9b2c4 +// 8e2cc06bc0d2c0967d2f8424762bd48555ee40ae diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 993c5628a726..e863c88df95f 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -207,14 +207,6 @@ struct backing_dev_info { #endif }; -enum { - BLK_RW_ASYNC = 0, - BLK_RW_SYNC = 1, -}; - -void clear_bdi_congested(struct backing_dev_info *bdi, int sync); -void set_bdi_congested(struct backing_dev_info *bdi, int sync); - struct wb_lock_cookie { bool locked; unsigned long flags; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 483979c1b9f4..87ce24d238f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -135,13 +135,6 @@ static inline bool writeback_in_progress(struct bdi_writeback *wb) struct backing_dev_info *inode_to_bdi(struct inode *inode); -static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) -{ - return wb->congested & cong_bits; -} - -long congestion_wait(int sync, long timeout); - static inline bool mapping_can_writeback(struct address_space *mapping) { return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK; @@ -162,7 +155,6 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, gfp_t gfp); void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct blkcg *blkcg); -int inode_congested(struct inode *inode, int cong_bits); /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode @@ -390,50 +382,8 @@ static inline void wb_blkcg_offline(struct blkcg *blkcg) { } -static inline int inode_congested(struct inode *inode, int cong_bits) -{ - return wb_congested(&inode_to_bdi(inode)->wb, cong_bits); -} - #endif /* CONFIG_CGROUP_WRITEBACK */ -static inline int inode_read_congested(struct inode *inode) -{ - return inode_congested(inode, 1 << WB_sync_congested); -} - -static inline int inode_write_congested(struct inode *inode) -{ - return inode_congested(inode, 1 << WB_async_congested); -} - -static inline int inode_rw_congested(struct inode *inode) -{ - return inode_congested(inode, (1 << WB_sync_congested) | - (1 << WB_async_congested)); -} - -static inline int bdi_congested(struct backing_dev_info *bdi, int cong_bits) -{ - return wb_congested(&bdi->wb, cong_bits); -} - -static inline int bdi_read_congested(struct backing_dev_info *bdi) -{ - return bdi_congested(bdi, 1 << WB_sync_congested); -} - -static inline int bdi_write_congested(struct backing_dev_info *bdi) -{ - return bdi_congested(bdi, 1 << WB_async_congested); -} - -static inline int bdi_rw_congested(struct backing_dev_info *bdi) -{ - return bdi_congested(bdi, (1 << WB_sync_congested) | - (1 << WB_async_congested)); -} - const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index d35b9206096d..e3314f746bfa 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -3,6 +3,7 @@ #define LINUX_BCMA_DRIVER_CC_H_ #include <linux/platform_device.h> +#include <linux/platform_data/brcmnand.h> #include <linux/gpio.h> /** ChipCommon core registers. 
**/ @@ -599,6 +600,10 @@ struct bcma_sflash { #ifdef CONFIG_BCMA_NFLASH struct bcma_nflash { + /* Must be the first member for the brcmnand driver to + * de-reference that structure. + */ + struct brcmnand_platform_data brcmnand_info; bool present; bool boot; /* This is the flash the SoC boots from */ }; diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 049cf9421d83..3dc20c4f394c 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -8,6 +8,7 @@ #include <uapi/linux/binfmts.h> struct filename; +struct coredump_params; #define CORENAME_MAX_SIZE 128 @@ -77,18 +78,6 @@ struct linux_binprm { #define BINPRM_FLAGS_PRESERVE_ARGV0_BIT 3 #define BINPRM_FLAGS_PRESERVE_ARGV0 (1 << BINPRM_FLAGS_PRESERVE_ARGV0_BIT) -/* Function parameter for binfmt->coredump */ -struct coredump_params { - const kernel_siginfo_t *siginfo; - struct pt_regs *regs; - struct file *file; - unsigned long limit; - unsigned long mm_flags; - loff_t written; - loff_t pos; - loff_t to_skip; -}; - /* * This structure defines the functions that are used to load the binary formats that * linux accepts. @@ -98,8 +87,10 @@ struct linux_binfmt { struct module *module; int (*load_binary)(struct linux_binprm *); int (*load_shlib)(struct file *); +#ifdef CONFIG_COREDUMP int (*core_dump)(struct coredump_params *cprm); unsigned long min_coredump; /* minimal dump size */ +#endif } __randomize_layout; extern void __register_binfmt(struct linux_binfmt *fmt, int insert); diff --git a/include/linux/bio.h b/include/linux/bio.h index 4c21f6e69e18..278cc81cc1e7 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -65,7 +65,6 @@ static inline bool bio_no_advance_iter(const struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || - bio_op(bio) == REQ_OP_WRITE_SAME || bio_op(bio) == REQ_OP_WRITE_ZEROES; } @@ -186,8 +185,6 @@ static inline unsigned bio_segments(struct bio *bio) case REQ_OP_SECURE_ERASE: case REQ_OP_WRITE_ZEROES: return 0; - case REQ_OP_WRITE_SAME: - return 1; default: break; } diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 6093fa6db260..c9be1657f03d 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -19,6 +19,9 @@ * * Example: * + * #include <linux/bitfield.h> + * #include <linux/bits.h> + * * #define REG_FIELD_A GENMASK(6, 0) * #define REG_FIELD_B BIT(7) * #define REG_FIELD_C GENMASK(15, 8) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 3a41d50b85d3..7aa5c54901a9 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -917,8 +917,7 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq) } #define queue_for_each_hw_ctx(q, hctx, i) \ - for ((i) = 0; (i) < (q)->nr_hw_queues && \ - ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++) + xa_for_each(&(q)->hctx_table, (i), (hctx)) #define hctx_for_each_ctx(hctx, ctx, i) \ for ((i) = 0; (i) < (hctx)->nr_ctx && \ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ba8cfa57255f..dd0763a1c674 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -323,7 +323,8 @@ enum { BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion * of this bio.
*/ BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ - BIO_TRACKED, /* set if bio goes through the rq_qos path */ + BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */ + BIO_QOS_MERGED, /* but went through rq_qos merge path */ BIO_REMAPPED, BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */ @@ -360,8 +361,6 @@ enum req_opf { REQ_OP_DISCARD = 3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, - /* write the same sector many times */ - REQ_OP_WRITE_SAME = 7, /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = 9, /* Open a zone */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 226836ccd060..60d016138997 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -248,7 +248,6 @@ struct queue_limits { unsigned int io_opt; unsigned int max_discard_sectors; unsigned int max_hw_discard_sectors; - unsigned int max_write_same_sectors; unsigned int max_write_zeroes_sectors; unsigned int max_zone_append_sectors; unsigned int discard_granularity; @@ -355,7 +354,7 @@ struct request_queue { unsigned int queue_depth; /* hw dispatch queues */ - struct blk_mq_hw_ctx **queue_hw_ctx; + struct xarray hctx_table; unsigned int nr_hw_queues; /* @@ -912,9 +911,6 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, return min(q->limits.max_discard_sectors, UINT_MAX >> SECTOR_SHIFT); - if (unlikely(op == REQ_OP_WRITE_SAME)) - return q->limits.max_write_same_sectors; - if (unlikely(op == REQ_OP_WRITE_ZEROES)) return q->limits.max_write_zeroes_sectors; @@ -957,8 +953,6 @@ extern void blk_queue_max_discard_segments(struct request_queue *, extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); -extern void blk_queue_max_write_same_sectors(struct request_queue *q, - unsigned int max_write_same_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); @@ -1096,9 +1090,6 @@ static inline long nr_blockdev_pages(void) extern void blk_io_schedule(void); -extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, - sector_t nr_sects, gfp_t gfp_mask, struct page *page); - #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, @@ -1325,16 +1316,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev) return q->limits.discard_alignment; } -static inline unsigned int bdev_write_same(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - if (q) - return q->limits.max_write_same_sectors; - - return 0; -} - static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); @@ -1454,6 +1435,8 @@ enum blk_unique_id { struct block_device_operations { void (*submit_bio)(struct bio *bio); + int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob, + unsigned int flags); int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index b525d8cdc25b..88a51b242adc 100644 --- a/include/linux/bpf-cgroup.h +++ 
b/include/linux/bpf-cgroup.h @@ -8,6 +8,7 @@ #include <linux/jump_label.h> #include <linux/percpu.h> #include <linux/rbtree.h> +#include <net/sock.h> #include <uapi/linux/bpf.h> struct sock; @@ -165,11 +166,23 @@ int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, void *value, u64 flags); +/* Opportunistic check to see whether we have any BPF program attached */ +static inline bool cgroup_bpf_sock_enabled(struct sock *sk, + enum cgroup_bpf_attach_type type) +{ + struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); + struct bpf_prog_array *array; + + array = rcu_access_pointer(cgrp->bpf.effective[type]); + return array != &bpf_empty_prog_array.hdr; +} + /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \ + if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \ + cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ CGROUP_INET_INGRESS); \ \ @@ -181,7 +194,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, int __ret = 0; \ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ - if (sk_fullsock(__sk)) \ + if (sk_fullsock(__sk) && \ + cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ CGROUP_INET_EGRESS); \ } \ @@ -347,7 +361,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, kernel_optval) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \ + cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ optname, optval, \ optlen, \ @@ -367,7 +382,8 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, max_optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \ + cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \ tcp_bpf_bypass_getsockopt, \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d0ad379d1e62..bdb5298735ce 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -194,6 +194,17 @@ struct bpf_map { struct work_struct work; struct mutex freeze_mutex; atomic64_t writecnt; + /* 'Ownership' of program-containing map is claimed by the first program + * that is going to use this map or by the first program whose FD is + * stored in the map to make sure that all callers and callees have the + * same prog type, JITed flag and xdp_has_frags flag. + */ + struct { + spinlock_t lock; + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + } owner; }; static inline bool map_value_has_spin_lock(const struct bpf_map *map) @@ -320,7 +331,18 @@ enum bpf_type_flag { */ MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS), - __BPF_TYPE_LAST_FLAG = MEM_ALLOC, + /* MEM is in user address space. */ + MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), + + /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged + * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed.
In + * order to drop this tag, it must be passed into bpf_per_cpu_ptr() + * or bpf_this_cpu_ptr(), which will return the pointer corresponding + * to the specified cpu. + */ + MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS), + + __BPF_TYPE_LAST_FLAG = MEM_PERCPU, }; /* Max number of base types. */ @@ -502,7 +524,6 @@ enum bpf_reg_type { */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_BUF, /* reg points to a read/write buffer */ - PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */ PTR_TO_FUNC, /* reg points to a bpf program function */ __BPF_REG_TYPE_MAX, @@ -576,8 +597,7 @@ struct bpf_verifier_ops { const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, - u32 *next_btf_id); - bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner); + u32 *next_btf_id, enum bpf_type_flag *flag); }; struct bpf_prog_offload_ops { @@ -832,8 +852,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym); -int bpf_jit_charge_modmem(u32 pages); -void bpf_jit_uncharge_modmem(u32 pages); +int bpf_jit_charge_modmem(u32 size); +void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); #else static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, @@ -938,6 +958,8 @@ struct bpf_prog_aux { bool func_proto_unreliable; bool sleepable; bool tail_call_reachable; + bool xdp_has_frags; + bool use_bpf_prog_pack; struct hlist_node tramp_hlist; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; @@ -998,16 +1020,6 @@ struct bpf_prog_aux { }; struct bpf_array_aux { - /* 'Ownership' of prog array is claimed by the first program that - * is going to use this map or by the first program which FD is - * stored in the map to make sure that all callers and callees have - * the same prog type and JITed flag. - */ - struct { - spinlock_t lock; - enum bpf_prog_type type; - bool jited; - } owner; /* Programs with direct jumps into programs part of this array. 
*/ struct list_head poke_progs; struct bpf_map *map; @@ -1182,7 +1194,14 @@ struct bpf_event_entry { struct rcu_head rcu; }; -bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); +static inline bool map_type_contains_progs(struct bpf_map *map) +{ + return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || + map->map_type == BPF_MAP_TYPE_DEVMAP || + map->map_type == BPF_MAP_TYPE_CPUMAP; +} + +bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); int bpf_prog_calc_tag(struct bpf_prog *fp); const struct bpf_func_proto *bpf_get_trace_printk_proto(void); @@ -1224,6 +1243,19 @@ struct bpf_prog_array { struct bpf_prog_array_item items[]; }; +struct bpf_empty_prog_array { + struct bpf_prog_array hdr; + struct bpf_prog *null_prog; +}; + +/* To avoid allocating an empty bpf_prog_array for cgroups that + * don't have a bpf program attached, use one global 'bpf_empty_prog_array'. + * It will not be modified by the caller of bpf_prog_array_alloc() + * (since the caller requested prog_cnt == 0); + * that pointer should be 'freed' by bpf_prog_array_free(). + */ +extern struct bpf_empty_prog_array bpf_empty_prog_array; + struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); void bpf_prog_array_free(struct bpf_prog_array *progs); int bpf_prog_array_length(struct bpf_prog_array *progs); @@ -1250,6 +1282,7 @@ struct bpf_run_ctx {}; struct bpf_cg_run_ctx { struct bpf_run_ctx run_ctx; const struct bpf_prog_array_item *prog_item; + int retval; }; struct bpf_trace_run_ctx { @@ -1282,19 +1315,19 @@ static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); -static __always_inline u32 +static __always_inline int BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, const void *ctx, bpf_prog_run_fn run_prog, - u32 *ret_flags) + int retval, u32 *ret_flags) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_cg_run_ctx run_ctx; - u32 ret = 1; u32 func_ret; + run_ctx.retval = retval; migrate_disable(); rcu_read_lock(); array = rcu_dereference(array_rcu); @@ -1303,27 +1336,29 @@ BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; func_ret = run_prog(prog, ctx); - ret &= (func_ret & 1); + if (!(func_ret & 1) && !IS_ERR_VALUE((long)run_ctx.retval)) + run_ctx.retval = -EPERM; *(ret_flags) |= (func_ret >> 1); item++; } bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); migrate_enable(); - return ret; + return run_ctx.retval; } -static __always_inline u32 +static __always_inline int BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, - const void *ctx, bpf_prog_run_fn run_prog) + const void *ctx, bpf_prog_run_fn run_prog, + int retval) { const struct bpf_prog_array_item *item; const struct bpf_prog *prog; const struct bpf_prog_array *array; struct bpf_run_ctx *old_run_ctx; struct bpf_cg_run_ctx run_ctx; - u32 ret = 1; + run_ctx.retval = retval; migrate_disable(); rcu_read_lock(); array = rcu_dereference(array_rcu); @@ -1331,13 +1366,14 @@ BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); while ((prog = READ_ONCE(item->prog))) { run_ctx.prog_item = item; - ret &= run_prog(prog, ctx); + if (!run_prog(prog, ctx) && !IS_ERR_VALUE((long)run_ctx.retval)) + run_ctx.retval = -EPERM; item++; }
bpf_reset_run_ctx(old_run_ctx); rcu_read_unlock(); migrate_enable(); - return ret; + return run_ctx.retval; } static __always_inline u32 @@ -1390,19 +1426,21 @@ out: * 0: NET_XMIT_SUCCESS skb should be transmitted * 1: NET_XMIT_DROP skb should be dropped and cn * 2: NET_XMIT_CN skb should be transmitted and cn - * 3: -EPERM skb should be dropped + * 3: -err skb should be dropped */ #define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ ({ \ u32 _flags = 0; \ bool _cn; \ u32 _ret; \ - _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ + _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, 0, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ - if (_ret) \ + if (_ret && !IS_ERR_VALUE((long)_ret)) \ + _ret = -EFAULT; \ + if (!_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ else \ - _ret = (_cn ? NET_XMIT_DROP : -EPERM); \ + _ret = (_cn ? NET_XMIT_DROP : _ret); \ _ret; \ }) @@ -1723,7 +1761,6 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); -bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner); bool btf_ctx_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info); @@ -1753,7 +1790,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size, int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, int off, int size, enum bpf_access_type atype, - u32 *next_btf_id); + u32 *next_btf_id, enum bpf_type_flag *flag); bool btf_struct_ids_match(struct bpf_verifier_log *log, const struct btf *btf, u32 id, int off, const struct btf *need_btf, u32 need_type_id); @@ -1792,6 +1829,11 @@ struct bpf_core_ctx { int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, int relo_idx, void *insn); +static inline bool unprivileged_ebpf_enabled(void) +{ + return !sysctl_unprivileged_bpf_disabled; +} + #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -1861,11 +1903,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags) return -EOPNOTSUPP; } -static inline bool dev_map_can_have_prog(struct bpf_map *map) -{ - return false; -} - static inline void __dev_flush(void) { } @@ -1929,11 +1966,6 @@ static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, return -EOPNOTSUPP; } -static inline bool cpu_map_prog_allowed(struct bpf_map *map) -{ - return false; -} - static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) { @@ -1975,12 +2007,6 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, return -ENOTSUPP; } -static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, - struct module *owner) -{ - return false; -} - static inline void bpf_map_put(struct bpf_map *map) { } @@ -2011,6 +2037,12 @@ bpf_jit_find_kfunc_model(const struct bpf_prog *prog, { return NULL; } + +static inline bool unprivileged_ebpf_enabled(void) +{ + return false; +} + #endif /* CONFIG_BPF_SYSCALL */ void __bpf_free_used_btfs(struct bpf_prog_aux *aux, @@ -2075,6 +2107,9 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog, int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); +int sock_map_bpf_prog_query(const union bpf_attr 
*attr, + union bpf_attr __user *uattr); + void sock_map_unhash(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); #else @@ -2128,6 +2163,12 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void { return -EOPNOTSUPP; } + +static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} #endif /* CONFIG_BPF_SYSCALL */ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ @@ -2226,6 +2267,7 @@ extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; extern const struct bpf_func_proto bpf_find_vma_proto; extern const struct bpf_func_proto bpf_loop_proto; extern const struct bpf_func_proto bpf_strncmp_proto; +extern const struct bpf_func_proto bpf_copy_from_user_task_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); @@ -2338,6 +2380,8 @@ enum bpf_text_poke_type { int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, void *addr1, void *addr2); +void *bpf_arch_text_copy(void *dst, void *src, size_t len); + struct btf_id_set; bool btf_id_set_contains(const struct btf_id_set *set, u32 id); diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index 37b3906af8b1..493e63258497 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -154,16 +154,17 @@ void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem); struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value, - bool charge_mem); + bool charge_mem, gfp_t gfp_flags); int bpf_local_storage_alloc(void *owner, struct bpf_local_storage_map *smap, - struct bpf_local_storage_elem *first_selem); + struct bpf_local_storage_elem *first_selem, + gfp_t gfp_flags); struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags); + void *value, u64 map_flags, gfp_t gfp_flags); void bpf_local_storage_free_rcu(struct rcu_head *rcu); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 48a91c51c015..3e24ad0c4b3c 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -140,3 +140,4 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) #ifdef CONFIG_PERF_EVENTS BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif +BPF_LINK_TYPE(BPF_LINK_TYPE_KPROBE_MULTI, kprobe_multi) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e9993172f892..c1fc4af47f69 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -521,6 +521,12 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); int check_ptr_off_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno); +int check_func_arg_reg_off(struct bpf_verifier_env *env, + const struct bpf_reg_state *reg, int regno, + enum bpf_arg_type arg_type, + bool is_release_func); +int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, + u32 regno); int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno, u32 mem_size); @@ -564,4 +570,9 @@ static inline u32 type_flag(u32 type) return type & ~BPF_BASE_TYPE_MASK; } +static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog) +{ + return prog->aux->dst_prog ? 
prog->aux->dst_prog->type : prog->type; +} + #endif /* _LINUX_BPF_VERIFIER_H */ diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 6b211323a489..9e97ced2896d 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -10,7 +10,6 @@ #define _BLK_BSG_ #include <linux/blkdev.h> -#include <scsi/scsi_request.h> struct bsg_job; struct request; diff --git a/include/linux/btf.h b/include/linux/btf.h index 0c74348cbc9d..36bc09b8e890 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -12,11 +12,33 @@ #define BTF_TYPE_EMIT(type) ((void)(type *)0) #define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val) +enum btf_kfunc_type { + BTF_KFUNC_TYPE_CHECK, + BTF_KFUNC_TYPE_ACQUIRE, + BTF_KFUNC_TYPE_RELEASE, + BTF_KFUNC_TYPE_RET_NULL, + BTF_KFUNC_TYPE_MAX, +}; + struct btf; struct btf_member; struct btf_type; union bpf_attr; struct btf_show; +struct btf_id_set; + +struct btf_kfunc_id_set { + struct module *owner; + union { + struct { + struct btf_id_set *check_set; + struct btf_id_set *acquire_set; + struct btf_id_set *release_set; + struct btf_id_set *ret_null_set; + }; + struct btf_id_set *sets[BTF_KFUNC_TYPE_MAX]; + }; +}; extern const struct file_operations btf_fops; @@ -216,6 +238,11 @@ static inline bool btf_type_is_var(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_VAR; } +static inline bool btf_type_is_type_tag(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG; +} + /* union is only a special case of struct: * all its offsetof(member) == 0 */ @@ -300,6 +327,11 @@ static inline const struct btf_var_secinfo *btf_type_var_secinfo( return (const struct btf_var_secinfo *)(t + 1); } +static inline struct btf_param *btf_params(const struct btf_type *t) +{ + return (struct btf_param *)(t + 1); +} + #ifdef CONFIG_BPF_SYSCALL struct bpf_prog; @@ -307,6 +339,11 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); +bool btf_kfunc_id_set_contains(const struct btf *btf, + enum bpf_prog_type prog_type, + enum btf_kfunc_type type, u32 kfunc_btf_id); +int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, + const struct btf_kfunc_id_set *s); #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) @@ -318,50 +355,18 @@ static inline const char *btf_name_by_offset(const struct btf *btf, { return NULL; } -#endif - -struct kfunc_btf_id_set { - struct list_head list; - struct btf_id_set *set; - struct module *owner; -}; - -struct kfunc_btf_id_list { - struct list_head list; - struct mutex mutex; -}; - -#ifdef CONFIG_DEBUG_INFO_BTF_MODULES -void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s); -void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s); -bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id, - struct module *owner); - -extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list; -extern struct kfunc_btf_id_list prog_test_kfunc_list; -#else -static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s) -{ -} -static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l, - struct kfunc_btf_id_set *s) +static inline bool btf_kfunc_id_set_contains(const struct btf *btf, + enum bpf_prog_type prog_type, + enum btf_kfunc_type type, + u32 kfunc_btf_id) { 
+ return false; } -static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, - u32 kfunc_id, struct module *owner) +static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, + const struct btf_kfunc_id_set *s) { - return false; + return 0; } - -static struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list __maybe_unused; -static struct kfunc_btf_id_list prog_test_kfunc_list __maybe_unused; #endif -#define DEFINE_KFUNC_BTF_ID_SET(set, name) \ - struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \ - THIS_MODULE } - #endif diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 919c0fde1c51..bc5d9cc34e4c 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -11,6 +11,7 @@ struct btf_id_set { #ifdef CONFIG_DEBUG_INFO_BTF #include <linux/compiler.h> /* for __PASTE */ +#include <linux/compiler_attributes.h> /* for __maybe_unused */ /* * Following macros help to define lists of BTF IDs placed @@ -146,14 +147,14 @@ extern struct btf_id_set name; #else -#define BTF_ID_LIST(name) static u32 name[5]; +#define BTF_ID_LIST(name) static u32 __maybe_unused name[5]; #define BTF_ID(prefix, name) #define BTF_ID_UNUSED -#define BTF_ID_LIST_GLOBAL(name, n) u32 name[n]; -#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1]; -#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1]; -#define BTF_SET_START(name) static struct btf_id_set name = { 0 }; -#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 }; +#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n]; +#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1]; +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1]; +#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 }; +#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 }; #define BTF_SET_END(name) #endif /* CONFIG_DEBUG_INFO_BTF */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 36f33685c8c0..bcb4fe9b8575 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -144,6 +144,7 @@ BUFFER_FNS(Defer_Completion, defer_completion) ((struct buffer_head *)page_private(page)); \ }) #define page_has_buffers(page) PagePrivate(page) +#define folio_buffers(folio) folio_get_private(folio) void buffer_check_dirty_writeback(struct page *page, bool *dirty, bool *writeback); @@ -216,16 +217,14 @@ extern int buffer_heads_over_limit; * Generic address_space_operations implementations for buffer_head-backed * address_spaces. 
*/ -void block_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); +void block_invalidate_folio(struct folio *folio, size_t offset, size_t length); int block_write_full_page(struct page *page, get_block_t *get_block, struct writeback_control *wbc); int __block_write_full_page(struct inode *inode, struct page *page, get_block_t *get_block, struct writeback_control *wbc, bh_end_io_t *handler); int block_read_full_page(struct page*, get_block_t*); -int block_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count); +bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, get_block_t *get_block); int __block_write_begin(struct page *page, loff_t pos, unsigned len, @@ -398,7 +397,7 @@ __bread(struct block_device *bdev, sector_t block, unsigned size) return __bread_gfp(bdev, block, size, __GFP_MOVABLE); } -extern int __set_page_dirty_buffers(struct page *page); +bool block_dirty_folio(struct address_space *mapping, struct folio *folio); #else /* CONFIG_BLOCK */ diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h index fef8b607f97e..a6189d21f2ba 100644 --- a/include/linux/cacheflush.h +++ b/include/linux/cacheflush.h @@ -4,6 +4,8 @@ #include <asm/cacheflush.h> +struct folio; + #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO void flush_dcache_folio(struct folio *folio); diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index a81652d1c6f3..7ae21c0f7f23 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -113,7 +113,7 @@ struct can_tdc_const { }; #ifdef CONFIG_CAN_CALC_BITTIMING -int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc); void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, @@ -121,7 +121,7 @@ void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, u32 *ctrlmode, u32 ctrlmode_supported); #else /* !CONFIG_CAN_CALC_BITTIMING */ static inline int -can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, +can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { netdev_err(dev, "bit-timing calculation not available\n"); @@ -136,7 +136,7 @@ can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const, } #endif /* CONFIG_CAN_CALC_BITTIMING */ -int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, +int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc, const u32 *bitrate_const, const unsigned int bitrate_const_cnt); diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 7ad6c3d0db7d..86bf82dbd8b8 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -28,8 +28,8 @@ #define CEPH_INO_ROOT 1 -#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ -#define CEPH_INO_DOTDOT 3 /* used by ceph fuse for parent (..) 
*/ +#define CEPH_INO_CEPH 2 /* hidden .ceph dir */ +#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */ /* arbitrary limit on max # of monitors (cluster of 3 is typical) */ #define CEPH_MAX_MON 31 @@ -328,6 +328,7 @@ enum { CEPH_MDS_OP_LOOKUPPARENT = 0x00103, CEPH_MDS_OP_LOOKUPINO = 0x00104, CEPH_MDS_OP_LOOKUPNAME = 0x00105, + CEPH_MDS_OP_GETVXATTR = 0x00106, CEPH_MDS_OP_SETXATTR = 0x01105, CEPH_MDS_OP_RMXATTR = 0x01106, diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index edf62eaa6285..00af2c98da75 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -284,6 +284,7 @@ DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) extern struct kmem_cache *ceph_inode_cachep; extern struct kmem_cache *ceph_cap_cachep; +extern struct kmem_cache *ceph_cap_snap_cachep; extern struct kmem_cache *ceph_cap_flush_cachep; extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 75c151413fda..0d1ada8968d7 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -450,6 +450,7 @@ extern struct mutex cgroup_mutex; extern spinlock_t css_set_lock; #define task_css_set_check(task, __c) \ rcu_dereference_check((task)->cgroups, \ + rcu_read_lock_sched_held() || \ lockdep_is_held(&cgroup_mutex) || \ lockdep_is_held(&css_set_lock) || \ ((task)->flags & PF_EXITING) || (__c)) @@ -791,11 +792,9 @@ static inline void cgroup_account_cputime(struct task_struct *task, cpuacct_charge(task, delta_exec); - rcu_read_lock(); cgrp = task_dfl_cgroup(task); if (cgroup_parent(cgrp)) __cgroup_account_cputime(cgrp, delta_exec); - rcu_read_unlock(); } static inline void cgroup_account_cputime_field(struct task_struct *task, @@ -806,11 +805,9 @@ static inline void cgroup_account_cputime_field(struct task_struct *task, cpuacct_account_field(task, index, delta_exec); - rcu_read_lock(); cgrp = task_dfl_cgroup(task); if (cgroup_parent(cgrp)) __cgroup_account_cputime_field(cgrp, index, delta_exec); - rcu_read_unlock(); } #else /* CONFIG_CGROUPS */ diff --git a/include/linux/cgroup_api.h b/include/linux/cgroup_api.h new file mode 100644 index 000000000000..d0cfe8025111 --- /dev/null +++ b/include/linux/cgroup_api.h @@ -0,0 +1 @@ +#include <linux/cgroup.h> diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index ccb3f034bfa9..3484309b59bf 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -78,6 +78,10 @@ #define AT91_PMC_MAINRDY (1 << 16) /* Main Clock Ready */ #define AT91_CKGR_PLLAR 0x28 /* PLL A Register */ + +#define AT91_PMC_RATIO 0x2c /* Processor clock ratio register [SAMA7G5 only] */ +#define AT91_PMC_RATIO_RATIO (0xf) /* CPU clock ratio. */ + #define AT91_CKGR_PLLBR 0x2c /* PLL B Register */ #define AT91_PMC_DIV (0xff << 0) /* Divider */ #define AT91_PMC_PLLCOUNT (0x3f << 8) /* PLL Counter */ diff --git a/include/linux/cma.h b/include/linux/cma.h index bd801023504b..90fd742fd1ef 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -20,6 +20,14 @@ #define CMA_MAX_NAME 64 +/* + * TODO: once the buddy -- especially pageblock merging and alloc_contig_range() + * -- can deal with only some pageblocks of a higher-order page being + * MIGRATE_CMA, we can use pageblock_nr_pages. 
+ */ +#define CMA_MIN_ALIGNMENT_PAGES MAX_ORDER_NR_PAGES +#define CMA_MIN_ALIGNMENT_BYTES (PAGE_SIZE * CMA_MIN_ALIGNMENT_PAGES) + struct cma; extern unsigned long totalcma_pages; @@ -50,4 +58,6 @@ extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); + +extern void cma_reserve_pages_on_error(struct cma *cma); #endif diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3c4de9b6c6e3..babb1347148c 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -68,3 +68,28 @@ #define __nocfi __attribute__((__no_sanitize__("cfi"))) #define __cficanonical __attribute__((__cfi_canonical_jump_table__)) + +/* + * Turn individual warnings and errors on and off locally, depending + * on version. + */ +#define __diag_clang(version, severity, s) \ + __diag_clang_ ## version(__diag_clang_ ## severity s) + +/* Severity used in pragma directives */ +#define __diag_clang_ignore ignored +#define __diag_clang_warn warning +#define __diag_clang_error error + +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(clang diagnostic s)) + +#if CONFIG_CLANG_VERSION >= 110000 +#define __diag_clang_11(s) __diag(s) +#else +#define __diag_clang_11(s) +#endif + +#define __diag_ignore_all(option, comment) \ + __diag_clang(11, ignore, option) diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index ccbbd31b3aae..52299c957c98 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -97,6 +97,10 @@ #define KASAN_ABI_VERSION 4 #endif +#ifdef CONFIG_SHADOW_CALL_STACK +#define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) +#endif + #if __has_attribute(__no_sanitize_address__) #define __no_sanitize_address __attribute__((no_sanitize_address)) #else @@ -151,6 +155,9 @@ #define __diag_GCC_8(s) #endif +#define __diag_ignore_all(option, comment) \ + __diag_GCC(8, ignore, option) + /* * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size" * attribute) do not work, and must be disabled. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3c1795fdb568..1c2c33ae1b37 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -4,6 +4,14 @@ #ifndef __ASSEMBLY__ +#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \ + __has_attribute(btf_type_tag) +# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value))) +#else +# define BTF_TYPE_TAG(value) /* nothing */ +#endif + +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ #ifdef __CHECKER__ /* address spaces */ # define __kernel __attribute__((address_space(0))) @@ -32,10 +40,10 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # ifdef STRUCTLEAK_PLUGIN # define __user __attribute__((user)) # else -# define __user +# define __user BTF_TYPE_TAG(user) # endif # define __iomem -# define __percpu +# define __percpu BTF_TYPE_TAG(percpu) # define __rcu # define __chk_user_ptr(x) (void)0 # define __chk_io_ptr(x) (void)0 @@ -137,8 +145,6 @@ struct ftrace_likely_data { */ #define __naked __attribute__((__naked__)) notrace -#define __compiler_offsetof(a, b) __builtin_offsetof(a, b) - /* * Prefer gnu_inline, so that extern inline functions do not emit an * externally visible function. 
This makes extern inline behave as per gnu89 @@ -368,4 +374,8 @@ struct ftrace_likely_data { #define __diag_error(compiler, version, option, comment) \ __diag_ ## compiler(version, error, option) +#ifndef __diag_ignore_all +#define __diag_ignore_all(option, comment) +#endif + #endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 248a68c668b4..08a1d3e7e46d 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -12,22 +12,34 @@ struct core_vma_metadata { unsigned long start, end; unsigned long flags; unsigned long dump_size; + unsigned long pgoff; + struct file *file; +}; + +struct coredump_params { + const kernel_siginfo_t *siginfo; + struct pt_regs *regs; + struct file *file; + unsigned long limit; + unsigned long mm_flags; + loff_t written; + loff_t pos; + loff_t to_skip; + int vma_count; + size_t vma_data_size; + struct core_vma_metadata *vma_meta; }; /* * These are the only things you should do on a core-file: use only these * functions to write out all the necessary info. */ -struct coredump_params; extern void dump_skip_to(struct coredump_params *cprm, unsigned long to); extern void dump_skip(struct coredump_params *cprm, size_t nr); extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); int dump_user_range(struct coredump_params *cprm, unsigned long start, unsigned long len); -int dump_vma_snapshot(struct coredump_params *cprm, int *vma_count, - struct core_vma_metadata **vma_meta, - size_t *vma_data_size_ptr); extern void do_coredump(const kernel_siginfo_t *siginfo); #else static inline void do_coredump(const kernel_siginfo_t *siginfo) {} diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 3522a272b74d..35c7d6db4139 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -661,6 +661,11 @@ struct gov_attr_set { /* sysfs ops for cpufreq governors */ extern const struct sysfs_ops governor_sysfs_ops; +static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) +{ + return container_of(kobj, struct gov_attr_set, kobj); +} + void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 411a428ace4d..82e33137f917 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -100,6 +100,7 @@ enum cpuhp_state { CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, CPUHP_AP_DTPM_CPU_DEAD, + CPUHP_RANDOM_PREPARE, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -165,6 +166,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, CPUHP_AP_PERF_ARM_ACPI_STARTING, CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_PERF_RISCV_STARTING, CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, @@ -231,6 +233,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, + CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, @@ -240,6 +243,7 @@ enum cpuhp_state { CPUHP_AP_PERF_CSKY_ONLINE, CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, + CPUHP_AP_RANDOM_ONLINE, 
CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_BASE_CACHEINFO_ONLINE, CPUHP_AP_ONLINE_DYN, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 64dae70d31f5..fe29ac7cc469 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -102,7 +102,7 @@ extern atomic_t __num_online_cpus; extern cpumask_t cpus_booted_once_mask; -static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) +static __always_inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) { #ifdef CONFIG_DEBUG_PER_CPU_MAPS WARN_ON_ONCE(cpu >= bits); @@ -110,7 +110,7 @@ static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits) } /* verify cpu argument to cpumask_* operators */ -static inline unsigned int cpumask_check(unsigned int cpu) +static __always_inline unsigned int cpumask_check(unsigned int cpu) { cpu_max_bits_warn(cpu, nr_cpumask_bits); return cpu; @@ -341,12 +341,12 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool * @cpu: cpu number (< nr_cpu_ids) * @dstp: the cpumask pointer */ -static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +static __always_inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) { set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } -static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) +static __always_inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) { __set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } @@ -357,12 +357,12 @@ static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) * @cpu: cpu number (< nr_cpu_ids) * @dstp: the cpumask pointer */ -static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) +static __always_inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) { clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } -static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) +static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) { __clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } @@ -374,7 +374,7 @@ static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) * * Returns 1 if @cpu is set in @cpumask, else returns 0 */ -static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) +static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) { return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); } @@ -388,7 +388,7 @@ static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) * * test_and_set_bit wrapper for cpumasks. */ -static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) +static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) { return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } @@ -402,7 +402,7 @@ static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) * * test_and_clear_bit wrapper for cpumasks. 
*/ -static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) { return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } diff --git a/include/linux/cpumask_api.h b/include/linux/cpumask_api.h new file mode 100644 index 000000000000..83bd3ebe82b0 --- /dev/null +++ b/include/linux/cpumask_api.h @@ -0,0 +1 @@ +#include <linux/cpumask.h> diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 855869e1fd32..2324ab6f1846 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -133,6 +133,15 @@ #define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000 /* + * Mark an algorithm as a service implementation only usable by a + * template and never by a normal user of the kernel crypto API. + * This is intended to be used by algorithms that are themselves + * not FIPS-approved but may instead be used to implement parts of + * a FIPS-approved algorithm (e.g., dh vs. ffdhe2048(dh)). + */ +#define CRYPTO_ALG_FIPS_INTERNAL 0x00020000 + +/* * Transform masks and values (for crt_flags). */ #define CRYPTO_TFM_NEED_KEY 0x00000001 diff --git a/include/linux/damon.h b/include/linux/damon.h index 5e1e3a128b77..f23cbfa4248d 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -60,19 +60,18 @@ struct damon_region { /** * struct damon_target - Represents a monitoring target. - * @id: Unique identifier for this target. + * @pid: The PID of the virtual address space to monitor. * @nr_regions: Number of monitoring target regions of this target. * @regions_list: Head of the monitoring target regions of this target. * @list: List head for siblings. * * Each monitoring context could have multiple targets. For example, a context * for virtual memory address spaces could have multiple target processes. The - * @id of each target should be unique among the targets of the context. For - * example, in the virtual address monitoring context, it could be a pidfd or - * an address of an mm_struct. + * @pid should be set for appropriate &struct damon_operations including the + * virtual address spaces monitoring operations. */ struct damon_target { - unsigned long id; + struct pid *pid; unsigned int nr_regions; struct list_head regions_list; struct list_head list; @@ -88,6 +87,7 @@ struct damon_target { * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE. * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE. * @DAMOS_STAT: Do nothing but count the stat. + * @NR_DAMOS_ACTIONS: Total number of DAMOS actions */ enum damos_action { DAMOS_WILLNEED, @@ -96,6 +96,7 @@ enum damos_action { DAMOS_HUGEPAGE, DAMOS_NOHUGEPAGE, DAMOS_STAT, /* Do nothing but only record the stat */ + NR_DAMOS_ACTIONS, }; /** @@ -121,9 +122,9 @@ enum damos_action { * uses smaller one as the effective quota. * * For selecting regions within the quota, DAMON prioritizes current scheme's - * target memory regions using the &struct damon_primitive->get_scheme_score. + * target memory regions using the &struct damon_operations->get_scheme_score. * You could customize the prioritization logic by setting &weight_sz, - * &weight_nr_accesses, and &weight_age, because monitoring primitives are + * &weight_nr_accesses, and &weight_age, because monitoring operations are * encouraged to respect those. */ struct damos_quota { @@ -158,10 +159,12 @@ struct damos_quota { * * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme. 
* @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000]. + * @NR_DAMOS_WMARK_METRICS: Total number of DAMOS watermark metrics */ enum damos_wmark_metric { DAMOS_WMARK_NONE, DAMOS_WMARK_FREE_MEM_RATE, + NR_DAMOS_WMARK_METRICS, }; /** @@ -254,13 +257,26 @@ struct damos { struct list_head list; }; +/** + * enum damon_ops_id - Identifier for each monitoring operations implementation + * + * @DAMON_OPS_VADDR: Monitoring operations for virtual address spaces + * @DAMON_OPS_PADDR: Monitoring operations for the physical address space + */ +enum damon_ops_id { + DAMON_OPS_VADDR, + DAMON_OPS_PADDR, + NR_DAMON_OPS, +}; + struct damon_ctx; /** - * struct damon_primitive - Monitoring primitives for given use cases. + * struct damon_operations - Monitoring operations for given use cases. * - * @init: Initialize primitive-internal data structures. - * @update: Update primitive-internal data structures. + * @id: Identifier of this operations set. + * @init: Initialize operations-related data structures. + * @update: Update operations-related data structures. * @prepare_access_checks: Prepare next access check of target regions. * @check_accesses: Check the accesses to target regions. * @reset_aggregated: Reset aggregated accesses monitoring results. @@ -270,18 +286,20 @@ struct damon_ctx; * @cleanup: Clean up the context. * * DAMON can be extended for various address spaces and usages. For this, - * users should register the low level primitives for their target address - * space and usecase via the &damon_ctx.primitive. Then, the monitoring thread + * users should register the low level operations for their target address + * space and usecase via the &damon_ctx.ops. Then, the monitoring thread * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting - * the monitoring, @update after each &damon_ctx.primitive_update_interval, and + * the monitoring, @update after each &damon_ctx.ops_update_interval, and * @check_accesses, @target_valid and @prepare_access_checks after each * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each * &damon_ctx.aggr_interval. * - * @init should initialize primitive-internal data structures. For example, + * Each &struct damon_operations instance having valid @id can be registered + * via damon_register_ops() and selected by damon_select_ops() later. + * @init should initialize operations-related data structures. For example, * this could be used to construct proper monitoring target regions and link * those to @damon_ctx.adaptive_targets. - * @update should update the primitive-internal data structures. For example, + * @update should update the operations-related data structures. For example, * this could be used to update monitoring target regions for current status. * @prepare_access_checks should manipulate the monitoring regions to be * prepared for the next access check. @@ -301,7 +319,8 @@ struct damon_ctx; * monitoring. * @cleanup is called from @kdamond just before its termination. */ -struct damon_primitive { +struct damon_operations { + enum damon_ops_id id; void (*init)(struct damon_ctx *context); void (*update)(struct damon_ctx *context); void (*prepare_access_checks)(struct damon_ctx *context); @@ -355,15 +374,15 @@ struct damon_callback { * * @sample_interval: The time between access samplings. * @aggr_interval: The time between monitor results aggregations. - * @primitive_update_interval: The time between monitoring primitive updates. 
+ * @ops_update_interval: The time between monitoring operations updates. * * For each @sample_interval, DAMON checks whether each region is accessed or * not. It aggregates and keeps the access information (number of accesses to * each region) for @aggr_interval time. DAMON also checks whether the target * memory regions need update (e.g., by ``mmap()`` calls from the application, * in case of virtual memory monitoring) and applies the changes for each - * @primitive_update_interval. All time intervals are in micro-seconds. - * Please refer to &struct damon_primitive and &struct damon_callback for more + * @ops_update_interval. All time intervals are in micro-seconds. + * Please refer to &struct damon_operations and &struct damon_callback for more * detail. * * @kdamond: Kernel thread who does the monitoring. @@ -375,7 +394,7 @@ struct damon_callback { * * Once started, the monitoring thread runs until explicitly required to be * terminated or every monitoring target is invalid. The validity of the - * targets is checked via the &damon_primitive.target_valid of @primitive. The + * targets is checked via the &damon_operations.target_valid of @ops. The * termination can also be explicitly requested by writing non-zero to * @kdamond_stop. The thread sets @kdamond to NULL when it terminates. * Therefore, users can know whether the monitoring is ongoing or terminated by @@ -385,7 +404,7 @@ struct damon_callback { * Note that the monitoring thread protects only @kdamond and @kdamond_stop via * @kdamond_lock. Accesses to other fields must be protected by themselves. * - * @primitive: Set of monitoring primitives for given use cases. + * @ops: Set of monitoring operations for given use cases. * @callback: Set of callbacks for monitoring events notifications. * * @min_nr_regions: The minimum number of adaptive monitoring regions. 
@@ -396,17 +415,17 @@ struct damon_callback { struct damon_ctx { unsigned long sample_interval; unsigned long aggr_interval; - unsigned long primitive_update_interval; + unsigned long ops_update_interval; /* private: internal use only */ struct timespec64 last_aggregation; - struct timespec64 last_primitive_update; + struct timespec64 last_ops_update; /* public: */ struct task_struct *kdamond; struct mutex kdamond_lock; - struct damon_primitive primitive; + struct damon_operations ops; struct damon_callback callback; unsigned long min_nr_regions; @@ -475,7 +494,7 @@ struct damos *damon_new_scheme( void damon_add_scheme(struct damon_ctx *ctx, struct damos *s); void damon_destroy_scheme(struct damos *s); -struct damon_target *damon_new_target(unsigned long id); +struct damon_target *damon_new_target(void); void damon_add_target(struct damon_ctx *ctx, struct damon_target *t); bool damon_targets_empty(struct damon_ctx *ctx); void damon_free_target(struct damon_target *t); @@ -484,28 +503,18 @@ unsigned int damon_nr_regions(struct damon_target *t); struct damon_ctx *damon_new_ctx(void); void damon_destroy_ctx(struct damon_ctx *ctx); -int damon_set_targets(struct damon_ctx *ctx, - unsigned long *ids, ssize_t nr_ids); int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, - unsigned long aggr_int, unsigned long primitive_upd_int, + unsigned long aggr_int, unsigned long ops_upd_int, unsigned long min_nr_reg, unsigned long max_nr_reg); int damon_set_schemes(struct damon_ctx *ctx, struct damos **schemes, ssize_t nr_schemes); int damon_nr_running_ctxs(void); +int damon_register_ops(struct damon_operations *ops); +int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id); -int damon_start(struct damon_ctx **ctxs, int nr_ctxs); +int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive); int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); #endif /* CONFIG_DAMON */ -#ifdef CONFIG_DAMON_VADDR -bool damon_va_target_valid(void *t); -void damon_va_set_primitives(struct damon_ctx *ctx); -#endif /* CONFIG_DAMON_VADDR */ - -#ifdef CONFIG_DAMON_PADDR -bool damon_pa_target_valid(void *t); -void damon_pa_set_primitives(struct damon_ctx *ctx); -#endif /* CONFIG_DAMON_PADDR */ - #endif /* _DAMON_H */ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index b26fecf6c8e8..c2a3758c4aaa 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -317,12 +317,6 @@ struct dm_target { unsigned num_secure_erase_bios; /* - * The number of WRITE SAME bios that will be submitted to the target. - * The bio number can be accessed with dm_bio_get_target_bio_nr. - */ - unsigned num_write_same_bios; - - /* * The number of WRITE ZEROES bios that will be submitted to the target. * The bio number can be accessed with dm_bio_get_target_bio_nr. */ @@ -358,10 +352,16 @@ struct dm_target { bool limit_swap_bios:1; /* - * Set if this target implements a a zoned device and needs emulation of + * Set if this target implements a zoned device and needs emulation of * zone append operations using regular writes. */ bool emulate_zone_append:1; + + /* + * Set if the target will submit IO using dm_submit_bio_remap() + * after returning DM_MAPIO_SUBMITTED from its map function. 
+ */ + bool accounts_remapped_io:1; }; void *dm_per_bio_data(struct bio *bio, size_t data_size); @@ -465,6 +465,7 @@ int dm_suspended(struct dm_target *ti); int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); +void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone); union map_info *dm_get_rq_mapinfo(struct request *rq); #ifdef CONFIG_BLK_DEV_ZONED diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h index 278d489e4bdd..19fa0b5ae5ec 100644 --- a/include/linux/dma-buf-map.h +++ b/include/linux/dma-buf-map.h @@ -52,13 +52,13 @@ * * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf); * - * dma_buf_map_set_vaddr(&map. 0xdeadbeaf); + * dma_buf_map_set_vaddr(&map, 0xdeadbeaf); * * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem(). * * .. code-block:: c * - * dma_buf_map_set_vaddr_iomem(&map. 0xdeadbeaf); + * dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf); * * Instances of struct dma_buf_map do not have to be cleaned up, but * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 7ab50076e7a6..2097760e8e95 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -13,7 +13,7 @@ #ifndef __DMA_BUF_H__ #define __DMA_BUF_H__ -#include <linux/dma-buf-map.h> +#include <linux/iosys-map.h> #include <linux/file.h> #include <linux/err.h> #include <linux/scatterlist.h> @@ -283,8 +283,8 @@ struct dma_buf_ops { */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); - int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); - void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map); + int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map); + void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map); }; /** @@ -347,7 +347,7 @@ struct dma_buf { * @vmap_ptr: * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. */ - struct dma_buf_map vmap_ptr; + struct iosys_map vmap_ptr; /** * @exp_name: @@ -628,6 +628,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf, int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long); -int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map); -void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map); +int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map); +void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map); #endif /* __DMA_BUF_H__ */ diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index 303dd712220f..fec374f69e12 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -45,19 +45,6 @@ struct dma_fence_array { struct irq_work work; }; -extern const struct dma_fence_ops dma_fence_array_ops; - -/** - * dma_fence_is_array - check if a fence is from the array subsclass - * @fence: fence to test - * - * Return true if it is a dma_fence_array and false otherwise. 
- */ -static inline bool dma_fence_is_array(struct dma_fence *fence) -{ - return fence->ops == &dma_fence_array_ops; -} - /** * to_dma_fence_array - cast a fence to a dma_fence_array * @fence: fence to cast to a dma_fence_array @@ -68,7 +55,7 @@ static inline bool dma_fence_is_array(struct dma_fence *fence) static inline struct dma_fence_array * to_dma_fence_array(struct dma_fence *fence) { - if (fence->ops != &dma_fence_array_ops) + if (!fence || !dma_fence_is_array(fence)) return NULL; return container_of(fence, struct dma_fence_array, base); diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 54fe3443fd2c..10d51bcdf7b7 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -49,7 +49,6 @@ struct dma_fence_chain { spinlock_t lock; }; -extern const struct dma_fence_ops dma_fence_chain_ops; /** * to_dma_fence_chain - cast a fence to a dma_fence_chain @@ -61,13 +60,28 @@ extern const struct dma_fence_ops dma_fence_chain_ops; static inline struct dma_fence_chain * to_dma_fence_chain(struct dma_fence *fence) { - if (!fence || fence->ops != &dma_fence_chain_ops) + if (!fence || !dma_fence_is_chain(fence)) return NULL; return container_of(fence, struct dma_fence_chain, base); } /** + * dma_fence_chain_contained - return the contained fence + * @fence: the fence to test + * + * If the fence is a dma_fence_chain the function returns the fence contained + * inside the chain object, otherwise it returns the fence itself. + */ +static inline struct dma_fence * +dma_fence_chain_contained(struct dma_fence *fence) +{ + struct dma_fence_chain *chain = to_dma_fence_chain(fence); + + return chain ? chain->fence : fence; +} + +/** * dma_fence_chain_alloc * * Returns a new struct dma_fence_chain object or NULL on failure. diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 1ea691753bd3..775cdc0b4f24 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -587,4 +587,42 @@ struct dma_fence *dma_fence_get_stub(void); struct dma_fence *dma_fence_allocate_private_stub(void); u64 dma_fence_context_alloc(unsigned num); +extern const struct dma_fence_ops dma_fence_array_ops; +extern const struct dma_fence_ops dma_fence_chain_ops; + +/** + * dma_fence_is_array - check if a fence is from the array subclass + * @fence: the fence to test + * + * Return true if it is a dma_fence_array and false otherwise. + */ +static inline bool dma_fence_is_array(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_array_ops; +} + +/** + * dma_fence_is_chain - check if a fence is from the chain subclass + * @fence: the fence to test + * + * Return true if it is a dma_fence_chain and false otherwise. + */ +static inline bool dma_fence_is_chain(struct dma_fence *fence) +{ + return fence->ops == &dma_fence_chain_ops; +} + +/** + * dma_fence_is_container - check if a fence is a container for other fences + * @fence: the fence to test + * + * Return true if this fence is a container for other fences, false otherwise. + * This is important since we can't build up large fence structure or otherwise + * we run into recursion during operation on those fences. 
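+ *
+ * For example, code about to nest one fence under another can guard
+ * against building such a structure with:
+ *
+ *	if (dma_fence_is_container(fence))
+ *		return -EINVAL;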
+ */ +static inline bool dma_fence_is_container(struct dma_fence *fence) +{ + return dma_fence_is_array(fence) || dma_fence_is_chain(fence); +} + #endif /* __LINUX_DMA_FENCE_H */ diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index eebf04325b34..afdfdfac729f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -153,6 +153,13 @@ struct dma_resv { * struct dma_resv_iter - current position into the dma_resv fences * * Don't touch this directly in the driver, use the accessor function instead. + * + * IMPORTANT + * + * When using the lockless iterators like dma_resv_iter_next_unlocked() or + * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted. + * Code which accumulates statistics or similar needs to check for this with + * dma_resv_iter_is_restarted(). */ struct dma_resv_iter { /** @obj: The dma_resv object we iterate over */ @@ -243,7 +250,11 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * &dma_resv.lock and using RCU instead. The cursor needs to be initialized * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside * the iterator a reference to the dma_fence is held and the RCU lock dropped. - * When the dma_resv is modified the iteration starts over again. + * + * Beware that the iterator can be restarted when the struct dma_resv for + * @cursor is modified. Code which accumulates statistics or similar needs to + * check for this with dma_resv_iter_is_restarted(). For this reason prefer the + * lock iterator dma_resv_for_each_fence() whenever possible. */ #define dma_resv_for_each_fence_unlocked(cursor, fence) \ for (fence = dma_resv_iter_first_unlocked(cursor); \ @@ -458,8 +469,8 @@ void dma_resv_fini(struct dma_resv *obj); int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences); void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence); void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence); -int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl, - unsigned *pshared_count, struct dma_fence ***pshared); +int dma_resv_get_fences(struct dma_resv *obj, bool write, + unsigned int *num_fences, struct dma_fence ***fences); int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src); long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr, unsigned long timeout); diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 939a1beaddf7..3ed117e299ec 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -32,31 +32,29 @@ int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto); void dsa_tag_8021q_unregister(struct dsa_switch *ds); -struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, - u16 tpid, u16 tci); +int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, + struct dsa_bridge bridge); -void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id); +void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_bridge bridge); -int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, - struct dsa_bridge bridge); +struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, + u16 tpid, u16 tci); -void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, - struct dsa_bridge bridge); +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, + int *vbid); -u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int 
bridge_num);
+struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master,
+ int vbid);
-u16 dsa_tag_8021q_tx_vid(const struct dsa_port *dp);
+u16 dsa_tag_8021q_bridge_vid(unsigned int bridge_num);
-u16 dsa_tag_8021q_rx_vid(const struct dsa_port *dp);
+u16 dsa_tag_8021q_standalone_vid(const struct dsa_port *dp);
 int dsa_8021q_rx_switch_id(u16 vid);
 int dsa_8021q_rx_source_port(u16 vid);
-bool vid_is_dsa_8021q_rxvlan(u16 vid);
-
-bool vid_is_dsa_8021q_txvlan(u16 vid);
-
 bool vid_is_dsa_8021q(u16 vid);
 #endif /* _NET_DSA_8021Q_H */
diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h
new file mode 100644
index 000000000000..4359fb0221cf
--- /dev/null
+++ b/include/linux/dsa/tag_qca.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TAG_QCA_H
+#define __TAG_QCA_H
+
+#define QCA_HDR_LEN 2
+#define QCA_HDR_VERSION 0x2
+
+#define QCA_HDR_RECV_VERSION GENMASK(15, 14)
+#define QCA_HDR_RECV_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_RECV_TYPE GENMASK(10, 6)
+#define QCA_HDR_RECV_FRAME_IS_TAGGED BIT(3)
+#define QCA_HDR_RECV_SOURCE_PORT GENMASK(2, 0)
+
+/* Packet type for recv */
+#define QCA_HDR_RECV_TYPE_NORMAL 0x0
+#define QCA_HDR_RECV_TYPE_MIB 0x1
+#define QCA_HDR_RECV_TYPE_RW_REG_ACK 0x2
+
+#define QCA_HDR_XMIT_VERSION GENMASK(15, 14)
+#define QCA_HDR_XMIT_PRIORITY GENMASK(13, 11)
+#define QCA_HDR_XMIT_CONTROL GENMASK(10, 8)
+#define QCA_HDR_XMIT_FROM_CPU BIT(7)
+#define QCA_HDR_XMIT_DP_BIT GENMASK(6, 0)
+
+/* Packet type for xmit */
+#define QCA_HDR_XMIT_TYPE_NORMAL 0x0
+#define QCA_HDR_XMIT_TYPE_RW_REG 0x1
+
+/* Check code for a valid mgmt packet. The switch will ignore the packet
+ * if this is wrong.
+ */
+#define QCA_HDR_MGMT_CHECK_CODE_VAL 0x5
+
+/* Specific define for in-band MDIO read/write with Ethernet packet */
+#define QCA_HDR_MGMT_SEQ_LEN 4 /* 4 byte for the seq */
+#define QCA_HDR_MGMT_COMMAND_LEN 4 /* 4 byte for the command */
+#define QCA_HDR_MGMT_DATA1_LEN 4 /* First 4 byte for the mdio data */
+#define QCA_HDR_MGMT_HEADER_LEN (QCA_HDR_MGMT_SEQ_LEN + \
+ QCA_HDR_MGMT_COMMAND_LEN + \
+ QCA_HDR_MGMT_DATA1_LEN)
+
+#define QCA_HDR_MGMT_DATA2_LEN 12 /* Other 12 byte for the mdio data */
+#define QCA_HDR_MGMT_PADDING_LEN 34 /* Padding to reach the min Ethernet packet */
+
+#define QCA_HDR_MGMT_PKT_LEN (QCA_HDR_MGMT_HEADER_LEN + \
+ QCA_HDR_LEN + \
+ QCA_HDR_MGMT_DATA2_LEN + \
+ QCA_HDR_MGMT_PADDING_LEN)
+
+#define QCA_HDR_MGMT_SEQ_NUM GENMASK(31, 0) /* 63, 32 */
+#define QCA_HDR_MGMT_CHECK_CODE GENMASK(31, 29) /* 31, 29 */
+#define QCA_HDR_MGMT_CMD BIT(28) /* 28 */
+#define QCA_HDR_MGMT_LENGTH GENMASK(23, 20) /* 23, 20 */
+#define QCA_HDR_MGMT_ADDR GENMASK(18, 0) /* 18, 0 */
+
+/* Special struct emulating an Ethernet header */
+struct qca_mgmt_ethhdr {
+ u32 command; /* command bit 31:0 */
+ u32 seq; /* seq 63:32 */
+ u32 mdio_data; /* first 4byte mdio */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+enum mdio_cmd {
+ MDIO_WRITE = 0x0,
+ MDIO_READ
+};
+
+struct mib_ethhdr {
+ u32 data[3]; /* first 3 mib counter */
+ __be16 hdr; /* qca hdr */
+} __packed;
+
+struct qca_tagger_data {
+ void (*rw_reg_ack_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+ void (*mib_autocast_handler)(struct dsa_switch *ds,
+ struct sk_buff *skb);
+};
+
+#endif /* __TAG_QCA_H */
diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h
index d37e5d06a357..a4a13514b730 100644
--- a/include/linux/dtpm.h
+++ b/include/linux/dtpm.h
@@ -32,28 +32,25 @@ struct dtpm_ops {
 void (*release)(struct dtpm *);
 };
 
-typedef int (*dtpm_init_t)(void);
+struct device_node;
-struct dtpm_descr { - dtpm_init_t init; +struct dtpm_subsys_ops { + const char *name; + int (*init)(void); + void (*exit)(void); + int (*setup)(struct dtpm *, struct device_node *); }; -/* Init section thermal table */ -extern struct dtpm_descr __dtpm_table[]; -extern struct dtpm_descr __dtpm_table_end[]; - -#define DTPM_TABLE_ENTRY(name, __init) \ - static struct dtpm_descr __dtpm_table_entry_##name \ - __used __section("__dtpm_table") = { \ - .init = __init, \ - } - -#define DTPM_DECLARE(name, init) DTPM_TABLE_ENTRY(name, init) +enum DTPM_NODE_TYPE { + DTPM_NODE_VIRTUAL = 0, + DTPM_NODE_DT, +}; -#define for_each_dtpm_table(__dtpm) \ - for (__dtpm = __dtpm_table; \ - __dtpm < __dtpm_table_end; \ - __dtpm++) +struct dtpm_node { + enum DTPM_NODE_TYPE type; + const char *name; + struct dtpm_node *parent; +}; static inline struct dtpm *to_dtpm(struct powercap_zone *zone) { @@ -70,4 +67,7 @@ void dtpm_unregister(struct dtpm *dtpm); int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent); +int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table); + +void dtpm_destroy_hierarchy(void); #endif diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 746e081879a5..f8e206e82476 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -114,7 +114,7 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } -#if (defined(CONFIG_UML) && defined(CONFIG_X86_32)) || defined(CONFIG_IA64) +#ifdef CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. Dumping its @@ -149,6 +149,6 @@ static inline size_t elf_core_extra_data_size(void) { return 0; } -#endif +#endif /* CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS */ #endif /* _LINUX_ELFCORE_H */ diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 2e2b8d6140ed..141952f4fee8 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -454,10 +454,21 @@ irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); * * Conditional reschedule with additional sanity checks. 
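 *
 * Note how the definitions below resolve: with CONFIG_PREEMPT_DYNAMIC the
 * call is patchable at boot, either through a static call that switches
 * between raw_irqentry_exit_cond_resched() and NULL, or through a static
 * key gating a direct call; without CONFIG_PREEMPT_DYNAMIC it is a plain
 * call to raw_irqentry_exit_cond_resched().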
*/ -void irqentry_exit_cond_resched(void); +void raw_irqentry_exit_cond_resched(void); #ifdef CONFIG_PREEMPT_DYNAMIC -DECLARE_STATIC_CALL(irqentry_exit_cond_resched, irqentry_exit_cond_resched); +#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) +#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched +#define irqentry_exit_cond_resched_dynamic_disabled NULL +DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched); +#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)() +#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched); +void dynamic_irqentry_exit_cond_resched(void); +#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched() #endif +#else /* CONFIG_PREEMPT_DYNAMIC */ +#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched() +#endif /* CONFIG_PREEMPT_DYNAMIC */ /** * irqentry_exit - Handle return from exception that used irqentry_enter() diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 2ad71cc90b37..92b10e67d5f8 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -134,7 +134,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr) #endif } -static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2]) +static inline bool is_multicast_ether_addr_64bits(const u8 *addr) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 #ifdef __BIG_ENDIAN @@ -372,8 +372,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2) * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits. */ -static inline bool ether_addr_equal_64bits(const u8 addr1[6+2], - const u8 addr2[6+2]) +static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2) { #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2); diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 11efc45de66a..4af58459a1e7 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -70,17 +70,23 @@ enum { /** * struct kernel_ethtool_ringparam - RX/TX ring configuration * @rx_buf_len: Current length of buffers on the rx ring. 
+ * @tcp_data_split: Scatter packet headers and data to separate buffers + * @cqe_size: Size of TX/RX completion queue event */ struct kernel_ethtool_ringparam { u32 rx_buf_len; + u8 tcp_data_split; + u32 cqe_size; }; /** * enum ethtool_supported_ring_param - indicator caps for setting ring params * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len + * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size */ enum ethtool_supported_ring_param { ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0), + ETHTOOL_RING_USE_CQE_SIZE = BIT(1), }; #define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit)) diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index e525f6957c49..2d04f6448cde 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -64,6 +64,8 @@ static inline struct dentry *fault_create_debugfs_attr(const char *name, struct kmem_cache; +bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); + int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #ifdef CONFIG_FAILSLAB extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); diff --git a/include/linux/fb.h b/include/linux/fb.h index 02f362c661c8..9a77ab615c36 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -204,6 +204,7 @@ struct fb_pixmap { struct fb_deferred_io { /* delay between mkwrite and deferred handler */ unsigned long delay; + bool sort_pagelist; /* sort pagelist by offset */ struct mutex lock; /* mutex that protects the page list */ struct list_head pagelist; /* list of touched pages */ /* callback */ @@ -502,6 +503,7 @@ struct fb_info { } *apertures; bool skip_vt_switch; /* no VT switch on suspend/resume required */ + bool forced_out; /* set when being removed by another driver */ }; static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { diff --git a/include/linux/filter.h b/include/linux/filter.h index 71fa57b88bfc..ed0c0ff42ad5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -548,7 +548,7 @@ struct sock_fprog_kern { #define BPF_IMAGE_ALIGNMENT 8 struct bpf_binary_header { - u32 pages; + u32 size; u8 image[] __aligned(BPF_IMAGE_ALIGNMENT); }; @@ -566,13 +566,15 @@ struct bpf_prog { gpl_compatible:1, /* Is filter GPL compatible? */ cb_access:1, /* Is control block accessed? */ dst_needed:1, /* Do we need dst entry? */ + blinding_requested:1, /* needs constant blinding */ blinded:1, /* Was blinded */ is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? 
*/ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ - call_get_func_ip:1; /* Do we call get_func_ip() */ + call_get_func_ip:1, /* Do we call get_func_ip() */ + tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -886,17 +888,8 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp) static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { set_vm_flush_reset_perms(hdr); - set_memory_ro((unsigned long)hdr, hdr->pages); - set_memory_x((unsigned long)hdr, hdr->pages); -} - -static inline struct bpf_binary_header * -bpf_jit_binary_hdr(const struct bpf_prog *fp) -{ - unsigned long real_start = (unsigned long)fp->bpf_func; - unsigned long addr = real_start & PAGE_MASK; - - return (void *)addr; + set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT); + set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT); } int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); @@ -1068,6 +1061,18 @@ void *bpf_jit_alloc_exec(unsigned long size); void bpf_jit_free_exec(void *addr); void bpf_jit_free(struct bpf_prog *fp); +struct bpf_binary_header * +bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image, + unsigned int alignment, + struct bpf_binary_header **rw_hdr, + u8 **rw_image, + bpf_jit_fill_hole_t bpf_fill_ill_insns); +int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, + struct bpf_binary_header *ro_header, + struct bpf_binary_header *rw_header); +void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, + struct bpf_binary_header *rw_header); + int bpf_jit_add_poke_descriptor(struct bpf_prog *prog, struct bpf_jit_poke_descriptor *poke); @@ -1356,7 +1361,10 @@ struct bpf_sockopt_kern { s32 level; s32 optname; s32 optlen; - s32 retval; + /* for retval in struct bpf_cg_run_ctx */ + struct task_struct *current_task; + /* Temporary "register" for indirect stores to ppos. 
*/
+ u64 tmp_reg;
 };
 
 int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h
index 456b6a59d29b..31456f897aa9 100644
--- a/include/linux/firmware/imx/svc/rm.h
+++ b/include/linux/firmware/imx/svc/rm.h
@@ -59,11 +59,16 @@ enum imx_sc_rm_func {
 
 #if IS_ENABLED(CONFIG_IMX_SCU)
 bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource);
+int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt);
 #else
 static inline bool
 imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource)
 {
 return true;
 }
+static inline int imx_sc_rm_get_resource_owner(struct imx_sc_ipc *ipc, u16 resource, u8 *pt)
+{
+ return -EOPNOTSUPP;
+}
 #endif
 #endif
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 907cb01890cf..f6783f58c64a 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -93,6 +93,7 @@ enum pm_api_id {
 PM_FPGA_LOAD = 22,
 PM_FPGA_GET_STATUS = 23,
 PM_GET_CHIPID = 24,
+ PM_SECURE_SHA = 26,
 PM_PINCTRL_REQUEST = 28,
 PM_PINCTRL_RELEASE = 29,
 PM_PINCTRL_GET_FUNCTION = 30,
@@ -427,6 +428,7 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities,
 const u32 qos,
 const enum zynqmp_pm_request_ack ack);
 int zynqmp_pm_aes_engine(const u64 address, u32 *out);
+int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags);
 int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags);
 int zynqmp_pm_fpga_get_status(u32 *value);
 int zynqmp_pm_write_ggs(u32 index, u32 value);
@@ -601,6 +603,12 @@ static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
 return -ENODEV;
 }
 
+static inline int zynqmp_pm_sha_hash(const u64 address, const u32 size,
+ const u32 flags)
+{
+ return -ENODEV;
+}
+
 static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
 const u32 flags)
 {
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
new file mode 100644
index 000000000000..1c2bde0ead73
--- /dev/null
+++ b/include/linux/fprobe.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Simple ftrace probe wrapper */
+#ifndef _LINUX_FPROBE_H
+#define _LINUX_FPROBE_H
+
+#include <linux/compiler.h>
+#include <linux/ftrace.h>
+#include <linux/rethook.h>
+
+/**
+ * struct fprobe - ftrace based probe.
+ * @ops: The ftrace_ops.
+ * @nmissed: The counter for missing events.
+ * @flags: The status flag.
+ * @rethook: The rethook data structure. (internal data)
+ * @entry_handler: The callback function for function entry.
+ * @exit_handler: The callback function for function exit.
+ */
+struct fprobe {
+#ifdef CONFIG_FUNCTION_TRACER
+ /*
+ * If CONFIG_FUNCTION_TRACER is not set, CONFIG_FPROBE is disabled too.
+ * But a user of fprobe may keep embedding the struct fprobe in their
+ * own code. To avoid build errors, this keeps the fprobe data structure
+ * defined here, but removes the ftrace_ops data structure.
+ */
+ struct ftrace_ops ops;
+#endif
+ unsigned long nmissed;
+ unsigned int flags;
+ struct rethook *rethook;
+
+ void (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+ void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs);
+};
+
+/* This fprobe is soft-disabled. */
+#define FPROBE_FL_DISABLED 1
+
+/*
+ * This fprobe handler will be shared with kprobes.
+ * This flag must be set before registering.
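+ *
+ * E.g. (the handler name is illustrative):
+ *
+ *	static struct fprobe fp = {
+ *		.flags = FPROBE_FL_KPROBE_SHARED,
+ *		.entry_handler = shared_entry_cb,
+ *	};
+ *
+ *	register_fprobe(&fp, "vfs_read", NULL);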
+ */ +#define FPROBE_FL_KPROBE_SHARED 2 + +static inline bool fprobe_disabled(struct fprobe *fp) +{ + return (fp) ? fp->flags & FPROBE_FL_DISABLED : false; +} + +static inline bool fprobe_shared_with_kprobes(struct fprobe *fp) +{ + return (fp) ? fp->flags & FPROBE_FL_KPROBE_SHARED : false; +} + +#ifdef CONFIG_FPROBE +int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter); +int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num); +int register_fprobe_syms(struct fprobe *fp, const char **syms, int num); +int unregister_fprobe(struct fprobe *fp); +#else +static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num) +{ + return -EOPNOTSUPP; +} +static inline int register_fprobe_syms(struct fprobe *fp, const char **syms, int num) +{ + return -EOPNOTSUPP; +} +static inline int unregister_fprobe(struct fprobe *fp) +{ + return -EOPNOTSUPP; +} +#endif + +/** + * disable_fprobe() - Disable fprobe + * @fp: The fprobe to be disabled. + * + * This will soft-disable @fp. Note that this doesn't remove the ftrace + * hooks from the function entry. + */ +static inline void disable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags |= FPROBE_FL_DISABLED; +} + +/** + * enable_fprobe() - Enable fprobe + * @fp: The fprobe to be enabled. + * + * This will soft-enable @fp. + */ +static inline void enable_fprobe(struct fprobe *fp) +{ + if (fp) + fp->flags &= ~FPROBE_FL_DISABLED; +} + +#endif diff --git a/include/linux/fs.h b/include/linux/fs.h index e2d892b201b0..183160872133 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -42,6 +42,7 @@ #include <linux/mount.h> #include <linux/cred.h> #include <linux/mnt_idmapping.h> +#include <linux/slab.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> @@ -327,7 +328,6 @@ struct kiocb { void (*ki_complete)(struct kiocb *iocb, long ret); void *private; int ki_flags; - u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ struct wait_page_queue *ki_waitq; /* for async buffered IO */ randomized_struct_fields_end @@ -367,8 +367,8 @@ struct address_space_operations { /* Write back some dirty pages from this mapping. */ int (*writepages)(struct address_space *, struct writeback_control *); - /* Set a page dirty. Return true if this dirtied it */ - int (*set_page_dirty)(struct page *page); + /* Mark a folio dirty. Return true if this dirtied it */ + bool (*dirty_folio)(struct address_space *, struct folio *); /* * Reads in the requested pages. Unlike ->readpage(), this is @@ -387,7 +387,7 @@ struct address_space_operations { /* Unfortunately this kludge is needed for FIBMAP. 
Don't use it */
 sector_t (*bmap)(struct address_space *, sector_t);
- void (*invalidatepage) (struct page *, unsigned int, unsigned int);
+ void (*invalidate_folio) (struct folio *, size_t offset, size_t len);
 int (*releasepage) (struct page *, gfp_t);
 void (*freepage)(struct page *);
 ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter);
@@ -399,9 +399,9 @@ struct address_space_operations {
 struct page *, struct page *, enum migrate_mode);
 bool (*isolate_page)(struct page *, isolate_mode_t);
 void (*putback_page)(struct page *);
- int (*launder_page) (struct page *);
- int (*is_partially_uptodate) (struct page *, unsigned long,
- unsigned long);
+ int (*launder_folio)(struct folio *);
+ bool (*is_partially_uptodate) (struct folio *, size_t from,
+ size_t count);
 void (*is_dirty_writeback) (struct page *, bool *, bool *);
 int (*error_remove_page)(struct address_space *, struct page *);
@@ -930,10 +930,15 @@ struct fown_struct {
 * struct file_ra_state - Track a file's readahead state.
 * @start: Where the most recent readahead started.
 * @size: Number of pages read in the most recent readahead.
- * @async_size: Start next readahead when this many pages are left.
- * @ra_pages: Maximum size of a readahead request.
+ * @async_size: Number of pages that were/are not needed immediately
+ * and so were/are genuinely "ahead". Start next readahead when
+ * the first of these pages is accessed.
+ * @ra_pages: Maximum size of a readahead request, copied from the bdi.
 * @mmap_miss: How many mmap accesses missed in the page cache.
 * @prev_pos: The last byte in the most recent read request.
+ *
+ * When this structure is passed to ->readahead(), the "most recent"
+ * readahead means the current readahead.
 */
 struct file_ra_state {
 pgoff_t start;
@@ -967,7 +972,6 @@ struct file {
 * Must not be taken from IRQ context.
*/ spinlock_t f_lock; - enum rw_hint f_write_hint; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; @@ -1435,6 +1439,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ #define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ +#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */ /* Possible states of 'frozen' field */ enum { @@ -2215,31 +2220,13 @@ static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, !gid_valid(i_gid_into_mnt(mnt_userns, inode)); } -static inline enum rw_hint file_write_hint(struct file *file) -{ - if (file->f_write_hint != WRITE_LIFE_NOT_SET) - return file->f_write_hint; - - return file_inode(file)->i_write_hint; -} - static inline int iocb_flags(struct file *file); -static inline u16 ki_hint_validate(enum rw_hint hint) -{ - typeof(((struct kiocb *)0)->ki_hint) max_hint = -1; - - if (hint <= max_hint) - return hint; - return 0; -} - static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) { *kiocb = (struct kiocb) { .ki_filp = filp, .ki_flags = iocb_flags(filp), - .ki_hint = ki_hint_validate(file_write_hint(filp)), .ki_ioprio = get_current_ioprio(), }; } @@ -2250,7 +2237,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, *kiocb = (struct kiocb) { .ki_filp = filp, .ki_flags = kiocb_src->ki_flags, - .ki_hint = kiocb_src->ki_hint, .ki_ioprio = kiocb_src->ki_ioprio, .ki_pos = kiocb_src->ki_pos, }; @@ -2746,54 +2732,6 @@ extern void init_special_inode(struct inode *, umode_t, dev_t); extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); -unsigned long invalidate_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t end); - -void invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, - unsigned long *nr_pagevec); - -static inline void invalidate_remote_inode(struct inode *inode) -{ - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - invalidate_mapping_pages(inode->i_mapping, 0, -1); -} -extern int invalidate_inode_pages2(struct address_space *mapping); -extern int invalidate_inode_pages2_range(struct address_space *mapping, - pgoff_t start, pgoff_t end); -extern int write_inode_now(struct inode *, int); -extern int filemap_fdatawrite(struct address_space *); -extern int filemap_flush(struct address_space *); -extern int filemap_fdatawait_keep_errors(struct address_space *mapping); -extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, - loff_t lend); -extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, - loff_t start_byte, loff_t end_byte); - -static inline int filemap_fdatawait(struct address_space *mapping) -{ - return filemap_fdatawait_range(mapping, 0, LLONG_MAX); -} - -extern bool filemap_range_has_page(struct address_space *, loff_t lstart, - loff_t lend); -extern int filemap_write_and_wait_range(struct address_space *mapping, - loff_t lstart, loff_t lend); -extern int __filemap_fdatawrite_range(struct address_space *mapping, - loff_t start, loff_t end, int sync_mode); -extern int filemap_fdatawrite_range(struct address_space *mapping, - loff_t start, loff_t end); -extern int filemap_check_errors(struct address_space *mapping); -extern void __filemap_set_wb_err(struct address_space *mapping, int err); -int filemap_fdatawrite_wbc(struct address_space *mapping, - struct writeback_control *wbc); - -static inline int 
filemap_write_and_wait(struct address_space *mapping)
-{
- return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
-}
-
 extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
 loff_t lend);
 extern int __must_check file_check_and_advance_wb_err(struct file *file);
@@ -2805,67 +2743,6 @@ static inline int file_write_and_wait(struct file *file)
 return file_write_and_wait_range(file, 0, LLONG_MAX);
 }
 
-/**
- * filemap_set_wb_err - set a writeback error on an address_space
- * @mapping: mapping in which to set writeback error
- * @err: error to be set in mapping
- *
- * When writeback fails in some way, we must record that error so that
- * userspace can be informed when fsync and the like are called. We endeavor
- * to report errors on any file that was open at the time of the error. Some
- * internal callers also need to know when writeback errors have occurred.
- *
- * When a writeback error occurs, most filesystems will want to call
- * filemap_set_wb_err to record the error in the mapping so that it will be
- * automatically reported whenever fsync is called on the file.
- */
-static inline void filemap_set_wb_err(struct address_space *mapping, int err)
-{
- /* Fastpath for common case of no error */
- if (unlikely(err))
- __filemap_set_wb_err(mapping, err);
-}
-
-/**
- * filemap_check_wb_err - has an error occurred since the mark was sampled?
- * @mapping: mapping to check for writeback errors
- * @since: previously-sampled errseq_t
- *
- * Grab the errseq_t value from the mapping, and see if it has changed "since"
- * the given value was sampled.
- *
- * If it has then report the latest error set, otherwise return 0.
- */
-static inline int filemap_check_wb_err(struct address_space *mapping,
- errseq_t since)
-{
- return errseq_check(&mapping->wb_err, since);
-}
-
-/**
- * filemap_sample_wb_err - sample the current errseq_t to test for later errors
- * @mapping: mapping to be sampled
- *
- * Writeback errors are always reported relative to a particular sample point
- * in the past. This function provides those sample points.
- */
-static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
-{
- return errseq_sample(&mapping->wb_err);
-}
-
-/**
- * file_sample_sb_err - sample the current errseq_t to test for later errors
- * @file: file pointer to be sampled
- *
- * Grab the most current superblock-level errseq_t value for the given
- * struct file.
- */
-static inline errseq_t file_sample_sb_err(struct file *file)
-{
- return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
-}
-
 extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
 int datasync);
 extern int vfs_fsync(struct file *file, int datasync);
@@ -3108,6 +2985,16 @@ extern void free_inode_nonrcu(struct inode *inode);
 extern int should_remove_suid(struct dentry *);
 extern int file_remove_privs(struct file *);
 
+/*
+ * This must be used for allocating filesystem-specific inodes to set
+ * up the inode reclaim context correctly.
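+ *
+ * A filesystem's ->alloc_inode() would do, e.g. (names illustrative):
+ *
+ *	struct foo_inode *fi = alloc_inode_sb(sb, foo_inode_cachep, GFP_KERNEL);
+ *
+ * so that the allocation is tracked against the superblock's s_inode_lru
+ * rather than going through a bare kmem_cache_alloc().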
+ */ +static inline void * +alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp) +{ + return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp); +} + extern void __insert_inode_hash(struct inode *, unsigned long hashval); static inline void insert_inode_hash(struct inode *inode) { @@ -3130,6 +3017,7 @@ extern int sb_min_blocksize(struct super_block *, int); extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *); +int generic_write_checks_count(struct kiocb *iocb, loff_t *count); extern int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count); extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); @@ -3173,6 +3061,7 @@ extern loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size); extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); extern loff_t no_seek_end_llseek(struct file *, loff_t, int); +int rw_verify_area(int, struct file *, const loff_t *, size_t); extern int generic_file_open(struct inode * inode, struct file * filp); extern int nonseekable_open(struct inode * inode, struct file * filp); extern int stream_open(struct inode * inode, struct file * filp); @@ -3322,8 +3211,6 @@ extern int simple_rename(struct user_namespace *, struct inode *, extern void simple_recursive_removal(struct dentry *, void (*callback)(struct dentry *)); extern int noop_fsync(struct file *, loff_t, loff_t, int); -extern void noop_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); extern int simple_write_begin(struct file *file, struct address_space *mapping, @@ -3608,15 +3495,4 @@ extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, extern int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice); -/* - * Flush file data before changing attributes. Caller must hold any locks - * required to prevent further writes to this file until we're done setting - * flags. - */ -static inline int inode_drain_writes(struct inode *inode) -{ - inode_dio_wait(inode); - return filemap_write_and_wait(inode->i_mapping); -} - #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_api.h b/include/linux/fs_api.h new file mode 100644 index 000000000000..83be38d6d413 --- /dev/null +++ b/include/linux/fs_api.h @@ -0,0 +1 @@ +#include <linux/fs.h> diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 296c5f1d9f35..d44ff747a657 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -616,9 +616,11 @@ static inline void fscache_write_to_cache(struct fscache_cookie *cookie, } #if __fscache_available -extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie); +bool fscache_dirty_folio(struct address_space *mapping, struct folio *folio, + struct fscache_cookie *cookie); #else -#define fscache_set_page_dirty(PAGE, COOKIE) (__set_page_dirty_nobuffers((PAGE))) +#define fscache_dirty_folio(MAPPING, FOLIO, COOKIE) \ + filemap_dirty_folio(MAPPING, FOLIO) #endif /** @@ -626,7 +628,7 @@ extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cook * @wbc: The writeback control * @cookie: The cookie referring to the cache object * - * Unpin the writeback resources pinned by fscache_set_page_dirty(). 
This is + * Unpin the writeback resources pinned by fscache_dirty_folio(). This is * intended to be called by the netfs's ->write_inode() method. */ static inline void fscache_unpin_writeback(struct writeback_control *wbc, diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 91ea9477e9bd..50d92d805bd8 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -714,6 +714,10 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, bool fscrypt_mergeable_bio_bh(struct bio *bio, const struct buffer_head *next_bh); +bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter); + +u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks); + #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) @@ -742,6 +746,20 @@ static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, { return true; } + +static inline bool fscrypt_dio_supported(struct kiocb *iocb, + struct iov_iter *iter) +{ + const struct inode *inode = file_inode(iocb->ki_filp); + + return !fscrypt_needs_contents_encryption(inode); +} + +static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, + u64 nr_blocks) +{ + return nr_blocks; +} #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ /** diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 790c31844db5..0805b74cae44 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -601,6 +601,25 @@ extern void fsnotify_remove_queued_event(struct fsnotify_group *group, /* functions used to manipulate the marks attached to inodes */ +/* Get mask for calculating object interest taking ignored mask into account */ +static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) +{ + __u32 mask = mark->mask; + + if (!mark->ignored_mask) + return mask; + + /* Interest in FS_MODIFY may be needed for clearing ignored mask */ + if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) + mask |= FS_MODIFY; + + /* + * If mark is interested in ignoring events on children, the object must + * show interest in those events for fsnotify_parent() to notice it. 
+ */ + return mask | (mark->ignored_mask & ALL_FSNOTIFY_EVENTS); +} + /* Get mask of events for a list of marks */ extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn); /* Calculate mask of events for a list of marks */ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 9999e29187de..ed8cf433a46a 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -30,6 +30,12 @@ #define ARCH_SUPPORTS_FTRACE_OPS 0 #endif +#ifdef CONFIG_TRACING +extern void ftrace_boot_snapshot(void); +#else +static inline void ftrace_boot_snapshot(void) { } +#endif + #ifdef CONFIG_FUNCTION_TRACER struct ftrace_ops; struct ftrace_regs; @@ -215,7 +221,10 @@ struct ftrace_ops_hash { void ftrace_free_init_mem(void); void ftrace_free_mem(struct module *mod, void *start, void *end); #else -static inline void ftrace_free_init_mem(void) { } +static inline void ftrace_free_init_mem(void) +{ + ftrace_boot_snapshot(); +} static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { } #endif @@ -512,6 +521,8 @@ struct dyn_ftrace { int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, int remove, int reset); +int ftrace_set_filter_ips(struct ftrace_ops *ops, unsigned long *ips, + unsigned int cnt, int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, @@ -802,6 +813,7 @@ static inline unsigned long ftrace_location(unsigned long ip) #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) #define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) +#define ftrace_set_filter_ips(ops, ips, cnt, remove, reset) ({ -ENODEV; }) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 80f63c862be5..0fa17fb85de5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -54,9 +54,17 @@ struct vm_area_struct; #define ___GFP_THISNODE 0x200000u #define ___GFP_ACCOUNT 0x400000u #define ___GFP_ZEROTAGS 0x800000u -#define ___GFP_SKIP_KASAN_POISON 0x1000000u +#ifdef CONFIG_KASAN_HW_TAGS +#define ___GFP_SKIP_ZERO 0x1000000u +#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u +#define ___GFP_SKIP_KASAN_POISON 0x4000000u +#else +#define ___GFP_SKIP_ZERO 0 +#define ___GFP_SKIP_KASAN_UNPOISON 0 +#define ___GFP_SKIP_KASAN_POISON 0 +#endif #ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x2000000u +#define ___GFP_NOLOCKDEP 0x8000000u #else #define ___GFP_NOLOCKDEP 0 #endif @@ -79,7 +87,7 @@ struct vm_area_struct; * DOC: Page mobility and placement hints * * Page mobility and placement hints - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * --------------------------------- * * These flags provide hints about how mobile the page is. Pages with similar * mobility are placed within the same pageblocks to minimise problems due @@ -112,7 +120,7 @@ struct vm_area_struct; * DOC: Watermark modifiers * * Watermark modifiers -- controls access to emergency reserves - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * ------------------------------------------------------------ * * %__GFP_HIGH indicates that the caller is high-priority and that granting * the request is necessary before the system can make forward progress. 
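/*
 * Bit arithmetic implied by the ___GFP_* changes above: with
 * CONFIG_KASAN_HW_TAGS the three skip flags occupy bits 24-26 (0x1000000,
 * 0x2000000 and 0x4000000) and ___GFP_NOLOCKDEP moves from bit 25 up to
 * bit 27 (0x8000000). The __GFP_BITS_SHIFT update further below matches:
 * 24 + 3 + 1 = 28 with both KASAN_HW_TAGS and LOCKDEP enabled, and 24
 * with neither.
 */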
@@ -144,7 +152,7 @@ struct vm_area_struct;
 * DOC: Reclaim modifiers
 *
 * Reclaim modifiers
- * ~~~~~~~~~~~~~~~~~
+ * -----------------
 * Please note that all the following flags are only applicable to sleepable
 * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them).
 *
@@ -224,7 +232,7 @@ struct vm_area_struct;
 * DOC: Action modifiers
 *
 * Action modifiers
- * ~~~~~~~~~~~~~~~~
+ * ----------------
 *
 * %__GFP_NOWARN suppresses allocation failure reports.
 *
@@ -232,31 +240,40 @@ struct vm_area_struct;
 *
 * %__GFP_ZERO returns a zeroed page on success.
 *
- * %__GFP_ZEROTAGS returns a page with zeroed memory tags on success, if
- * __GFP_ZERO is set.
+ * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself
+ * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that
+ * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting
+ * memory tags at the same time as zeroing memory has minimal additional
+ * performance impact.
+ *
+ * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation.
+ * Only effective in HW_TAGS mode.
 *
- * %__GFP_SKIP_KASAN_POISON returns a page which does not need to be poisoned
- * on deallocation. Typically used for userspace pages. Currently only has an
- * effect in HW tags mode.
+ * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation.
+ * Typically, used for userspace pages. Only effective in HW_TAGS mode.
 */
 #define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
 #define __GFP_COMP ((__force gfp_t)___GFP_COMP)
 #define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
 #define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS)
-#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
+#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO)
+#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON)
+#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON)
 
 /* Disable lockdep for GFP context tracking */
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (24 + \
+ 3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) + \
+ IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
 * DOC: Useful GFP flag combinations
 *
 * Useful GFP flag combinations
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * ----------------------------
 *
 * Useful GFP flag combinations that are commonly used. It is recommended
 * that subsystems start with one of these combinations and then set/clear
diff --git a/include/linux/gfp_api.h b/include/linux/gfp_api.h
new file mode 100644
index 000000000000..5a05a2764a86
--- /dev/null
+++ b/include/linux/gfp_api.h
@@ -0,0 +1 @@
+#include <linux/gfp.h>
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 3ad67b4a72be..c3aa8b330e1c 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -8,27 +8,16 @@
 #include <linux/err.h>
 
 struct device;
-
-/**
- * Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
- * preferable to the old integer-based handles.
- *
- * Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
- * until the GPIO is released.
- */
 struct gpio_desc;
-
-/**
- * Opaque descriptor for a structure of GPIO array attributes.
This structure - * is attached to struct gpiod_descs obtained from gpiod_get_array() and can be - * passed back to get/set array functions in order to activate fast processing - * path if applicable. - */ struct gpio_array; /** - * Struct containing an array of descriptors that can be obtained using - * gpiod_get_array(). + * struct gpio_descs - Struct containing an array of descriptors that can be + * obtained using gpiod_get_array() + * + * @info: Pointer to the opaque gpio_array structure + * @ndescs: Number of held descriptors + * @desc: Array of pointers to GPIO descriptors */ struct gpio_descs { struct gpio_array *info; @@ -43,8 +32,16 @@ struct gpio_descs { #define GPIOD_FLAGS_BIT_NONEXCLUSIVE BIT(4) /** - * Optional flags that can be passed to one of gpiod_* to configure direction - * and output value. These values cannot be OR'd. + * enum gpiod_flags - Optional flags that can be passed to one of gpiod_* to + * configure direction and output value. These values + * cannot be OR'd. + * + * @GPIOD_ASIS: Don't change anything + * @GPIOD_IN: Set lines to input mode + * @GPIOD_OUT_LOW: Set lines to output and drive them low + * @GPIOD_OUT_HIGH: Set lines to output and drive them high + * @GPIOD_OUT_LOW_OPEN_DRAIN: Set lines to open-drain output and drive them low + * @GPIOD_OUT_HIGH_OPEN_DRAIN: Set lines to open-drain output and drive them high */ enum gpiod_flags { GPIOD_ASIS = 0, diff --git a/include/linux/greybus/greybus_manifest.h b/include/linux/greybus/greybus_manifest.h index 6e62fe478712..bef9eb2093e9 100644 --- a/include/linux/greybus/greybus_manifest.h +++ b/include/linux/greybus/greybus_manifest.h @@ -100,7 +100,7 @@ enum { struct greybus_descriptor_string { __u8 length; __u8 id; - __u8 string[0]; + __u8 string[]; } __packed; /* @@ -175,7 +175,7 @@ struct greybus_manifest_header { struct greybus_manifest { struct greybus_manifest_header header; - struct greybus_descriptor descriptors[0]; + struct greybus_descriptor descriptors[]; } __packed; #endif /* __GREYBUS_MANIFEST_H */ diff --git a/include/linux/greybus/hd.h b/include/linux/greybus/hd.h index d3faf0c1a569..718e2857054e 100644 --- a/include/linux/greybus/hd.h +++ b/include/linux/greybus/hd.h @@ -58,7 +58,7 @@ struct gb_host_device { struct gb_svc *svc; /* Private data for the host driver */ - unsigned long hd_priv[0] __aligned(sizeof(s64)); + unsigned long hd_priv[] __aligned(sizeof(s64)); }; #define to_gb_host_device(d) container_of(d, struct gb_host_device, dev) diff --git a/include/linux/greybus/module.h b/include/linux/greybus/module.h index 47b839af145d..3efe2133acfd 100644 --- a/include/linux/greybus/module.h +++ b/include/linux/greybus/module.h @@ -23,7 +23,7 @@ struct gb_module { bool disconnected; - struct gb_interface *interfaces[0]; + struct gb_interface *interfaces[]; }; #define to_gb_module(d) container_of(d, struct gb_module, dev) diff --git a/include/linux/hashtable_api.h b/include/linux/hashtable_api.h new file mode 100644 index 000000000000..c268ac2c5c0e --- /dev/null +++ b/include/linux/hashtable_api.h @@ -0,0 +1 @@ +#include <linux/hashtable.h> diff --git a/include/linux/hid.h b/include/linux/hid.h index 7487b0586fe6..4363a63b9775 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -342,12 +342,12 @@ struct hid_item { * HID device quirks. 
*/
 
-/*
+/*
 * Increase this if you need to configure more HID quirks at module load time
 */
 #define MAX_USBHID_BOOT_QUIRKS 4
 
-#define HID_QUIRK_INVERT BIT(0)
+/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
 #define HID_QUIRK_NOTOUCH BIT(1)
 #define HID_QUIRK_IGNORE BIT(2)
 #define HID_QUIRK_NOGET BIT(3)
@@ -476,31 +476,50 @@ struct hid_field {
 unsigned report_count; /* number of this field in the report */
 unsigned report_type; /* (input,output,feature) */
 __s32 *value; /* last known value(s) */
+ __s32 *new_value; /* newly read value(s) */
+ __s32 *usages_priorities; /* priority of each usage when reading the report
+ * bits 8-16 are reserved for hid-input usage
+ */
 __s32 logical_minimum;
 __s32 logical_maximum;
 __s32 physical_minimum;
 __s32 physical_maximum;
 __s32 unit_exponent;
 unsigned unit;
+ bool ignored; /* this field is ignored in this event */
 struct hid_report *report; /* associated report */
 unsigned index; /* index into report->field[] */
 
 /* hidinput data */
 struct hid_input *hidinput; /* associated input structure */
 __u16 dpad; /* dpad input code */
+ unsigned int slot_idx; /* slot index in a report */
 };
 
 #define HID_MAX_FIELDS 256
 
+struct hid_field_entry {
+ struct list_head list;
+ struct hid_field *field;
+ unsigned int index;
+ __s32 priority;
+};
+
 struct hid_report {
 struct list_head list;
 struct list_head hidinput_list;
+ struct list_head field_entry_list; /* ordered list of input fields */
 unsigned int id; /* id of this report */
 unsigned int type; /* report type */
 unsigned int application; /* application usage for this report */
 struct hid_field *field[HID_MAX_FIELDS]; /* fields of the report */
+ struct hid_field_entry *field_entries; /* allocated memory of input field_entry */
 unsigned maxfield; /* maximum valid field index */
 unsigned size; /* size of the report (bits) */
 struct hid_device *device; /* associated device */
+
+ /* tool related state */
+ bool tool_active; /* whether the current tool is active */
+ unsigned int tool; /* BTN_TOOL_* */
 };
 
 #define HID_MAX_IDS 256
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 0a0b2b09b1b8..a77be5630209 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -246,6 +246,16 @@ do { \
 __kunmap_atomic(__addr); \
 } while (0)
 
+/**
+ * kunmap_local - Unmap a page mapped via kmap_local_page().
+ * @__addr: An address within the page mapped
+ *
+ * @__addr can be any address within the mapped page. Commonly it is the
+ * address returned from kmap_local_page(), but it can also include offsets.
+ *
+ * Unmapping should be done in the reverse order of the mapping. See
+ * kmap_local_page() for details.
+ */
 #define kunmap_local(__addr) \
 do { \
 BUILD_BUG_ON(__same_type((__addr), struct page *)); \
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
new file mode 100644
index 000000000000..177f7b7cd414
--- /dev/null
+++ b/include/linux/hisi_acc_qm.h
@@ -0,0 +1,490 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 HiSilicon Limited.
*/ +#ifndef HISI_ACC_QM_H +#define HISI_ACC_QM_H + +#include <linux/bitfield.h> +#include <linux/debugfs.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> + +#define QM_QNUM_V1 4096 +#define QM_QNUM_V2 1024 +#define QM_MAX_VFS_NUM_V2 63 + +/* qm user domain */ +#define QM_ARUSER_M_CFG_1 0x100088 +#define AXUSER_SNOOP_ENABLE BIT(30) +#define AXUSER_CMD_TYPE GENMASK(14, 12) +#define AXUSER_CMD_SMMU_NORMAL 1 +#define AXUSER_NS BIT(6) +#define AXUSER_NO BIT(5) +#define AXUSER_FP BIT(4) +#define AXUSER_SSV BIT(0) +#define AXUSER_BASE (AXUSER_SNOOP_ENABLE | \ + FIELD_PREP(AXUSER_CMD_TYPE, \ + AXUSER_CMD_SMMU_NORMAL) | \ + AXUSER_NS | AXUSER_NO | AXUSER_FP) +#define QM_ARUSER_M_CFG_ENABLE 0x100090 +#define ARUSER_M_CFG_ENABLE 0xfffffffe +#define QM_AWUSER_M_CFG_1 0x100098 +#define QM_AWUSER_M_CFG_ENABLE 0x1000a0 +#define AWUSER_M_CFG_ENABLE 0xfffffffe +#define QM_WUSER_M_CFG_ENABLE 0x1000a8 +#define WUSER_M_CFG_ENABLE 0xffffffff + +/* mailbox */ +#define QM_MB_CMD_SQC 0x0 +#define QM_MB_CMD_CQC 0x1 +#define QM_MB_CMD_EQC 0x2 +#define QM_MB_CMD_AEQC 0x3 +#define QM_MB_CMD_SQC_BT 0x4 +#define QM_MB_CMD_CQC_BT 0x5 +#define QM_MB_CMD_SQC_VFT_V2 0x6 +#define QM_MB_CMD_STOP_QP 0x8 +#define QM_MB_CMD_SRC 0xc +#define QM_MB_CMD_DST 0xd + +#define QM_MB_CMD_SEND_BASE 0x300 +#define QM_MB_EVENT_SHIFT 8 +#define QM_MB_BUSY_SHIFT 13 +#define QM_MB_OP_SHIFT 14 +#define QM_MB_CMD_DATA_ADDR_L 0x304 +#define QM_MB_CMD_DATA_ADDR_H 0x308 +#define QM_MB_MAX_WAIT_CNT 6000 + +/* doorbell */ +#define QM_DOORBELL_CMD_SQ 0 +#define QM_DOORBELL_CMD_CQ 1 +#define QM_DOORBELL_CMD_EQ 2 +#define QM_DOORBELL_CMD_AEQ 3 + +#define QM_DOORBELL_SQ_CQ_BASE_V2 0x1000 +#define QM_DOORBELL_EQ_AEQ_BASE_V2 0x2000 +#define QM_QP_MAX_NUM_SHIFT 11 +#define QM_DB_CMD_SHIFT_V2 12 +#define QM_DB_RAND_SHIFT_V2 16 +#define QM_DB_INDEX_SHIFT_V2 32 +#define QM_DB_PRIORITY_SHIFT_V2 48 +#define QM_VF_STATE 0x60 + +/* qm cache */ +#define QM_CACHE_CTL 0x100050 +#define SQC_CACHE_ENABLE BIT(0) +#define CQC_CACHE_ENABLE BIT(1) +#define SQC_CACHE_WB_ENABLE BIT(4) +#define SQC_CACHE_WB_THRD GENMASK(10, 5) +#define CQC_CACHE_WB_ENABLE BIT(11) +#define CQC_CACHE_WB_THRD GENMASK(17, 12) +#define QM_AXI_M_CFG 0x1000ac +#define AXI_M_CFG 0xffff +#define QM_AXI_M_CFG_ENABLE 0x1000b0 +#define AM_CFG_SINGLE_PORT_MAX_TRANS 0x300014 +#define AXI_M_CFG_ENABLE 0xffffffff +#define QM_PEH_AXUSER_CFG 0x1000cc +#define QM_PEH_AXUSER_CFG_ENABLE 0x1000d0 +#define PEH_AXUSER_CFG 0x401001 +#define PEH_AXUSER_CFG_ENABLE 0xffffffff + +#define QM_AXI_RRESP BIT(0) +#define QM_AXI_BRESP BIT(1) +#define QM_ECC_MBIT BIT(2) +#define QM_ECC_1BIT BIT(3) +#define QM_ACC_GET_TASK_TIMEOUT BIT(4) +#define QM_ACC_DO_TASK_TIMEOUT BIT(5) +#define QM_ACC_WB_NOT_READY_TIMEOUT BIT(6) +#define QM_SQ_CQ_VF_INVALID BIT(7) +#define QM_CQ_VF_INVALID BIT(8) +#define QM_SQ_VF_INVALID BIT(9) +#define QM_DB_TIMEOUT BIT(10) +#define QM_OF_FIFO_OF BIT(11) +#define QM_DB_RANDOM_INVALID BIT(12) +#define QM_MAILBOX_TIMEOUT BIT(13) +#define QM_FLR_TIMEOUT BIT(14) + +#define QM_BASE_NFE (QM_AXI_RRESP | QM_AXI_BRESP | QM_ECC_MBIT | \ + QM_ACC_GET_TASK_TIMEOUT | QM_DB_TIMEOUT | \ + QM_OF_FIFO_OF | QM_DB_RANDOM_INVALID | \ + QM_MAILBOX_TIMEOUT | QM_FLR_TIMEOUT) +#define QM_BASE_CE QM_ECC_1BIT + +#define QM_Q_DEPTH 1024 +#define QM_MIN_QNUM 2 +#define HISI_ACC_SGL_SGE_NR_MAX 255 +#define QM_SHAPER_CFG 0x100164 +#define QM_SHAPER_ENABLE BIT(30) +#define QM_SHAPER_TYPE1_OFFSET 10 + +/* page number for queue file region */ +#define QM_DOORBELL_PAGE_NR 1 + +/* uacce mode of the driver 
*/ +#define UACCE_MODE_NOUACCE 0 /* don't use uacce */ +#define UACCE_MODE_SVA 1 /* use uacce sva mode */ +#define UACCE_MODE_DESC "0(default) means only register to crypto, 1 means both register to crypto and uacce" + +enum qm_stop_reason { + QM_NORMAL, + QM_SOFT_RESET, + QM_FLR, +}; + +enum qm_state { + QM_INIT = 0, + QM_START, + QM_CLOSE, + QM_STOP, +}; + +enum qp_state { + QP_INIT = 1, + QP_START, + QP_STOP, + QP_CLOSE, +}; + +enum qm_hw_ver { + QM_HW_UNKNOWN = -1, + QM_HW_V1 = 0x20, + QM_HW_V2 = 0x21, + QM_HW_V3 = 0x30, +}; + +enum qm_fun_type { + QM_HW_PF, + QM_HW_VF, +}; + +enum qm_debug_file { + CURRENT_QM, + CURRENT_Q, + CLEAR_ENABLE, + DEBUG_FILE_NUM, +}; + +enum qm_vf_state { + QM_READY = 0, + QM_NOT_READY, +}; + +struct qm_dfx { + atomic64_t err_irq_cnt; + atomic64_t aeq_irq_cnt; + atomic64_t abnormal_irq_cnt; + atomic64_t create_qp_err_cnt; + atomic64_t mb_err_cnt; +}; + +struct debugfs_file { + enum qm_debug_file index; + struct mutex lock; + struct qm_debug *debug; +}; + +struct qm_debug { + u32 curr_qm_qp_num; + u32 sqe_mask_offset; + u32 sqe_mask_len; + struct qm_dfx dfx; + struct dentry *debug_root; + struct dentry *qm_d; + struct debugfs_file files[DEBUG_FILE_NUM]; +}; + +struct qm_shaper_factor { + u32 func_qos; + u64 cir_b; + u64 cir_u; + u64 cir_s; + u64 cbs_s; +}; + +struct qm_dma { + void *va; + dma_addr_t dma; + size_t size; +}; + +struct hisi_qm_status { + u32 eq_head; + bool eqc_phase; + u32 aeq_head; + bool aeqc_phase; + atomic_t flags; + int stop_reason; +}; + +struct hisi_qm; + +struct hisi_qm_err_info { + char *acpi_rst; + u32 msi_wr_port; + u32 ecc_2bits_mask; + u32 dev_ce_mask; + u32 ce; + u32 nfe; + u32 fe; +}; + +struct hisi_qm_err_status { + u32 is_qm_ecc_mbit; + u32 is_dev_ecc_mbit; +}; + +struct hisi_qm_err_ini { + int (*hw_init)(struct hisi_qm *qm); + void (*hw_err_enable)(struct hisi_qm *qm); + void (*hw_err_disable)(struct hisi_qm *qm); + u32 (*get_dev_hw_err_status)(struct hisi_qm *qm); + void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts); + void (*open_axi_master_ooo)(struct hisi_qm *qm); + void (*close_axi_master_ooo)(struct hisi_qm *qm); + void (*open_sva_prefetch)(struct hisi_qm *qm); + void (*close_sva_prefetch)(struct hisi_qm *qm); + void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); + void (*err_info_init)(struct hisi_qm *qm); +}; + +struct hisi_qm_list { + struct mutex lock; + struct list_head list; + int (*register_to_crypto)(struct hisi_qm *qm); + void (*unregister_from_crypto)(struct hisi_qm *qm); +}; + +struct hisi_qm { + enum qm_hw_ver ver; + enum qm_fun_type fun_type; + const char *dev_name; + struct pci_dev *pdev; + void __iomem *io_base; + void __iomem *db_io_base; + u32 sqe_size; + u32 qp_base; + u32 qp_num; + u32 qp_in_used; + u32 ctrl_qp_num; + u32 max_qp_num; + u32 vfs_num; + u32 db_interval; + struct list_head list; + struct hisi_qm_list *qm_list; + + struct qm_dma qdma; + struct qm_sqc *sqc; + struct qm_cqc *cqc; + struct qm_eqe *eqe; + struct qm_aeqe *aeqe; + dma_addr_t sqc_dma; + dma_addr_t cqc_dma; + dma_addr_t eqe_dma; + dma_addr_t aeqe_dma; + + struct hisi_qm_status status; + const struct hisi_qm_err_ini *err_ini; + struct hisi_qm_err_info err_info; + struct hisi_qm_err_status err_status; + unsigned long misc_ctl; /* driver removing and reset sched */ + + struct rw_semaphore qps_lock; + struct idr qp_idr; + struct hisi_qp *qp_array; + + struct mutex mailbox_lock; + + const struct hisi_qm_hw_ops *ops; + + struct qm_debug debug; + + u32 error_mask; + + struct workqueue_struct *wq; + struct work_struct 
work; + struct work_struct rst_work; + struct work_struct cmd_process; + + const char *algs; + bool use_sva; + bool is_frozen; + + /* doorbell isolation enable */ + bool use_db_isolation; + resource_size_t phys_base; + resource_size_t db_phys_base; + struct uacce_device *uacce; + int mode; + struct qm_shaper_factor *factor; + u32 mb_qos; + u32 type_rate; +}; + +struct hisi_qp_status { + atomic_t used; + u16 sq_tail; + u16 cq_head; + bool cqc_phase; + atomic_t flags; +}; + +struct hisi_qp_ops { + int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm); +}; + +struct hisi_qp { + u32 qp_id; + u8 alg_type; + u8 req_type; + + struct qm_dma qdma; + void *sqe; + struct qm_cqe *cqe; + dma_addr_t sqe_dma; + dma_addr_t cqe_dma; + + struct hisi_qp_status qp_status; + struct hisi_qp_ops *hw_ops; + void *qp_ctx; + void (*req_cb)(struct hisi_qp *qp, void *data); + void (*event_cb)(struct hisi_qp *qp); + + struct hisi_qm *qm; + bool is_resetting; + bool is_in_kernel; + u16 pasid; + struct uacce_queue *uacce_q; +}; + +static inline int q_num_set(const char *val, const struct kernel_param *kp, + unsigned int device) +{ + struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, + device, NULL); + u32 n, q_num; + int ret; + + if (!val) + return -EINVAL; + + if (!pdev) { + q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); + pr_info("No device found currently, suppose queue number is %u\n", + q_num); + } else { + if (pdev->revision == QM_HW_V1) + q_num = QM_QNUM_V1; + else + q_num = QM_QNUM_V2; + } + + ret = kstrtou32(val, 10, &n); + if (ret || n < QM_MIN_QNUM || n > q_num) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline int vfs_num_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret < 0) + return ret; + + if (n > QM_MAX_VFS_NUM_V2) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline int mode_set(const char *val, const struct kernel_param *kp) +{ + u32 n; + int ret; + + if (!val) + return -EINVAL; + + ret = kstrtou32(val, 10, &n); + if (ret != 0 || (n != UACCE_MODE_SVA && + n != UACCE_MODE_NOUACCE)) + return -EINVAL; + + return param_set_int(val, kp); +} + +static inline int uacce_mode_set(const char *val, const struct kernel_param *kp) +{ + return mode_set(val, kp); +} + +static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list) +{ + INIT_LIST_HEAD(&qm_list->list); + mutex_init(&qm_list->lock); +} + +int hisi_qm_init(struct hisi_qm *qm); +void hisi_qm_uninit(struct hisi_qm *qm); +int hisi_qm_start(struct hisi_qm *qm); +int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r); +struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type); +int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg); +int hisi_qm_stop_qp(struct hisi_qp *qp); +void hisi_qm_release_qp(struct hisi_qp *qp); +int hisi_qp_send(struct hisi_qp *qp, const void *msg); +int hisi_qm_get_free_qp_num(struct hisi_qm *qm); +int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number); +void hisi_qm_debug_init(struct hisi_qm *qm); +enum qm_hw_ver hisi_qm_get_hw_version(struct pci_dev *pdev); +void hisi_qm_debug_regs_clear(struct hisi_qm *qm); +int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs); +int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen); +int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); +void hisi_qm_dev_err_init(struct hisi_qm *qm); +void hisi_qm_dev_err_uninit(struct hisi_qm *qm); +pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev 
*pdev, + pci_channel_state_t state); +pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev); +void hisi_qm_reset_prepare(struct pci_dev *pdev); +void hisi_qm_reset_done(struct pci_dev *pdev); + +int hisi_qm_wait_mb_ready(struct hisi_qm *qm); +int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, + bool op); + +struct hisi_acc_sgl_pool; +struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, + struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, + u32 index, dma_addr_t *hw_sgl_dma); +void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, + struct hisi_acc_hw_sgl *hw_sgl); +struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, + u32 count, u32 sge_nr); +void hisi_acc_free_sgl_pool(struct device *dev, + struct hisi_acc_sgl_pool *pool); +int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, + u8 alg_type, int node, struct hisi_qp **qps); +void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num); +void hisi_qm_dev_shutdown(struct pci_dev *pdev); +void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list); +int hisi_qm_resume(struct device *dev); +int hisi_qm_suspend(struct device *dev); +void hisi_qm_pm_uninit(struct hisi_qm *qm); +void hisi_qm_pm_init(struct hisi_qm *qm); +int hisi_qm_get_dfx_access(struct hisi_qm *qm); +void hisi_qm_put_dfx_access(struct hisi_qm *qm); +void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); + +/* Used by VFIO ACC live migration driver */ +struct pci_driver *hisi_sec_get_pf_driver(void); +struct pci_driver *hisi_hpre_get_pf_driver(void); +struct pci_driver *hisi_zip_get_pf_driver(void); +#endif diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 2fd2e91d5107..d5a6f101f843 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -9,14 +9,9 @@ #ifndef LINUX_HMM_H #define LINUX_HMM_H -#include <linux/kconfig.h> -#include <linux/pgtable.h> +#include <linux/mm.h> -#include <linux/device.h> -#include <linux/migrate.h> -#include <linux/memremap.h> -#include <linux/completion.h> -#include <linux/mmu_notifier.h> +struct mmu_interval_notifier; /* * On output: diff --git a/include/linux/hrtimer_api.h b/include/linux/hrtimer_api.h new file mode 100644 index 000000000000..8d9700894468 --- /dev/null +++ b/include/linux/hrtimer_api.h @@ -0,0 +1 @@ +#include <linux/hrtimer.h> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e4c18ba8d3bf..2999190adc22 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -183,9 +183,8 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, void prep_transhuge_page(struct page *page); void free_transhuge_page(struct page *page); -bool is_transparent_hugepage(struct page *page); -bool can_split_huge_page(struct page *page, int *pextra_pins); +bool can_split_folio(struct folio *folio, int *pextra_pins); int split_huge_page_to_list(struct page *page, struct list_head *list); static inline int split_huge_page(struct page *page) { @@ -194,7 +193,7 @@ static inline int split_huge_page(struct page *page) void deferred_split_huge_page(struct page *page); void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long address, bool freeze, struct page *page); + unsigned long address, bool freeze, struct folio *folio); #define split_huge_pmd(__vma, __pmd, __address) \ do { \ 
@@ -207,7 +206,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, - bool freeze, struct page *page); + bool freeze, struct folio *folio); void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, unsigned long address); @@ -251,30 +250,6 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, } /** - * thp_order - Order of a transparent huge page. - * @page: Head page of a transparent huge page. - */ -static inline unsigned int thp_order(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - if (PageHead(page)) - return HPAGE_PMD_ORDER; - return 0; -} - -/** - * thp_nr_pages - The number of regular pages in this huge page. - * @page: The head page of a huge page. - */ -static inline int thp_nr_pages(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - if (PageHead(page)) - return HPAGE_PMD_NR; - return 1; -} - -/** * folio_test_pmd_mappable - Can we map this folio with a PMD? * @folio: The folio to test */ @@ -336,18 +311,6 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) -static inline unsigned int thp_order(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - return 0; -} - -static inline int thp_nr_pages(struct page *page) -{ - VM_BUG_ON_PGFLAGS(PageTail(page), page); - return 1; -} - static inline bool folio_test_pmd_mappable(struct folio *folio) { return false; @@ -377,17 +340,12 @@ static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, static inline void prep_transhuge_page(struct page *page) {} -static inline bool is_transparent_hugepage(struct page *page) -{ - return false; -} - #define transparent_hugepage_flags 0UL #define thp_get_unmapped_area NULL static inline bool -can_split_huge_page(struct page *page, int *pextra_pins) +can_split_folio(struct folio *folio, int *pextra_pins) { BUILD_BUG(); return false; @@ -406,9 +364,9 @@ static inline void deferred_split_huge_page(struct page *page) {} do { } while (0) static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, - unsigned long address, bool freeze, struct page *page) {} + unsigned long address, bool freeze, struct folio *folio) {} static inline void split_huge_pmd_address(struct vm_area_struct *vma, - unsigned long address, bool freeze, struct page *page) {} + unsigned long address, bool freeze, struct folio *folio) {} #define split_huge_pud(__vma, __pmd, __address) \ do { } while (0) @@ -483,15 +441,10 @@ static inline bool thp_migration_supported(void) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -/** - * thp_size - Size of a transparent huge page. - * @page: Head page of a transparent huge page. - * - * Return: Number of bytes in this page. 
- */ -static inline unsigned long thp_size(struct page *page) +static inline int split_folio_to_list(struct folio *folio, + struct list_head *list) { - return PAGE_SIZE << thp_order(page); + return split_huge_page_to_list(&folio->page, list); } #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d1897a69c540..53c1b6082a4c 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -754,7 +754,7 @@ static inline void arch_clear_hugepage_flags(struct page *page) { } static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) { - return entry; + return pte_mkhuge(entry); } #endif @@ -970,6 +970,11 @@ static inline struct hstate *page_hstate(struct page *page) return NULL; } +static inline struct hstate *size_to_hstate(unsigned long size) +{ + return NULL; +} + static inline unsigned long huge_page_size(struct hstate *h) { return PAGE_SIZE; @@ -1075,12 +1080,6 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr } #endif /* CONFIG_HUGETLB_PAGE */ -#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP -extern bool hugetlb_free_vmemmap_enabled; -#else -#define hugetlb_free_vmemmap_enabled false -#endif - static inline spinlock_t *huge_pte_lock(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 8e6dd908da21..aa1d4da03538 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -60,7 +60,5 @@ extern int devm_hwrng_register(struct device *dev, struct hwrng *rng); /** Unregister a Hardware Random Number Generator driver. */ extern void hwrng_unregister(struct hwrng *rng); extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng); -/** Feed random bits into the pool. 
*/ -extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy); #endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index fad1f1df26df..eba380b76d15 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -332,12 +332,14 @@ enum hwmon_pwm_attributes { hwmon_pwm_enable, hwmon_pwm_mode, hwmon_pwm_freq, + hwmon_pwm_auto_channels_temp, }; #define HWMON_PWM_INPUT BIT(hwmon_pwm_input) #define HWMON_PWM_ENABLE BIT(hwmon_pwm_enable) #define HWMON_PWM_MODE BIT(hwmon_pwm_mode) #define HWMON_PWM_FREQ BIT(hwmon_pwm_freq) +#define HWMON_PWM_AUTO_CHANNELS_TEMP BIT(hwmon_pwm_auto_channels_temp) enum hwmon_intrusion_attributes { hwmon_intrusion_alarm, diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h index 73b0982cc519..ad59a4ae60d1 100644 --- a/include/linux/i3c/ccc.h +++ b/include/linux/i3c/ccc.h @@ -132,7 +132,7 @@ struct i3c_ccc_dev_desc { struct i3c_ccc_defslvs { u8 count; struct i3c_ccc_dev_desc master; - struct i3c_ccc_dev_desc slaves[0]; + struct i3c_ccc_dev_desc slaves[]; } __packed; /** @@ -240,7 +240,7 @@ struct i3c_ccc_bridged_slave_desc { */ struct i3c_ccc_setbrgtgt { u8 count; - struct i3c_ccc_bridged_slave_desc bslaves[0]; + struct i3c_ccc_bridged_slave_desc bslaves[]; } __packed; /** @@ -318,7 +318,7 @@ enum i3c_ccc_setxtime_subcmd { */ struct i3c_ccc_setxtime { u8 subcmd; - u8 data[0]; + u8 data[]; } __packed; #define I3C_CCC_GETXTIME_SYNC_MODE BIT(0) diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 559b6c644938..75d40acb60c1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -9,7 +9,7 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2021 Intel Corporation + * Copyright (c) 2018 - 2022 Intel Corporation */ #ifndef LINUX_IEEE80211_H @@ -18,6 +18,7 @@ #include <linux/types.h> #include <linux/if_ether.h> #include <linux/etherdevice.h> +#include <linux/bitfield.h> #include <asm/byteorder.h> #include <asm/unaligned.h> @@ -1023,6 +1024,8 @@ struct ieee80211_tpc_report_ie { #define IEEE80211_ADDBA_EXT_FRAG_LEVEL_MASK GENMASK(2, 1) #define IEEE80211_ADDBA_EXT_FRAG_LEVEL_SHIFT 1 #define IEEE80211_ADDBA_EXT_NO_FRAG BIT(0) +#define IEEE80211_ADDBA_EXT_BUF_SIZE_MASK GENMASK(7, 5) +#define IEEE80211_ADDBA_EXT_BUF_SIZE_SHIFT 10 struct ieee80211_addba_ext_ie { u8 data; @@ -1697,10 +1700,12 @@ struct ieee80211_ht_operation { * A-MPDU buffer sizes * According to HT size varies from 8 to 64 frames * HE adds the ability to have up to 256 frames. + * EHT adds the ability to have up to 1K frames. */ #define IEEE80211_MIN_AMPDU_BUF 0x8 #define IEEE80211_MAX_AMPDU_BUF_HT 0x40 -#define IEEE80211_MAX_AMPDU_BUF 0x100 +#define IEEE80211_MAX_AMPDU_BUF_HE 0x100 +#define IEEE80211_MAX_AMPDU_BUF_EHT 0x400 /* Spatial Multiplexing Power Save Modes (for capability) */ @@ -1925,6 +1930,111 @@ struct ieee80211_mu_edca_param_set { struct ieee80211_he_mu_edca_param_ac_rec ac_vo; } __packed; +#define IEEE80211_EHT_MCS_NSS_RX 0x0f +#define IEEE80211_EHT_MCS_NSS_TX 0xf0 + +/** + * struct ieee80211_eht_mcs_nss_supp_20mhz_only - EHT 20MHz only station max + * supported NSS for per MCS. + * + * For each field below, bits 0 - 3 indicate the maximal number of spatial + * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams + * for Tx. 
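+ *
+ * With the IEEE80211_EHT_MCS_NSS_RX/TX masks above and <linux/bitfield.h>,
+ * the two halves of each octet can be extracted like this (a sketch; "nss"
+ * is an illustrative pointer to this struct):
+ *
+ *	rx = u8_get_bits(nss->rx_tx_mcs7_max_nss, IEEE80211_EHT_MCS_NSS_RX);
+ *	tx = u8_get_bits(nss->rx_tx_mcs7_max_nss, IEEE80211_EHT_MCS_NSS_TX);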
+ * + * @rx_tx_mcs7_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 0 - 7. + * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 8 - 9. + * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 10 - 11. + * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 12 - 13. + */ +struct ieee80211_eht_mcs_nss_supp_20mhz_only { + u8 rx_tx_mcs7_max_nss; + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; +}; + +/** + * struct ieee80211_eht_mcs_nss_supp_bw - EHT max supported NSS per MCS (except + * 20MHz only stations). + * + * For each field below, bits 0 - 3 indicate the maximal number of spatial + * streams for Rx, and bits 4 - 7 indicate the maximal number of spatial streams + * for Tx. + * + * @rx_tx_mcs9_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 0 - 9. + * @rx_tx_mcs11_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 10 - 11. + * @rx_tx_mcs13_max_nss: indicates the maximum number of spatial streams + * supported for reception and the maximum number of spatial streams + * supported for transmission for MCS 12 - 13. + */ +struct ieee80211_eht_mcs_nss_supp_bw { + u8 rx_tx_mcs9_max_nss; + u8 rx_tx_mcs11_max_nss; + u8 rx_tx_mcs13_max_nss; +}; + +/** + * struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data + * + * This structure is the "EHT Capabilities element" fixed fields as + * described in P802.11be_D1.4 section 9.4.2.313. + * + * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP* + * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP* + */ +struct ieee80211_eht_cap_elem_fixed { + u8 mac_cap_info[2]; + u8 phy_cap_info[9]; +} __packed; + +/** + * struct ieee80211_eht_cap_elem - EHT capabilities element + * @fixed: fixed parts, see &ieee80211_eht_cap_elem_fixed + * @optional: optional parts + */ +struct ieee80211_eht_cap_elem { + struct ieee80211_eht_cap_elem_fixed fixed; + + /* + * Followed by: + * Supported EHT-MCS And NSS Set field: 4, 3, 6 or 9 octets. + * EHT PPE Thresholds field: variable length. + */ + u8 optional[]; +} __packed; + +/** + * struct ieee80211_eht_operation - eht operation element + * + * This structure is the "EHT Operation Element" fields as + * described in P802.11be_D1.4 section 9.4.2.311 + * + * FIXME: The spec is unclear how big the fields are, and doesn't + * indicate the "Disabled Subchannel Bitmap Present" in the + * structure (Figure 9-1002a) at all ... 
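+ *
+ * Until that is resolved, the layout below treats the element as three
+ * fixed octets plus an optional two-octet disabled-subchannel bitmap, and
+ * ieee80211_eht_oper_size_ok() below checks lengths accordingly. A sketch
+ * of a caller ("oper" is an illustrative, already length-checked pointer):
+ *
+ *	if (oper->present_bm &
+ *	    IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT)
+ *		bitmap = get_unaligned_le16(oper->disable_subchannel_bitmap);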
+ */ +struct ieee80211_eht_operation { + u8 chan_width; + u8 ccfs; + u8 present_bm; + + u8 disable_subchannel_bitmap[]; +} __packed; + +#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x1 + /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 @@ -2129,6 +2239,8 @@ enum ieee80211_client_reg_power { #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10 +#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL 0x1e + #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe @@ -2309,6 +2421,7 @@ ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap) #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78 #define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3) #define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3) +#define IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE (7) /* * Calculate 802.11ax HE capabilities IE PPE field size @@ -2338,6 +2451,29 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) return n; } +static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) +{ + const struct ieee80211_he_cap_elem *he_cap_ie_elem = (const void *)data; + u8 needed = sizeof(*he_cap_ie_elem); + + if (len < needed) + return false; + + needed += ieee80211_he_mcs_nss_size(he_cap_ie_elem); + if (len < needed) + return false; + + if (he_cap_ie_elem->phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + if (len < needed + 1) + return false; + needed += ieee80211_he_ppe_size(data[needed], + he_cap_ie_elem->phy_cap_info); + } + + return len >= needed; +} + /* HE Operation defines */ #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 @@ -2427,7 +2563,7 @@ struct ieee80211_tx_pwr_env { static inline u8 ieee80211_he_oper_size(const u8 *he_oper_ie) { - struct ieee80211_he_operation *he_oper = (void *)he_oper_ie; + const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie; u8 oper_len = sizeof(struct ieee80211_he_operation); u32 he_oper_params; @@ -2460,7 +2596,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) static inline const struct ieee80211_he_6ghz_oper * ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) { - const u8 *ret = (void *)&he_oper->optional; + const u8 *ret = (const void *)&he_oper->optional; u32 he_oper_params; if (!he_oper) @@ -2475,7 +2611,7 @@ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) ret++; - return (void *)ret; + return (const void *)ret; } /* HE Spatial Reuse defines */ @@ -2496,7 +2632,7 @@ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) static inline u8 ieee80211_he_spr_size(const u8 *he_spr_ie) { - struct ieee80211_he_spr *he_spr = (void *)he_spr_ie; + const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie; u8 spr_len = sizeof(struct ieee80211_he_spr); u8 he_spr_params; @@ -2599,6 +2735,194 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) #define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0) #define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1) +/* EHT MAC capabilities as defined in P802.11be_D1.4 section 9.4.2.313.2 */ +#define 
IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS 0x01 +#define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02 +#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04 +#define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08 +#define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10 +#define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_MASK 0xc0 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_3895 0 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_7991 1 +#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_11454 2 + +/* EHT PHY capabilities as defined in P802.11be_D1.4 section 9.4.2.313.3 */ +#define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02 +#define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04 +#define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08 +#define IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO 0x10 +#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER 0x20 +#define IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE 0x40 + +/* EHT beamformee number of spatial streams <= 80MHz is split */ +#define IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK 0x80 +#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK 0x03 + +#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK 0x1c +#define IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK 0xe0 + +#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 0x07 +#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK 0x38 + +/* EHT number of sounding dimensions for 320MHz is split */ +#define IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK 0xc0 +#define IEEE80211_EHT_PHY_CAP3_SOUNDING_DIM_320MHZ_MASK 0x01 +#define IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK 0x02 +#define IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK 0x04 +#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK 0x08 +#define IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK 0x10 +#define IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK 0x20 +#define IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK 0x40 +#define IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK 0x80 + +#define IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO 0x01 +#define IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP 0x02 +#define IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP 0x04 +#define IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI 0x08 +#define IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK 0xf0 + +#define IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK 0x01 +#define IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP 0x02 +#define IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP 0x04 +#define IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT 0x08 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK 0x30 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_0US 0 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_8US 1 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US 2 +#define IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_20US 3 + +/* Maximum number of supported EHT LTF is split */ +#define IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK 0xc0 +#define IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK 0x07 + +#define IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK 0x78 +#define IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP 0x80 + +#define IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW 0x01 +#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ 0x02 +#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ 0x04 +#define IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ 0x08 +#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ 0x10 +#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ 0x20 +#define IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ 0x40 +#define 
IEEE80211_EHT_PHY_CAP7_TB_SOUNDING_FDBK_RATE_LIMIT 0x80 + +#define IEEE80211_EHT_PHY_CAP8_RX_1024QAM_WIDER_BW_DL_OFDMA 0x01 +#define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02 + +/* + * EHT operation channel width as defined in P802.11be_D1.4 section 9.4.2.311 + */ +#define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ 1 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ 2 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ 3 +#define IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ 4 + +/* Calculate 802.11be EHT capabilities IE Tx/Rx EHT MCS NSS Support Field size */ +static inline u8 +ieee80211_eht_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap, + const struct ieee80211_eht_cap_elem_fixed *eht_cap) +{ + u8 count = 0; + + /* on 2.4 GHz, if it supports 40 MHz, the result is 3 */ + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G) + return 3; + + /* on 2.4 GHz, these three bits are reserved, so should be 0 */ + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G) + count += 3; + + if (he_cap->phy_cap_info[0] & + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) + count += 3; + + if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) + count += 3; + + return count ? count : 4; +} + +/* 802.11be EHT PPE Thresholds */ +#define IEEE80211_EHT_PPE_THRES_NSS_POS 0 +#define IEEE80211_EHT_PPE_THRES_NSS_MASK 0xf +#define IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK 0x1f0 +#define IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE 3 +#define IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE 9 + +/* + * Calculate 802.11be EHT capabilities IE EHT field size + */ +static inline u8 +ieee80211_eht_ppe_size(u16 ppe_thres_hdr, const u8 *phy_cap_info) +{ + u32 n; + + if (!(phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT)) + return 0; + + n = hweight16(ppe_thres_hdr & + IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); + n *= 1 + u16_get_bits(ppe_thres_hdr, IEEE80211_EHT_PPE_THRES_NSS_MASK); + + /* + * Each pair is 6 bits, and we need to add the 9 "header" bits to the + * total size. 
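+ *
+ * Worked example: a header encoding NSS = 1 (i.e. 1 + 1 = 2 spatial
+ * streams) with two bits set in the RU index bitmask yields
+ * n = 2 * 2 = 4 pairs, so 4 * 3 * 2 + 9 = 33 bits, and
+ * DIV_ROUND_UP(33, 8) = 5 octets for the whole field.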
+ */ + n = n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2 + + IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; + return DIV_ROUND_UP(n, 8); +} + +static inline bool +ieee80211_eht_capa_size_ok(const u8 *he_capa, const u8 *data, u8 len) +{ + const struct ieee80211_eht_cap_elem_fixed *elem = (const void *)data; + u8 needed = sizeof(struct ieee80211_eht_cap_elem_fixed); + + if (len < needed || !he_capa) + return false; + + needed += ieee80211_eht_mcs_nss_size((const void *)he_capa, + (const void *)data); + if (len < needed) + return false; + + if (elem->phy_cap_info[5] & + IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) { + u16 ppe_thres_hdr; + + if (len < needed + sizeof(ppe_thres_hdr)) + return false; + + ppe_thres_hdr = get_unaligned_le16(data + needed); + needed += ieee80211_eht_ppe_size(ppe_thres_hdr, + elem->phy_cap_info); + } + + return len >= needed; +} + +static inline bool +ieee80211_eht_oper_size_ok(const u8 *data, u8 len) +{ + const struct ieee80211_eht_operation *elem = (const void *)data; + u8 needed = sizeof(*elem); + + if (len < needed) + return false; + + if (elem->present_bm & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT) + needed += 2; + + return len >= needed; +} #define LISTEN_INT_USF GENMASK(15, 14) #define LISTEN_INT_UI GENMASK(13, 0) @@ -3054,6 +3378,9 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_SHORT_SSID_LIST = 58, WLAN_EID_EXT_HE_6GHZ_CAPA = 59, WLAN_EID_EXT_UL_MU_POWER_CAPA = 60, + WLAN_EID_EXT_EHT_OPERATION = 106, + WLAN_EID_EXT_EHT_MULTI_LINK = 107, + WLAN_EID_EXT_EHT_CAPABILITY = 108, }; /* Action category code */ @@ -4005,10 +4332,10 @@ static inline bool for_each_element_completed(const struct element *element, #define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40 struct ieee80211_neighbor_ap_info { - u8 tbtt_info_hdr; - u8 tbtt_info_len; - u8 op_class; - u8 channel; + u8 tbtt_info_hdr; + u8 tbtt_info_len; + u8 op_class; + u8 channel; } __packed; enum ieee80211_range_params_max_total_ltf { diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index b712217f7030..1ed52441972f 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h @@ -52,6 +52,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) case ARPHRD_VOID: case ARPHRD_NONE: case ARPHRD_RAWIP: + case ARPHRD_PIMREG: return false; default: return true; diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 509e18c7e740..d62ef428e3aa 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -58,6 +58,7 @@ struct br_ip_list { #define BR_MRP_LOST_CONT BIT(18) #define BR_MRP_LOST_IN_CONT BIT(19) #define BR_TX_FWD_OFFLOAD BIT(20) +#define BR_PORT_LOCKED BIT(21) #define BR_DEFAULT_AGEING_TIME (300 * HZ) @@ -118,6 +119,9 @@ int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); +bool br_mst_enabled(const struct net_device *dev); +int br_mst_get_info(const struct net_device *dev, u16 msti, unsigned long *vids); +int br_mst_get_state(const struct net_device *dev, u16 msti, u8 *state); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -150,6 +154,22 @@ static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, { return -EINVAL; } + +static inline bool br_mst_enabled(const struct net_device *dev) +{ + return false; +} + +static inline int br_mst_get_info(const struct net_device *dev, u16 msti, + unsigned long *vids) +{ + return -EINVAL; +} +static inline int 
br_mst_get_state(const struct net_device *dev, u16 msti, + u8 *state) +{ + return -EINVAL; +} #endif #if IS_ENABLED(CONFIG_BRIDGE) diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h index 38bbc537d4e4..408539d5ea5f 100644 --- a/include/linux/if_hsr.h +++ b/include/linux/if_hsr.h @@ -9,6 +9,22 @@ enum hsr_version { PRP_V1, }; +/* HSR Tag. + * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB, + * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest, + * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr, + * encapsulated protocol } instead. + * + * Field names as defined in the IEC:2010 standard for HSR. + */ +struct hsr_tag { + __be16 path_and_LSDU_size; + __be16 sequence_nr; + __be16 encap_proto; +} __packed; + +#define HSR_HLEN 6 + #if IS_ENABLED(CONFIG_HSR) extern bool is_hsr_master(struct net_device *dev); extern int hsr_get_version(struct net_device *dev, enum hsr_version *ver); diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 10c94a3936ca..b42294739063 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -21,6 +21,7 @@ struct macvlan_dev { struct hlist_node hlist; struct macvlan_port *port; struct net_device *lowerdev; + netdevice_tracker dev_tracker; void *accel_priv; struct vlan_pcpu_stats __percpu *pcpu_stats; diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 674aeead6260..ead323243e7b 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -150,6 +150,7 @@ struct in_ifaddr { __be32 ifa_broadcast; unsigned char ifa_scope; unsigned char ifa_prefixlen; + unsigned char ifa_proto; __u32 ifa_flags; char ifa_label[IFNAMSIZ]; diff --git a/include/linux/init.h b/include/linux/init.h index d82b4b2e1d25..baf0b29a7010 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -320,12 +320,19 @@ struct obs_kernel_param { __aligned(__alignof__(struct obs_kernel_param)) \ = { __setup_str_##unique_id, fn, early } +/* + * NOTE: __setup functions return values: + * @fn returns 1 (or non-zero) if the option argument is "handled" + * and returns 0 if the option argument is "not handled". + */ #define __setup(str, fn) \ __setup_param(str, fn, fn, 0) /* - * NOTE: fn is as per module_param, not __setup! - * Emits warning if fn returns non-zero. + * NOTE: @fn is as per module_param, not __setup! + * I.e., @fn returns 0 for no error or non-zero for error + * (possibly @fn returns a -errno value, but it does not matter). + * Emits warning if @fn returns non-zero. */ #define early_param(str, fn) \ __setup_param(str, fn, fn, 1) diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 69230fd695ea..2f9891cb3d00 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -525,12 +525,6 @@ struct context_entry { */ #define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1) -/* - * Domain represents a virtual machine which demands iommu nested - * translation mode support. 
- */ -#define DOMAIN_FLAG_NESTING_MODE BIT(2) - struct dmar_domain { int nid; /* node id */ @@ -548,7 +542,6 @@ struct dmar_domain { u8 iommu_snooping: 1; /* indicate snooping control feature */ struct list_head devices; /* all devices' list */ - struct list_head subdevices; /* all subdevices' list */ struct iova_domain iovad; /* iova's that belong to this domain */ struct dma_pte *pgd; /* virtual address */ @@ -563,11 +556,6 @@ struct dmar_domain { 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ u64 max_addr; /* maximum mapped address */ - u32 default_pasid; /* - * The default pasid used for non-SVM - * traffic on mediated devices. - */ - struct iommu_domain domain; /* generic domain data structure for iommu core */ }; @@ -590,7 +578,6 @@ struct intel_iommu { #ifdef CONFIG_INTEL_IOMMU unsigned long *domain_ids; /* bitmap of domains */ - struct dmar_domain ***domains; /* ptr to domains */ spinlock_t lock; /* protect context, domain ids */ struct root_entry *root_entry; /* virtual address */ @@ -620,21 +607,11 @@ struct intel_iommu { void *perf_statistic; }; -/* Per subdevice private data */ -struct subdev_domain_info { - struct list_head link_phys; /* link to phys device siblings */ - struct list_head link_domain; /* link to domain siblings */ - struct device *pdev; /* physical device derived from */ - struct dmar_domain *domain; /* aux-domain */ - int users; /* user count */ -}; - /* PCI domain-device relationship */ struct device_domain_info { struct list_head link; /* link to domain siblings */ struct list_head global; /* link to global list */ struct list_head table; /* link to pasid table */ - struct list_head subdevices; /* subdevices sibling */ u32 segment; /* PCI segment number */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ @@ -645,7 +622,6 @@ struct device_domain_info { u8 pri_enabled:1; u8 ats_supported:1; u8 ats_enabled:1; - u8 auxd_enabled:1; /* Multiple domains per device */ u8 ats_qdep; struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ struct intel_iommu *iommu; /* IOMMU used by this device */ @@ -717,7 +693,6 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte) } extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); -extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); extern int dmar_enable_qi(struct intel_iommu *iommu); extern void dmar_disable_qi(struct intel_iommu *iommu); @@ -757,17 +732,12 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info, void *data), void *data); void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); -struct dmar_domain *find_domain(struct device *dev); -struct device_domain_info *get_domain_info(struct device *dev); struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); #ifdef CONFIG_INTEL_IOMMU_SVM extern void intel_svm_check(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); -int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, - struct iommu_gpasid_bind_data *data); -int intel_svm_unbind_gpasid(struct device *dev, u32 pasid); struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata); void intel_svm_unbind(struct iommu_sva *handle); @@ -795,7 +765,6 @@ struct intel_svm { unsigned int flags; u32 pasid; - int gpasid; /* In case that guest PASID is different from host PASID */ struct list_head devs; }; #else @@ -813,6 
+782,8 @@ bool context_present(struct context_entry *context); struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, u8 devfn, int alloc); +extern const struct iommu_ops intel_iommu_ops; + #ifdef CONFIG_INTEL_IOMMU extern int iommu_calculate_agaw(struct intel_iommu *iommu); extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 1b73bab7eeff..b3b125b332aa 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -25,17 +25,5 @@ * do such IOTLB flushes automatically. */ #define SVM_FLAG_SUPERVISOR_MODE BIT(0) -/* - * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest - * processes. Compared to the host bind, the primary differences are: - * 1. mm life cycle management - * 2. fault reporting - */ -#define SVM_FLAG_GUEST_MODE BIT(1) -/* - * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, - * which requires guest and host PASID translation at both directions. - */ -#define SVM_FLAG_GUEST_PASID BIT(2) #endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 9367f1cb2e3c..f40754caaefa 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -579,7 +579,16 @@ enum NR_SOFTIRQS }; -#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) +/* + * The following vectors can be safely ignored after ksoftirqd is parked: + * + * _ RCU: + * 1) rcutree_migrate_callbacks() migrates the queue. + * 2) rcu_report_dead() reports the final quiescent states. + * + * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue + */ +#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ)) /* map softirq index to softirq name. update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq. 
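The SOFTIRQ_HOTPLUG_SAFE_MASK introduced above whitelists the only vectors that may be left pending once ksoftirqd is parked: RCU work is migrated or reported by the RCU hotplug callbacks, and IRQ_POLL work is migrated by irq_poll_cpu_dead(). A minimal sketch of the check this mask enables in a CPU-hotplug path follows; the wrapper function is illustrative and not part of this patch, only SOFTIRQ_HOTPLUG_SAFE_MASK and local_softirq_pending() come from the kernel headers.

	/*
	 * Sketch: after ksoftirqd is parked, nothing outside the safe mask
	 * should still be pending on this CPU.
	 */
	static void warn_on_unsafe_pending_softirqs(void)
	{
		WARN_ON_ONCE(local_softirq_pending() & ~SOFTIRQ_HOTPLUG_SAFE_MASK);
	}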
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 649a4d7c241b..1814e698d861 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -9,11 +9,14 @@ struct sock *io_uring_get_socket(struct file *file); void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); +void io_uring_unreg_ringfd(void); static inline void io_uring_files_cancel(void) { - if (current->io_uring) + if (current->io_uring) { + io_uring_unreg_ringfd(); __io_uring_cancel(false); + } } static inline void io_uring_task_cancel(void) { diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h index e9dacd4b9f6b..af1c9d62e642 100644 --- a/include/linux/ioasid.h +++ b/include/linux/ioasid.h @@ -34,13 +34,16 @@ struct ioasid_allocator_ops { #if IS_ENABLED(CONFIG_IOASID) ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, void *private); -void ioasid_get(ioasid_t ioasid); -bool ioasid_put(ioasid_t ioasid); +void ioasid_free(ioasid_t ioasid); void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool (*getter)(void *)); int ioasid_register_allocator(struct ioasid_allocator_ops *allocator); void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator); int ioasid_set_data(ioasid_t ioasid, void *data); +static inline bool pasid_valid(ioasid_t ioasid) +{ + return ioasid != INVALID_IOASID; +} #else /* !CONFIG_IOASID */ static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, @@ -49,14 +52,7 @@ static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, return INVALID_IOASID; } -static inline void ioasid_get(ioasid_t ioasid) -{ -} - -static inline bool ioasid_put(ioasid_t ioasid) -{ - return false; -} +static inline void ioasid_free(ioasid_t ioasid) { } static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool (*getter)(void *)) @@ -78,5 +74,10 @@ static inline int ioasid_set_data(ioasid_t ioasid, void *data) return -ENOTSUPP; } +static inline bool pasid_valid(ioasid_t ioasid) +{ + return false; +} + #endif /* CONFIG_IOASID */ #endif /* __LINUX_IOASID_H */ diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 97a3a2edb585..b76f0dd149fb 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -227,12 +227,9 @@ ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); -int iomap_is_partially_uptodate(struct page *page, unsigned long from, - unsigned long count); +bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); int iomap_releasepage(struct page *page, gfp_t gfp_mask); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); -void iomap_invalidatepage(struct page *page, unsigned int offset, - unsigned int len); #ifdef CONFIG_MIGRATION int iomap_migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index de0c57a567c8..9208eca4b0d1 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -37,6 +37,7 @@ struct iommu_group; struct bus_type; struct device; struct iommu_domain; +struct iommu_domain_ops; struct notifier_block; struct iommu_sva; struct iommu_fault_event; @@ -88,7 +89,7 @@ struct iommu_domain_geometry { struct iommu_domain { unsigned type; - const struct iommu_ops *ops; + const struct 
iommu_domain_ops *ops; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ iommu_fault_handler_t handler; void *handler_token; @@ -144,7 +145,6 @@ struct iommu_resv_region { /** * enum iommu_dev_features - Per device IOMMU features - * @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally * enabling %IOMMU_DEV_FEAT_SVA requires @@ -157,7 +157,6 @@ struct iommu_resv_region { * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). */ enum iommu_dev_features { - IOMMU_DEV_FEAT_AUX, IOMMU_DEV_FEAT_SVA, IOMMU_DEV_FEAT_IOPF, }; @@ -194,48 +193,28 @@ struct iommu_iotlb_gather { * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @domain_alloc: allocate iommu domain - * @domain_free: free iommu domain - * @attach_dev: attach device to an iommu domain - * @detach_dev: detach device from an iommu domain - * @map: map a physically contiguous memory region to an iommu domain - * @map_pages: map a physically contiguous set of pages of the same size to - * an iommu domain. - * @unmap: unmap a physically contiguous memory region from an iommu domain - * @unmap_pages: unmap a number of pages of the same size from an iommu domain - * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain - * @iotlb_sync_map: Sync mappings created recently using @map to the hardware - * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush - * queue - * @iova_to_phys: translate iova to physical address * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU * group and attached to the groups domain * @device_group: find iommu group for a particular device - * @enable_nesting: Enable nesting - * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @get_resv_regions: Request list of reserved regions for a device * @put_resv_regions: Free list of reserved regions for a device - * @apply_resv_region: Temporary helper call-back for iova reserved ranges * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) * @dev_has/enable/disable_feat: per device entries to check/enable/disable * iommu specific features. * @dev_feat_enabled: check enabled feature - * @aux_attach/detach_dev: aux-domain specific attach/detach entries. 
- * @aux_get_pasid: get the pasid given an aux-domain * @sva_bind: Bind process address space to device * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response - * @cache_invalidate: invalidate translation caches - * @sva_bind_gpasid: bind guest pasid and mm - * @sva_unbind_gpasid: unbind guest pasid and mm * @def_domain_type: device default domain type, return value: * - IOMMU_DOMAIN_IDENTITY: must use an identity domain * - IOMMU_DOMAIN_DMA: must use a dma domain * - 0: use the default setting + * @default_domain_ops: the default ops for domains * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops */ @@ -244,43 +223,18 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); - void (*domain_free)(struct iommu_domain *); - int (*attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp); - int (*map_pages)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t pgsize, size_t pgcount, - int prot, gfp_t gfp, size_t *mapped); - size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size, struct iommu_iotlb_gather *iotlb_gather); - size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, - size_t pgsize, size_t pgcount, - struct iommu_iotlb_gather *iotlb_gather); - void (*flush_iotlb_all)(struct iommu_domain *domain); - void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, - size_t size); - void (*iotlb_sync)(struct iommu_domain *domain, - struct iommu_iotlb_gather *iotlb_gather); - phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); - int (*enable_nesting)(struct iommu_domain *domain); - int (*set_pgtable_quirks)(struct iommu_domain *domain, - unsigned long quirks); /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); void (*put_resv_regions)(struct device *dev, struct list_head *list); - void (*apply_resv_region)(struct device *dev, - struct iommu_domain *domain, - struct iommu_resv_region *region); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); - bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev); + bool (*is_attach_deferred)(struct device *dev); /* Per device IOMMU features */ bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f); @@ -288,11 +242,6 @@ struct iommu_ops { int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); - /* Aux-domain specific attach/detach entries */ - int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev); - struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, void *drvdata); void (*sva_unbind)(struct iommu_sva *handle); @@ -301,20 +250,64 @@ struct 
iommu_ops { int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); - int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info); - int (*sva_bind_gpasid)(struct iommu_domain *domain, - struct device *dev, struct iommu_gpasid_bind_data *data); - - int (*sva_unbind_gpasid)(struct device *dev, u32 pasid); int (*def_domain_type)(struct device *dev); + const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; }; /** + * struct iommu_domain_ops - domain specific operations + * @attach_dev: attach an iommu domain to a device + * @detach_dev: detach an iommu domain from a device + * @map: map a physically contiguous memory region to an iommu domain + * @map_pages: map a physically contiguous set of pages of the same size to + * an iommu domain. + * @unmap: unmap a physically contiguous memory region from an iommu domain + * @unmap_pages: unmap a number of pages of the same size from an iommu domain + * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain + * @iotlb_sync_map: Sync mappings created recently using @map to the hardware + * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush + * queue + * @iova_to_phys: translate iova to physical address + * @enable_nesting: Enable nesting + * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) + * @free: Release the domain after use. + */ +struct iommu_domain_ops { + int (*attach_dev)(struct iommu_domain *domain, struct device *dev); + void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + + int (*map)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); + size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, + size_t size, struct iommu_iotlb_gather *iotlb_gather); + size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather); + + void (*flush_iotlb_all)(struct iommu_domain *domain); + void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, + size_t size); + void (*iotlb_sync)(struct iommu_domain *domain, + struct iommu_iotlb_gather *iotlb_gather); + + phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, + dma_addr_t iova); + + int (*enable_nesting)(struct iommu_domain *domain); + int (*set_pgtable_quirks)(struct iommu_domain *domain, + unsigned long quirks); + + void (*free)(struct iommu_domain *domain); +}; + +/** * struct iommu_device - IOMMU core representation of one IOMMU hardware * instance * @list: Used by the iommu-core to keep a list of registered iommus @@ -403,6 +396,17 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) }; } +static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) +{ + /* + * Assume that valid ops must be installed if iommu_probe_device() + * has succeeded. The device ops are essentially for internal use + * within the IOMMU subsystem itself, so we should be able to trust + * ourselves not to misuse the helper. 
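+ *
+ * For example, a core-code caller can fetch the per-device ops and use
+ * them directly (a sketch; "type" is an illustrative local variable):
+ *
+ *	const struct iommu_ops *ops = dev_iommu_ops(dev);
+ *
+ *	if (ops->def_domain_type)
+ *		type = ops->def_domain_type(dev);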
+ */ + return dev->iommu->iommu_dev->ops; +} + #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */ #define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */ @@ -421,14 +425,6 @@ extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); extern void iommu_detach_device(struct iommu_domain *domain, struct device *dev); -extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - void __user *uinfo); - -extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); -extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata); extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, ioasid_t pasid); extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); @@ -672,9 +668,6 @@ void iommu_release_device(struct device *dev); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); -int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev); -void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); -int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, @@ -1019,23 +1012,6 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) return -ENODEV; } -static inline int -iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - -static inline void -iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) -{ -} - -static inline int -iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - return -ENODEV; -} - static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) { @@ -1051,33 +1027,6 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) return IOMMU_PASID_INVALID; } -static inline int -iommu_uapi_cache_invalidate(struct iommu_domain *domain, - struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, void __user *udata) -{ - return -ENODEV; -} - -static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, - struct device *dev, - ioasid_t pasid) -{ - return -ENODEV; -} - static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { return NULL; diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h new file mode 100644 index 000000000000..e69a002d5aa4 --- /dev/null +++ b/include/linux/iosys-map.h @@ -0,0 +1,459 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Pointer abstraction for IO/system memory + */ + +#ifndef __IOSYS_MAP_H__ +#define __IOSYS_MAP_H__ + +#include <linux/io.h> +#include <linux/string.h> + +/** + * DOC: overview + * + * When accessing a memory region, depending on its location, users may have to + * access it with I/O operations or memory load/store operations. 
For example, + * copying to system memory could be done with memcpy(), copying to I/O memory + * would be done with memcpy_toio(). + * + * .. code-block:: c + * + * void *vaddr = ...; // pointer to system memory + * memcpy(vaddr, src, len); + * + * void *vaddr_iomem = ...; // pointer to I/O memory + * memcpy_toio(vaddr_iomem, src, len); + * + * The user of such a pointer may not have information about the mapping of that + * region or may want to have a single code path to handle operations on that + * buffer, regardless of whether it's located in system or I/O memory. The type + * :c:type:`struct iosys_map <iosys_map>` and its helpers abstract that so the + * buffer can be passed around to other drivers or have separate duties inside + * the same driver for allocation, read and write operations. + * + * Open-coding access to :c:type:`struct iosys_map <iosys_map>` is considered + * bad style. Rather than accessing its fields directly, use one of the provided + * helper functions, or implement your own. For example, instances of + * :c:type:`struct iosys_map <iosys_map>` can be initialized statically with + * IOSYS_MAP_INIT_VADDR(), or at runtime with iosys_map_set_vaddr(). These + * helpers will set an address in system memory. + * + * .. code-block:: c + * + * struct iosys_map map = IOSYS_MAP_INIT_VADDR(0xdeadbeaf); + * + * iosys_map_set_vaddr(&map, 0xdeadbeaf); + * + * To set an address in I/O memory, use iosys_map_set_vaddr_iomem(). + * + * .. code-block:: c + * + * iosys_map_set_vaddr_iomem(&map, 0xdeadbeaf); + * + * Instances of struct iosys_map do not have to be cleaned up, but + * can be cleared to NULL with iosys_map_clear(). Cleared mappings + * always refer to system memory. + * + * .. code-block:: c + * + * iosys_map_clear(&map); + * + * Test if a mapping is valid with either iosys_map_is_set() or + * iosys_map_is_null(). + * + * .. code-block:: c + * + * if (iosys_map_is_set(&map) != iosys_map_is_null(&map)) + * // always true + * + * Instances of :c:type:`struct iosys_map <iosys_map>` can be compared for + * equality with iosys_map_is_equal(). Mappings that point to different memory + * spaces, system or I/O, are never equal. That's even true if both spaces are + * located in the same address space, both mappings contain the same address + * value, or both mappings refer to NULL. + * + * .. code-block:: c + * + * struct iosys_map sys_map; // refers to system memory + * struct iosys_map io_map; // refers to I/O memory + * + * if (iosys_map_is_equal(&sys_map, &io_map)) + * // always false + * + * A set-up instance of struct iosys_map can be used to access or manipulate the + * buffer memory. Depending on the location of the memory, the provided helpers + * will pick the correct operations. Data can be copied into the memory with + * iosys_map_memcpy_to(). The address can be manipulated with iosys_map_incr(). + * + * .. code-block:: c + * + * const void *src = ...; // source buffer + * size_t len = ...; // length of src + * + * iosys_map_memcpy_to(&map, 0, src, len); + * iosys_map_incr(&map, len); // go to first byte after the memcpy + */ + +/** + * struct iosys_map - Pointer to IO/system memory + * @vaddr_iomem: The buffer's address if in I/O memory + * @vaddr: The buffer's address if in system memory + * @is_iomem: True if the buffer is located in I/O memory, or false + * otherwise.
+ */ +struct iosys_map { + union { + void __iomem *vaddr_iomem; + void *vaddr; + }; + bool is_iomem; +}; + +/** + * IOSYS_MAP_INIT_VADDR - Initializes struct iosys_map to an address in system memory + * @vaddr_: A system-memory address + */ +#define IOSYS_MAP_INIT_VADDR(vaddr_) \ + { \ + .vaddr = (vaddr_), \ + .is_iomem = false, \ + } + +/** + * IOSYS_MAP_INIT_OFFSET - Initializes struct iosys_map from another iosys_map + * @map_: The iosys_map structure to copy from + * @offset_: Offset to add to the other mapping + * + * Initializes a new iosys_map struct based on another passed as argument. It + * does a shallow copy of the struct so it's possible to update the back storage + * without changing where the original map points to. It is the equivalent of + * doing: + * + * .. code-block:: c + * + * struct iosys_map map = other_map; + * iosys_map_incr(&map, offset); + * + * Example usage: + * + * .. code-block:: c + * + * void foo(struct device *dev, struct iosys_map *base_map) + * { + * ... + * struct iosys_map map = IOSYS_MAP_INIT_OFFSET(base_map, FIELD_OFFSET); + * ... + * } + * + * The advantage of using the initializer over just increasing the offset with + * iosys_map_incr() like above is that the new map will always point to the + * right place in the buffer during its scope. It reduces the risk of updating + * the wrong part of the buffer and having no compiler warning about that. If + * the assignment to IOSYS_MAP_INIT_OFFSET() is forgotten, the compiler can warn + * about the use of an uninitialized variable. + */ +#define IOSYS_MAP_INIT_OFFSET(map_, offset_) ({ \ + struct iosys_map copy = *map_; \ + iosys_map_incr(&copy, offset_); \ + copy; \ +}) + +/** + * iosys_map_set_vaddr - Sets an iosys mapping structure to an address in system memory + * @map: The iosys_map structure + * @vaddr: A system-memory address + * + * Sets the address and clears the I/O-memory flag. + */ +static inline void iosys_map_set_vaddr(struct iosys_map *map, void *vaddr) +{ + map->vaddr = vaddr; + map->is_iomem = false; +} + +/** + * iosys_map_set_vaddr_iomem - Sets an iosys mapping structure to an address in I/O memory + * @map: The iosys_map structure + * @vaddr_iomem: An I/O-memory address + * + * Sets the address and the I/O-memory flag. + */ +static inline void iosys_map_set_vaddr_iomem(struct iosys_map *map, + void __iomem *vaddr_iomem) +{ + map->vaddr_iomem = vaddr_iomem; + map->is_iomem = true; +} + +/** + * iosys_map_is_equal - Compares two iosys mapping structures for equality + * @lhs: The iosys_map structure + * @rhs: An iosys_map structure to compare with + * + * Two iosys mapping structures are equal if they both refer to the same type of memory + * and to the same address within that memory. + * + * Returns: + * True if both structures are equal, or false otherwise. + */ +static inline bool iosys_map_is_equal(const struct iosys_map *lhs, + const struct iosys_map *rhs) +{ + if (lhs->is_iomem != rhs->is_iomem) + return false; + else if (lhs->is_iomem) + return lhs->vaddr_iomem == rhs->vaddr_iomem; + else + return lhs->vaddr == rhs->vaddr; +} + +/** + * iosys_map_is_null - Tests whether an iosys mapping is NULL + * @map: The iosys_map structure + * + * Depending on the state of struct iosys_map.is_iomem, tests if the + * mapping is NULL. + * + * Returns: + * True if the mapping is NULL, or false otherwise.
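+ * + * Example (a sketch; the error handling shown is illustrative): + * + * .. code-block:: c + * + * if (iosys_map_is_null(&map)) + * return -ENOMEM;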
+ */ +static inline bool iosys_map_is_null(const struct iosys_map *map) +{ + if (map->is_iomem) + return !map->vaddr_iomem; + return !map->vaddr; +} + +/** + * iosys_map_is_set - Tests if the iosys mapping has been set + * @map: The iosys_map structure + * + * Depending on the state of struct iosys_map.is_iomem, tests if the + * mapping has been set. + * + * Returns: + * True if the mapping has been set, or false otherwise. + */ +static inline bool iosys_map_is_set(const struct iosys_map *map) +{ + return !iosys_map_is_null(map); +} + +/** + * iosys_map_clear - Clears an iosys mapping structure + * @map: The iosys_map structure + * + * Clears all fields to zero, including struct iosys_map.is_iomem, so + * mapping structures that were set to point to I/O memory are reset for + * system memory. Pointers are cleared to NULL. This is the default. + */ +static inline void iosys_map_clear(struct iosys_map *map) +{ + if (map->is_iomem) { + map->vaddr_iomem = NULL; + map->is_iomem = false; + } else { + map->vaddr = NULL; + } +} + +/** + * iosys_map_memcpy_to - Memcpy into offset of iosys_map + * @dst: The iosys_map structure + * @dst_offset: The offset in @dst at which to start copying + * @src: The source buffer + * @len: The number of bytes to copy + * + * Copies data into an iosys_map at an offset. The source buffer is in + * system memory. Depending on the buffer's location, the helper picks the + * correct method of accessing the memory. + */ +static inline void iosys_map_memcpy_to(struct iosys_map *dst, size_t dst_offset, + const void *src, size_t len) +{ + if (dst->is_iomem) + memcpy_toio(dst->vaddr_iomem + dst_offset, src, len); + else + memcpy(dst->vaddr + dst_offset, src, len); +} + +/** + * iosys_map_memcpy_from - Memcpy from iosys_map into system memory + * @dst: Destination in system memory + * @src: The iosys_map structure + * @src_offset: The offset from which to copy + * @len: The number of bytes to copy + * + * Copies data from an iosys_map at an offset. The destination buffer is in + * system memory. Depending on the mapping location, the helper picks the + * correct method of accessing the memory. + */ +static inline void iosys_map_memcpy_from(void *dst, const struct iosys_map *src, + size_t src_offset, size_t len) +{ + if (src->is_iomem) + memcpy_fromio(dst, src->vaddr_iomem + src_offset, len); + else + memcpy(dst, src->vaddr + src_offset, len); +} + +/** + * iosys_map_incr - Increments the address stored in an iosys mapping + * @map: The iosys_map structure + * @incr: The number of bytes to increment + * + * Increments the address stored in an iosys mapping. Depending on the + * buffer's location, the correct value will be updated. + */ +static inline void iosys_map_incr(struct iosys_map *map, size_t incr) +{ + if (map->is_iomem) + map->vaddr_iomem += incr; + else + map->vaddr += incr; +} + +/** + * iosys_map_memset - Memset iosys_map + * @dst: The iosys_map structure + * @offset: Offset in @dst at which to start setting @value + * @value: The value to set + * @len: The number of bytes to set in dst + * + * Sets @value in the iosys_map. Depending on the buffer's location, the helper + * picks the correct method of accessing the memory.
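+ * + * Example (a sketch; buf_len is an assumed buffer length, not part of + * this patch): + * + * .. code-block:: c + * + * iosys_map_memset(&map, 0, 0, buf_len); // zero the whole buffer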
+ */ +static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, + int value, size_t len) +{ + if (dst->is_iomem) + memset_io(dst->vaddr_iomem + offset, value, len); + else + memset(dst->vaddr + offset, value, len); +} + +/** + * iosys_map_rd - Read a C-type value from the iosys_map + * + * @map__: The iosys_map structure + * @offset__: The offset from which to read + * @type__: Type of the value being read + * + * Read a C type value from iosys_map, handling possible un-aligned accesses to + * the mapping. + * + * Returns: + * The value read from the mapping. + */ +#define iosys_map_rd(map__, offset__, type__) ({ \ + type__ val; \ + iosys_map_memcpy_from(&val, map__, offset__, sizeof(val)); \ + val; \ +}) + +/** + * iosys_map_wr - Write a C-type value to the iosys_map + * + * @map__: The iosys_map structure + * @offset__: The offset from the mapping to write to + * @type__: Type of the value being written + * @val__: Value to write + * + * Write a C-type value to the iosys_map, handling possible un-aligned accesses + * to the mapping. + */ +#define iosys_map_wr(map__, offset__, type__, val__) ({ \ + type__ val = (val__); \ + iosys_map_memcpy_to(map__, offset__, &val, sizeof(val)); \ +}) + +/** + * iosys_map_rd_field - Read a member from a struct in the iosys_map + * + * @map__: The iosys_map structure + * @struct_offset__: Offset from the beginning of the map, where the struct + * is located + * @struct_type__: The struct describing the layout of the mapping + * @field__: Member of the struct to read + * + * Read a value from iosys_map considering its layout is described by a C struct + * starting at @struct_offset__. The field offset and size are calculated, and its + * value is read, handling possible un-aligned memory accesses. For example: suppose + * there is a @struct foo defined as below and the value ``foo.field2.inner2`` + * needs to be read from the iosys_map: + * + * .. code-block:: c + * + * struct foo { + * int field1; + * struct { + * int inner1; + * int inner2; + * } field2; + * int field3; + * } __packed; + * + * This is the expected memory layout of a buffer using iosys_map_rd_field(): + * + * +------------------------------+--------------------------+ + * | Address | Content | + * +==============================+==========================+ + * | buffer + 0000 | start of mmapped buffer | + * | | pointed by iosys_map | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + ``struct_offset__`` | start of ``struct foo`` | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + wwww | ``foo.field2.inner2`` | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + yyyy | end of ``struct foo`` | + * +------------------------------+--------------------------+ + * | ... | ... | + * +------------------------------+--------------------------+ + * | buffer + zzzz | end of mmapped buffer | + * +------------------------------+--------------------------+ + * + * Offsets that are calculated automatically by this macro, or that are not + * needed, are denoted by wwww, yyyy and zzzz. This is the code to read that + * value: + * + * .. code-block:: c + * + * x = iosys_map_rd_field(&map, offset, struct foo, field2.inner2); + * + * Returns: + * The value read from the mapping.
+ */ +#define iosys_map_rd_field(map__, struct_offset__, struct_type__, field__) ({ \ + struct_type__ *s; \ + iosys_map_rd(map__, struct_offset__ + offsetof(struct_type__, field__), \ + typeof(s->field__)); \ +}) + +/** + * iosys_map_wr_field - Write to a member of a struct in the iosys_map + * + * @map__: The iosys_map structure + * @struct_offset__: Offset from the beginning of the map, where the struct + * is located + * @struct_type__: The struct describing the layout of the mapping + * @field__: Member of the struct to write + * @val__: Value to write + * + * Write a value to the iosys_map considering its layout is described by a C struct + * starting at @struct_offset__. The field offset and size are calculated, and + * @val__ is written, handling possible un-aligned memory accesses. Refer to + * iosys_map_rd_field() for expected usage and memory layout. + */ +#define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \ + struct_type__ *s; \ + iosys_map_wr(map__, struct_offset__ + offsetof(struct_type__, field__), \ + typeof(s->field__), val__); \ +}) + +#endif /* __IOSYS_MAP_H__ */ diff --git a/include/linux/iova.h b/include/linux/iova.h index cea79cb9f26c..320a70e40233 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -21,18 +21,8 @@ struct iova { unsigned long pfn_lo; /* Lowest allocated pfn */ }; -struct iova_magazine; -struct iova_cpu_rcache; -#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */ -#define MAX_GLOBAL_MAGS 32 /* magazines per bin */ - -struct iova_rcache { - spinlock_t lock; - unsigned long depot_size; - struct iova_magazine *depot[MAX_GLOBAL_MAGS]; - struct iova_cpu_rcache __percpu *cpu_rcaches; -}; +struct iova_rcache; /* holds all the iova translations for a domain */ struct iova_domain { @@ -46,7 +36,7 @@ struct iova_domain { unsigned long max32_alloc_size; /* Size of last failed allocation */ struct iova anchor; /* rbtree lookup anchor */ - struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */ + struct iova_rcache *rcaches; struct hlist_node cpuhp_dead; }; @@ -102,6 +92,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); +int iova_domain_init_rcaches(struct iova_domain *iovad); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); #else diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index a59d25f19385..16870f86c74d 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -51,7 +51,7 @@ struct ipv6_devconf { __s32 use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE - __s32 mc_forwarding; + atomic_t mc_forwarding; #endif __s32 disable_ipv6; __s32 drop_unicast_in_l2_multicast; @@ -371,19 +371,12 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) return NULL; } -static inline struct inet6_request_sock * - inet6_rsk(const struct request_sock *rsk) -{ - return NULL; -} - static inline struct raw6_sock *raw6_sk(const struct sock *sk) { return NULL; } #define inet6_rcv_saddr(__sk) NULL -#define tcp_twsk_ipv6only(__sk) 0 #define inet_v6_ipv6only(__sk) 0 #endif /* IS_ENABLED(CONFIG_IPV6) */ #endif /* _IPV6_H */ diff --git a/include/linux/irq.h b/include/linux/irq.h index 848e1e12c5c6..f92788ccdba2 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -456,7 +456,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct
irq_data *d) /** * struct irq_chip - hardware interrupt chip descriptor * - * @parent_device: pointer to parent device for irqchip * @name: name for /proc/interrupts * @irq_startup: start up the interrupt (defaults to ->enable if NULL) * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) @@ -503,7 +502,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) * @flags: chip specific flags */ struct irq_chip { - struct device *parent_device; const char *name; unsigned int (*irq_startup)(struct irq_data *data); void (*irq_shutdown)(struct irq_data *data); @@ -712,10 +710,11 @@ extern struct irq_chip no_irq_chip; extern struct irq_chip dummy_irq_chip; extern void -irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, +irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, irq_flow_handler_t handle, const char *name); -static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, +static inline void irq_set_chip_and_handler(unsigned int irq, + const struct irq_chip *chip, irq_flow_handler_t handle) { irq_set_chip_and_handler_name(irq, chip, handle, NULL); } @@ -805,7 +804,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq) } /* Set/get chip/data for an IRQ: */ -extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); +extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip); extern int irq_set_handler_data(unsigned int irq, void *data); extern int irq_set_chip_data(unsigned int irq, void *data); extern int irq_set_irq_type(unsigned int irq, unsigned int type); diff --git a/include/linux/irqchip/versatile-fpga.h b/include/linux/irqchip/versatile-fpga.h deleted file mode 100644 index a978fc8c7996..000000000000 --- a/include/linux/irqchip/versatile-fpga.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef PLAT_FPGA_IRQ_H -#define PLAT_FPGA_IRQ_H - -struct device_node; -struct pt_regs; - -void fpga_handle_irq(struct pt_regs *regs); -void fpga_irq_init(void __iomem *, const char *, int, int, u32, - struct device_node *node); -int fpga_irq_of_init(struct device_node *node, - struct device_node *parent); - -#endif diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 93d270ca0c56..a77584593f7d 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -160,6 +160,7 @@ static inline void generic_handle_irq_desc(struct irq_desc *desc) int handle_irq_desc(struct irq_desc *desc); int generic_handle_irq(unsigned int irq); +int generic_handle_irq_safe(unsigned int irq); #ifdef CONFIG_IRQ_DOMAIN /* diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index d476405802e9..00d577f90883 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -151,6 +151,8 @@ struct irq_domain_chip_generic; * @gc: Pointer to a list of generic chips. There is a helper function for * setting up one or more generic chips for interrupt controller * drivers using the generic chip library which uses this pointer. + * @dev: Pointer to a device that the domain represents, and that will be + * used for power management purposes.
* @parent: Pointer to parent irq_domain to support hierarchy irq_domains * * Revmap data, used internally by irq_domain @@ -171,6 +173,7 @@ struct irq_domain { struct fwnode_handle *fwnode; enum irq_domain_bus_token bus_token; struct irq_domain_chip_generic *gc; + struct device *dev; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY struct irq_domain *parent; #endif @@ -226,6 +229,13 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) return to_of_node(d->fwnode); } +static inline void irq_domain_set_pm_device(struct irq_domain *d, + struct device *dev) +{ + if (d) + d->dev = dev; +} + #ifdef CONFIG_IRQ_DOMAIN struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, const char *name, phys_addr_t *pa); @@ -469,7 +479,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, unsigned int virq); extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq, struct irq_chip *chip, + irq_hw_number_t hwirq, + const struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); extern void irq_domain_reset_irq_data(struct irq_data *irq_data); @@ -512,7 +523,7 @@ extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, - struct irq_chip *chip, + const struct irq_chip *chip, void *chip_data); extern void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 9c3ada74ffb1..de9536680b2b 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -554,9 +554,6 @@ struct transaction_chp_stats_s { * ->j_list_lock * * j_state_lock - * ->t_handle_lock - * - * j_state_lock * ->j_list_lock (journal_unmap_buffer) * */ @@ -1530,8 +1527,8 @@ void jbd2_journal_set_triggers(struct buffer_head *, struct jbd2_buffer_trigger_type *type); extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); -extern int jbd2_journal_invalidatepage(journal_t *, - struct page *, unsigned int, unsigned int); +int jbd2_journal_invalidate_folio(journal_t *, struct folio *, + size_t offset, size_t length); extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush(journal_t *journal, unsigned int flags); diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 48b9b2a82767..107751cc047b 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -82,10 +82,9 @@ extern bool static_key_initialized; "%s(): static key '%pS' used before call to jump_label_init()", \ __func__, (key)) -#ifdef CONFIG_JUMP_LABEL - struct static_key { atomic_t enabled; +#ifdef CONFIG_JUMP_LABEL /* * Note: * To make anonymous unions work with old compilers, the static @@ -104,13 +103,9 @@ struct static_key { struct jump_entry *entries; struct static_key_mod *next; }; +#endif /* CONFIG_JUMP_LABEL */ }; -#else -struct static_key { - atomic_t enabled; -}; -#endif /* CONFIG_JUMP_LABEL */ #endif /* __ASSEMBLY__ */ #ifdef CONFIG_JUMP_LABEL @@ -251,10 +246,10 @@ extern void static_key_disable_cpuslocked(struct static_key *key); */ #define STATIC_KEY_INIT_TRUE \ { .enabled = { 1 }, \ - { .entries = (void *)JUMP_TYPE_TRUE } } + { .type = 
JUMP_TYPE_TRUE } } #define STATIC_KEY_INIT_FALSE \ { .enabled = { 0 }, \ - { .entries = (void *)JUMP_TYPE_FALSE } } + { .type = JUMP_TYPE_FALSE } } #else /* !CONFIG_JUMP_LABEL */ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 4176c7eca7b5..ce1bd2fbf23e 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -48,7 +48,7 @@ static inline int is_ksym_addr(unsigned long addr) static inline void *dereference_symbol_descriptor(void *ptr) { -#ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR +#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS struct module *mod; ptr = dereference_kernel_function_descriptor(ptr); diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h new file mode 100644 index 000000000000..6f612d69ea0c --- /dev/null +++ b/include/linux/kasan-enabled.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_ENABLED_H +#define _LINUX_KASAN_ENABLED_H + +#include <linux/static_key.h> + +#ifdef CONFIG_KASAN_HW_TAGS + +DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); + +static __always_inline bool kasan_enabled(void) +{ + return static_branch_likely(&kasan_flag_enabled); +} + +static inline bool kasan_hw_tags_enabled(void) +{ + return kasan_enabled(); +} + +#else /* CONFIG_KASAN_HW_TAGS */ + +static inline bool kasan_enabled(void) +{ + return IS_ENABLED(CONFIG_KASAN); +} + +static inline bool kasan_hw_tags_enabled(void) +{ + return false; +} + +#endif /* CONFIG_KASAN_HW_TAGS */ + +#endif /* LINUX_KASAN_ENABLED_H */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 4a45562d8893..ceebcb9de7bf 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -3,6 +3,7 @@ #define _LINUX_KASAN_H #include <linux/bug.h> +#include <linux/kasan-enabled.h> #include <linux/kernel.h> #include <linux/static_key.h> #include <linux/types.h> @@ -18,13 +19,15 @@ struct task_struct; #include <linux/linkage.h> #include <asm/kasan.h> -/* kasan_data struct is used in KUnit tests for KASAN expected failures */ -struct kunit_kasan_expectation { - bool report_found; -}; - #endif +typedef unsigned int __bitwise kasan_vmalloc_flags_t; + +#define KASAN_VMALLOC_NONE 0x00u +#define KASAN_VMALLOC_INIT 0x01u +#define KASAN_VMALLOC_VM_ALLOC 0x02u +#define KASAN_VMALLOC_PROT_NORMAL 0x04u + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) #include <linux/pgtable.h> @@ -83,47 +86,8 @@ static inline void kasan_disable_current(void) {} #ifdef CONFIG_KASAN_HW_TAGS -DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled); - -static __always_inline bool kasan_enabled(void) -{ - return static_branch_likely(&kasan_flag_enabled); -} - -static inline bool kasan_hw_tags_enabled(void) -{ - return kasan_enabled(); -} - -void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags); -void kasan_free_pages(struct page *page, unsigned int order); - #else /* CONFIG_KASAN_HW_TAGS */ -static inline bool kasan_enabled(void) -{ - return IS_ENABLED(CONFIG_KASAN); -} - -static inline bool kasan_hw_tags_enabled(void) -{ - return false; -} - -static __always_inline void kasan_alloc_pages(struct page *page, - unsigned int order, gfp_t flags) -{ - /* Only available for integrated init. */ - BUILD_BUG(); -} - -static __always_inline void kasan_free_pages(struct page *page, - unsigned int order) -{ - /* Only available for integrated init. 
*/ - BUILD_BUG(); -} - #endif /* CONFIG_KASAN_HW_TAGS */ static inline bool kasan_has_integrated_init(void) @@ -303,10 +267,6 @@ static __always_inline bool kasan_check_byte(const void *addr) return true; } - -bool kasan_save_enable_multi_shot(void); -void kasan_restore_multi_shot(bool enabled); - #else /* CONFIG_KASAN */ static inline slab_flags_t kasan_never_merge(void) @@ -435,34 +395,71 @@ static inline void kasan_init_hw_tags(void) { } #ifdef CONFIG_KASAN_VMALLOC +#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) + +void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); int kasan_populate_vmalloc(unsigned long addr, unsigned long size); -void kasan_poison_vmalloc(const void *start, unsigned long size); -void kasan_unpoison_vmalloc(const void *start, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); -void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); +#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +static inline void kasan_populate_early_vm_area_shadow(void *start, + unsigned long size) +{ } +static inline int kasan_populate_vmalloc(unsigned long start, + unsigned long size) +{ + return 0; +} +static inline void kasan_release_vmalloc(unsigned long start, + unsigned long end, + unsigned long free_region_start, + unsigned long free_region_end) { } + +#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ + +void *__kasan_unpoison_vmalloc(const void *start, unsigned long size, + kasan_vmalloc_flags_t flags); +static __always_inline void *kasan_unpoison_vmalloc(const void *start, + unsigned long size, + kasan_vmalloc_flags_t flags) +{ + if (kasan_enabled()) + return __kasan_unpoison_vmalloc(start, size, flags); + return (void *)start; +} + +void __kasan_poison_vmalloc(const void *start, unsigned long size); +static __always_inline void kasan_poison_vmalloc(const void *start, + unsigned long size) +{ + if (kasan_enabled()) + __kasan_poison_vmalloc(start, size); +} #else /* CONFIG_KASAN_VMALLOC */ +static inline void kasan_populate_early_vm_area_shadow(void *start, + unsigned long size) { } static inline int kasan_populate_vmalloc(unsigned long start, unsigned long size) { return 0; } - -static inline void kasan_poison_vmalloc(const void *start, unsigned long size) -{ } -static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size) -{ } static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end) {} + unsigned long free_region_end) { } -static inline void kasan_populate_early_vm_area_shadow(void *start, - unsigned long size) +static inline void *kasan_unpoison_vmalloc(const void *start, + unsigned long size, + kasan_vmalloc_flags_t flags) +{ + return (void *)start; +} +static inline void kasan_poison_vmalloc(const void *start, unsigned long size) { } #endif /* CONFIG_KASAN_VMALLOC */ @@ -471,17 +468,17 @@ static inline void kasan_populate_early_vm_area_shadow(void *start, !defined(CONFIG_KASAN_VMALLOC) /* - * These functions provide a special case to support backing module - * allocations with real shadow memory. With KASAN vmalloc, the special - * case is unnecessary, as the work is handled in the generic case. + * These functions allocate and free shadow memory for kernel modules. 
+ * They are only required when KASAN_VMALLOC is not supported, as otherwise + * shadow memory is allocated by the generic vmalloc handlers. */ -int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask); -void kasan_free_shadow(const struct vm_struct *vm); +int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask); +void kasan_free_module_shadow(const struct vm_struct *vm); #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ -static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; } -static inline void kasan_free_shadow(const struct vm_struct *vm) {} +static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; } +static inline void kasan_free_module_shadow(const struct vm_struct *vm) {} #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3877478681b9..08ba5995aa8b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -108,7 +108,7 @@ struct user; extern int __cond_resched(void); # define might_resched() __cond_resched() -#elif defined(CONFIG_PREEMPT_DYNAMIC) +#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) extern int __cond_resched(void); @@ -119,6 +119,11 @@ static __always_inline void might_resched(void) static_call_mod(might_resched)(); } +#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) + +extern int dynamic_might_resched(void); +# define might_resched() dynamic_might_resched() + #else # define might_resched() do { } while (0) diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 0c994ae37729..58d1b58a971e 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -20,6 +20,12 @@ #include <uapi/linux/kexec.h> +/* Location of a reserved region to hold the crash kernel. + */ +extern struct resource crashk_res; +extern struct resource crashk_low_res; +extern note_buf_t __percpu *crash_notes; + #ifdef CONFIG_KEXEC_CORE #include <linux/list.h> #include <linux/compat.h> @@ -350,12 +356,6 @@ extern int kexec_load_disabled; #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ KEXEC_FILE_NO_INITRAMFS) -/* Location of a reserved region to hold the crash kernel. 
- */ -extern struct resource crashk_res; -extern struct resource crashk_low_res; -extern note_buf_t __percpu *crash_notes; - /* flag to track if kexec reboot is in progress */ extern bool kexec_in_progress; diff --git a/include/linux/kobject_api.h b/include/linux/kobject_api.h new file mode 100644 index 000000000000..6e36a054c2d6 --- /dev/null +++ b/include/linux/kobject_api.h @@ -0,0 +1 @@ +#include <linux/kobject.h> diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 19b884353b15..5f1859836deb 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -427,6 +427,9 @@ static inline struct kprobe *kprobe_running(void) { return NULL; } +#define kprobe_busy_begin() do {} while (0) +#define kprobe_busy_end() do {} while (0) + static inline int register_kprobe(struct kprobe *p) { return -EOPNOTSUPP; diff --git a/include/linux/kref_api.h b/include/linux/kref_api.h new file mode 100644 index 000000000000..d67e554721d2 --- /dev/null +++ b/include/linux/kref_api.h @@ -0,0 +1 @@ +#include <linux/kref.h> diff --git a/include/linux/ksm.h b/include/linux/ksm.h index a38a5bca1ba5..0630e545f4cb 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm) struct page *ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address); -void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); +void rmap_walk_ksm(struct folio *folio, const struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); #else /* !CONFIG_KSM */ @@ -78,8 +78,8 @@ static inline struct page *ksm_might_need_to_copy(struct page *page, return page; } -static inline void rmap_walk_ksm(struct page *page, - struct rmap_walk_control *rwc) +static inline void rmap_walk_ksm(struct folio *folio, + const struct rmap_walk_control *rwc) { } diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 3df4ea04716f..de5d75bafd66 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -141,12 +141,6 @@ struct kthread_delayed_work { struct timer_list timer; }; -#define KTHREAD_WORKER_INIT(worker) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ - .work_list = LIST_HEAD_INIT((worker).work_list), \ - .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ - } - #define KTHREAD_WORK_INIT(work, fn) { \ .node = LIST_HEAD_INIT((work).node), \ .func = (fn), \ @@ -158,9 +152,6 @@ struct kthread_delayed_work { TIMER_IRQSAFE), \ } -#define DEFINE_KTHREAD_WORKER(worker) \ - struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) - #define DEFINE_KTHREAD_WORK(work, fn) \ struct kthread_work work = KTHREAD_WORK_INIT(work, fn) @@ -168,19 +159,6 @@ struct kthread_delayed_work { struct kthread_delayed_work dwork = \ KTHREAD_DELAYED_WORK_INIT(dwork, fn) -/* - * kthread_worker.lock needs its own lockdep class key when defined on - * stack with lockdep enabled. Use the following macros in such cases. 
- */ -#ifdef CONFIG_LOCKDEP -# define KTHREAD_WORKER_INIT_ONSTACK(worker) \ - ({ kthread_init_worker(&worker); worker; }) -# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ - struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) -#else -# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) -#endif - extern void __kthread_init_worker(struct kthread_worker *worker, const char *name, struct lock_class_key *key); diff --git a/include/linux/ktime_api.h b/include/linux/ktime_api.h new file mode 100644 index 000000000000..f697d493960f --- /dev/null +++ b/include/linux/ktime_api.h @@ -0,0 +1 @@ +#include <linux/ktime.h> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f11039944c08..9536ffa0473b 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -153,10 +153,9 @@ static inline bool is_error_page(struct page *page) * Bits 4-7 are reserved for more arch-independent bits. */ #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 -#define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 @@ -1325,7 +1324,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target); void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); void kvm_flush_remote_tlbs(struct kvm *kvm); -void kvm_reload_remote_mmus(struct kvm *kvm); #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); diff --git a/include/linux/libata.h b/include/linux/libata.h index 7f99b4d78822..9b1d3d8b1252 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -519,7 +519,10 @@ struct ata_taskfile { u8 hob_lbam; u8 hob_lbah; - u8 feature; + union { + u8 error; + u8 feature; + }; u8 nsect; u8 lbal; u8 lbam; @@ -527,7 +530,10 @@ struct ata_taskfile { u8 device; - u8 command; /* IO operation */ + union { + u8 status; + u8 command; + }; u32 auxiliary; /* auxiliary field */ /* from SATA 3.1 and */ @@ -1081,7 +1087,7 @@ extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, extern bool ata_link_online(struct ata_link *link); extern bool ata_link_offline(struct ata_link *link); #ifdef CONFIG_PM -extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); +extern void ata_host_suspend(struct ata_host *host, pm_message_t mesg); extern void ata_host_resume(struct ata_host *host); extern void ata_sas_port_suspend(struct ata_port *ap); extern void ata_sas_port_resume(struct ata_port *ap); diff --git a/include/linux/linkage.h b/include/linux/linkage.h index dbf8506decca..acb1ad2356f1 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -165,7 +165,18 @@ #ifndef SYM_END #define SYM_END(name, sym_type) \ .type name sym_type ASM_NL \ - .size name, .-name + .set .L__sym_size_##name, .-name ASM_NL \ + .size name, .L__sym_size_##name +#endif + +/* SYM_ALIAS -- use only if you have to */ +#ifndef SYM_ALIAS +#define SYM_ALIAS(alias, name, sym_type, linkage) \ + linkage(alias) ASM_NL \ + .set alias, name ASM_NL \ + .type alias sym_type ASM_NL \ + .set .L__sym_size_##alias, .L__sym_size_##name ASM_NL \ + .size alias, .L__sym_size_##alias #endif /* === code annotations === */ @@ -200,30 +211,8 @@ SYM_ENTRY(name, linkage, SYM_A_NONE) 
#endif -/* - * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one - * function - */ -#ifndef SYM_FUNC_START_LOCAL_ALIAS -#define SYM_FUNC_START_LOCAL_ALIAS(name) \ - SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) -#endif - -/* - * SYM_FUNC_START_ALIAS -- use where there are two global names for one - * function - */ -#ifndef SYM_FUNC_START_ALIAS -#define SYM_FUNC_START_ALIAS(name) \ - SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) -#endif - /* SYM_FUNC_START -- use for global functions */ #ifndef SYM_FUNC_START -/* - * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two - * later. - */ #define SYM_FUNC_START(name) \ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) #endif @@ -236,7 +225,6 @@ /* SYM_FUNC_START_LOCAL -- use for local functions */ #ifndef SYM_FUNC_START_LOCAL -/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */ #define SYM_FUNC_START_LOCAL(name) \ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN) #endif @@ -259,22 +247,39 @@ SYM_START(name, SYM_L_WEAK, SYM_A_NONE) #endif -/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */ -#ifndef SYM_FUNC_END_ALIAS -#define SYM_FUNC_END_ALIAS(name) \ - SYM_END(name, SYM_T_FUNC) -#endif - /* * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START, * SYM_FUNC_START_WEAK, ... */ #ifndef SYM_FUNC_END -/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */ #define SYM_FUNC_END(name) \ SYM_END(name, SYM_T_FUNC) #endif +/* + * SYM_FUNC_ALIAS -- define a global alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS +#define SYM_FUNC_ALIAS(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_GLOBAL) +#endif + +/* + * SYM_FUNC_ALIAS_LOCAL -- define a local alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS_LOCAL +#define SYM_FUNC_ALIAS_LOCAL(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_LOCAL) +#endif + +/* + * SYM_FUNC_ALIAS_WEAK -- define a weak global alias for an existing function + */ +#ifndef SYM_FUNC_ALIAS_WEAK +#define SYM_FUNC_ALIAS_WEAK(alias, name) \ + SYM_ALIAS(alias, name, SYM_T_FUNC, SYM_L_WEAK) +#endif + /* SYM_CODE_START -- use for non-C (special) functions */ #ifndef SYM_CODE_START #define SYM_CODE_START(name) \ diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index f8397f300fcd..15e0e0209da4 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -66,11 +66,6 @@ static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, linkmode_clear_bit(nr, addr); } -static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) -{ - __change_bit(nr, addr); -} - static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) { return test_bit(nr, addr); diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 1b5fceb565df..b35968ee9fb5 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -11,6 +11,7 @@ #include <linux/list.h> #include <linux/nodemask.h> #include <linux/shrinker.h> +#include <linux/xarray.h> struct mem_cgroup; @@ -33,8 +34,8 @@ struct list_lru_one { struct list_lru_memcg { struct rcu_head rcu; - /* array of per cgroup lists, indexed by memcg_cache_id */ - struct list_lru_one *lru[]; + /* array of per cgroup per node lists, indexed by node id */ + struct list_lru_one node[]; }; struct list_lru_node { @@ -42,11 +43,7 @@ struct list_lru_node { spinlock_t lock; /* global list, used for the root cgroup in cgroup aware lrus */ struct list_lru_one lru; -#ifdef CONFIG_MEMCG_KMEM - /* for cgroup aware lrus 
points to per cgroup lists, otherwise NULL */ - struct list_lru_memcg __rcu *memcg_lrus; -#endif - long nr_items; + long nr_items; } ____cacheline_aligned_in_smp; struct list_lru { @@ -55,6 +52,7 @@ struct list_lru { struct list_head list; int shrinker_id; bool memcg_aware; + struct xarray xa; #endif }; @@ -69,8 +67,9 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware, #define list_lru_init_memcg(lru, shrinker) \ __list_lru_init((lru), true, NULL, shrinker) -int memcg_update_all_list_lrus(int num_memcgs); -void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg); +int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, + gfp_t gfp); +void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent); /** * list_lru_add: add an element to the lru list's tail diff --git a/include/linux/llist_api.h b/include/linux/llist_api.h new file mode 100644 index 000000000000..625bec0393a1 --- /dev/null +++ b/include/linux/llist_api.h @@ -0,0 +1 @@ +#include <linux/llist.h> diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 975e33b793a7..6d635e8306d6 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l) } #else /* CONFIG_DEBUG_LOCK_ALLOC */ # define LOCAL_LOCK_DEBUG_INIT(lockname) -static inline void local_lock_acquire(local_lock_t *l) { } -static inline void local_lock_release(local_lock_t *l) { } -static inline void local_lock_debug_init(local_lock_t *l) { } +# define local_lock_acquire(__ll) do { typecheck(local_lock_t *, __ll); } while (0) +# define local_lock_release(__ll) do { typecheck(local_lock_t *, __ll); } while (0) +# define local_lock_debug_init(__ll) do { typecheck(local_lock_t *, __ll); } while (0) #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ #define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) } diff --git a/include/linux/lockdep_api.h b/include/linux/lockdep_api.h new file mode 100644 index 000000000000..907e66979ab2 --- /dev/null +++ b/include/linux/lockdep_api.h @@ -0,0 +1 @@ +#include <linux/lockdep.h> diff --git a/include/linux/log2.h b/include/linux/log2.h index df0b155c2141..9f30d087a128 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -18,7 +18,7 @@ * - the arch is not required to handle n==0 if implementing the fallback */ #ifndef CONFIG_ARCH_HAS_ILOG2_U32 -static inline __attribute__((const)) +static __always_inline __attribute__((const)) int __ilog2_u32(u32 n) { return fls(n) - 1; @@ -26,7 +26,7 @@ int __ilog2_u32(u32 n) #endif #ifndef CONFIG_ARCH_HAS_ILOG2_U64 -static inline __attribute__((const)) +static __always_inline __attribute__((const)) int __ilog2_u64(u64 n) { return fls64(n) - 1; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 819ec92dc2a8..db924fe379c9 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -332,6 +332,8 @@ LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname, struct sockaddr *address, int addrlen) LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc, struct sock *sk, struct sock *newsk) +LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc, + struct sk_buff *skb) #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 3bf5c658bc44..419b5febc3ca 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ 
-1046,6 +1046,11 @@ * @asoc pointer to current sctp association structure. * @sk pointer to current sock structure. * @newsk pointer to new sock structure. + * @sctp_assoc_established: + * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet + * to the security module. + * @asoc pointer to sctp association structure. + * @skb pointer to skbuff of association packet. * * Security hooks for Infiniband * diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0abbd685703b..a68dce3873fc 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -34,6 +34,7 @@ enum memcg_stat_item { MEMCG_SOCK, MEMCG_PERCPU_B, MEMCG_VMALLOC, + MEMCG_KMEM, MEMCG_NR_STAT, }; @@ -523,6 +524,20 @@ static inline struct mem_cgroup *page_memcg_check(struct page *page) return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); } +static inline struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg) +{ + struct mem_cgroup *memcg; + + rcu_read_lock(); +retry: + memcg = obj_cgroup_memcg(objcg); + if (unlikely(!css_tryget(&memcg->css))) + goto retry; + rcu_read_unlock(); + + return memcg; +} + #ifdef CONFIG_MEMCG_KMEM /* * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set. @@ -841,9 +856,7 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) */ static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { - if (!memcg->memory.parent) - return NULL; - return mem_cgroup_from_counter(memcg->memory.parent, memory); + return mem_cgroup_from_css(memcg->css.parent); } static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, @@ -1672,18 +1685,6 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); extern struct static_key_false memcg_kmem_enabled_key; -extern int memcg_nr_cache_ids; -void memcg_get_cache_ids(void); -void memcg_put_cache_ids(void); - -/* - * Helper macro to loop through all memcg-specific caches. Callers must still - * check if the cache is valid (it is either valid or NULL). - * the slab_mutex must be held when looping through those caches - */ -#define for_each_memcg_cache_index(_idx) \ - for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++) - static inline bool memcg_kmem_enabled(void) { return static_branch_likely(&memcg_kmem_enabled_key); @@ -1707,7 +1708,7 @@ static inline void memcg_kmem_uncharge_page(struct page *page, int order) * A helper for accessing memcg's kmem_id, used for getting * corresponding LRU lists. */ -static inline int memcg_cache_id(struct mem_cgroup *memcg) +static inline int memcg_kmem_id(struct mem_cgroup *memcg) { return memcg ? 
memcg->kmemcg_id : -1; } @@ -1740,27 +1741,16 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order) { } -#define for_each_memcg_cache_index(_idx) \ - for (; NULL; ) - static inline bool memcg_kmem_enabled(void) { return false; } -static inline int memcg_cache_id(struct mem_cgroup *memcg) +static inline int memcg_kmem_id(struct mem_cgroup *memcg) { return -1; } -static inline void memcg_get_cache_ids(void) -{ -} - -static inline void memcg_put_cache_ids(void) -{ -} - static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) { return NULL; diff --git a/include/linux/memory.h b/include/linux/memory.h index 88eb587b5143..aa619464a1df 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -70,6 +70,13 @@ struct memory_block { unsigned long state; /* serialized by the dev->lock */ int online_type; /* for passing data to online routine */ int nid; /* NID for this memory block */ + /* + * The single zone of this memory block if all PFNs of this memory block + * that are System RAM (not a memory hole, not ZONE_DEVICE ranges) are + * managed by a single zone. NULL if multiple zones (including nodes) + * apply. + */ + struct zone *zone; struct device dev; /* * Number of vmemmap pages. These pages @@ -161,6 +168,11 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, }) #define register_hotmemory_notifier(nb) register_memory_notifier(nb) #define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb) + +#ifdef CONFIG_NUMA +void memory_block_add_nid(struct memory_block *mem, int nid, + enum meminit_context context); +#endif /* CONFIG_NUMA */ #endif /* CONFIG_MEMORY_HOTPLUG */ /* diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index be48e003a518..1ce6f8044f1e 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -16,6 +16,62 @@ struct memory_group; struct resource; struct vmem_altmap; +#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION +/* + * For supporting node-hotadd, we have to allocate a new pgdat. + * + * If an arch has generic style NODE_DATA(), + * node_data[nid] = kzalloc() works well. But it depends on the architecture. + * + * In general, generic_alloc_nodedata() is used. + * + */ +extern pg_data_t *arch_alloc_nodedata(int nid); +extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); + +#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + +#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) + +#ifdef CONFIG_NUMA +/* + * XXX: node aware allocation can't work well to get new node's memory at this time. + * Because, pgdat for the new node is not allocated/initialized yet itself. + * To use new node's memory, more consideration will be necessary. + */ +#define generic_alloc_nodedata(nid) \ +({ \ + memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \ +}) +/* + * This definition is just for error path in node hotadd. + * For node hotremove, we have to replace this. 
+ */ +#define generic_free_nodedata(pgdat) kfree(pgdat) + +extern pg_data_t *node_data[]; +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ + node_data[nid] = pgdat; +} + +#else /* !CONFIG_NUMA */ + +/* never called */ +static inline pg_data_t *generic_alloc_nodedata(int nid) +{ + BUG(); + return NULL; +} +static inline void generic_free_nodedata(pg_data_t *pgdat) +{ +} +static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) +{ +} +#endif /* CONFIG_NUMA */ +#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ + #ifdef CONFIG_MEMORY_HOTPLUG struct page *pfn_to_online_page(unsigned long pfn); @@ -107,8 +163,6 @@ extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group); -extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, - unsigned long end_pfn); extern void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn); @@ -154,66 +208,6 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, struct mhp_params *params); #endif /* ARCH_HAS_ADD_PAGES */ -#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION -/* - * For supporting node-hotadd, we have to allocate a new pgdat. - * - * If an arch has generic style NODE_DATA(), - * node_data[nid] = kzalloc() works well. But it depends on the architecture. - * - * In general, generic_alloc_nodedata() is used. - * Now, arch_free_nodedata() is just defined for error path of node_hot_add. - * - */ -extern pg_data_t *arch_alloc_nodedata(int nid); -extern void arch_free_nodedata(pg_data_t *pgdat); -extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); - -#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ - -#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) -#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat) - -#ifdef CONFIG_NUMA -/* - * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat. - * XXX: kmalloc_node() can't work well to get new node's memory at this time. - * Because, pgdat for the new node is not allocated/initialized yet itself. - * To use new node's memory, more consideration will be necessary. - */ -#define generic_alloc_nodedata(nid) \ -({ \ - kzalloc(sizeof(pg_data_t), GFP_KERNEL); \ -}) -/* - * This definition is just for error path in node hotadd. - * For node hotremove, we have to replace this. 
- */ -#define generic_free_nodedata(pgdat) kfree(pgdat) - -extern pg_data_t *node_data[]; -static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ - node_data[nid] = pgdat; -} - -#else /* !CONFIG_NUMA */ - -/* never called */ -static inline pg_data_t *generic_alloc_nodedata(int nid) -{ - BUG(); - return NULL; -} -static inline void generic_free_nodedata(pg_data_t *pgdat) -{ -} -static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ -} -#endif /* CONFIG_NUMA */ -#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ - void get_online_mems(void); void put_online_mems(void); @@ -297,7 +291,7 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages, - struct memory_group *group); + struct zone *zone, struct memory_group *group); extern int remove_memory(u64 start, u64 size); extern void __remove_memory(u64 start, u64 size); extern int offline_and_remove_memory(u64 start, u64 size); @@ -306,7 +300,7 @@ extern int offline_and_remove_memory(u64 start, u64 size); static inline void try_offline_node(int nid) {} static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages, - struct memory_group *group) + struct zone *zone, struct memory_group *group) { return -EINVAL; } @@ -323,7 +317,7 @@ extern void set_zone_contiguous(struct zone *zone); extern void clear_zone_contiguous(struct zone *zone); #ifdef CONFIG_MEMORY_HOTPLUG -extern void __ref free_area_init_core_hotplug(int nid); +extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat); extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory_resource(int nid, struct resource *resource, diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 1fafcc38acba..8af304f6b504 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -1,6 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MEMREMAP_H_ #define _LINUX_MEMREMAP_H_ + +#include <linux/mm.h> #include <linux/range.h> #include <linux/ioport.h> #include <linux/percpu-refcount.h> @@ -66,9 +68,9 @@ enum memory_type { struct dev_pagemap_ops { /* - * Called once the page refcount reaches 1. (ZONE_DEVICE pages never - * reach 0 refcount unless there is a refcount bug. This allows the - * device driver to implement its own memory management.) + * Called once the page refcount reaches 0. The reference count will be + * reset to one by the core code after the method is called to prepare + * for handing out the page again. 
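+	 *
+	 * A driver's page_free() might, for example, hand the page back to a
+	 * driver-private allocator (a sketch; my_driver_release_backing() is
+	 * illustrative, not part of this patch):
+	 *
+	 *	static void my_page_free(struct page *page)
+	 *	{
+	 *		my_driver_release_backing(page->zone_device_data);
+	 *	}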
*/ void (*page_free)(struct page *page); @@ -129,6 +131,25 @@ static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap) return 1 << pgmap->vmemmap_shift; } +static inline bool is_device_private_page(const struct page *page) +{ + return IS_ENABLED(CONFIG_DEVICE_PRIVATE) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PRIVATE; +} + +static inline bool folio_is_device_private(const struct folio *folio) +{ + return is_device_private_page(&folio->page); +} + +static inline bool is_pci_p2pdma_page(const struct page *page) +{ + return IS_ENABLED(CONFIG_PCI_P2PDMA) && + is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; +} + #ifdef CONFIG_ZONE_DEVICE void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index cbf9d7619493..e7a7e70fdb38 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -556,31 +556,11 @@ static inline void prcmu_clear(unsigned int reg, u32 bits) #define PRCMU_QOS_ARM_OPP 3 #define PRCMU_QOS_DEFAULT_VALUE -1 -#ifdef CONFIG_DBX500_PRCMU_QOS_POWER - -unsigned long prcmu_qos_get_cpufreq_opp_delay(void); -void prcmu_qos_set_cpufreq_opp_delay(unsigned long); -void prcmu_qos_force_opp(int, s32); -int prcmu_qos_requirement(int pm_qos_class); -int prcmu_qos_add_requirement(int pm_qos_class, char *name, s32 value); -int prcmu_qos_update_requirement(int pm_qos_class, char *name, s32 new_value); -void prcmu_qos_remove_requirement(int pm_qos_class, char *name); -int prcmu_qos_add_notifier(int prcmu_qos_class, - struct notifier_block *notifier); -int prcmu_qos_remove_notifier(int prcmu_qos_class, - struct notifier_block *notifier); - -#else - static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void) { return 0; } -static inline void prcmu_qos_set_cpufreq_opp_delay(unsigned long n) {} - -static inline void prcmu_qos_force_opp(int prcmu_qos_class, s32 i) {} - static inline int prcmu_qos_requirement(int prcmu_qos_class) { return 0; @@ -613,6 +593,4 @@ static inline int prcmu_qos_remove_notifier(int prcmu_qos_class, return 0; } -#endif - #endif /* __MACH_PRCMU_H */ diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h index 129a6c078221..1db532feeb91 100644 --- a/include/linux/mfd/idt82p33_reg.h +++ b/include/linux/mfd/idt82p33_reg.h @@ -7,6 +7,8 @@ #ifndef HAVE_IDT82P33_REG #define HAVE_IDT82P33_REG +#define REG_ADDR(page, offset) (((page) << 0x7) | ((offset) & 0x7f)) + /* Register address */ #define DPLL1_TOD_CNFG 0x134 #define DPLL2_TOD_CNFG 0x1B4 @@ -41,6 +43,7 @@ #define REG_SOFT_RESET 0X381 #define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn))) +#define TOD_TRIGGER(wr_trig, rd_trig) ((wr_trig & 0xf) << 4 | (rd_trig & 0xf)) /* Register bit definitions */ #define SYNC_TOD BIT(1) diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index 6a88e34cb955..945bde1fe55c 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -13,6 +13,13 @@ #include <linux/regmap.h> +enum intel_cht_wc_models { + INTEL_CHT_WC_UNKNOWN, + INTEL_CHT_WC_GPD_WIN_POCKET, + INTEL_CHT_WC_XIAOMI_MIPAD2, + INTEL_CHT_WC_LENOVO_YOGABOOK1, +}; + /** * struct intel_soc_pmic - Intel SoC PMIC data * @irq: Master interrupt number of the parent PMIC device @@ -39,6 +46,7 @@ struct intel_soc_pmic { struct regmap_irq_chip_data *irq_chip_data_crit; struct device *dev; struct intel_scu_ipc_dev *scu; + enum 
intel_cht_wc_models cht_wc_model; }; int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, diff --git a/include/linux/mfd/iqs62x.h b/include/linux/mfd/iqs62x.h index 5ced55eae11b..ffc86010af74 100644 --- a/include/linux/mfd/iqs62x.h +++ b/include/linux/mfd/iqs62x.h @@ -14,6 +14,11 @@ #define IQS624_PROD_NUM 0x43 #define IQS625_PROD_NUM 0x4E +#define IQS620_HW_NUM_V0 0x82 +#define IQS620_HW_NUM_V1 IQS620_HW_NUM_V0 +#define IQS620_HW_NUM_V2 IQS620_HW_NUM_V0 +#define IQS620_HW_NUM_V3 0x92 + #define IQS621_ALS_FLAGS 0x16 #define IQS622_ALS_FLAGS 0x14 @@ -129,6 +134,8 @@ struct iqs62x_core { struct completion fw_done; enum iqs62x_ui_sel ui_sel; unsigned long event_cache; + u8 sw_num; + u8 hw_num; }; extern const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS]; diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h index 39967a5eca6d..ea4a4b1b246a 100644 --- a/include/linux/mfd/lpc_ich.h +++ b/include/linux/mfd/lpc_ich.h @@ -8,7 +8,7 @@ #ifndef LPC_ICH_H #define LPC_ICH_H -#include <linux/platform_data/x86/intel-spi.h> +#include <linux/platform_data/x86/spi-intel.h> /* GPIO resources */ #define ICH_RES_GPIO 0 diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h index b1482b3cf353..3acceeedbaba 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -152,7 +152,7 @@ enum max77686_rtc_reg { MAX77686_RTC_WEEKDAY = 0x0A, MAX77686_RTC_MONTH = 0x0B, MAX77686_RTC_YEAR = 0x0C, - MAX77686_RTC_DATE = 0x0D, + MAX77686_RTC_MONTHDAY = 0x0D, MAX77686_ALARM1_SEC = 0x0E, MAX77686_ALARM1_MIN = 0x0F, MAX77686_ALARM1_HOUR = 0x10, @@ -352,7 +352,7 @@ enum max77802_rtc_reg { MAX77802_RTC_WEEKDAY = 0xCA, MAX77802_RTC_MONTH = 0xCB, MAX77802_RTC_YEAR = 0xCC, - MAX77802_RTC_DATE = 0xCD, + MAX77802_RTC_MONTHDAY = 0xCD, MAX77802_RTC_AE1 = 0xCE, MAX77802_ALARM1_SEC = 0xCF, MAX77802_ALARM1_MIN = 0xD0, diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h new file mode 100644 index 000000000000..a970dc455426 --- /dev/null +++ b/include/linux/mfd/max77714.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Maxim MAX77714 Register and data structures definition. 
+ * + * Copyright (C) 2022 Luca Ceresoli + * Author: Luca Ceresoli <luca@lucaceresoli.net> + */ + +#ifndef __LINUX_MFD_MAX77714_H_ +#define __LINUX_MFD_MAX77714_H_ + +#include <linux/bits.h> + +#define MAX77714_INT_TOP 0x00 +#define MAX77714_INT_TOPM 0x07 /* Datasheet says "read only", but it is RW */ + +#define MAX77714_INT_TOP_ONOFF BIT(1) +#define MAX77714_INT_TOP_RTC BIT(3) +#define MAX77714_INT_TOP_GPIO BIT(4) +#define MAX77714_INT_TOP_LDO BIT(5) +#define MAX77714_INT_TOP_SD BIT(6) +#define MAX77714_INT_TOP_GLBL BIT(7) + +#define MAX77714_32K_STATUS 0x30 +#define MAX77714_32K_STATUS_SIOSCOK BIT(5) +#define MAX77714_32K_STATUS_XOSCOK BIT(4) +#define MAX77714_32K_STATUS_32KSOURCE BIT(3) +#define MAX77714_32K_STATUS_32KLOAD_MSK 0x3 +#define MAX77714_32K_STATUS_32KLOAD_SHF 1 +#define MAX77714_32K_STATUS_CRYSTAL_CFG BIT(0) + +#define MAX77714_32K_CONFIG 0x31 +#define MAX77714_32K_CONFIG_XOSC_RETRY BIT(4) + +#define MAX77714_CNFG_GLBL2 0x91 +#define MAX77714_WDTEN BIT(2) +#define MAX77714_WDTSLPC BIT(3) +#define MAX77714_TWD_MASK 0x3 +#define MAX77714_TWD_2s 0x0 +#define MAX77714_TWD_16s 0x1 +#define MAX77714_TWD_64s 0x2 +#define MAX77714_TWD_128s 0x3 + +#define MAX77714_CNFG_GLBL3 0x92 +#define MAX77714_WDTC BIT(0) + +#define MAX77714_CNFG2_ONOFF 0x94 +#define MAX77714_WD_RST_WK BIT(5) + +/* Interrupts */ +enum { + MAX77714_IRQ_TOP_ONOFF, + MAX77714_IRQ_TOP_RTC, /* Real-time clock */ + MAX77714_IRQ_TOP_GPIO, /* GPIOs */ + MAX77714_IRQ_TOP_LDO, /* Low-dropout regulators */ + MAX77714_IRQ_TOP_SD, /* Step-down regulators */ + MAX77714_IRQ_TOP_GLBL, /* "Global resources": Low-Battery, overtemp... */ +}; + +#endif /* __LINUX_MFD_MAX77714_H_ */ diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h index 201139b12140..3d33517f178c 100644 --- a/include/linux/mfd/mt6358/registers.h +++ b/include/linux/mfd/mt6358/registers.h @@ -94,6 +94,10 @@ #define MT6358_BUCK_VCORE_CON0 0x1488 #define MT6358_BUCK_VCORE_DBG0 0x149e #define MT6358_BUCK_VCORE_DBG1 0x14a0 +#define MT6358_BUCK_VCORE_SSHUB_CON0 0x14a4 +#define MT6358_BUCK_VCORE_SSHUB_CON1 0x14a6 +#define MT6358_BUCK_VCORE_SSHUB_ELR0 MT6358_BUCK_VCORE_SSHUB_CON1 +#define MT6358_BUCK_VCORE_SSHUB_DBG1 MT6358_BUCK_VCORE_DBG1 #define MT6358_BUCK_VCORE_ELR0 0x14aa #define MT6358_BUCK_VGPU_CON0 0x1508 #define MT6358_BUCK_VGPU_DBG0 0x151e @@ -169,6 +173,9 @@ #define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6 #define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0 #define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2 +#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON0 0x1bc4 +#define MT6358_LDO_VSRAM_OTHERS_SSHUB_CON1 0x1bc6 +#define MT6358_LDO_VSRAM_OTHERS_SSHUB_DBG1 MT6358_LDO_VSRAM_OTHERS_DBG1 #define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8 #define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2 #define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4 diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 56f210eebc54..1cf78726503b 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -14,6 +14,7 @@ enum chip_id { MT6323_CHIP_ID = 0x23, MT6358_CHIP_ID = 0x58, MT6359_CHIP_ID = 0x59, + MT6366_CHIP_ID = 0x66, MT6391_CHIP_ID = 0x91, MT6397_CHIP_ID = 0x97, }; diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index a96e6d43ca06..58602032e642 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -373,6 +373,7 @@ enum rk805_reg { #define SWITCH2_EN BIT(6) #define SWITCH1_EN BIT(5) #define DEV_OFF_RST BIT(3) +#define DEV_RST BIT(2) #define DEV_OFF BIT(0) #define RTC_STOP BIT(0) diff --git 
a/include/linux/mfd/sy7636a.h b/include/linux/mfd/sy7636a.h new file mode 100644 index 000000000000..22f03b2f851e --- /dev/null +++ b/include/linux/mfd/sy7636a.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Functions to access the SY7636A power management chip. + * + * Copyright (C) 2021 reMarkable AS - http://www.remarkable.com/ + */ + +#ifndef __MFD_SY7636A_H +#define __MFD_SY7636A_H + +#define SY7636A_REG_OPERATION_MODE_CRL 0x00 +/* It is set if a gpio is used to control the regulator */ +#define SY7636A_OPERATION_MODE_CRL_VCOMCTL BIT(6) +#define SY7636A_OPERATION_MODE_CRL_ONOFF BIT(7) +#define SY7636A_REG_VCOM_ADJUST_CTRL_L 0x01 +#define SY7636A_REG_VCOM_ADJUST_CTRL_H 0x02 +#define SY7636A_REG_VCOM_ADJUST_CTRL_MASK 0x01ff +#define SY7636A_REG_VLDO_VOLTAGE_ADJULST_CTRL 0x03 +#define SY7636A_REG_POWER_ON_DELAY_TIME 0x06 +#define SY7636A_REG_FAULT_FLAG 0x07 +#define SY7636A_FAULT_FLAG_PG BIT(0) +#define SY7636A_REG_TERMISTOR_READOUT 0x08 + +#define SY7636A_REG_MAX 0x08 + +#define VCOM_ADJUST_CTRL_MASK 0x1ff +// Used to shift the high byte +#define VCOM_ADJUST_CTRL_SHIFT 8 +// Used to scale from VCOM_ADJUST_CTRL to mV +#define VCOM_ADJUST_CTRL_SCAL 10000 + +#define FAULT_FLAG_SHIFT 1 + +#endif /* __MFD_SY7636A_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index db96e10eb8da..90e75d5a54d6 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -48,7 +48,15 @@ int folio_migrate_mapping(struct address_space *mapping, struct folio *newfolio, struct folio *folio, int extra_count); extern bool numa_demotion_enabled; +extern void migrate_on_reclaim_init(void); +#ifdef CONFIG_HOTPLUG_CPU +extern void set_migration_target_nodes(void); #else +static inline void set_migration_target_nodes(void) {} +#endif +#else + +static inline void set_migration_target_nodes(void) {} static inline void putback_movable_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_page_t new, diff --git a/include/linux/mii.h b/include/linux/mii.h index 12ea29e04293..5ee13083cec7 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -355,56 +355,6 @@ static inline u32 mii_adv_to_ethtool_adv_x(u32 adv) } /** - * mii_lpa_mod_linkmode_adv_sgmii - * @lp_advertising: pointer to destination link mode. - * @lpa: value of the MII_LPA register - * - * A small helper function that translates MII_LPA bits to - * linkmode advertisement settings for SGMII. - * Leaves other bits unchanged. - */ -static inline void -mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa) -{ - u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK; - - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_1000HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_1000FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_100HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_100FULL); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising, - speed_duplex == LPA_SGMII_10HALF); - - linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising, - speed_duplex == LPA_SGMII_10FULL); -} - -/** - * mii_lpa_to_linkmode_adv_sgmii - * @advertising: pointer to destination link mode.
- * @lpa: value of the MII_LPA register - * - * A small helper function that translates MII_ADVERTISE bits - * to linkmode advertisement settings when in SGMII mode. - * Clears the old value of advertising. - */ -static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising, - u32 lpa) -{ - linkmode_zero(lp_advertising); - - mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa); -} - -/** * mii_adv_mod_linkmode_adv_t * @advertising:pointer to destination link mode. * @adv: value of the MII_ADVERTISE register diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 7bfb67363434..cb15308b5cb0 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -183,6 +183,8 @@ static inline void mlx5_cq_put(struct mlx5_core_cq *cq) complete(&cq->free); } +int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 78655d8d13a7..9424503eb8d3 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -264,6 +264,14 @@ enum { struct mlx5_cmd_stats { u64 sum; u64 n; + /* number of times command failed */ + u64 failed; + /* number of times command failed on bad status returned by FW */ + u64 failed_mbox_status; + /* last command failed returned errno */ + u32 last_failed_errno; + /* last bad status returned by FW */ + u8 last_failed_mbox_status; struct dentry *root; /* protect command average calculations */ spinlock_t lock; @@ -543,6 +551,15 @@ struct mlx5_adev { int idx; }; +struct mlx5_debugfs_entries { + struct dentry *dbg_root; + struct dentry *qp_debugfs; + struct dentry *eq_debugfs; + struct dentry *cq_debugfs; + struct dentry *cmdif_debugfs; + struct dentry *pages_debugfs; +}; + struct mlx5_ft_pool; struct mlx5_priv { /* IRQ table valid only for real pci devices PF or VF */ @@ -553,21 +570,19 @@ struct mlx5_priv { struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; struct xarray page_root_xa; - int fw_pages; + u32 fw_pages; atomic_t reg_pages; struct list_head free_list; - int vfs_pages; - int host_pf_pages; + u32 vfs_pages; + u32 host_pf_pages; + u32 fw_pages_alloc_failed; + u32 give_pages_dropped; + u32 reclaim_pages_discard; struct mlx5_core_health health; struct list_head traps; - /* start: qp staff */ - struct dentry *qp_debugfs; - struct dentry *eq_debugfs; - struct dentry *cq_debugfs; - struct dentry *cmdif_debugfs; - /* end: qp staff */ + struct mlx5_debugfs_entries dbg; /* start: alloc staff */ /* protect buffer allocation according to numa node */ @@ -577,7 +592,6 @@ struct mlx5_priv { struct mutex pgdir_mutex; struct list_head pgdir_list; /* end: alloc staff */ - struct dentry *dbg_root; struct list_head ctx_list; spinlock_t ctx_lock; @@ -863,20 +877,10 @@ struct mlx5_hca_vport_context { bool grh_required; }; -static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset) -{ - return buf->frags->buf + offset; -} - #define STRUCT_FIELD(header, field) \ .struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \ .struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field -static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev) -{ - return pci_get_drvdata(pdev); -} - extern struct dentry *mlx5_debugfs_root; static inline u16 fw_rev_maj(struct 
mlx5_core_dev *dev) @@ -965,6 +969,8 @@ typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); struct mlx5_async_work { struct mlx5_async_ctx *ctx; mlx5_async_cbk_t user_callback; + u16 opcode; /* cmd opcode */ + void *out; /* pointer to the cmd output buffer */ }; void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, @@ -973,7 +979,9 @@ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, void *out, int out_size, mlx5_async_cbk_t callback, struct mlx5_async_work *work); - +void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out); +int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); +int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out); int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); @@ -991,7 +999,6 @@ int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); -void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); @@ -1002,9 +1009,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev); void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); -int mlx5_buf_alloc(struct mlx5_core_dev *dev, - int size, struct mlx5_frag_buf *buf); -void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size, struct mlx5_frag_buf *buf, int node); void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf); @@ -1023,6 +1027,8 @@ int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages, bool ec_function); int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); @@ -1030,15 +1036,18 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); -void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); +struct dentry *mlx5_debugfs_get_dev_root(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev); void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, + void *data_out, int size_out, u16 reg_id, int arg, + int write, bool verbose); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int 
arg, int write); @@ -1143,6 +1152,9 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t addr, u32 obj_id); +struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev); +void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev); + #ifdef CONFIG_MLX5_CORE_IPOIB struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, struct ib_device *ibdev, diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b1aad14689e3..e3bfed68b08a 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -224,6 +224,7 @@ struct mlx5_flow_act { u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; + struct mlx5_flow_group *fg; }; #define MLX5_DECLARE_FLOW_ACT(name) \ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 598ac3bcc901..7d2d0ba82144 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -64,13 +64,6 @@ enum { }; enum { - MLX5_MODIFY_TIR_BITMASK_LRO = 0x0, - MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE = 0x1, - MLX5_MODIFY_TIR_BITMASK_HASH = 0x2, - MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN = 0x3 -}; - -enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, @@ -127,6 +120,11 @@ enum { MLX5_CMD_OP_QUERY_SF_PARTITION = 0x111, MLX5_CMD_OP_ALLOC_SF = 0x113, MLX5_CMD_OP_DEALLOC_SF = 0x114, + MLX5_CMD_OP_SUSPEND_VHCA = 0x115, + MLX5_CMD_OP_RESUME_VHCA = 0x116, + MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE = 0x117, + MLX5_CMD_OP_SAVE_VHCA_STATE = 0x118, + MLX5_CMD_OP_LOAD_VHCA_STATE = 0x119, MLX5_CMD_OP_CREATE_MKEY = 0x200, MLX5_CMD_OP_QUERY_MKEY = 0x201, MLX5_CMD_OP_DESTROY_MKEY = 0x202, @@ -500,7 +498,10 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_at_c0[0x18]; + u8 reserved_at_c0[0x10]; + u8 ipv4_ihl[0x4]; + u8 reserved_at_c4[0x4]; + u8 ttl_hoplimit[0x8]; u8 udp_sport[0x10]; @@ -1350,6 +1351,7 @@ enum mlx5_fc_bulk_alloc_bitmask { enum { MLX5_STEERING_FORMAT_CONNECTX_5 = 0, MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, + MLX5_STEERING_FORMAT_CONNECTX_7 = 2, }; struct mlx5_ifc_cmd_hca_cap_bits { @@ -1426,8 +1428,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_130[0xa]; u8 log_max_ra_res_dc[0x6]; - u8 reserved_at_140[0x6]; + u8 reserved_at_140[0x5]; u8 release_all_pages[0x1]; + u8 must_not_use[0x1]; u8 reserved_at_147[0x2]; u8 roce_accl[0x1]; u8 log_max_ra_req_qp[0x6]; @@ -1757,7 +1760,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_682[0x1]; u8 log_max_sf[0x5]; u8 apu[0x1]; - u8 reserved_at_689[0x7]; + u8 reserved_at_689[0x4]; + u8 migration[0x1]; + u8 reserved_at_68e[0x2]; u8 log_min_sf_size[0x8]; u8 max_num_sf_partitions[0x8]; @@ -3434,7 +3439,6 @@ enum { enum { MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0), MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1), - MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO = BIT(2), }; enum { @@ -9694,7 +9698,10 @@ struct mlx5_ifc_pcam_reg_bits { }; struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x6b]; + u8 reserved_at_0[0x5d]; + u8 mcia_32dwords[0x1]; + u8 reserved_at_5e[0xc]; + u8 reset_state[0x1]; u8 ptpcyc2realtime_modify[0x1]; u8 reserved_at_6c[0x2]; u8 pci_status_and_power[0x1]; @@ -9888,10 +9895,10 @@ struct mlx5_ifc_pcmr_reg_bits { }; struct mlx5_ifc_lane_2_module_mapping_bits { - u8 reserved_at_0[0x6]; - u8 rx_lane[0x2]; - u8 reserved_at_8[0x6]; - u8 tx_lane[0x2]; + u8 
reserved_at_0[0x4]; + u8 rx_lane[0x4]; + u8 reserved_at_8[0x4]; + u8 tx_lane[0x4]; u8 reserved_at_10[0x8]; u8 module[0x8]; }; @@ -9900,8 +9907,8 @@ struct mlx5_ifc_bufferx_reg_bits { u8 reserved_at_0[0x6]; u8 lossy[0x1]; u8 epsb[0x1]; - u8 reserved_at_8[0xc]; - u8 size[0xc]; + u8 reserved_at_8[0x8]; + u8 size[0x10]; u8 xoff_threshold[0x10]; u8 xon_threshold[0x10]; @@ -10376,6 +10383,14 @@ struct mlx5_ifc_mcda_reg_bits { }; enum { + MLX5_MFRL_REG_RESET_STATE_IDLE = 0, + MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1, + MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2, + MLX5_MFRL_REG_RESET_STATE_TIMEOUT = 3, + MLX5_MFRL_REG_RESET_STATE_NACK = 4, +}; + +enum { MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0), MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1), }; @@ -10393,7 +10408,8 @@ struct mlx5_ifc_mfrl_reg_bits { u8 pci_sync_for_fw_update_start[0x1]; u8 pci_sync_for_fw_update_resp[0x2]; u8 rst_type_sel[0x3]; - u8 reserved_at_28[0x8]; + u8 reserved_at_28[0x4]; + u8 reset_state[0x4]; u8 reset_type[0x8]; u8 reset_level[0x8]; }; @@ -11519,4 +11535,142 @@ enum { MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE, }; +enum { + MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_INITIATOR = 0x0, + MLX5_SUSPEND_VHCA_IN_OP_MOD_SUSPEND_RESPONDER = 0x1, +}; + +struct mlx5_ifc_suspend_vhca_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_suspend_vhca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_RESPONDER = 0x0, + MLX5_RESUME_VHCA_IN_OP_MOD_RESUME_INITIATOR = 0x1, +}; + +struct mlx5_ifc_resume_vhca_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_resume_vhca_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_query_vhca_migration_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_vhca_migration_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; + + u8 required_umem_size[0x20]; + + u8 reserved_at_a0[0x160]; +}; + +struct mlx5_ifc_save_vhca_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; + + u8 va[0x40]; + + u8 mkey[0x20]; + + u8 size[0x20]; +}; + +struct mlx5_ifc_save_vhca_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 actual_image_size[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_load_vhca_state_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 vhca_id[0x10]; + + u8 reserved_at_60[0x20]; + + u8 va[0x40]; + + u8 mkey[0x20]; + + u8 size[0x20]; +}; + +struct mlx5_ifc_load_vhca_state_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 77ea4f9c5265..28a928b0684b 100644 --- a/include/linux/mlx5/port.h +++ 
b/include/linux/mlx5/port.h @@ -56,8 +56,6 @@ enum mlx5_an_status { MLX5_AN_LINK_DOWN = 4, }; -#define MLX5_EEPROM_MAX_BYTES 32 -#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK 0x000000ff #define MLX5_I2C_ADDR_LOW 0x50 #define MLX5_I2C_ADDR_HIGH 0x51 #define MLX5_EEPROM_PAGE_LENGTH 256 diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 61e48d459b23..8bda3ba5b109 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -202,6 +202,9 @@ struct mlx5_wqe_fmr_seg { struct mlx5_wqe_ctrl_seg { __be32 opmod_idx_opcode; __be32 qpn_ds; + + struct_group(trailer, + u8 signature; u8 rsvd[2]; u8 fm_ce_se; @@ -211,6 +214,8 @@ struct mlx5_wqe_ctrl_seg { __be32 umr_mkey; __be32 tis_tir_num; }; + + ); /* end of trailer group */ }; #define MLX5_WQE_CTRL_DS_MASK 0x3f diff --git a/include/linux/mm.h b/include/linux/mm.h index 5744a3fc4716..e34edb775334 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3,9 +3,6 @@ #define _LINUX_MM_H #include <linux/errno.h> - -#ifdef __KERNEL__ - #include <linux/mmdebug.h> #include <linux/gfp.h> #include <linux/bug.h> @@ -26,7 +23,6 @@ #include <linux/err.h> #include <linux/page-flags.h> #include <linux/page_ref.h> -#include <linux/memremap.h> #include <linux/overflow.h> #include <linux/sizes.h> #include <linux/sched.h> @@ -216,8 +212,10 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) +#define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio)) #else #define nth_page(page,n) ((page) + (n)) +#define folio_page_idx(folio, p) ((p) - &(folio)->page) #endif /* to align the pointer to the (next) page boundary */ @@ -227,6 +225,10 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) #define lru_to_page(head) (list_entry((head)->prev, struct page, lru)) +static inline struct folio *lru_to_folio(struct list_head *head) +{ + return list_entry((head)->prev, struct folio, lru); +} void setup_initial_init_mm(void *start_code, void *end_code, void *end_data, void *brk); @@ -478,7 +480,8 @@ struct vm_fault { struct vm_area_struct *vma; /* Target VMA */ gfp_t gfp_mask; /* gfp mask to be used for allocations */ pgoff_t pgoff; /* Logical page offset based on vma */ - unsigned long address; /* Faulting virtual address */ + unsigned long address; /* Faulting virtual address - masked */ + unsigned long real_address; /* Faulting virtual address - unmasked */ }; enum fault_flag flags; /* FAULT_FLAG_xxx flags * XXX: should really be 'const' */ @@ -774,21 +777,26 @@ static inline int is_vmalloc_or_module_addr(const void *x) } #endif -static inline int head_compound_mapcount(struct page *head) +/* + * How many times the entire folio is mapped as a single unit (eg by a + * PMD or PUD entry). This is probably not what you want, except for + * debugging purposes; look at folio_mapcount() or page_mapcount() + * instead. + */ +static inline int folio_entire_mapcount(struct folio *folio) { - return atomic_read(compound_mapcount_ptr(head)) + 1; + VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); + return atomic_read(folio_mapcount_ptr(folio)) + 1; } /* * Mapcount of compound page as a whole, does not include mapped sub-pages. * - * Must be called only for compound pages or any their tail sub-pages. + * Must be called only for compound pages. 
*/ static inline int compound_mapcount(struct page *page) { - VM_BUG_ON_PAGE(!PageCompound(page), page); - page = compound_head(page); - return head_compound_mapcount(page); + return folio_entire_mapcount(page_folio(page)); } /* @@ -818,15 +826,16 @@ static inline int page_mapcount(struct page *page) return atomic_read(&page->_mapcount) + 1; } +int folio_mapcount(struct folio *folio); + #ifdef CONFIG_TRANSPARENT_HUGEPAGE -int total_mapcount(struct page *page); -int page_trans_huge_mapcount(struct page *page); -#else static inline int total_mapcount(struct page *page) { - return page_mapcount(page); + return folio_mapcount(page_folio(page)); } -static inline int page_trans_huge_mapcount(struct page *page) + +#else +static inline int total_mapcount(struct page *page) { return page_mapcount(page); } @@ -889,33 +898,17 @@ static inline void destroy_compound_page(struct page *page) compound_page_dtors[page[1].compound_dtor](page); } -static inline bool hpage_pincount_available(struct page *page) -{ - /* - * Can the page->hpage_pinned_refcount field be used? That field is in - * the 3rd page of the compound page, so the smallest (2-page) compound - * pages cannot support it. - */ - page = compound_head(page); - return PageCompound(page) && compound_order(page) > 1; -} - static inline int head_compound_pincount(struct page *head) { return atomic_read(compound_pincount_ptr(head)); } -static inline int compound_pincount(struct page *page) -{ - VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); - page = compound_head(page); - return head_compound_pincount(page); -} - static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; +#ifdef CONFIG_64BIT page[1].compound_nr = 1U << order; +#endif } /* Returns the number of pages in this potentially compound page. */ @@ -923,7 +916,11 @@ static inline unsigned long compound_nr(struct page *page) { if (!PageHead(page)) return 1; +#ifdef CONFIG_64BIT return page[1].compound_nr; +#else + return 1UL << compound_order(page); +#endif } /* Returns the number of bytes in this potentially compound page. */ @@ -938,6 +935,37 @@ static inline unsigned int page_shift(struct page *page) return PAGE_SHIFT + compound_order(page); } +/** + * thp_order - Order of a transparent huge page. + * @page: Head page of a transparent huge page. + */ +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return compound_order(page); +} + +/** + * thp_nr_pages - The number of regular pages in this huge page. + * @page: The head page of a huge page. + */ +static inline int thp_nr_pages(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return compound_nr(page); +} + +/** + * thp_size - Size of a transparent huge page. + * @page: Head page of a transparent huge page. + * + * Return: Number of bytes in this page. 
+ */ +static inline unsigned long thp_size(struct page *page) +{ + return PAGE_SIZE << thp_order(page); +} + void free_compound_page(struct page *page); #ifdef CONFIG_MMU @@ -1089,59 +1117,35 @@ static inline bool is_zone_device_page(const struct page *page) } #endif +static inline bool folio_is_zone_device(const struct folio *folio) +{ + return is_zone_device_page(&folio->page); +} + static inline bool is_zone_movable_page(const struct page *page) { return page_zonenum(page) == ZONE_MOVABLE; } -#ifdef CONFIG_DEV_PAGEMAP_OPS -void free_devmap_managed_page(struct page *page); +#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) DECLARE_STATIC_KEY_FALSE(devmap_managed_key); -static inline bool page_is_devmap_managed(struct page *page) +bool __put_devmap_managed_page(struct page *page); +static inline bool put_devmap_managed_page(struct page *page) { if (!static_branch_unlikely(&devmap_managed_key)) return false; if (!is_zone_device_page(page)) return false; - switch (page->pgmap->type) { - case MEMORY_DEVICE_PRIVATE: - case MEMORY_DEVICE_FS_DAX: - return true; - default: - break; - } - return false; + return __put_devmap_managed_page(page); } -void put_devmap_managed_page(struct page *page); - -#else /* CONFIG_DEV_PAGEMAP_OPS */ -static inline bool page_is_devmap_managed(struct page *page) +#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ +static inline bool put_devmap_managed_page(struct page *page) { return false; } - -static inline void put_devmap_managed_page(struct page *page) -{ -} -#endif /* CONFIG_DEV_PAGEMAP_OPS */ - -static inline bool is_device_private_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_DEVICE_PRIVATE) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PRIVATE; -} - -static inline bool is_pci_p2pdma_page(const struct page *page) -{ - return IS_ENABLED(CONFIG_DEV_PAGEMAP_OPS) && - IS_ENABLED(CONFIG_PCI_P2PDMA) && - is_zone_device_page(page) && - page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; -} +#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */ /* 127: arbitrary random number, small enough to assemble well */ #define folio_ref_zero_or_close_to_overflow(folio) \ @@ -1167,9 +1171,6 @@ static inline void get_page(struct page *page) } bool __must_check try_grab_page(struct page *page, unsigned int flags); -struct page *try_grab_compound_head(struct page *page, int refs, - unsigned int flags); - static inline __must_check bool try_get_page(struct page *page) { @@ -1224,16 +1225,11 @@ static inline void put_page(struct page *page) struct folio *folio = page_folio(page); /* - * For devmap managed pages we need to catch refcount transition from - * 2 to 1, when refcount reach one it means the page is free and we - * need to inform the device driver through callback. See - * include/linux/memremap.h and HMM for details. + * For some devmap managed pages we need to catch refcount transition + * from 2 to 1: */ - if (page_is_devmap_managed(&folio->page)) { - put_devmap_managed_page(&folio->page); + if (put_devmap_managed_page(&folio->page)) return; - } - folio_put(folio); } @@ -1263,10 +1259,9 @@ static inline void put_page(struct page *page) * applications that don't have huge page reference counts, this won't be an * issue. * - * Locking: the lockless algorithm described in page_cache_get_speculative() - * and page_cache_gup_pin_speculative() provides safe operation for - * get_user_pages and page_mkclean and other calls that race to set up page - * table entries. 
+ * Locking: the lockless algorithm described in folio_try_get_rcu() + * provides safe operation for get_user_pages(), page_mkclean() and + * other calls that race to set up page table entries. */ #define GUP_PIN_COUNTING_BIAS (1U << 10) @@ -1277,70 +1272,11 @@ void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); -/** - * page_maybe_dma_pinned - Report if a page is pinned for DMA. - * @page: The page. - * - * This function checks if a page has been pinned via a call to - * a function in the pin_user_pages() family. - * - * For non-huge pages, the return value is partially fuzzy: false is not fuzzy, - * because it means "definitely not pinned for DMA", but true means "probably - * pinned for DMA, but possibly a false positive due to having at least - * GUP_PIN_COUNTING_BIAS worth of normal page references". - * - * False positives are OK, because: a) it's unlikely for a page to get that many - * refcounts, and b) all the callers of this routine are expected to be able to - * deal gracefully with a false positive. - * - * For huge pages, the result will be exactly correct. That's because we have - * more tracking data available: the 3rd struct page in the compound page is - * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS - * scheme). - * - * For more information, please see Documentation/core-api/pin_user_pages.rst. - * - * Return: True, if it is likely that the page has been "dma-pinned". - * False, if the page is definitely not dma-pinned. - */ -static inline bool page_maybe_dma_pinned(struct page *page) -{ - if (hpage_pincount_available(page)) - return compound_pincount(page) > 0; - - /* - * page_ref_count() is signed. If that refcount overflows, then - * page_ref_count() returns a negative value, and callers will avoid - * further incrementing the refcount. - * - * Here, for that overflow case, use the signed bit to count a little - * bit higher via unsigned math, and thus still get an accurate result. - */ - return ((unsigned int)page_ref_count(compound_head(page))) >= - GUP_PIN_COUNTING_BIAS; -} - static inline bool is_cow_mapping(vm_flags_t flags) { return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; } -/* - * This should most likely only be called during fork() to see whether we - * should break the cow immediately for a page on the src mm. - */ -static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, - struct page *page) -{ - if (!is_cow_mapping(vma->vm_flags)) - return false; - - if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) - return false; - - return page_maybe_dma_pinned(page); -} - #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP) #define SECTION_IN_PAGE_FLAGS #endif @@ -1585,6 +1521,74 @@ static inline unsigned long folio_pfn(struct folio *folio) return page_to_pfn(&folio->page); } +static inline atomic_t *folio_pincount_ptr(struct folio *folio) +{ + return &folio_page(folio, 1)->compound_pincount; +} + +/** + * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA. + * @folio: The folio. + * + * This function checks if a folio has been pinned via a call to + * a function in the pin_user_pages() family. 
+ * + * For small folios, the return value is partially fuzzy: false is not fuzzy, + * because it means "definitely not pinned for DMA", but true means "probably + * pinned for DMA, but possibly a false positive due to having at least + * GUP_PIN_COUNTING_BIAS worth of normal folio references". + * + * False positives are OK, because: a) it's unlikely for a folio to + * get that many refcounts, and b) all the callers of this routine are + * expected to be able to deal gracefully with a false positive. + * + * For large folios, the result will be exactly correct. That's because + * we have more tracking data available: the compound_pincount is used + * instead of the GUP_PIN_COUNTING_BIAS scheme. + * + * For more information, please see Documentation/core-api/pin_user_pages.rst. + * + * Return: True, if it is likely that the page has been "dma-pinned". + * False, if the page is definitely not dma-pinned. + */ +static inline bool folio_maybe_dma_pinned(struct folio *folio) +{ + if (folio_test_large(folio)) + return atomic_read(folio_pincount_ptr(folio)) > 0; + + /* + * folio_ref_count() is signed. If that refcount overflows, then + * folio_ref_count() returns a negative value, and callers will avoid + * further incrementing the refcount. + * + * Here, for that overflow case, use the sign bit to count a little + * bit higher via unsigned math, and thus still get an accurate result. + */ + return ((unsigned int)folio_ref_count(folio)) >= + GUP_PIN_COUNTING_BIAS; +} + +static inline bool page_maybe_dma_pinned(struct page *page) +{ + return folio_maybe_dma_pinned(page_folio(page)); +} + +/* + * This should most likely only be called during fork() to see whether we + * should break the cow immediately for a page on the src mm. + */ +static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, + struct page *page) +{ + if (!is_cow_mapping(vma->vm_flags)) + return false; + + if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags)) + return false; + + return page_maybe_dma_pinned(page); +} + /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION static inline bool is_pinnable_page(struct page *page) @@ -1599,6 +1603,11 @@ static inline bool is_pinnable_page(struct page *page) } #endif +static inline bool folio_is_pinnable(struct folio *folio) +{ + return is_pinnable_page(&folio->page); +} + static inline void set_page_zone(struct page *page, enum zone_type zone) { page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); @@ -1748,7 +1757,6 @@ static inline void *folio_address(const struct folio *folio) } extern void *page_rmapping(struct page *page); -extern struct anon_vma *page_anon_vma(struct page *page); extern pgoff_t __page_file_index(struct page *page); /* @@ -1854,7 +1862,6 @@ extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); int generic_error_remove_page(struct address_space *mapping, struct page *page); -int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, @@ -1916,10 +1923,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); -long get_user_pages_locked(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, int *locked); -long 
pin_user_pages_locked(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, int *locked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, @@ -1939,9 +1942,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); -extern void do_invalidatepage(struct page *page, unsigned int offset, - unsigned int length); - bool folio_mark_dirty(struct folio *folio); bool set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); @@ -2453,7 +2453,6 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) } extern void __init pagecache_init(void); -extern void __init free_area_init_memoryless_node(int nid); extern void free_initmem(void); /* @@ -2925,13 +2924,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO * and return without waiting upon it */ -#define FOLL_POPULATE 0x40 /* fault in pages (with FOLL_MLOCK) */ #define FOLL_NOFAULT 0x80 /* do not fault in pages */ #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ -#define FOLL_MLOCK 0x1000 /* lock present pages */ #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ #define FOLL_COW 0x4000 /* internal GUP flag */ #define FOLL_ANON 0x8000 /* don't do file mappings */ @@ -3151,10 +3148,12 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) } #endif +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP int vmemmap_remap_free(unsigned long start, unsigned long end, unsigned long reuse); int vmemmap_remap_alloc(unsigned long start, unsigned long end, unsigned long reuse, gfp_t gfp_mask); +#endif void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, @@ -3244,6 +3243,7 @@ enum mf_action_page_type { MF_MSG_BUDDY, MF_MSG_DAX, MF_MSG_UNSPLIT_THP, + MF_MSG_DIFFERENT_PAGE_SIZE, MF_MSG_UNKNOWN, }; @@ -3382,5 +3382,4 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start, } #endif -#endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_api.h b/include/linux/mm_api.h new file mode 100644 index 000000000000..a5ace2b198b8 --- /dev/null +++ b/include/linux/mm_api.h @@ -0,0 +1 @@ +#include <linux/mm.h> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index cf90b1fa2c60..ac32125745ab 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -99,7 +99,8 @@ void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio) update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); - list_add(&folio->lru, &lruvec->lists[lru]); + if (lru != LRU_UNEVICTABLE) + list_add(&folio->lru, &lruvec->lists[lru]); } static __always_inline void add_page_to_lru_list(struct page *page, @@ -115,6 +116,7 @@ void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio) update_lru_size(lruvec, lru, folio_zonenum(folio), folio_nr_pages(folio)); + /* This is not expected to be used on LRU_UNEVICTABLE */ list_add_tail(&folio->lru, &lruvec->lists[lru]); } 
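The mm_inline.h hunk above, with its lruvec_del_folio() counterpart below and the list_head/mlock_count union in the mm_types.h diff that follows, makes LRU_UNEVICTABLE a counter-only list: unevictable folios are accounted but never linked, since folio->lru now doubles as storage for the mlock count (the __filler word stays even so the overlaid compound_head word never reads as PageTail). A minimal sketch of the rule these hunks enforce (illustrative only; the function name is invented, not part of this diff):

static void sketch_unevictable_lru_rule(struct lruvec *lruvec, struct folio *folio)
{
	/*
	 * lruvec_add_folio() still updates the zone counters for
	 * LRU_UNEVICTABLE, but skips list_add(): the folio must never
	 * sit on lruvec->lists[LRU_UNEVICTABLE], because folio->lru
	 * now overlays { __filler, mlock_count }.
	 */
	lruvec_add_folio(lruvec, folio);

	/*
	 * Removal is symmetric: lruvec_del_folio() only calls
	 * list_del() when folio_lru_list(folio) != LRU_UNEVICTABLE,
	 * and always fixes up the counters.
	 */
	lruvec_del_folio(lruvec, folio);
}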
@@ -127,8 +129,11 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page, static __always_inline void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio) { - list_del(&folio->lru); - update_lru_size(lruvec, folio_lru_list(folio), folio_zonenum(folio), + enum lru_list lru = folio_lru_list(folio); + + if (lru != LRU_UNEVICTABLE) + list_del(&folio->lru); + update_lru_size(lruvec, lru, folio_zonenum(folio), -folio_nr_pages(folio)); } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0f549870da6a..8834e38c06a4 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -85,7 +85,16 @@ struct page { * lruvec->lru_lock. Sometimes used as a generic list * by the page owner. */ - struct list_head lru; + union { + struct list_head lru; + /* Or, for the Unevictable "LRU list" slot */ + struct { + /* Always even, to negate PageTail */ + void *__filler; + /* Count page's or folio's mlocks */ + unsigned int mlock_count; + }; + }; /* See page-flags.h for PAGE_MAPPING_FLAGS */ struct address_space *mapping; pgoff_t index; /* Our offset within mapping. */ @@ -126,11 +135,14 @@ struct page { unsigned char compound_dtor; unsigned char compound_order; atomic_t compound_mapcount; + atomic_t compound_pincount; +#ifdef CONFIG_64BIT unsigned int compound_nr; /* 1 << compound_order */ +#endif }; struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ - atomic_t hpage_pinned_refcount; + unsigned long _compound_pad_2; /* For both global and memcg */ struct list_head deferred_list; }; @@ -241,7 +253,13 @@ struct folio { struct { /* public: */ unsigned long flags; - struct list_head lru; + union { + struct list_head lru; + struct { + void *__filler; + unsigned int mlock_count; + }; + }; struct address_space *mapping; pgoff_t index; void *private; @@ -285,7 +303,7 @@ static inline atomic_t *compound_mapcount_ptr(struct page *page) static inline atomic_t *compound_pincount_ptr(struct page *page) { - return &page[2].hpage_pinned_refcount; + return &page[1].compound_pincount; } /* @@ -634,7 +652,7 @@ struct mm_struct { #endif struct work_struct async_put_work; -#ifdef CONFIG_IOMMU_SUPPORT +#ifdef CONFIG_IOMMU_SVA u32 pasid; #endif } __randomize_layout; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index aed44e9b5d89..962b14d403e8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -83,6 +83,17 @@ static inline bool is_migrate_movable(int mt) return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; } +/* + * Check whether a migratetype can be merged with another migratetype. + * + * It is only mergeable when it can fall back to other migratetypes for + * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c. 
+ */ +static inline bool migratetype_is_mergeable(int mt) +{ + return mt < MIGRATE_PCPTYPES; +} + #define for_each_migratetype_order(order, type) \ for (order = 0; order < MAX_ORDER; order++) \ for (type = 0; type < MIGRATE_TYPES; type++) @@ -211,6 +222,9 @@ enum node_stat_item { #ifdef CONFIG_SWAP NR_SWAPCACHE, #endif +#ifdef CONFIG_NUMA_BALANCING + PGPROMOTE_SUCCESS, /* promote successfully */ +#endif NR_VM_NODE_STAT_ITEMS }; @@ -339,6 +353,7 @@ enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, + WMARK_PROMO, NR_WMARK }; @@ -920,12 +935,6 @@ typedef struct pglist_data { #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) #define node_spanned_pages(nid) (NODE_DATA(nid)->node_spanned_pages) -#ifdef CONFIG_FLATMEM -#define pgdat_page_nr(pgdat, pagenr) ((pgdat)->node_mem_map + (pagenr)) -#else -#define pgdat_page_nr(pgdat, pagenr) pfn_to_page((pgdat)->node_start_pfn + (pagenr)) -#endif -#define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) @@ -1101,7 +1110,6 @@ static inline struct pglist_data *NODE_DATA(int nid) { return &contig_page_data; } -#define NODE_MEM_MAP(nid) mem_map #else /* CONFIG_NUMA */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 4bb71979a8fd..5da5d990ff58 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -211,7 +211,7 @@ struct css_device_id { kernel_ulong_t driver_data; }; -#define ACPI_ID_LEN 9 +#define ACPI_ID_LEN 16 struct acpi_device_id { __u8 id[ACPI_ID_LEN]; diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 1ffa933121f6..151607e9d64a 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -188,9 +188,6 @@ struct module; /* only needed for owner field in mtd_info */ */ struct mtd_debug_info { struct dentry *dfs_dir; - - const char *partname; - const char *partid; }; /** @@ -711,7 +708,11 @@ static inline int mtd_is_bitflip_or_eccerr(int err) { unsigned mtd_mmap_capabilities(struct mtd_info *mtd); -extern char *mtd_expert_analysis_warning; -extern bool mtd_expert_analysis_mode; +#ifdef CONFIG_DEBUG_FS +bool mtd_check_expert_analysis_mode(void); +#else +static inline bool mtd_check_expert_analysis_mode(void) { return false; } +#endif + #endif /* __MTD_MTD_H__ */ diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h new file mode 100644 index 000000000000..b125926e458c --- /dev/null +++ b/include/linux/mtd/nand-ecc-mxic.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright © 2019 Macronix + * Author: Miquèl Raynal <miquel.raynal@bootlin.com> + * + * Header for the Macronix external ECC engine. 
+ */ + +#ifndef __MTD_NAND_ECC_MXIC_H__ +#define __MTD_NAND_ECC_MXIC_H__ + +#include <linux/platform_device.h> +#include <linux/device.h> + +struct mxic_ecc_engine; + +#if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE) + +struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void); +struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev); +void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng); +int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, + unsigned int direction, dma_addr_t dirmap); + +#else /* !CONFIG_MTD_NAND_ECC_MXIC */ + +static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) +{ + return NULL; +} + +static inline struct nand_ecc_engine * +mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng) {} + +static inline int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, + unsigned int direction, + dma_addr_t dirmap) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_MTD_NAND_ECC_MXIC */ + +#endif /* __MTD_NAND_ECC_MXIC_H__ */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 32fc7edf65b3..c3693bb87b4c 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -264,11 +264,35 @@ struct nand_ecc_engine_ops { }; /** + * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated + * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value + * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly + * correction, does not need to copy + * data around + * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the + * data into its own area before use + */ +enum nand_ecc_engine_integration { + NAND_ECC_ENGINE_INTEGRATION_INVALID, + NAND_ECC_ENGINE_INTEGRATION_PIPELINED, + NAND_ECC_ENGINE_INTEGRATION_EXTERNAL, +}; + +/** * struct nand_ecc_engine - ECC engine abstraction for NAND devices + * @dev: Host device + * @node: Private field for registration time * @ops: ECC engine operations + * @integration: How the engine is integrated with the host + * (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines) + * @priv: Private data */ struct nand_ecc_engine { + struct device *dev; + struct list_head node; struct nand_ecc_engine_ops *ops; + enum nand_ecc_engine_integration integration; + void *priv; }; void of_get_nand_ecc_user_config(struct nand_device *nand); @@ -279,8 +303,28 @@ int nand_ecc_prepare_io_req(struct nand_device *nand, int nand_ecc_finish_io_req(struct nand_device *nand, struct nand_page_io_req *req); bool nand_ecc_is_strong_enough(struct nand_device *nand); + +#if IS_REACHABLE(CONFIG_MTD_NAND_CORE) +int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine); +int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine); +#else +static inline int +nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine) +{ + return -ENOTSUPP; +} +static inline int +nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine) +{ + return -ENOTSUPP; +} +#endif + struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand); struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand); +struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand); +void nand_ecc_put_on_host_hw_engine(struct nand_device *nand); +struct device *nand_ecc_get_engine_dev(struct device *host); #if 
IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING) struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void); @@ -962,6 +1006,11 @@ int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); int nanddev_ecc_engine_init(struct nand_device *nand); void nanddev_ecc_engine_cleanup(struct nand_device *nand); +static inline void *nand_to_ecc_ctx(struct nand_device *nand) +{ + return nand->ecc.ctx.priv; +} + /* BBT related functions */ enum nand_bbt_block_status { NAND_BBT_BLOCK_STATUS_UNKNOWN, diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 5b88cd51fadb..dcf90144d70b 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -1240,6 +1240,7 @@ struct nand_secure_region { * @lock: Lock protecting the suspended field. Also used to serialize accesses * to the NAND device * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @resume_wq: wait queue to sleep if rawnand is in suspended state. * @cur_cs: Currently selected target. -1 means no target selected, otherwise we * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). * NAND Controller drivers should not modify this value, but they're @@ -1294,6 +1295,7 @@ struct nand_chip { /* Internals */ struct mutex lock; unsigned int suspended : 1; + wait_queue_head_t resume_wq; int cur_cs; int read_retries; struct nand_secure_region *secure_regions; diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index fc90fce26e33..5e25a7b75ae2 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -47,8 +47,6 @@ #define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ #define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ #define SPINOR_OP_RDCR 0x35 /* Read configuration register */ -#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ -#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ #define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ #define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ #define SPINOR_OP_SRSTEN 0x66 /* Software Reset Enable */ @@ -86,22 +84,12 @@ #define SPINOR_OP_BP 0x02 /* Byte program */ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ -/* Used for S3AN flashes only */ -#define SPINOR_OP_XSE 0x50 /* Sector erase */ -#define SPINOR_OP_XPP 0x82 /* Page program */ -#define SPINOR_OP_XRDSR 0xd7 /* Read status register */ - -#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ -#define XSR_RDY BIT(7) /* Ready */ - - /* Used for Macronix and Winbond flashes. */ #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ #define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ /* Used for Spansion flashes only. */ #define SPINOR_OP_BRWR 0x17 /* Bank register write */ -#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ /* Used for Micron flashes only. */ #define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ @@ -135,12 +123,6 @@ /* Enhanced Volatile Configuration Register bits */ #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ -/* Flag Status Register bits */ -#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ -#define FSR_E_ERR BIT(5) /* Erase operation status */ -#define FSR_P_ERR BIT(4) /* Program operation status */ -#define FSR_PT_ERR BIT(1) /* Protection error bit */ - /* Status Register 2 bits. 
*/ #define SR2_QUAD_EN_BIT1 BIT(1) #define SR2_LB1 BIT(3) /* Security Register Lock Bit 1 */ diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 6988956b8492..3aa28240a77f 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -389,6 +389,8 @@ struct spinand_info { struct spinand_dirmap { struct spi_mem_dirmap_desc *wdesc; struct spi_mem_dirmap_desc *rdesc; + struct spi_mem_dirmap_desc *wdesc_ecc; + struct spi_mem_dirmap_desc *rdesc_ecc; }; /** diff --git a/include/linux/mutex_api.h b/include/linux/mutex_api.h new file mode 100644 index 000000000000..85ab9491e13e --- /dev/null +++ b/include/linux/mutex_api.h @@ -0,0 +1 @@ +#include <linux/mutex.h> diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h index 6b3267b49755..ed42bd5f639f 100644 --- a/include/linux/net/intel/i40e_client.h +++ b/include/linux/net/intel/i40e_client.h @@ -26,11 +26,6 @@ struct i40e_client_version { u8 rsvd; }; -enum i40e_client_state { - __I40E_CLIENT_NULL, - __I40E_CLIENT_REGISTERED -}; - enum i40e_client_instance_state { __I40E_CLIENT_INSTANCE_NONE, __I40E_CLIENT_INSTANCE_OPENED, @@ -190,11 +185,6 @@ struct i40e_client { const struct i40e_client_ops *ops; /* client ops provided by the client */ }; -static inline bool i40e_client_is_registered(struct i40e_client *client) -{ - return test_bit(__I40E_CLIENT_REGISTERED, &client->state); -} - void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client); void i40e_client_device_unregister(struct i40e_info *ldev); diff --git a/include/linux/net/intel/iidc.h b/include/linux/net/intel/iidc.h index 1289593411d3..1c1332e4df26 100644 --- a/include/linux/net/intel/iidc.h +++ b/include/linux/net/intel/iidc.h @@ -32,6 +32,8 @@ enum iidc_rdma_protocol { }; #define IIDC_MAX_USER_PRIORITY 8 +#define IIDC_MAX_DSCP_MAPPING 64 +#define IIDC_DSCP_PFC_MODE 0x1 /* Struct to hold per RDMA Qset info */ struct iidc_rdma_qset_params { @@ -60,6 +62,8 @@ struct iidc_qos_params { u8 vport_relative_bw; u8 vport_priority_type; u8 num_tc; + u8 pfc_mode; + u8 dscp_map[IIDC_MAX_DSCP_MAPPING]; }; struct iidc_event { diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8b5a314db167..cd7a597c55b1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -28,6 +28,7 @@ #include <linux/prefetch.h> #include <asm/cache.h> #include <asm/byteorder.h> +#include <asm/local.h> #include <linux/percpu.h> #include <linux/rculist.h> @@ -194,6 +195,14 @@ struct net_device_stats { unsigned long tx_compressed; }; +/* per-cpu stats, allocated on demand. + * Try to fit them in a single cache line, for dev_get_stats() sake. 
+ */ +struct net_device_core_stats { + local_t rx_dropped; + local_t tx_dropped; + local_t rx_nohandler; +} __aligned(4 * sizeof(local_t)); #include <linux/cache.h> #include <linux/skbuff.h> @@ -1735,12 +1744,8 @@ enum netdev_ml_priv_type { * @stats: Statistics struct, which was left as a legacy, use * rtnl_link_stats64 instead * - * @rx_dropped: Dropped packets by core network, - * do not use this in drivers - * @tx_dropped: Dropped packets by core network, + * @core_stats: core networking counters, * do not use this in drivers - * @rx_nohandler: nohandler dropped packets by core network on - * inactive devices, do not use this in drivers * @carrier_up_count: Number of times the carrier has been up * @carrier_down_count: Number of times the carrier has been down * @@ -1894,6 +1899,8 @@ enum netdev_ml_priv_type { * @garp_port: GARP * @mrp_port: MRP * + * @dm_private: Drop monitor private + * * @dev: Class/net/name entry * @sysfs_groups: Space for optional device, statistics and wireless * sysfs groups @@ -1948,6 +1955,9 @@ enum netdev_ml_priv_type { * @dev_addr_shadow: Copy of @dev_addr to catch direct writes. * @linkwatch_dev_tracker: refcount tracker used by linkwatch. * @watchdog_dev_tracker: refcount tracker used by watchdog. + * @dev_registered_tracker: tracker for reference held while + * registered + * @offload_xstats_l3: L3 HW stats for this netdevice. * * FIXME: cleanup struct net_device such that network protocol info * moves out. @@ -2020,9 +2030,7 @@ struct net_device { struct net_device_stats stats; /* not used by modern drivers */ - atomic_long_t rx_dropped; - atomic_long_t tx_dropped; - atomic_long_t rx_nohandler; + struct net_device_core_stats __percpu *core_stats; /* Stats to monitor link on/off, flapping */ atomic_t carrier_up_count; @@ -2234,7 +2242,9 @@ struct net_device { #if IS_ENABLED(CONFIG_MRP) struct mrp_port __rcu *mrp_port; #endif - +#if IS_ENABLED(CONFIG_NET_DROP_MONITOR) + struct dm_hw_stat_delta __rcu *dm_private; +#endif struct device dev; const struct attribute_group *sysfs_groups[4]; const struct attribute_group *sysfs_rx_queue_group; @@ -2282,6 +2292,8 @@ struct net_device { u8 dev_addr_shadow[MAX_ADDR_LEN]; netdevice_tracker linkwatch_dev_tracker; netdevice_tracker watchdog_dev_tracker; + netdevice_tracker dev_registered_tracker; + struct rtnl_hw_stats64 *offload_xstats_l3; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2722,6 +2734,10 @@ enum netdev_cmd { NETDEV_CVLAN_FILTER_DROP_INFO, NETDEV_SVLAN_FILTER_PUSH_INFO, NETDEV_SVLAN_FILTER_DROP_INFO, + NETDEV_OFFLOAD_XSTATS_ENABLE, + NETDEV_OFFLOAD_XSTATS_DISABLE, + NETDEV_OFFLOAD_XSTATS_REPORT_USED, + NETDEV_OFFLOAD_XSTATS_REPORT_DELTA, }; const char *netdev_cmd_to_name(enum netdev_cmd cmd); @@ -2772,6 +2788,42 @@ struct netdev_notifier_pre_changeaddr_info { const unsigned char *dev_addr; }; +enum netdev_offload_xstats_type { + NETDEV_OFFLOAD_XSTATS_TYPE_L3 = 1, +}; + +struct netdev_notifier_offload_xstats_info { + struct netdev_notifier_info info; /* must be first */ + enum netdev_offload_xstats_type type; + + union { + /* NETDEV_OFFLOAD_XSTATS_REPORT_DELTA */ + struct netdev_notifier_offload_xstats_rd *report_delta; + /* NETDEV_OFFLOAD_XSTATS_REPORT_USED */ + struct netdev_notifier_offload_xstats_ru *report_used; + }; +}; + +int netdev_offload_xstats_enable(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct netlink_ext_ack *extack); +int netdev_offload_xstats_disable(struct net_device *dev, + enum netdev_offload_xstats_type type); +bool 
netdev_offload_xstats_enabled(const struct net_device *dev, + enum netdev_offload_xstats_type type); +int netdev_offload_xstats_get(struct net_device *dev, + enum netdev_offload_xstats_type type, + struct rtnl_hw_stats64 *stats, bool *used, + struct netlink_ext_ack *extack); +void +netdev_offload_xstats_report_delta(struct netdev_notifier_offload_xstats_rd *rd, + const struct rtnl_hw_stats64 *stats); +void +netdev_offload_xstats_report_used(struct netdev_notifier_offload_xstats_ru *ru); +void netdev_offload_xstats_push_delta(struct net_device *dev, + enum netdev_offload_xstats_type type, + const struct rtnl_hw_stats64 *stats); + static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, struct net_device *dev) { @@ -3614,7 +3666,6 @@ static inline unsigned int get_netdev_rx_queue_index( } #endif -#define DEFAULT_MAX_NUM_RSS_QUEUES (8) int netif_get_num_default_rss_queues(void); enum skb_free_reason { @@ -3669,8 +3720,8 @@ u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); -int netif_rx_ni(struct sk_buff *skb); -int netif_rx_any_context(struct sk_buff *skb); +int __netif_rx(struct sk_buff *skb); + int netif_receive_skb(struct sk_buff *skb); int netif_receive_skb_core(struct sk_buff *skb); void netif_receive_skb_list_internal(struct list_head *head); @@ -3792,13 +3843,42 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev, return false; } +struct net_device_core_stats *netdev_core_stats_alloc(struct net_device *dev); + +static inline struct net_device_core_stats *dev_core_stats(struct net_device *dev) +{ + /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */ + struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats); + + if (likely(p)) + return this_cpu_ptr(p); + + return netdev_core_stats_alloc(dev); +} + +#define DEV_CORE_STATS_INC(FIELD) \ +static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \ +{ \ + struct net_device_core_stats *p; \ + \ + preempt_disable(); \ + p = dev_core_stats(dev); \ + \ + if (p) \ + local_inc(&p->FIELD); \ + preempt_enable(); \ +} +DEV_CORE_STATS_INC(rx_dropped) +DEV_CORE_STATS_INC(tx_dropped) +DEV_CORE_STATS_INC(rx_nohandler) + static __always_inline int ____dev_forward_skb(struct net_device *dev, struct sk_buff *skb, const bool check_mtu) { if (skb_orphan_frags(skb, GFP_ATOMIC) || unlikely(!__is_skb_forwardable(dev, skb, check_mtu))) { - atomic_long_inc(&dev->rx_dropped); + dev_core_stats_rx_dropped_inc(dev); kfree_skb(skb); return NET_RX_DROP; } @@ -3817,14 +3897,7 @@ extern unsigned int netdev_budget_usecs; /* Called by rtnetlink.c:rtnl_unlock() */ void netdev_run_todo(void); -/** - * dev_put - release reference to device - * @dev: network device - * - * Release reference to device to allow it to be freed. - * Try using dev_put_track() instead. - */ -static inline void dev_put(struct net_device *dev) +static inline void __dev_put(struct net_device *dev) { if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT @@ -3835,14 +3908,7 @@ static inline void dev_put(struct net_device *dev) } } -/** - * dev_hold - get reference to device - * @dev: network device - * - * Hold reference to device to keep it from being freed. - * Try using dev_hold_track() instead. 
- */ -static inline void dev_hold(struct net_device *dev) +static inline void __dev_hold(struct net_device *dev) { if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT @@ -3853,11 +3919,24 @@ static inline void dev_hold(struct net_device *dev) } } +static inline void __netdev_tracker_alloc(struct net_device *dev, + netdevice_tracker *tracker, + gfp_t gfp) +{ +#ifdef CONFIG_NET_DEV_REFCNT_TRACKER + ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); +#endif +} + +/* netdev_tracker_alloc() can upgrade a prior untracked reference + * taken by dev_get_by_name()/dev_get_by_index() to a tracked one. + */ static inline void netdev_tracker_alloc(struct net_device *dev, netdevice_tracker *tracker, gfp_t gfp) { #ifdef CONFIG_NET_DEV_REFCNT_TRACKER - ref_tracker_alloc(&dev->refcnt_tracker, tracker, gfp); + refcount_dec(&dev->refcnt_tracker.no_tracker); + __netdev_tracker_alloc(dev, tracker, gfp); #endif } @@ -3873,8 +3952,8 @@ static inline void dev_hold_track(struct net_device *dev, netdevice_tracker *tracker, gfp_t gfp) { if (dev) { - dev_hold(dev); - netdev_tracker_alloc(dev, tracker, gfp); + __dev_hold(dev); + __netdev_tracker_alloc(dev, tracker, gfp); } } @@ -3883,10 +3962,34 @@ static inline void dev_put_track(struct net_device *dev, { if (dev) { netdev_tracker_free(dev, tracker); - dev_put(dev); + __dev_put(dev); } } +/** + * dev_hold - get reference to device + * @dev: network device + * + * Hold reference to device to keep it from being freed. + * Try using dev_hold_track() instead. + */ +static inline void dev_hold(struct net_device *dev) +{ + dev_hold_track(dev, NULL, GFP_ATOMIC); +} + +/** + * dev_put - release reference to device + * @dev: network device + * + * Release reference to device to allow it to be freed. + * Try using dev_put_track() instead. + */ +static inline void dev_put(struct net_device *dev) +{ + dev_put_track(dev, NULL); +} + static inline void dev_replace_track(struct net_device *odev, struct net_device *ndev, netdevice_tracker *tracker, @@ -3895,11 +3998,11 @@ static inline void dev_replace_track(struct net_device *odev, if (odev) netdev_tracker_free(odev, tracker); - dev_hold(ndev); - dev_put(odev); + __dev_hold(ndev); + __dev_put(odev); if (ndev) - netdev_tracker_alloc(ndev, tracker, gfp); + __netdev_tracker_alloc(ndev, tracker, gfp); } /* Carrier loss detection, dial on demand. 
The functions netif_carrier_on @@ -4602,6 +4705,8 @@ int skb_csum_hwoffload_help(struct sk_buff *skb, struct sk_buff *__skb_gso_segment(struct sk_buff *skb, netdev_features_t features, bool tx_path); +struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb, + netdev_features_t features, __be16 type); struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, netdev_features_t features); diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 15e71bfff726..c2c6f332fb90 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -379,6 +379,7 @@ struct nf_nat_hook { unsigned int (*manip_pkt)(struct sk_buff *skb, struct nf_conn *ct, enum nf_nat_manip_type mtype, enum ip_conntrack_dir dir); + void (*remove_nat_bysrc)(struct nf_conn *ct); }; extern const struct nf_nat_hook __rcu *nf_nat_hook; diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index a28aa289afdc..c3bdb4370938 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -300,26 +300,22 @@ union pptp_ctrl_union { struct PptpSetLinkInfo setlink; }; -extern int -(*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, - struct nf_conn *ct, enum ip_conntrack_info ctinfo, - unsigned int protoff, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -extern int -(*nf_nat_pptp_hook_inbound)(struct sk_buff *skb, - struct nf_conn *ct, enum ip_conntrack_info ctinfo, - unsigned int protoff, - struct PptpControlHeader *ctlh, - union pptp_ctrl_union *pptpReq); - -extern void -(*nf_nat_pptp_hook_exp_gre)(struct nf_conntrack_expect *exp_orig, - struct nf_conntrack_expect *exp_reply); - -extern void -(*nf_nat_pptp_hook_expectfn)(struct nf_conn *ct, - struct nf_conntrack_expect *exp); +struct nf_nat_pptp_hook { + int (*outbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + int (*inbound)(struct sk_buff *skb, + struct nf_conn *ct, enum ip_conntrack_info ctinfo, + unsigned int protoff, + struct PptpControlHeader *ctlh, + union pptp_ctrl_union *pptpReq); + void (*exp_gre)(struct nf_conntrack_expect *exp_orig, + struct nf_conntrack_expect *exp_reply); + void (*expectfn)(struct nf_conn *ct, + struct nf_conntrack_expect *exp); +}; +extern const struct nf_nat_pptp_hook __rcu *nf_nat_pptp_hook; #endif /* _NF_CONNTRACK_PPTP_H */ diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 1ec631838af9..bda1c385cffb 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -135,15 +135,6 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, extack->cookie_len = sizeof(cookie); } -static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack, - u32 cookie) -{ - if (!extack) - return; - memcpy(extack->cookie, &cookie, sizeof(cookie)); - extack->cookie_len = sizeof(cookie); -} - void netlink_kernel_release(struct sock *sk); int __netlink_change_ngroups(struct sock *sk, unsigned int groups); int netlink_change_ngroups(struct sock *sk, unsigned int groups); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 68f81d8d36de..784120cc217e 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -583,7 +583,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned extern int nfs_sync_inode(struct inode *inode); extern int nfs_wb_all(struct inode *inode); extern int nfs_wb_page(struct inode *inode, 
struct page *page); -extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); +int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index ca0959e51e81..6aa2a200676a 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -138,6 +138,7 @@ struct nfs_server { struct nlm_host *nlm_host; /* NLM client handle */ struct nfs_iostats __percpu *io_stats; /* I/O statistics */ atomic_long_t writeback; /* number of writeback pages */ + unsigned int write_congested;/* flag set when writeback gets too high */ unsigned int flags; /* various flags */ /* The following are for internal use only. Also see uapi/linux/nfs_mount.h */ diff --git a/include/linux/node.h b/include/linux/node.h index bb21fd631b16..40d641a8bfb0 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -99,19 +99,20 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA) -void link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context); +void register_memory_blocks_under_node(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else -static inline void link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context) +static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context) { } #endif extern void unregister_node(struct node *node); #ifdef CONFIG_NUMA +extern void node_dev_init(void); /* Core of the node registration - only memory hotplug should use this */ extern int __register_one_node(int nid); @@ -128,8 +129,8 @@ static inline int register_one_node(int nid) error = __register_one_node(nid); if (error) return error; - /* link memory sections under this node */ - link_mem_sections(nid, start_pfn, end_pfn, MEMINIT_EARLY); + register_memory_blocks_under_node(nid, start_pfn, end_pfn, + MEMINIT_EARLY); } return error; @@ -149,6 +150,9 @@ extern void register_hugetlbfs_with_node(node_registration_func_t doregister, node_registration_func_t unregister); #endif #else +static inline void node_dev_init(void) +{ +} static inline int __register_one_node(int nid) { return 0; @@ -181,4 +185,9 @@ static inline void register_hugetlbfs_with_node(node_registration_func_t reg, #define to_node(device) container_of(device, struct node, dev) +static inline bool node_is_toptier(int node) +{ + return node_state(node, N_CPU); +} + #endif /* _LINUX_NODE_H_ */ diff --git a/include/linux/of.h b/include/linux/of.h index 2dc77430a91a..04971e85fbc9 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -388,9 +388,6 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it, extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); extern int of_alias_get_id(struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); -extern int of_alias_get_alias_list(const struct of_device_id *matches, - const char *stem, unsigned long *bitmap, - unsigned int nbits); extern int of_machine_is_compatible(const char *compat); @@ -766,13 +763,6 @@ static inline int of_alias_get_highest_id(const char *stem) return 
-ENOSYS; } -static inline int of_alias_get_alias_list(const struct of_device_id *matches, - const char *stem, unsigned long *bitmap, - unsigned int nbits) -{ - return -ENOSYS; -} - static inline int of_machine_is_compatible(const char *compat) { return 0; diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 4669632bd72b..f1221d11f8e5 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -4,6 +4,7 @@ #include <linux/compiler.h> #include <linux/limits.h> +#include <linux/const.h> /* * We need to compute the minimum and maximum values representable in a given @@ -118,81 +119,94 @@ static inline bool __must_check __must_check_overflow(bool overflow) })) /** - * array_size() - Calculate size of 2-dimensional array. - * - * @a: dimension one - * @b: dimension two + * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX * - * Calculates size of 2-dimensional array: @a * @b. + * @factor1: first factor + * @factor2: second factor * - * Returns: number of bytes needed to represent the array or SIZE_MAX on - * overflow. + * Returns: calculate @factor1 * @factor2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t array_size(size_t a, size_t b) +static inline size_t __must_check size_mul(size_t factor1, size_t factor2) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) + if (check_mul_overflow(factor1, factor2, &bytes)) return SIZE_MAX; return bytes; } /** - * array3_size() - Calculate size of 3-dimensional array. + * size_add() - Calculate size_t addition with saturation at SIZE_MAX * - * @a: dimension one - * @b: dimension two - * @c: dimension three - * - * Calculates size of 3-dimensional array: @a * @b * @c. + * @addend1: first addend + * @addend2: second addend * - * Returns: number of bytes needed to represent the array or SIZE_MAX on - * overflow. + * Returns: calculate @addend1 + @addend2, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. The + * lvalue must be size_t to avoid implicit type conversion. */ -static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) +static inline size_t __must_check size_add(size_t addend1, size_t addend2) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_mul_overflow(bytes, c, &bytes)) + if (check_add_overflow(addend1, addend2, &bytes)) return SIZE_MAX; return bytes; } -/* - * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for - * struct_size() below. +/** + * size_sub() - Calculate size_t subtraction with saturation at SIZE_MAX + * + * @minuend: value to subtract from + * @subtrahend: value to subtract from @minuend + * + * Returns: calculate @minuend - @subtrahend, both promoted to size_t, + * with any overflow causing the return value to be SIZE_MAX. For + * composition with the size_add() and size_mul() helpers, neither + * argument may be SIZE_MAX (or the result will be forced to SIZE_MAX). + * The lvalue must be size_t to avoid implicit type conversion.
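+ *
+ * Illustrative composition (an editorial sketch, not part of this patch;
+ * nmemb, used and struct item are hypothetical names):
+ *
+ *	size_t remaining = size_sub(size_mul(nmemb, sizeof(struct item)), used);
+ *
+ * If the multiplication overflows, or if @used exceeds the product, the
+ * result saturates at SIZE_MAX instead of wrapping around to a small value.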
*/ -static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) +static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend) { size_t bytes; - if (check_mul_overflow(a, b, &bytes)) - return SIZE_MAX; - if (check_add_overflow(bytes, c, &bytes)) + if (minuend == SIZE_MAX || subtrahend == SIZE_MAX || + check_sub_overflow(minuend, subtrahend, &bytes)) return SIZE_MAX; return bytes; } /** - * struct_size() - Calculate size of structure with trailing array. - * @p: Pointer to the structure. - * @member: Name of the array member. - * @count: Number of elements in the array. + * array_size() - Calculate size of 2-dimensional array. * - * Calculates size of memory needed for structure @p followed by an - * array of @count number of @member elements. + * @a: dimension one + * @b: dimension two * - * Return: number of bytes needed or SIZE_MAX on overflow. + * Calculates size of 2-dimensional array: @a * @b. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. */ -#define struct_size(p, member, count) \ - __ab_c_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member),\ - sizeof(*(p))) +#define array_size(a, b) size_mul(a, b) + +/** + * array3_size() - Calculate size of 3-dimensional array. + * + * @a: dimension one + * @b: dimension two + * @c: dimension three + * + * Calculates size of 3-dimensional array: @a * @b * @c. + * + * Returns: number of bytes needed to represent the array or SIZE_MAX on + * overflow. + */ +#define array3_size(a, b, c) size_mul(size_mul(a, b), c) /** * flex_array_size() - Calculate size of a flexible array member @@ -208,7 +222,25 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * Return: number of bytes needed or SIZE_MAX on overflow. */ #define flex_array_size(p, member, count) \ - array_size(count, \ - sizeof(*(p)->member) + __must_be_array((p)->member)) + __builtin_choose_expr(__is_constexpr(count), \ + (count) * sizeof(*(p)->member) + __must_be_array((p)->member), \ + size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member))) + +/** + * struct_size() - Calculate size of structure with trailing flexible array. + * + * @p: Pointer to the structure. + * @member: Name of the array member. + * @count: Number of elements in the array. + * + * Calculates size of memory needed for structure @p followed by an + * array of @count number of @member elements. + * + * Return: number of bytes needed or SIZE_MAX on overflow. 
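+ *
+ * Illustrative use (an editorial sketch, not part of this patch; struct blob
+ * and the variables are hypothetical):
+ *
+ *	struct blob { size_t count; u32 items[]; };
+ *	struct blob *p;
+ *
+ *	p = kmalloc(struct_size(p, items, n), GFP_KERNEL);
+ *
+ * This requests sizeof(struct blob) + n * sizeof(u32) bytes; an overflowing
+ * @n saturates the size to SIZE_MAX so the allocation fails cleanly rather
+ * than being undersized.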
+ */ +#define struct_size(p, member, count) \ + __builtin_choose_expr(__is_constexpr(count), \ + sizeof(*(p)) + flex_array_size(p, member, count), \ + size_add(sizeof(*(p)), flex_array_size(p, member, count))) #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 1c3b6e5c8bfd..9d8eeaa67d05 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -190,13 +190,81 @@ enum pageflags { #ifndef __GENERATING_BOUNDS_H +#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP +DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, + hugetlb_free_vmemmap_enabled_key); + +static __always_inline bool hugetlb_free_vmemmap_enabled(void) +{ + return static_branch_maybe(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON, + &hugetlb_free_vmemmap_enabled_key); +} + +/* + * If the feature of freeing some vmemmap pages associated with each HugeTLB + * page is enabled, the head vmemmap page frame is reused and all of the tail + * vmemmap addresses map to the head vmemmap page frame (further details can + * be found in the figure at the head of mm/hugetlb_vmemmap.c). In other + * words, there is more than one page struct with PG_head associated with each + * HugeTLB page. We __know__ that there is only one head page struct; the tail + * page structs with PG_head are fake head page structs. We need an approach + * to distinguish between those two different types of page structs so that + * compound_head() can return the real head page struct when the parameter is + * the tail page struct but with PG_head. + * + * The page_fixed_fake_head() returns the real head page struct if @page is + * a fake page head, otherwise, returns @page which can either be a true page + * head or tail. + */ +static __always_inline const struct page *page_fixed_fake_head(const struct page *page) +{ + if (!hugetlb_free_vmemmap_enabled()) + return page; + + /* + * Only addresses aligned with PAGE_SIZE of struct page may be fake head + * struct page. The alignment check aims to avoid accessing the fields + * (e.g. compound_head) of @page[1]. It also avoids touching a (possibly) + * cold cacheline in some cases. + */ + if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) && + test_bit(PG_head, &page->flags)) { + /* + * We can safely access the field of @page[1] with PG_head + * because the @page is a compound page composed of at least + * two contiguous pages.
+ */ + unsigned long head = READ_ONCE(page[1].compound_head); + + if (likely(head & 1)) + return (const struct page *)(head - 1); + } + return page; +} +#else +static inline const struct page *page_fixed_fake_head(const struct page *page) +{ + return page; +} + +static inline bool hugetlb_free_vmemmap_enabled(void) +{ + return false; +} +#endif + +static __always_inline int page_is_fake_head(struct page *page) +{ + return page_fixed_fake_head(page) != page; +} + static inline unsigned long _compound_head(const struct page *page) { unsigned long head = READ_ONCE(page->compound_head); if (unlikely(head & 1)) return head - 1; - return (unsigned long)page; + return (unsigned long)page_fixed_fake_head(page); } #define compound_head(page) ((typeof(page))_compound_head(page)) @@ -231,12 +299,13 @@ static inline unsigned long _compound_head(const struct page *page) static __always_inline int PageTail(struct page *page) { - return READ_ONCE(page->compound_head) & 1; + return READ_ONCE(page->compound_head) & 1 || page_is_fake_head(page); } static __always_inline int PageCompound(struct page *page) { - return test_bit(PG_head, &page->flags) || PageTail(page); + return test_bit(PG_head, &page->flags) || + READ_ONCE(page->compound_head) & 1; } #define PAGE_POISON_PATTERN -1l @@ -412,7 +481,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } TESTSETFLAG_FALSE(uname, lname) TESTCLEARFLAG_FALSE(uname, lname) __PAGEFLAG(Locked, locked, PF_NO_TAIL) -PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) +PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) PAGEFLAG(Referenced, referenced, PF_HEAD) TESTCLEARFLAG(Referenced, referenced, PF_HEAD) @@ -695,7 +764,20 @@ static inline bool test_set_page_writeback(struct page *page) return set_page_writeback(page); } -__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY) +static __always_inline bool folio_test_head(struct folio *folio) +{ + return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY)); +} + +static __always_inline int PageHead(struct page *page) +{ + PF_POISONED_CHECK(page); + return test_bit(PG_head, &page->flags) && !page_is_fake_head(page); +} + +__SETPAGEFLAG(Head, head, PF_ANY) +__CLEARPAGEFLAG(Head, head, PF_ANY) +CLEARPAGEFLAG(Head, head, PF_ANY) /** * folio_test_large() - Does this folio contain more than one page? @@ -918,7 +1000,7 @@ PAGE_TYPE_OPS(Guard, guard) extern bool is_free_buddy_page(struct page *page); -__PAGEFLAG(Isolated, isolated, PF_ANY); +PAGEFLAG(Isolated, isolated, PF_ANY); #ifdef CONFIG_MMU #define __PG_MLOCKED (1UL << PG_mlocked) diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 973fd731a520..83c7248053a1 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -37,8 +37,11 @@ extern unsigned int pageblock_order; #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ -/* Huge pages are a constant size */ -#define pageblock_order HUGETLB_PAGE_ORDER +/* + * Huge pages are a constant size, but don't exceed the maximum allocation + * granularity. 
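+ *
+ * (Illustrative arithmetic, not part of this patch: if HUGETLB_PAGE_ORDER
+ * were 10 on a configuration where MAX_ORDER is 9, pageblock_order would
+ * evaluate to min(10, 8) = 8.)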
+ */ +#define pageblock_order min_t(unsigned int, HUGETLB_PAGE_ORDER, MAX_ORDER - 1) #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 270bf5136c34..a8d0b327b066 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -18,6 +18,120 @@ struct folio_batch; +unsigned long invalidate_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t end); + +static inline void invalidate_remote_inode(struct inode *inode) +{ + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + invalidate_mapping_pages(inode->i_mapping, 0, -1); +} +int invalidate_inode_pages2(struct address_space *mapping); +int invalidate_inode_pages2_range(struct address_space *mapping, + pgoff_t start, pgoff_t end); +int write_inode_now(struct inode *, int sync); +int filemap_fdatawrite(struct address_space *); +int filemap_flush(struct address_space *); +int filemap_fdatawait_keep_errors(struct address_space *mapping); +int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); +int filemap_fdatawait_range_keep_errors(struct address_space *mapping, + loff_t start_byte, loff_t end_byte); + +static inline int filemap_fdatawait(struct address_space *mapping) +{ + return filemap_fdatawait_range(mapping, 0, LLONG_MAX); +} + +bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend); +int filemap_write_and_wait_range(struct address_space *mapping, + loff_t lstart, loff_t lend); +int __filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end, int sync_mode); +int filemap_fdatawrite_range(struct address_space *mapping, + loff_t start, loff_t end); +int filemap_check_errors(struct address_space *mapping); +void __filemap_set_wb_err(struct address_space *mapping, int err); +int filemap_fdatawrite_wbc(struct address_space *mapping, + struct writeback_control *wbc); + +static inline int filemap_write_and_wait(struct address_space *mapping) +{ + return filemap_write_and_wait_range(mapping, 0, LLONG_MAX); +} + +/** + * filemap_set_wb_err - set a writeback error on an address_space + * @mapping: mapping in which to set writeback error + * @err: error to be set in mapping + * + * When writeback fails in some way, we must record that error so that + * userspace can be informed when fsync and the like are called. We endeavor + * to report errors on any file that was open at the time of the error. Some + * internal callers also need to know when writeback errors have occurred. + * + * When a writeback error occurs, most filesystems will want to call + * filemap_set_wb_err to record the error in the mapping so that it will be + * automatically reported whenever fsync is called on the file. + */ +static inline void filemap_set_wb_err(struct address_space *mapping, int err) +{ + /* Fastpath for common case of no error */ + if (unlikely(err)) + __filemap_set_wb_err(mapping, err); +} + +/** + * filemap_check_wb_err - has an error occurred since the mark was sampled? + * @mapping: mapping to check for writeback errors + * @since: previously-sampled errseq_t + * + * Grab the errseq_t value from the mapping, and see if it has changed "since" + * the given value was sampled. + * + * If it has then report the latest error set, otherwise return 0. 
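+ *
+ * Typical pattern (an editorial sketch, not part of this patch):
+ *
+ *	errseq_t since = filemap_sample_wb_err(mapping);
+ *	(issue and wait for writeback)
+ *	err = filemap_check_wb_err(mapping, since);
+ *
+ * A nonzero err here means a writeback error was recorded after the sample
+ * point.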
+ */ +static inline int filemap_check_wb_err(struct address_space *mapping, + errseq_t since) +{ + return errseq_check(&mapping->wb_err, since); +} + +/** + * filemap_sample_wb_err - sample the current errseq_t to test for later errors + * @mapping: mapping to be sampled + * + * Writeback errors are always reported relative to a particular sample point + * in the past. This function provides those sample points. + */ +static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) +{ + return errseq_sample(&mapping->wb_err); +} + +/** + * file_sample_sb_err - sample the current errseq_t to test for later errors + * @file: file pointer to be sampled + * + * Grab the most current superblock-level errseq_t value for the given + * struct file. + */ +static inline errseq_t file_sample_sb_err(struct file *file) +{ + return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); +} + +/* + * Flush file data before changing attributes. Caller must hold any locks + * required to prevent further writes to this file until we're done setting + * flags. + */ +static inline int inode_drain_writes(struct inode *inode) +{ + inode_dio_wait(inode); + return filemap_write_and_wait(inode->i_mapping); +} + static inline bool mapping_empty(struct address_space *mapping) { return xa_empty(&mapping->i_pages); @@ -192,9 +306,14 @@ static inline void mapping_set_large_folios(struct address_space *mapping) __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); } +/* + * Large folio support currently depends on THP. These dependencies are + * being worked on but are not yet fixed. + */ static inline bool mapping_large_folio_support(struct address_space *mapping) { - return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); + return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && + test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); } static inline int filemap_nr_thps(struct address_space *mapping) @@ -212,7 +331,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping) if (!mapping_large_folio_support(mapping)) atomic_inc(&mapping->nr_thps); #else - WARN_ON_ONCE(1); + WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } @@ -222,7 +341,7 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping) if (!mapping_large_folio_support(mapping)) atomic_dec(&mapping->nr_thps); #else - WARN_ON_ONCE(1); + WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0); #endif } @@ -283,16 +402,6 @@ static inline struct inode *folio_inode(struct folio *folio) return folio->mapping->host; } -static inline bool page_cache_add_speculative(struct page *page, int count) -{ - return folio_ref_try_add_rcu((struct folio *)page, count); -} - -static inline bool page_cache_get_speculative(struct page *page) -{ - return page_cache_add_speculative(page, 1); -} - /** * folio_attach_private - Attach private data to a folio. * @folio: Folio to attach data to. @@ -423,6 +532,24 @@ static inline struct folio *filemap_get_folio(struct address_space *mapping, } /** + * filemap_lock_folio - Find and lock a folio. + * @mapping: The address_space to search. + * @index: The page index. + * + * Looks up the page cache entry at @mapping & @index. If a folio is + * present, it is returned locked with an increased refcount. + * + * Context: May sleep. + * Return: A folio or %NULL if there is no folio in the cache for this + * index. Will not return a shadow, swap or DAX entry. 
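+ *
+ * Illustrative call pattern (an editorial sketch, not part of this patch):
+ *
+ *	struct folio *folio = filemap_lock_folio(mapping, index);
+ *
+ *	if (folio) {
+ *		(operate on the locked folio)
+ *		folio_unlock(folio);
+ *		folio_put(folio);
+ *	}
+ *
+ * The lock must be dropped with folio_unlock() and the reference with
+ * folio_put() once the caller is done.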
+ */ +static inline struct folio *filemap_lock_folio(struct address_space *mapping, + pgoff_t index) +{ + return __filemap_get_folio(mapping, index, FGP_LOCK, 0); +} + +/** * find_get_page - find and get a page reference * @mapping: the address_space to search * @offset: the page index @@ -594,13 +721,6 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, pgoff_t end, unsigned int nr_pages, struct page **pages); -static inline unsigned find_get_pages(struct address_space *mapping, - pgoff_t *start, unsigned int nr_pages, - struct page **pages) -{ - return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, - pages); -} unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, @@ -636,15 +756,15 @@ extern int read_cache_pages(struct address_space *mapping, struct list_head *pages, filler_t *filler, void *data); static inline struct page *read_mapping_page(struct address_space *mapping, - pgoff_t index, void *data) + pgoff_t index, struct file *file) { - return read_cache_page(mapping, index, NULL, data); + return read_cache_page(mapping, index, NULL, file); } static inline struct folio *read_mapping_folio(struct address_space *mapping, - pgoff_t index, void *data) + pgoff_t index, struct file *file) { - return read_cache_folio(mapping, index, NULL, data); + return read_cache_folio(mapping, index, NULL, file); } /* @@ -713,6 +833,17 @@ static inline loff_t folio_file_pos(struct folio *folio) return page_file_offset(&folio->page); } +/* + * Get the offset in PAGE_SIZE (even for hugetlb folios). + * (TODO: hugetlb folios should have ->index in PAGE_SIZE) + */ +static inline pgoff_t folio_pgoff(struct folio *folio) +{ + if (unlikely(folio_test_hugetlb(folio))) + return hugetlb_basepage_index(&folio->page); + return folio->index; +} + extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, unsigned long address); @@ -878,8 +1009,7 @@ static inline void __set_page_dirty(struct page *page, { __folio_mark_dirty(page_folio(page), mapping, warn); } -void folio_account_cleaned(struct folio *folio, struct address_space *mapping, - struct bdi_writeback *wb); +void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); void __folio_cancel_dirty(struct folio *folio); static inline void folio_cancel_dirty(struct folio *folio) { @@ -893,6 +1023,7 @@ static inline void cancel_dirty_page(struct page *page) } bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); +void folio_invalidate(struct folio *folio, size_t offset, size_t length); int __must_check folio_write_one(struct folio *folio); static inline int __must_check write_one_page(struct page *page) { @@ -900,7 +1031,7 @@ static inline int __must_check write_one_page(struct page *page) } int __set_page_dirty_nobuffers(struct page *page); -int __set_page_dirty_no_writeback(struct page *page); +bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); void page_endio(struct page *page, bool is_write, int err); diff --git a/include/linux/pci.h b/include/linux/pci.h index 8253a5413d7c..b957eeb89c7a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -668,6 +668,7 @@ struct pci_bus { struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ struct bin_attribute *legacy_mem; /* Legacy mem */ unsigned int 
is_added:1; + unsigned int unsafe_warn:1; /* warned about RW1C config write */ }; #define to_pci_bus(n) container_of(n, struct pci_bus, dev) @@ -2166,7 +2167,8 @@ void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar); #ifdef CONFIG_PCI_IOV int pci_iov_virtfn_bus(struct pci_dev *dev, int id); int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); - +int pci_iov_vf_id(struct pci_dev *dev); +void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver); int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); void pci_disable_sriov(struct pci_dev *dev); @@ -2194,6 +2196,18 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) { return -ENOSYS; } + +static inline int pci_iov_vf_id(struct pci_dev *dev) +{ + return -ENOSYS; +} + +static inline void *pci_iov_get_pf_drvdata(struct pci_dev *dev, + struct pci_driver *pf_driver) +{ + return ERR_PTR(-EINVAL); +} + static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index aad54c666407..0178823ce8c2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -60,6 +60,8 @@ #define PCI_CLASS_BRIDGE_EISA 0x0602 #define PCI_CLASS_BRIDGE_MC 0x0603 #define PCI_CLASS_BRIDGE_PCI 0x0604 +#define PCI_CLASS_BRIDGE_PCI_NORMAL 0x060400 +#define PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE 0x060401 #define PCI_CLASS_BRIDGE_PCMCIA 0x0605 #define PCI_CLASS_BRIDGE_NUBUS 0x0606 #define PCI_CLASS_BRIDGE_CARDBUS 0x0607 @@ -2529,11 +2531,16 @@ #define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff #define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define PCI_DEVICE_ID_HUAWEI_ZIP_VF 0xa251 +#define PCI_DEVICE_ID_HUAWEI_SEC_VF 0xa256 +#define PCI_DEVICE_ID_HUAWEI_HPRE_VF 0xa259 #define PCI_VENDOR_ID_NETRONOME 0x19ee +#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800 #define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000 #define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000 #define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000 +#define PCI_DEVICE_ID_NETRONOME_NFP3800_VF 0x3803 #define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003 #define PCI_VENDOR_ID_QMI 0x1a32 @@ -2561,6 +2568,8 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_VENDOR_ID_FUNGIBLE 0x1dad + #define PCI_VENDOR_ID_HXT 0x1dbf #define PCI_VENDOR_ID_TEKRAM 0x1de1 diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index add077a81b21..266eb26fb029 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -31,8 +31,7 @@ void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex); int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, unsigned int mode); -void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported, - struct phylink_link_state *state); +void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 2512e2f9cd4e..0407a38b470a 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -26,6 +26,8 @@ */ /* Event uses a 64bit counter */ #define ARMPMU_EVT_64BIT 1 +/* Event uses a 47bit counter */ +#define ARMPMU_EVT_47BIT 2 #define HW_OP_UNSUPPORTED 0xFFFF #define C(_x) PERF_COUNT_HW_CACHE_##_x diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h new file mode 100644 index 000000000000..46f9b6fe306e --- /dev/null +++ 
b/include/linux/perf/riscv_pmu.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 SiFive + * Copyright (C) 2018 Andes Technology Corporation + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * + */ + +#ifndef _ASM_RISCV_PERF_EVENT_H +#define _ASM_RISCV_PERF_EVENT_H + +#include <linux/perf_event.h> +#include <linux/ptrace.h> +#include <linux/interrupt.h> + +#ifdef CONFIG_RISCV_PMU + +/* + * The RISCV_MAX_COUNTERS parameter should be specified. + */ + +#define RISCV_MAX_COUNTERS 64 +#define RISCV_OP_UNSUPP (-EOPNOTSUPP) +#define RISCV_PMU_PDEV_NAME "riscv-pmu" +#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy" + +#define RISCV_PMU_STOP_FLAG_RESET 1 + +struct cpu_hw_events { + /* currently enabled events */ + int n_events; + /* Counter overflow interrupt */ + int irq; + /* currently enabled events */ + struct perf_event *events[RISCV_MAX_COUNTERS]; + /* currently enabled hardware counters */ + DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS); + /* currently enabled firmware counters */ + DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS); +}; + +struct riscv_pmu { + struct pmu pmu; + char *name; + + irqreturn_t (*handle_irq)(int irq_num, void *dev); + + int num_counters; + u64 (*ctr_read)(struct perf_event *event); + int (*ctr_get_idx)(struct perf_event *event); + int (*ctr_get_width)(int idx); + void (*ctr_clear_idx)(struct perf_event *event); + void (*ctr_start)(struct perf_event *event, u64 init_val); + void (*ctr_stop)(struct perf_event *event, unsigned long flag); + int (*event_map)(struct perf_event *event, u64 *config); + + struct cpu_hw_events __percpu *hw_events; + struct hlist_node node; +}; + +#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu)) +unsigned long riscv_pmu_ctr_read_csr(unsigned long csr); +int riscv_pmu_event_set_period(struct perf_event *event); +uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event); +u64 riscv_pmu_event_update(struct perf_event *event); +#ifdef CONFIG_RISCV_PMU_LEGACY +void riscv_pmu_legacy_skip_init(void); +#else +static inline void riscv_pmu_legacy_skip_init(void) {}; +#endif +struct riscv_pmu *riscv_pmu_alloc(void); + +#endif /* CONFIG_RISCV_PMU */ + +#endif /* _ASM_RISCV_PERF_EVENT_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 733649184b27..af97dd427501 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -864,7 +864,7 @@ struct perf_event_context { #define PERF_NR_CONTEXTS 4 /** - * struct perf_event_cpu_context - per cpu event context structure + * struct perf_cpu_context - per cpu event context structure */ struct perf_cpu_context { struct perf_event_context ctx; diff --git a/include/linux/perf_event_api.h b/include/linux/perf_event_api.h new file mode 100644 index 000000000000..c2fd6048b790 --- /dev/null +++ b/include/linux/perf_event_api.h @@ -0,0 +1 @@ +#include <linux/perf_event.h> diff --git a/include/linux/pgtable_api.h b/include/linux/pgtable_api.h new file mode 100644 index 000000000000..ff367a4ba8c4 --- /dev/null +++ b/include/linux/pgtable_api.h @@ -0,0 +1 @@ +#include <linux/pgtable.h> diff --git a/include/linux/phy.h b/include/linux/phy.h index 6de8d7a90d78..36ca2b5c2253 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -87,8 +87,8 @@ extern const int phy_10gbit_features_array[1]; * * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined - * @PHY_INTERFACE_MODE_MII: Median-independent interface - * 
@PHY_INTERFACE_MODE_GMII: Gigabit median-independent interface + * @PHY_INTERFACE_MODE_MII: Media-independent interface + * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface @@ -1578,6 +1578,7 @@ int genphy_update_link(struct phy_device *phydev); int genphy_read_lpa(struct phy_device *phydev); int genphy_read_status_fixed(struct phy_device *phydev); int genphy_read_status(struct phy_device *phydev); +int genphy_read_master_slave(struct phy_device *phydev); int genphy_suspend(struct phy_device *phydev); int genphy_resume(struct phy_device *phydev); int genphy_loopback(struct phy_device *phydev, bool enable); @@ -1661,7 +1662,7 @@ int phy_disable_interrupts(struct phy_device *phydev); void phy_request_interrupt(struct phy_device *phydev); void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); -int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); +void phy_set_max_speed(struct phy_device *phydev, u32 max_speed); void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); void phy_advertise_supported(struct phy_device *phydev); void phy_support_sym_pause(struct phy_device *phydev); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 713a0c928b7c..223781622b33 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -86,7 +86,6 @@ enum phylink_op_type { * @type: operation type of PHYLINK instance * @legacy_pre_march2020: driver has not been updated for March 2020 updates * (See commit 7cceb599d15d ("net: phylink: avoid mac_config calls") - * @pcs_poll: MAC PCS cannot provide link change interrupt * @poll_fixed_state: if true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. 
* @ovr_an_inband: if true, override PCS to MLO_AN_INBAND @@ -100,7 +99,6 @@ struct phylink_config { struct device *dev; enum phylink_op_type type; bool legacy_pre_march2020; - bool pcs_poll; bool poll_fixed_state; bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, @@ -534,7 +532,6 @@ void phylink_generic_validate(struct phylink_config *config, struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *mac_ops); -void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); @@ -582,7 +579,6 @@ int phylink_speed_up(struct phylink *pl); #define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) void phylink_set_port_modes(unsigned long *bits); -void phylink_set_10g_modes(unsigned long *mask); void phylink_helper_basex_speed(struct phylink_link_state *state); void phylink_mii_c22_pcs_decode_state(struct phylink_link_state *state, diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h index 2b5676ff35be..f922a192fe58 100644 --- a/include/linux/platform_data/brcmfmac.h +++ b/include/linux/platform_data/brcmfmac.h @@ -178,7 +178,7 @@ struct brcmfmac_platform_data { void (*power_off)(void); char *fw_alternative_path; int device_count; - struct brcmfmac_pd_device devices[0]; + struct brcmfmac_pd_device devices[]; }; diff --git a/include/linux/platform_data/brcmnand.h b/include/linux/platform_data/brcmnand.h new file mode 100644 index 000000000000..8b8777985dce --- /dev/null +++ b/include/linux/platform_data/brcmnand.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef BRCMNAND_PLAT_DATA_H +#define BRCMNAND_PLAT_DATA_H + +struct brcmnand_platform_data { + int chip_select; + const char * const *part_probe_types; + unsigned int ecc_stepsize; + unsigned int ecc_strength; +}; + +#endif /* BRCMNAND_PLAT_DATA_H */ diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 271bd87bff0a..c23554531961 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -3386,6 +3386,9 @@ enum ec_mkbp_event { /* Send an incoming CEC message to the AP */ EC_MKBP_EVENT_CEC_MESSAGE = 9, + /* Peripheral device charger event */ + EC_MKBP_EVENT_PCHG = 12, + /* Number of MKBP events */ EC_MKBP_EVENT_COUNT, }; @@ -5527,6 +5530,67 @@ enum pchg_state { [PCHG_STATE_CONNECTED] = "CONNECTED", \ } +/* + * Update firmware of peripheral chip + */ +#define EC_CMD_PCHG_UPDATE 0x0136 + +/* Port number is encoded in bit[28:31]. */ +#define EC_MKBP_PCHG_PORT_SHIFT 28 +/* Utility macro for converting MKBP event to port number. */ +#define EC_MKBP_PCHG_EVENT_TO_PORT(e) (((e) >> EC_MKBP_PCHG_PORT_SHIFT) & 0xf) +/* Utility macro for extracting event bits. */ +#define EC_MKBP_PCHG_EVENT_MASK(e) ((e) \ + & GENMASK(EC_MKBP_PCHG_PORT_SHIFT-1, 0)) + +#define EC_MKBP_PCHG_UPDATE_OPENED BIT(0) +#define EC_MKBP_PCHG_WRITE_COMPLETE BIT(1) +#define EC_MKBP_PCHG_UPDATE_CLOSED BIT(2) +#define EC_MKBP_PCHG_UPDATE_ERROR BIT(3) +#define EC_MKBP_PCHG_DEVICE_EVENT BIT(4) + +enum ec_pchg_update_cmd { + /* Reset chip to normal mode. */ + EC_PCHG_UPDATE_CMD_RESET_TO_NORMAL = 0, + /* Reset and put a chip in update (a.k.a. download) mode. */ + EC_PCHG_UPDATE_CMD_OPEN, + /* Write a block of data containing FW image. */ + EC_PCHG_UPDATE_CMD_WRITE, + /* Close update session. 
*/ + EC_PCHG_UPDATE_CMD_CLOSE, + /* End of commands */ + EC_PCHG_UPDATE_CMD_COUNT, +}; + +struct ec_params_pchg_update { + /* PCHG port number */ + uint8_t port; + /* enum ec_pchg_update_cmd */ + uint8_t cmd; + /* Padding */ + uint8_t reserved0; + uint8_t reserved1; + /* Version of new firmware */ + uint32_t version; + /* CRC32 of new firmware */ + uint32_t crc32; + /* Address in chip memory where <data> is written to */ + uint32_t addr; + /* Size of <data> */ + uint32_t size; + /* Partial data of new firmware */ + uint8_t data[]; +} __ec_align4; + +BUILD_ASSERT(EC_PCHG_UPDATE_CMD_COUNT + < BIT(sizeof(((struct ec_params_pchg_update *)0)->cmd)*8)); + +struct ec_response_pchg_update { + /* Block size */ + uint32_t block_size; +} __ec_align4; + + /*****************************************************************************/ /* Voltage regulator controls */ @@ -5644,7 +5708,7 @@ struct ec_response_typec_discovery { uint8_t svid_count; /* Number of SVIDs partner sent */ uint16_t reserved; uint32_t discovery_vdo[6]; /* Max VDOs allowed after VDM header is 6 */ - struct svid_mode_info svids[0]; + struct svid_mode_info svids[]; } __ec_align1; /* USB Type-C commands for AP-controlled device policy. */ diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h deleted file mode 100644 index 114b0940729f..000000000000 --- a/include/linux/platform_data/eth_ixp4xx.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __PLATFORM_DATA_ETH_IXP4XX -#define __PLATFORM_DATA_ETH_IXP4XX - -#include <linux/types.h> - -#define IXP4XX_ETH_NPEA 0x00 -#define IXP4XX_ETH_NPEB 0x10 -#define IXP4XX_ETH_NPEC 0x20 - -/* Information about built-in Ethernet MAC interfaces */ -struct eth_plat_info { - u8 phy; /* MII PHY ID, 0 - 31 */ - u8 rxq; /* configurable, currently 0 - 31 only */ - u8 txreadyq; - u8 hwaddr[6]; - u8 npe; /* NPE instance used by this interface */ - bool has_mdio; /* If this instance has an MDIO bus */ -}; - -#endif diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index 773daf7915a3..5df1ace6d2c9 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h @@ -16,7 +16,6 @@ struct platform_device; * struct s3c64xx_spi_csinfo - ChipSelect description * @fb_delay: Slave specific feedback delay. * Refer to FB_CLK_SEL register definition in SPI chapter. - * @line: Custom 'identity' of the CS line. * * This is per SPI-Slave Chipselect information. * Allocate and initialize one in machine init code and make the @@ -24,7 +23,6 @@ struct platform_device; */ struct s3c64xx_spi_csinfo { u8 fb_delay; - unsigned line; }; /** @@ -43,26 +41,16 @@ struct s3c64xx_spi_info { /** * s3c64xx_spi_set_platdata - SPI Controller configure callback by the board * initialization code. - * @cfg_gpio: Pointer to gpio setup function. * @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks. * @num_cs: Number of elements in the 'cs' array. * * Call this from machine init code for each SPI Controller that * has some chips attached to it. 
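 *
 * For example (editorial illustration only, the values are hypothetical), a
 * board using clock source 0 with two chip selects would call:
 *
 *	s3c64xx_spi0_set_platdata(0, 2);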
*/ -extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs); -extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs); -extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr, - int num_cs); +extern void s3c64xx_spi0_set_platdata(int src_clk_nr, int num_cs); /* defined by architecture to configure gpio */ extern int s3c64xx_spi0_cfg_gpio(void); -extern int s3c64xx_spi1_cfg_gpio(void); -extern int s3c64xx_spi2_cfg_gpio(void); extern struct s3c64xx_spi_info s3c64xx_spi0_pdata; -extern struct s3c64xx_spi_info s3c64xx_spi1_pdata; -extern struct s3c64xx_spi_info s3c64xx_spi2_pdata; #endif /*__SPI_S3C64XX_H */ diff --git a/include/linux/platform_data/wan_ixp4xx_hss.h b/include/linux/platform_data/wan_ixp4xx_hss.h deleted file mode 100644 index d525a0feb9e1..000000000000 --- a/include/linux/platform_data/wan_ixp4xx_hss.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H -#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H - -#include <linux/types.h> - -/* Information about built-in HSS (synchronous serial) interfaces */ -struct hss_plat_info { - int (*set_clock)(int port, unsigned int clock_type); - int (*open)(int port, void *pdev, - void (*set_carrier_cb)(void *pdev, int carrier)); - void (*close)(int port, void *pdev); - u8 txreadyq; - u32 timer_freq; -}; - -#endif diff --git a/include/linux/platform_data/x86/intel-spi.h b/include/linux/platform_data/x86/spi-intel.h index 7f53a5c6f35e..a512ec37abbb 100644 --- a/include/linux/platform_data/x86/intel-spi.h +++ b/include/linux/platform_data/x86/spi-intel.h @@ -6,8 +6,8 @@ * Author: Mika Westerberg <mika.westerberg@linux.intel.com> */ -#ifndef INTEL_SPI_PDATA_H -#define INTEL_SPI_PDATA_H +#ifndef SPI_INTEL_PDATA_H +#define SPI_INTEL_PDATA_H enum intel_spi_type { INTEL_SPI_BYT = 1, @@ -19,11 +19,13 @@ enum intel_spi_type { /** * struct intel_spi_boardinfo - Board specific data for Intel SPI driver * @type: Type which this controller is compatible with - * @writeable: The chip is writeable + * @set_writeable: Try to make the chip writeable (optional) + * @data: Data to be passed to @set_writeable can be %NULL */ struct intel_spi_boardinfo { enum intel_spi_type type; - bool writeable; + bool (*set_writeable)(void __iomem *base, void *data); + void *data; }; -#endif /* INTEL_SPI_PDATA_H */ +#endif /* SPI_INTEL_PDATA_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index f7d2be686359..e65b3ab28377 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -770,11 +770,11 @@ extern int dpm_suspend_late(pm_message_t state); extern int dpm_suspend(pm_message_t state); extern int dpm_prepare(pm_message_t state); -extern void __suspend_report_result(const char *function, void *fn, int ret); +extern void __suspend_report_result(const char *function, struct device *dev, void *fn, int ret); -#define suspend_report_result(fn, ret) \ +#define suspend_report_result(dev, fn, ret) \ do { \ - __suspend_report_result(__func__, fn, ret); \ + __suspend_report_result(__func__, dev, fn, ret); \ } while (0) extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); @@ -814,7 +814,7 @@ static inline int dpm_suspend_start(pm_message_t state) return 0; } -#define suspend_report_result(fn, ret) do {} while (0) +#define suspend_report_result(dev, fn, ret) do {} while (0) static inline int device_pm_wait_for_dev(struct device *a, struct device *b) { diff --git a/include/linux/pm_runtime.h 
b/include/linux/pm_runtime.h index 9f09601c465a..2bff6a10095d 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -567,6 +567,10 @@ static inline void pm_runtime_disable(struct device *dev) * Allow the runtime PM autosuspend mechanism to be used for @dev whenever * requested (or "autosuspend" will be handled as direct runtime-suspend for * it). + * + * NOTE: It's important to undo this with pm_runtime_dont_use_autosuspend() + * at driver exit time unless your driver initially enabled pm_runtime + * with devm_pm_runtime_enable() (which handles it for you). */ static inline void pm_runtime_use_autosuspend(struct device *dev) { diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 5bbcd280bfd2..9cf126c3b27f 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -253,7 +253,7 @@ void posix_cpu_timers_exit_group(struct task_struct *task); void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, u64 *newval, u64 *oldval); -void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); +int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); void posixtimer_rearm(struct kernel_siginfo *info); #endif diff --git a/include/linux/power/bq25890_charger.h b/include/linux/power/bq25890_charger.h new file mode 100644 index 000000000000..c706ddb77a08 --- /dev/null +++ b/include/linux/power/bq25890_charger.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Platform data for the TI bq25890 battery charger driver. + */ + +#ifndef _BQ25890_CHARGER_H_ +#define _BQ25890_CHARGER_H_ + +struct regulator_init_data; + +struct bq25890_platform_data { + const struct regulator_init_data *regulator_init_data; +}; + +#endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index e218041cc000..cb380c1d9459 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -49,6 +49,7 @@ enum { POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */ POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */ POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */ + POWER_SUPPLY_CHARGE_TYPE_BYPASS, /* bypassing the charger */ }; enum { @@ -348,6 +349,57 @@ struct power_supply_resistance_temp_table { int resistance; /* internal resistance percent */ }; +struct power_supply_vbat_ri_table { + int vbat_uv; /* Battery voltage in microvolt */ + int ri_uohm; /* Internal resistance in microohm */ +}; + +/** + * struct power_supply_maintenance_charge_table - setting for maintenance charging + * @charge_current_max_ua: maintenance charging current that is used to keep + * the charge of the battery full as current is consumed after full charging. + * The corresponding charge_voltage_max_uv is used as a safeguard: when we + * reach this voltage the maintenance charging current is turned off. It is + * turned back on if we fall below this voltage. + * @charge_voltage_max_uv: maintenance charging voltage that is usually a bit + * lower than the constant_charge_voltage_max_uv. We can apply this setting's + * charge_current_max_ua until we get back up to this voltage. + * @charge_safety_timer_minutes: maintenance charging safety timer, with an expiry + * time in minutes.
We will only use maintenance charging in this setting + * for a certain amount of time, then we will first move to the next + * maintenance charge current and voltage pair in the respective array and wait + * for the next safety timer timeout, or, if we have reached the last maintenance + * charging setting, disable charging until we reach + * charge_restart_voltage_uv and restart ordinary CC/CV charging from there. + * These timers should be chosen to align with the typical discharge curve + * for the battery. + * + * When the main CC/CV charging is complete the battery can optionally be + * maintenance charged at the voltages from this table: a table of settings is + * traversed using a slightly lower current and voltage than what is used for + * CC/CV charging. The maintenance charging will for safety reasons not go on + * indefinitely: we lower the current and voltage with successive maintenance + * settings, then disable charging completely after we reach the last one, + * and after that we do not restart charging until we reach + * charge_restart_voltage_uv (see struct power_supply_battery_info) and restart + * ordinary CC/CV charging from there. + * + * As an example, a Samsung EB425161LA Lithium-Ion battery is CC/CV charged + * at 900mA to 4340mV, then maintenance charged at 600mA and 4150mV for + * 60 hours, then maintenance charged at 600mA and 4100mV for 200 hours. + * After this the charge cycle is restarted waiting for + * charge_restart_voltage_uv. + * + * For most mobile electronics this type of maintenance charging is enough for + * the user to disconnect the device and make use of it before both maintenance + * charging cycles are complete. + */ +struct power_supply_maintenance_charge_table { + int charge_current_max_ua; + int charge_voltage_max_uv; + int charge_safety_timer_minutes; +}; + #define POWER_SUPPLY_OCV_TEMP_MAX 20 /** @@ -393,10 +445,34 @@ struct power_supply_resistance_temp_table { * @constant_charge_voltage_max_uv: voltage in microvolts signifying the end of * the CC (constant current) charging phase and the beginning of the CV * (constant voltage) charging phase. + * @maintenance_charge: an array of maintenance charging settings to be used + * after the main CC/CV charging phase is complete. + * @maintenance_charge_size: the number of maintenance charging settings in + * maintenance_charge. + * @alert_low_temp_charge_current_ua: The charging current to use if the battery + * enters low alert temperature, i.e. if the internal temperature is between + * temp_alert_min and temp_min. No matter the charging phase, this + * and alert_low_temp_charge_voltage_uv will be applied. + * @alert_low_temp_charge_voltage_uv: Same as alert_low_temp_charge_current_ua, + * but for the charging voltage. + * @alert_high_temp_charge_current_ua: The charging current to use if the + * battery enters high alert temperature, i.e. if the internal temperature is + * between temp_alert_max and temp_max. No matter the charging phase, this + * and alert_high_temp_charge_voltage_uv will be applied, usually lowering + * the charging current as an evasive maneuver. + * @alert_high_temp_charge_voltage_uv: Same as + * alert_high_temp_charge_current_ua, but for the charging voltage. * @factory_internal_resistance_uohm: the internal resistance of the battery * at fabrication time, expressed in microohms. This resistance will vary * depending on the lifetime and charge of the battery, so this is just a - * nominal ballpark figure.
This internal resistance is given for the state + * when the battery is discharging. + * @factory_internal_resistance_charging_uohm: the internal resistance of the + * battery at fabrication time while charging, expressed in microohms. + * The charging process will affect the internal resistance of the battery, + * so this value provides a better resistance under these circumstances. + * This resistance will vary depending on the lifetime and charge of the + * battery, so this is just a nominal ballpark figure. * @ocv_temp: array indicating the open circuit voltage (OCV) capacity * temperature indices. This is an array of temperatures in degrees Celsius * indicating which capacity table to use for a certain temperature, since @@ -434,13 +510,38 @@ struct power_supply_resistance_temp_table { * by temperature: highest temperature with lowest resistance first, lowest * temperature with highest resistance last. * @resist_table_size: the number of items in the resist_table. + * @vbat2ri_discharging: this is a table that correlates Battery voltage (VBAT) + * to internal resistance (Ri). The resistance is given in microohm for the + * corresponding voltage in microvolts. The internal resistance is used to + * determine the open circuit voltage so that we can determine the capacity + * of the battery. These voltage-to-resistance tables apply when the battery + * is discharging. The table must be ordered descending by voltage: highest + * voltage first. + * @vbat2ri_discharging_size: the number of items in the vbat2ri_discharging + * table. + * @vbat2ri_charging: same function as vbat2ri_discharging but for the state + * when the battery is charging. Being under charge changes the battery's + * internal resistance characteristics, so a separate table is needed. + * The table must be ordered descending by voltage: highest voltage first. + * @vbat2ri_charging_size: the number of items in the vbat2ri_charging + * table. + * @bti_resistance_ohm: The Battery Type Indicator (BTI) nominal resistance + * in ohms for this battery, if an identification resistor is mounted + * between a third battery terminal and ground. This scheme is used by a lot + * of mobile device batteries. + * @bti_resistance_tolerance: The tolerance in percent of the BTI resistance, + * for example 10 for +/- 10%; if the bti_resistance is set to 7000 and the + * tolerance is 10%, we will detect a proper battery if the BTI resistance + * is between 6300 and 7700 ohm. * * This is the recommended struct to manage static battery parameters, * populated by power_supply_get_battery_info(). Most platform drivers should * use these for consistency. * * Its field names must correspond to elements in enum power_supply_property. - * The default field value is -EINVAL. + * The default field value is -EINVAL or NULL for pointers. + * + * CC/CV CHARGING: * * The charging parameters here assume a CC/CV charging scheme. This method * is most common with Lithium Ion batteries (other methods are possible) and @@ -525,6 +626,66 @@ struct power_supply_resistance_temp_table { * Overcharging Lithium Ion cells can be DANGEROUS and lead to fire or * explosions. + * + * DETERMINING BATTERY CAPACITY: + * + * Several members of the struct deal with trying to determine the remaining + * capacity in the battery, usually as a percentage of charge. In practice + * many chargers use a so-called fuel gauge or coulomb counter that measures + * how much charge goes into the battery and how much goes out (+/- leak + * consumption).
This does not help if we do not know how much capacity the + * battery has to begin with, such as when it is first used or was taken out + * and charged in a separate charger. Therefore many capacity algorithms use + * the open circuit voltage with a look-up table to determine the rough + * capacity of the battery. The open circuit voltage can be conceptualized + * with an ideal voltage source (V) in series with an internal resistance (Ri) + * like this: + * + * +-------> IBAT >----------------+ + * | ^ | + * [ ] Ri | | + * | | VBAT | + * o <---------- | | + * +| ^ | [ ] Rload + * .---. | | | + * | V | | OCV | | + * '---' | | | + * | | | | + * GND +-------------------------------+ + * + * If we disconnect the load (here simplified as a fixed resistance Rload) + * and measure VBAT with an infinite-impedance voltage meter we will get + * VBAT = OCV, and this assumption is sometimes made even under load, assuming + * Rload is insignificant. However, this will be of dubious quality because the + * load is rarely that small and Ri is strongly nonlinear depending on + * temperature and how much capacity is left in the battery due to the + * chemistry involved. + * + * In many practical applications we cannot just disconnect the battery from + * the load, so instead we often try to measure the instantaneous IBAT (the + * current out from the battery), estimate the Ri and thus calculate the + * voltage drop over Ri and compensate like this: + * + * OCV = VBAT - (IBAT * Ri) + * + * The tables vbat2ri_discharging and vbat2ri_charging are used to determine + * (by interpolation) the Ri from the VBAT under load. These curves are highly + * nonlinear and may need many datapoints, but can be found in datasheets for + * some batteries. This gives the compensated open circuit voltage (OCV) for + * the battery even under load. Using this method will also compensate for + * temperature changes in the environment: these will also make the internal + * resistance change, and that will affect the VBAT under load, so correlating + * VBAT to Ri takes both remaining capacity and temperature into consideration. + * + * Alternatively, a manufacturer can specify how the capacity of the battery + * is dependent on the battery temperature, which is the main factor affecting + * Ri. As we know, all chemical reactions are faster when it is warm and slower + * when it is cold. For example, you can put in 1500 mAh and only get 800 mAh + * out before the voltage drops too low. This effect is also highly nonlinear + * and is the purpose of the resist_table: it takes a temperature and + * tells us what percentage of Ri the specified temperature corresponds to. + * Usually we have 100% of the factory_internal_resistance_uohm at 25 degrees + * Celsius. + * * The power supply class itself doesn't use this struct as of now.
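A minimal sketch of the compensation just described, using the power_supply_vbat2ri() helper this patch declares further down; the wrapper name and the sign convention (ibat_ua positive while discharging) are assumptions for illustration:

	#include <linux/math64.h>
	#include <linux/power_supply.h>

	/* Sketch: compensated OCV from a measured VBAT/IBAT pair,
	 * assuming ibat_ua is positive while discharging. */
	static int example_ocv_uv(struct power_supply_battery_info *info,
				  int vbat_uv, int ibat_ua, bool charging)
	{
		int ri_uohm = power_supply_vbat2ri(info, vbat_uv, charging);

		if (ri_uohm < 0)
			return ri_uohm;	/* no table, or out of range */

		/* OCV = VBAT - (IBAT * Ri): uA * uohm / 1e6 gives uV */
		return vbat_uv - (int)div_s64((s64)ibat_ua * ri_uohm, 1000000);
	}
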
*/ @@ -542,7 +703,14 @@ struct power_supply_battery_info { int overvoltage_limit_uv; int constant_charge_current_max_ua; int constant_charge_voltage_max_uv; + struct power_supply_maintenance_charge_table *maintenance_charge; + int maintenance_charge_size; + int alert_low_temp_charge_current_ua; + int alert_low_temp_charge_voltage_uv; + int alert_high_temp_charge_current_ua; + int alert_high_temp_charge_voltage_uv; int factory_internal_resistance_uohm; + int factory_internal_resistance_charging_uohm; int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX]; int temp_ambient_alert_min; int temp_ambient_alert_max; @@ -554,6 +722,12 @@ struct power_supply_battery_info { int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; struct power_supply_resistance_temp_table *resist_table; int resist_table_size; + struct power_supply_vbat_ri_table *vbat2ri_discharging; + int vbat2ri_discharging_size; + struct power_supply_vbat_ri_table *vbat2ri_charging; + int vbat2ri_charging_size; + int bti_resistance_ohm; + int bti_resistance_tolerance; }; extern struct atomic_notifier_head power_supply_notifier; @@ -595,12 +769,43 @@ extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, extern int power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, int table_len, int temp); +extern int power_supply_vbat2ri(struct power_supply_battery_info *info, + int vbat_uv, bool charging); +extern struct power_supply_maintenance_charge_table * +power_supply_get_maintenance_charging_setting(struct power_supply_battery_info *info, int index); +extern bool power_supply_battery_bti_in_range(struct power_supply_battery_info *info, + int resistance); extern void power_supply_changed(struct power_supply *psy); extern int power_supply_am_i_supplied(struct power_supply *psy); -extern int power_supply_set_input_current_limit_from_supplier( - struct power_supply *psy); +int power_supply_get_property_from_supplier(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val); extern int power_supply_set_battery_charged(struct power_supply *psy); +static inline bool +power_supply_supports_maintenance_charging(struct power_supply_battery_info *info) +{ + struct power_supply_maintenance_charge_table *mt; + + mt = power_supply_get_maintenance_charging_setting(info, 0); + + return (mt != NULL); +} + +static inline bool +power_supply_supports_vbat2ri(struct power_supply_battery_info *info) +{ + return ((info->vbat2ri_discharging != NULL) && + info->vbat2ri_discharging_size > 0); +} + +static inline bool +power_supply_supports_temp2ri(struct power_supply_battery_info *info) +{ + return ((info->resist_table != NULL) && + info->resist_table_size > 0); +} + #ifdef CONFIG_POWER_SUPPLY extern int power_supply_is_system_supplied(void); #else diff --git a/include/linux/psi.h b/include/linux/psi.h index 7f7d1d88c3bb..89784763d19e 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -6,6 +6,7 @@ #include <linux/psi_types.h> #include <linux/sched.h> #include <linux/poll.h> +#include <linux/cgroup-defs.h> struct seq_file; struct css_set; diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 1a3cef26d129..c7fe7c089718 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -141,6 +141,9 @@ struct psi_trigger { * events to one per window */ u64 last_event_time; + + /* Deferred event(s) from previous ratelimit window */ + bool pending_event; }; struct psi_group { diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 
eb93a54cff31..e97a8188f0fd 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -14,7 +14,7 @@ #include <linux/errno.h> #include <linux/kmsg_dump.h> #include <linux/mutex.h> -#include <linux/semaphore.h> +#include <linux/spinlock.h> #include <linux/time.h> #include <linux/types.h> @@ -87,7 +87,7 @@ struct pstore_record { * @owner: module which is responsible for this backend driver * @name: name of the backend driver * - * @buf_lock: semaphore to serialize access to @buf + * @buf_lock: spinlock to serialize access to @buf * @buf: preallocated crash dump buffer * @bufsize: size of @buf available for crash dump bytes (must match * smallest number of bytes available for writing to a @@ -178,7 +178,7 @@ struct pstore_info { struct module *owner; const char *name; - struct semaphore buf_lock; + spinlock_t buf_lock; char *buf; size_t bufsize; diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index 9afd34a2d36c..fefa7790dc46 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -126,6 +126,17 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr, return msgtype; } +/** + * ptp_msg_is_sync - Evaluates whether the given skb is a PTP Sync message + * @skb: packet buffer + * @type: type of the packet (see ptp_classify_raw()) + * + * This function evaluates whether the given skb is a PTP Sync message. + * + * Return: true if sync message, false otherwise + */ +bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type); + void __init ptp_classifier_init(void); #else static inline void ptp_classifier_init(void) @@ -148,5 +159,9 @@ static inline u8 ptp_get_msgtype(const struct ptp_header *hdr, */ return PTP_MSGTYPE_SYNC; } +static inline bool ptp_msg_is_sync(struct sk_buff *skb, unsigned int type) +{ + return false; +} #endif #endif /* _PTP_CLASSIFY_H_ */ diff --git a/include/linux/ptrace_api.h b/include/linux/ptrace_api.h new file mode 100644 index 000000000000..26e7d275ad8d --- /dev/null +++ b/include/linux/ptrace_api.h @@ -0,0 +1 @@ +#include <linux/ptrace.h> diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 81cad9e1e412..f8335644a01a 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -63,13 +63,21 @@ enum qcom_scm_ice_cipher { extern bool qcom_scm_is_available(void); -extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus); -extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus); +extern int qcom_scm_set_cold_boot_addr(void *entry); +extern int qcom_scm_set_warm_boot_addr(void *entry); extern void qcom_scm_cpu_power_down(u32 flags); extern int qcom_scm_set_remote_state(u32 state, u32 id); +struct qcom_scm_pas_metadata { + void *ptr; + dma_addr_t phys; + ssize_t size; +}; + extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, - size_t size); + size_t size, + struct qcom_scm_pas_metadata *ctx); +void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx); extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size); extern int qcom_scm_pas_auth_and_reset(u32 peripheral); @@ -83,6 +91,7 @@ extern bool qcom_scm_restore_sec_cfg_available(void); extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); +extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 
cp_nonpixel_start, u32 cp_nonpixel_size); @@ -107,6 +116,7 @@ extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); +extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt); extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 2a9fee8ddae3..51b811b62322 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h @@ -11,13 +11,20 @@ struct xor_block_template { struct xor_block_template *next; const char *name; int speed; - void (*do_2)(unsigned long, unsigned long *, unsigned long *); - void (*do_3)(unsigned long, unsigned long *, unsigned long *, - unsigned long *); - void (*do_4)(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *); - void (*do_5)(unsigned long, unsigned long *, unsigned long *, - unsigned long *, unsigned long *, unsigned long *); + void (*do_2)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict); + void (*do_3)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict); + void (*do_4)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict); + void (*do_5)(unsigned long, unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict, + const unsigned long * __restrict); }; #endif diff --git a/include/linux/random.h b/include/linux/random.h index c45b2693e51f..f673fbb838b3 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -1,9 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * include/linux/random.h - * - * Include file for the random number generator. 
- */ + #ifndef _LINUX_RANDOM_H #define _LINUX_RANDOM_H @@ -14,14 +10,10 @@ #include <uapi/linux/random.h> -struct random_ready_callback { - struct list_head list; - void (*func)(struct random_ready_callback *rdy); - struct module *owner; -}; +struct notifier_block; -extern void add_device_randomness(const void *, unsigned int); -extern void add_bootloader_randomness(const void *, unsigned int); +extern void add_device_randomness(const void *, size_t); +extern void add_bootloader_randomness(const void *, size_t); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) @@ -36,14 +28,24 @@ static inline void add_latent_entropy(void) {} extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; extern void add_interrupt_randomness(int irq) __latent_entropy; +extern void add_hwgenerator_randomness(const void *buffer, size_t count, + size_t entropy); +#if IS_ENABLED(CONFIG_VMGENID) +extern void add_vmfork_randomness(const void *unique_vm_id, size_t size); +extern int register_random_vmfork_notifier(struct notifier_block *nb); +extern int unregister_random_vmfork_notifier(struct notifier_block *nb); +#else +static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; } +static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; } +#endif -extern void get_random_bytes(void *buf, int nbytes); +extern void get_random_bytes(void *buf, size_t nbytes); extern int wait_for_random_bytes(void); extern int __init rand_initialize(void); extern bool rng_is_initialized(void); -extern int add_random_ready_callback(struct random_ready_callback *rdy); -extern void del_random_ready_callback(struct random_ready_callback *rdy); -extern int __must_check get_random_bytes_arch(void *buf, int nbytes); +extern int register_random_ready_notifier(struct notifier_block *nb); +extern int unregister_random_ready_notifier(struct notifier_block *nb); +extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes); #ifndef MODULE extern const struct file_operations random_fops, urandom_fops; @@ -87,7 +89,7 @@ static inline unsigned long get_random_canary(void) /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). * Returns the result of the call to wait_for_random_bytes. */ -static inline int get_random_bytes_wait(void *buf, int nbytes) +static inline int get_random_bytes_wait(void *buf, size_t nbytes) { int ret = wait_for_random_bytes(); get_random_bytes(buf, nbytes); @@ -158,4 +160,9 @@ static inline bool __init arch_get_random_long_early(unsigned long *v) } #endif +#ifdef CONFIG_SMP +extern int random_prepare_cpu(unsigned int cpu); +extern int random_online_cpu(unsigned int cpu); +#endif + #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/randomize_kstack.h b/include/linux/randomize_kstack.h index bebc911161b6..1468caf001c0 100644 --- a/include/linux/randomize_kstack.h +++ b/include/linux/randomize_kstack.h @@ -2,6 +2,7 @@ #ifndef _LINUX_RANDOMIZE_KSTACK_H #define _LINUX_RANDOMIZE_KSTACK_H +#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET #include <linux/kernel.h> #include <linux/jump_label.h> #include <linux/percpu-defs.h> @@ -16,8 +17,20 @@ DECLARE_PER_CPU(u32, kstack_offset); * alignment. Also, since this use is being explicitly masked to a max of * 10 bits, stack-clash style attacks are unlikely. 
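Stepping back to the random.h changes above: the new register_random_vmfork_notifier() interface lets a driver react when a VM is cloned. A brief usage sketch (names are illustrative; the callback contract is the standard notifier_block one):

	#include <linux/notifier.h>
	#include <linux/random.h>

	/* Sketch: invalidate cached nonces/session keys when the VM forks. */
	static int my_vmfork_event(struct notifier_block *nb,
				   unsigned long action, void *data)
	{
		/* regenerate any state that must be unique per VM clone */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_vmfork_nb = {
		.notifier_call = my_vmfork_event,
	};

	/* in driver init: register_random_vmfork_notifier(&my_vmfork_nb); */
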
For more details see * "VLAs" in Documentation/process/deprecated.rst + * + * The normal __builtin_alloca() is initialized with INIT_STACK_ALL (currently + * only with Clang and not GCC). Initializing the unused area on each syscall + * entry is expensive, and generating an implicit call to memset() may also be + * problematic (such as in noinstr functions). Therefore, if the compiler + * supports it (which it should if it initializes allocas), always use the + * "uninitialized" variant of the builtin. */ -void *__builtin_alloca(size_t size); +#if __has_builtin(__builtin_alloca_uninitialized) +#define __kstack_alloca __builtin_alloca_uninitialized +#else +#define __kstack_alloca __builtin_alloca +#endif + /* * Use, at most, 10 bits of entropy. We explicitly cap this to keep the * "VLA" from being unbounded (see above). 10 bits leaves enough room for @@ -36,7 +49,7 @@ void *__builtin_alloca(size_t size); if (static_branch_maybe(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT, \ &randomize_kstack_offset)) { \ u32 offset = raw_cpu_read(kstack_offset); \ - u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset)); \ + u8 *ptr = __kstack_alloca(KSTACK_OFFSET_MAX(offset)); \ /* Keep allocation even after "ptr" loses scope. */ \ asm volatile("" :: "r"(ptr) : "memory"); \ } \ @@ -50,5 +63,9 @@ void *__builtin_alloca(size_t size); raw_cpu_write(kstack_offset, offset); \ } \ } while (0) +#else /* CONFIG_RANDOMIZE_KSTACK_OFFSET */ +#define add_random_kstack_offset() do { } while (0) +#define choose_random_kstack_offset(rand) do { } while (0) +#endif /* CONFIG_RANDOMIZE_KSTACK_OFFSET */ #endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 88b42eb46406..e7c39c200e2b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -84,7 +84,7 @@ static inline int rcu_preempt_depth(void) /* Internal to kernel */ void rcu_init(void); -extern int rcu_scheduler_active __read_mostly; +extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); void rcu_report_dead(unsigned int cpu); void rcutree_migrate_callbacks(int cpu); @@ -924,7 +924,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * * kvfree_rcu(ptr); * - * where @ptr is a pointer to kvfree(). + * where @ptr is the pointer to be freed by kvfree(). 
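A short usage sketch of the two kvfree_rcu() forms this comment documents (the struct and function names are invented for illustration):

	struct foo {
		int payload;
		struct rcu_head rcu;
	};

	static void drop_foo(struct foo *f)
	{
		/* two-argument form: uses the embedded rcu_head and can
		 * be called from atomic context */
		kvfree_rcu(f, rcu);
	}

	static void drop_foo_headless(struct foo *f)
	{
		/* single-argument (head-less) form: no rcu_head needed,
		 * but it may sleep, as the note that follows explains */
		kvfree_rcu(f);
	}
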
* * Please note, head-less way of freeing is permitted to * use from a context that has to follow might_sleep() diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 858f4d429946..5fed476f977f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -64,9 +64,8 @@ static inline void rcu_softirq_qs(void) rcu_tasks_qs(current, (preempt)); \ } while (0) -static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) +static inline int rcu_needs_cpu(void) { - *nextevt = KTIME_MAX; return 0; } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 53209d669400..9c6cfb742504 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -19,7 +19,7 @@ void rcu_softirq_qs(void); void rcu_note_context_switch(bool preempt); -int rcu_needs_cpu(u64 basem, u64 *nextevt); +int rcu_needs_cpu(void); void rcu_cpu_stall_reset(void); /* @@ -62,7 +62,7 @@ static inline void rcu_irq_exit_check_preempt(void) { } void exit_rcu(void); void rcu_scheduler_starting(void); -extern int rcu_scheduler_active __read_mostly; +extern int rcu_scheduler_active; void rcu_end_inkernel_boot(void); bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 61c56cca95c4..8052d34da782 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -47,11 +47,7 @@ static inline void prepare_to_rcuwait(struct rcuwait *w) rcu_assign_pointer(w->task, current); } -static inline void finish_rcuwait(struct rcuwait *w) -{ - rcu_assign_pointer(w->task, NULL); - __set_current_state(TASK_RUNNING); -} +extern void finish_rcuwait(struct rcuwait *w); #define rcuwait_wait_event(w, condition, state) \ ({ \ diff --git a/include/linux/rcuwait_api.h b/include/linux/rcuwait_api.h new file mode 100644 index 000000000000..f962e28544dd --- /dev/null +++ b/include/linux/rcuwait_api.h @@ -0,0 +1 @@ +#include <linux/rcuwait.h> diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h index 60f3453be23e..9ca353ab712b 100644 --- a/include/linux/ref_tracker.h +++ b/include/linux/ref_tracker.h @@ -13,6 +13,8 @@ struct ref_tracker_dir { spinlock_t lock; unsigned int quarantine_avail; refcount_t untracked; + refcount_t no_tracker; + bool dead; struct list_head list; /* List of active trackers */ struct list_head quarantine; /* List of dead trackers */ #endif @@ -26,7 +28,9 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir, INIT_LIST_HEAD(&dir->quarantine); spin_lock_init(&dir->lock); dir->quarantine_avail = quarantine_count; + dir->dead = false; refcount_set(&dir->untracked, 1); + refcount_set(&dir->no_tracker, 1); stack_depot_init(); } diff --git a/include/linux/refcount_api.h b/include/linux/refcount_api.h new file mode 100644 index 000000000000..5f032589f568 --- /dev/null +++ b/include/linux/refcount_api.h @@ -0,0 +1 @@ +#include <linux/refcount.h> diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 22652e5fbc38..de81a94d7b30 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -237,6 +237,10 @@ typedef void (*regmap_unlock)(void *); * @reg_stride: The register address stride. Valid register addresses are a * multiple of this value. If set to 0, a value of 1 will be * used. + * @reg_downshift: The number of bits to downshift the register before + * performing any operations. + * @reg_base: Value to be added to every register address before performing any + * operation. * @pad_bits: Number of bits of padding between register and value. 
* @val_bits: Number of bits in a register value, mandatory. * @@ -360,6 +364,8 @@ struct regmap_config { int reg_bits; int reg_stride; + int reg_downshift; + unsigned int reg_base; int pad_bits; int val_bits; diff --git a/include/linux/resource.h b/include/linux/resource.h index bdf491cbcab7..4fdbc0c3f315 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h @@ -8,7 +8,5 @@ struct task_struct; void getrusage(struct task_struct *p, int who, struct rusage *ru); -int do_prlimit(struct task_struct *tsk, unsigned int resource, - struct rlimit *new_rlim, struct rlimit *old_rlim); #endif diff --git a/include/linux/rethook.h b/include/linux/rethook.h new file mode 100644 index 000000000000..c8ac1e5afcd1 --- /dev/null +++ b/include/linux/rethook.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Return hooking with list-based shadow stack. + */ +#ifndef _LINUX_RETHOOK_H +#define _LINUX_RETHOOK_H + +#include <linux/compiler.h> +#include <linux/freelist.h> +#include <linux/kallsyms.h> +#include <linux/llist.h> +#include <linux/rcupdate.h> +#include <linux/refcount.h> + +struct rethook_node; + +typedef void (*rethook_handler_t) (struct rethook_node *, void *, struct pt_regs *); + +/** + * struct rethook - The rethook management data structure. + * @data: The user-defined data storage. + * @handler: The user-defined return hook handler. + * @pool: The pool of struct rethook_node. + * @ref: The reference counter. + * @rcu: The rcu_head for deferred freeing. + * + * Don't embed this in another data structure, because it is a self-destructive + * data structure when all rethook_node entries are freed. + */ +struct rethook { + void *data; + rethook_handler_t handler; + struct freelist_head pool; + refcount_t ref; + struct rcu_head rcu; +}; + +/** + * struct rethook_node - The rethook shadow-stack entry node. + * @freelist: The freelist, linked to struct rethook::pool. + * @rcu: The rcu_head for deferred freeing. + * @llist: The llist, linked to a struct task_struct::rethooks. + * @rethook: The pointer to the struct rethook. + * @ret_addr: The storage for the real return address. + * @frame: The storage for the frame pointer. + * + * You can embed this in your extended data structure to store any data + * on each entry of the shadow stack. + */ +struct rethook_node { + union { + struct freelist_node freelist; + struct rcu_head rcu; + }; + struct llist_node llist; + struct rethook *rethook; + unsigned long ret_addr; + unsigned long frame; +}; + +struct rethook *rethook_alloc(void *data, rethook_handler_t handler); +void rethook_free(struct rethook *rh); +void rethook_add_node(struct rethook *rh, struct rethook_node *node); +struct rethook_node *rethook_try_get(struct rethook *rh); +void rethook_recycle(struct rethook_node *node); +void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount); +unsigned long rethook_find_ret_addr(struct task_struct *tsk, unsigned long frame, + struct llist_node **cur); + +/* Arch dependent code must implement arch_* and trampoline code */ +void arch_rethook_prepare(struct rethook_node *node, struct pt_regs *regs, bool mcount); +void arch_rethook_trampoline(void); + +/** + * is_rethook_trampoline() - Check whether the address is the rethook trampoline + * @addr: The address to be checked + * + * Return true if the @addr is the rethook trampoline address.
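Taken together, the rethook declarations above imply roughly the following life cycle; this is a rough sketch — the handler body, pool size, and entry-side trigger are invented — not code from the patch:

	/* Sketch: allocate a rethook with a pool of nodes, then arm a
	 * node from the entry side of a probed function. */
	static void my_ret_handler(struct rethook_node *rhn, void *data,
				   struct pt_regs *regs)
	{
		/* runs when the probed function returns */
	}

	static struct rethook *my_rh;

	static int my_setup(void)
	{
		int i;

		my_rh = rethook_alloc(NULL, my_ret_handler);
		if (!my_rh)
			return -ENOMEM;
		for (i = 0; i < 16; i++) {
			struct rethook_node *node =
				kzalloc(sizeof(*node), GFP_KERNEL);

			if (node)
				rethook_add_node(my_rh, node);
		}
		return 0;
	}

	/* on entry to the probed function (e.g. from an fprobe handler): */
	static void my_entry(struct pt_regs *regs)
	{
		struct rethook_node *node = rethook_try_get(my_rh);

		if (node)
			rethook_hook(node, regs, true);
	}
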
+ */ +static inline bool is_rethook_trampoline(unsigned long addr) +{ + return addr == (unsigned long)dereference_symbol_descriptor(arch_rethook_trampoline); +} + +/* If the architecture needs to fixup the return address, implement it. */ +void arch_rethook_fixup_return(struct pt_regs *regs, + unsigned long correct_ret_addr); + +/* Generic trampoline handler, arch code must prepare asm stub */ +unsigned long rethook_trampoline_handler(struct pt_regs *regs, + unsigned long frame); + +#ifdef CONFIG_RETHOOK +void rethook_flush_task(struct task_struct *tk); +#else +#define rethook_flush_task(tsk) do { } while (0) +#endif + +#endif + diff --git a/include/linux/rmap.h b/include/linux/rmap.h index e704b1a4c06c..17230c458341 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -11,6 +11,7 @@ #include <linux/rwsem.h> #include <linux/memcontrol.h> #include <linux/highmem.h> +#include <linux/pagemap.h> /* * The anon_vma heads a list of private "related" vmas, to scan if @@ -167,18 +168,19 @@ struct anon_vma *page_get_anon_vma(struct page *page); */ void page_move_anon_rmap(struct page *, struct vm_area_struct *); void page_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, bool); + unsigned long address, bool compound); void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, int); + unsigned long address, int flags); void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long, bool); -void page_add_file_rmap(struct page *, bool); -void page_remove_rmap(struct page *, bool); - + unsigned long address, bool compound); +void page_add_file_rmap(struct page *, struct vm_area_struct *, + bool compound); +void page_remove_rmap(struct page *, struct vm_area_struct *, + bool compound); void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long); + unsigned long address); void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long); + unsigned long address); static inline void page_dup_rmap(struct page *page, bool compound) { @@ -188,11 +190,11 @@ static inline void page_dup_rmap(struct page *page, bool compound) /* * Called from mm/vmscan.c to handle paging out */ -int page_referenced(struct page *, int is_locked, +int folio_referenced(struct folio *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags); -void try_to_migrate(struct page *page, enum ttu_flags flags); -void try_to_unmap(struct page *, enum ttu_flags flags); +void try_to_migrate(struct folio *folio, enum ttu_flags flags); +void try_to_unmap(struct folio *, enum ttu_flags flags); int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, unsigned long end, struct page **pages, @@ -200,11 +202,13 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, /* Avoid racy checks */ #define PVMW_SYNC (1 << 0) -/* Look for migarion entries rather than present PTEs */ +/* Look for migration entries rather than present PTEs */ #define PVMW_MIGRATION (1 << 1) struct page_vma_mapped_walk { - struct page *page; + unsigned long pfn; + unsigned long nr_pages; + pgoff_t pgoff; struct vm_area_struct *vma; unsigned long address; pmd_t *pmd; @@ -213,10 +217,30 @@ struct page_vma_mapped_walk { unsigned int flags; }; +#define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \ + struct page_vma_mapped_walk name = { \ + .pfn = page_to_pfn(_page), \ + .nr_pages = compound_nr(_page), \ + .pgoff = page_to_pgoff(_page), \ + .vma = _vma, \ + .address = _address, \ + .flags = _flags, \
+ } + +#define DEFINE_FOLIO_VMA_WALK(name, _folio, _vma, _address, _flags) \ + struct page_vma_mapped_walk name = { \ + .pfn = folio_pfn(_folio), \ + .nr_pages = folio_nr_pages(_folio), \ + .pgoff = folio_pgoff(_folio), \ + .vma = _vma, \ + .address = _address, \ + .flags = _flags, \ + } + static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) { /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */ - if (pvmw->pte && !PageHuge(pvmw->page)) + if (pvmw->pte && !is_vm_hugetlb_page(pvmw->vma)) pte_unmap(pvmw->pte); if (pvmw->ptl) spin_unlock(pvmw->ptl); @@ -237,18 +261,12 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); */ int folio_mkclean(struct folio *); -/* - * called in munlock()/munmap() path to check for other vmas holding - * the page mlocked. - */ -void page_mlock(struct page *page); - -void remove_migration_ptes(struct page *old, struct page *new, bool locked); +void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked); /* * Called by memory-failure.c to kill processes. */ -struct anon_vma *page_lock_anon_vma_read(struct page *page); +struct anon_vma *folio_lock_anon_vma_read(struct folio *folio); void page_unlock_anon_vma_read(struct anon_vma *anon_vma); int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); @@ -267,15 +285,15 @@ struct rmap_walk_control { * Return false if page table scanning in rmap_walk should be stopped. * Otherwise, return true. */ - bool (*rmap_one)(struct page *page, struct vm_area_struct *vma, + bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg); - int (*done)(struct page *page); - struct anon_vma *(*anon_lock)(struct page *page); + int (*done)(struct folio *folio); + struct anon_vma *(*anon_lock)(struct folio *folio); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; -void rmap_walk(struct page *page, struct rmap_walk_control *rwc); -void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); +void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc); +void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ @@ -283,7 +301,7 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc); #define anon_vma_prepare(vma) (0) #define anon_vma_link(vma) do {} while (0) -static inline int page_referenced(struct page *page, int is_locked, +static inline int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) { @@ -291,7 +309,7 @@ static inline int page_referenced(struct page *page, int is_locked, return 0; } -static inline void try_to_unmap(struct page *page, enum ttu_flags flags) +static inline void try_to_unmap(struct folio *folio, enum ttu_flags flags) { } diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index bb9cb84114c1..7f970b16da3a 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -134,4 +134,7 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, int (*vlan_fill)(struct sk_buff *skb, struct net_device *dev, u32 filter_mask)); + +extern void rtnl_offload_xstats_notify(struct net_device *dev); + #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index f9348769e558..efa5c324369a 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -230,7 +230,7 @@ extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map * do { \ 
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ _down_write_nest_lock(sem, &(nest_lock)->dep_map); \ -} while (0); +} while (0) /* * Take/release a lock when not the owner will release it. diff --git a/include/linux/sched.h b/include/linux/sched.h index 75ba8aa60248..4a6fdd2a679f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -938,6 +938,9 @@ struct task_struct { /* Recursion prevention for eventfd_signal() */ unsigned in_eventfd_signal:1; #endif +#ifdef CONFIG_IOMMU_SVA + unsigned pasid_activated:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ @@ -1087,6 +1090,9 @@ struct task_struct { /* Restored if set_restore_sigmask() was used: */ sigset_t saved_sigmask; struct sigpending pending; +#ifdef CONFIG_RT_DELAYED_SIGNALS + struct kernel_siginfo forced_info; +#endif unsigned long sas_ss_sp; size_t sas_ss_size; unsigned int sas_ss_flags; @@ -1481,6 +1487,9 @@ struct task_struct { #ifdef CONFIG_KRETPROBES struct llist_head kretprobe_instances; #endif +#ifdef CONFIG_RETHOOK + struct llist_head rethooks; +#endif #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH /* @@ -1620,19 +1629,32 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) #define TASK_REPORT_IDLE (TASK_REPORT + 1) #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) -static inline unsigned int task_state_index(struct task_struct *tsk) +static inline unsigned int __task_state_index(unsigned int tsk_state, + unsigned int tsk_exit_state) { - unsigned int tsk_state = READ_ONCE(tsk->__state); - unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; + unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT; BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); if (tsk_state == TASK_IDLE) state = TASK_REPORT_IDLE; + /* + * We're lying here, but rather than expose a completely new task state + * to userspace, we can make this appear as if the task has gone through + * a regular rt_mutex_lock() call. + */ + if (tsk_state == TASK_RTLOCK_WAIT) + state = TASK_UNINTERRUPTIBLE; + return fls(state); } +static inline unsigned int task_state_index(struct task_struct *tsk) +{ + return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); +} + static inline char task_index_to_char(unsigned int state) { static const char state_char[] = "RSDTtXZPI"; @@ -1689,7 +1711,6 @@ extern struct pid *cad_pid; * I am cleaning dirty pages from some other bdi. */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ -#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMALLOC_PIN 0x10000000 /* Allocation context constrained to zones which allow long term pinning. 
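The one-character down_write_nest_lock() change above (dropping the semicolon after "while (0)") is the classic do/while(0) macro-hygiene fix; a sketch of the failure mode it prevents (illustrative code, not from the patch):

	/* Before the fix, the macro expanded to "do { ... } while (0);;",
	 * i.e. two statements, which breaks when used as a single branch:
	 *
	 *	if (need_nested)
	 *		down_write_nest_lock(&a->rwsem, &b->lock);
	 *	else			// error: 'else' without a matching 'if'
	 *		plain_lock(a);
	 *
	 * With the trailing semicolon removed, the caller's own ';'
	 * terminates the statement and if/else nesting works as expected.
	 */
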
*/ @@ -2015,7 +2036,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) extern int __cond_resched(void); -#ifdef CONFIG_PREEMPT_DYNAMIC +#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) DECLARE_STATIC_CALL(cond_resched, __cond_resched); @@ -2024,6 +2045,14 @@ static __always_inline int _cond_resched(void) return static_call_mod(cond_resched)(); } +#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) +extern int dynamic_cond_resched(void); + +static __always_inline int _cond_resched(void) +{ + return dynamic_cond_resched(); +} + #else static inline int _cond_resched(void) diff --git a/include/linux/sched/affinity.h b/include/linux/sched/affinity.h new file mode 100644 index 000000000000..227f5be81bcd --- /dev/null +++ b/include/linux/sched/affinity.h @@ -0,0 +1 @@ +#include <linux/sched.h> diff --git a/include/linux/sched/cond_resched.h b/include/linux/sched/cond_resched.h new file mode 100644 index 000000000000..227f5be81bcd --- /dev/null +++ b/include/linux/sched/cond_resched.h @@ -0,0 +1 @@ +#include <linux/sched.h> diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 1aff00b65f3c..7c83d4d5a971 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -6,6 +6,8 @@ * NORMAL/BATCH tasks. */ +#include <linux/sched.h> + #define MAX_DL_PRIO 0 static inline int dl_prio(int prio) diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index cc9f393e2a70..8c15abd67aed 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -5,54 +5,55 @@ #include <linux/init.h> #include <linux/tick.h> -enum hk_flags { - HK_FLAG_TIMER = 1, - HK_FLAG_RCU = (1 << 1), - HK_FLAG_MISC = (1 << 2), - HK_FLAG_SCHED = (1 << 3), - HK_FLAG_TICK = (1 << 4), - HK_FLAG_DOMAIN = (1 << 5), - HK_FLAG_WQ = (1 << 6), - HK_FLAG_MANAGED_IRQ = (1 << 7), - HK_FLAG_KTHREAD = (1 << 8), +enum hk_type { + HK_TYPE_TIMER, + HK_TYPE_RCU, + HK_TYPE_MISC, + HK_TYPE_SCHED, + HK_TYPE_TICK, + HK_TYPE_DOMAIN, + HK_TYPE_WQ, + HK_TYPE_MANAGED_IRQ, + HK_TYPE_KTHREAD, + HK_TYPE_MAX }; #ifdef CONFIG_CPU_ISOLATION DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); -extern int housekeeping_any_cpu(enum hk_flags flags); -extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); -extern bool housekeeping_enabled(enum hk_flags flags); -extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags); -extern bool housekeeping_test_cpu(int cpu, enum hk_flags flags); +extern int housekeeping_any_cpu(enum hk_type type); +extern const struct cpumask *housekeeping_cpumask(enum hk_type type); +extern bool housekeeping_enabled(enum hk_type type); +extern void housekeeping_affine(struct task_struct *t, enum hk_type type); +extern bool housekeeping_test_cpu(int cpu, enum hk_type type); extern void __init housekeeping_init(void); #else -static inline int housekeeping_any_cpu(enum hk_flags flags) +static inline int housekeeping_any_cpu(enum hk_type type) { return smp_processor_id(); } -static inline const struct cpumask *housekeeping_cpumask(enum hk_flags flags) +static inline const struct cpumask *housekeeping_cpumask(enum hk_type type) { return cpu_possible_mask; } -static inline bool housekeeping_enabled(enum hk_flags flags) +static inline bool housekeeping_enabled(enum hk_type type) { return false; } static inline void housekeeping_affine(struct task_struct *t, - enum hk_flags flags) { } + 
enum hk_type type) { } static inline void housekeeping_init(void) { } #endif /* CONFIG_CPU_ISOLATION */ -static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) +static inline bool housekeeping_cpu(int cpu, enum hk_type type) { #ifdef CONFIG_CPU_ISOLATION if (static_branch_unlikely(&housekeeping_overridden)) - return housekeeping_test_cpu(cpu, flags); + return housekeeping_test_cpu(cpu, type); #endif return true; } diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index aa5f09ca5bcf..a80356e9dc69 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -8,6 +8,7 @@ #include <linux/mm_types.h> #include <linux/gfp.h> #include <linux/sync_core.h> +#include <linux/ioasid.h> /* * Routines for handling mm_structs @@ -433,4 +434,29 @@ static inline void membarrier_update_current_mm(struct mm_struct *next_mm) } #endif +#ifdef CONFIG_IOMMU_SVA +static inline void mm_pasid_init(struct mm_struct *mm) +{ + mm->pasid = INVALID_IOASID; +} + +/* Associate a PASID with an mm_struct: */ +static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) +{ + mm->pasid = pasid; +} + +static inline void mm_pasid_drop(struct mm_struct *mm) +{ + if (pasid_valid(mm->pasid)) { + ioasid_free(mm->pasid); + mm->pasid = INVALID_IOASID; + } +} +#else +static inline void mm_pasid_init(struct mm_struct *mm) {} +static inline void mm_pasid_set(struct mm_struct *mm, u32 pasid) {} +static inline void mm_pasid_drop(struct mm_struct *mm) {} +#endif + #endif /* _LINUX_SCHED_MM_H */ diff --git a/include/linux/sched/posix-timers.h b/include/linux/sched/posix-timers.h new file mode 100644 index 000000000000..523a381d6c88 --- /dev/null +++ b/include/linux/sched/posix-timers.h @@ -0,0 +1 @@ +#include <linux/posix-timers.h> diff --git a/include/linux/sched/rseq_api.h b/include/linux/sched/rseq_api.h new file mode 100644 index 000000000000..cf2af72693e1 --- /dev/null +++ b/include/linux/sched/rseq_api.h @@ -0,0 +1 @@ +#include <linux/rseq.h> diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index c19dd5a2c05c..c1076b5e17fb 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -23,6 +23,16 @@ enum sched_tunable_scaling { SCHED_TUNABLESCALING_END, }; +#define NUMA_BALANCING_DISABLED 0x0 +#define NUMA_BALANCING_NORMAL 0x1 +#define NUMA_BALANCING_MEMORY_TIERING 0x2 + +#ifdef CONFIG_NUMA_BALANCING +extern int sysctl_numa_balancing_mode; +#else +#define sysctl_numa_balancing_mode 0 +#endif + /* * control realtime throttling: * @@ -45,10 +55,6 @@ extern unsigned int sysctl_sched_uclamp_util_min_rt_default; extern unsigned int sysctl_sched_cfs_bandwidth_slice; #endif -#ifdef CONFIG_SCHED_AUTOGROUP -extern unsigned int sysctl_sched_autogroup_enabled; -#endif - extern int sysctl_sched_rr_timeslice; extern int sched_rr_timeslice; diff --git a/include/linux/sched/task_flags.h b/include/linux/sched/task_flags.h new file mode 100644 index 000000000000..227f5be81bcd --- /dev/null +++ b/include/linux/sched/task_flags.h @@ -0,0 +1 @@ +#include <linux/sched.h> diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index d10150587d81..892562ebbd3a 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -79,6 +79,8 @@ static inline void *try_get_task_stack(struct task_struct *tsk) static inline void put_task_stack(struct task_struct *tsk) {} #endif +void exit_task_stack_account(struct task_struct *tsk); + #define task_stack_end_corrupted(task) \ (*(end_of_stack(task)) != STACK_END_MAGIC) diff --git 
a/include/linux/sched/thread_info_api.h b/include/linux/sched/thread_info_api.h new file mode 100644 index 000000000000..2c60fbc16c08 --- /dev/null +++ b/include/linux/sched/thread_info_api.h @@ -0,0 +1 @@ +#include <linux/thread_info.h> diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 8054641c0a7b..56cffe42abbc 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -93,6 +93,7 @@ struct sched_domain { unsigned int busy_factor; /* less balancing by factor if busy */ unsigned int imbalance_pct; /* No balance until over watermark */ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */ + unsigned int imb_numa_nr; /* Nr running tasks that allows a NUMA imbalance */ int nohz_idle; /* NOHZ IDLE status */ int flags; /* See SD_* */ diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 835ee87ed792..cb41c5edb4d4 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -5,6 +5,8 @@ #ifndef LINUX_SCHED_CLOCK #define LINUX_SCHED_CLOCK +#include <linux/types.h> + #ifdef CONFIG_GENERIC_SCHED_CLOCK /** * struct clock_read_data - data required to read from sched_clock() diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 80e781c51ddc..b87551f41f9f 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -42,6 +42,7 @@ struct scmi_revision_info { struct scmi_clock_info { char name[SCMI_MAX_STR_SIZE]; + unsigned int enable_latency; bool rate_discrete; union { struct { @@ -82,6 +83,9 @@ struct scmi_clk_proto_ops { u64 rate); int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id); int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id); + int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id); + int (*disable_atomic)(const struct scmi_protocol_handle *ph, + u32 clk_id); }; /** @@ -612,6 +616,15 @@ struct scmi_notify_ops { * @devm_protocol_get: devres managed method to acquire a protocol and get specific * operations and a dedicated protocol handler * @devm_protocol_put: devres managed method to release a protocol + * @is_transport_atomic: method to check if the underlying transport for this + * instance handle is configured to support atomic + * transactions for commands. + * Some users of the SCMI stack in the upper layers could + * be interested to know if they can assume SCMI + * command transactions associated to this handle will + * never sleep and act accordingly. + * An optional atomic threshold value could be returned + * where configured. 
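A short sketch of how a clock user might act on is_transport_atomic() together with the new enable_atomic()/disable_atomic() clock ops above; the driver scaffolding is invented for illustration:

	/* Sketch: choose sleeping vs. atomic clock enable based on the
	 * transport capability reported by the handle. */
	static int my_clk_on(const struct scmi_handle *handle,
			     const struct scmi_clk_proto_ops *clk_ops,
			     const struct scmi_protocol_handle *ph, u32 clk_id)
	{
		unsigned int threshold_us = 0;

		if (handle->is_transport_atomic(handle, &threshold_us))
			return clk_ops->enable_atomic(ph, clk_id);

		return clk_ops->enable(ph, clk_id);	/* may sleep */
	}
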
* @notify_ops: pointer to set of notifications related operations */ struct scmi_handle { @@ -622,6 +635,8 @@ struct scmi_handle { (*devm_protocol_get)(struct scmi_device *sdev, u8 proto, struct scmi_protocol_handle **ph); void (*devm_protocol_put)(struct scmi_device *sdev, u8 proto); + bool (*is_transport_atomic)(const struct scmi_handle *handle, + unsigned int *atomic_threshold); const struct scmi_notify_ops *notify_ops; }; diff --git a/include/linux/security.h b/include/linux/security.h index 6d72772182c8..25b3ef71f495 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1422,6 +1422,8 @@ int security_sctp_bind_connect(struct sock *sk, int optname, struct sockaddr *address, int addrlen); void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk); +int security_sctp_assoc_established(struct sctp_association *asoc, + struct sk_buff *skb); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -1641,6 +1643,12 @@ static inline void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *newsk) { } + +static inline int security_sctp_assoc_established(struct sctp_association *asoc, + struct sk_buff *skb) +{ + return 0; +} #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/seqlock_api.h b/include/linux/seqlock_api.h new file mode 100644 index 000000000000..be91e7d3b826 --- /dev/null +++ b/include/linux/seqlock_api.h @@ -0,0 +1 @@ +#include <linux/seqlock.h> diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index e65b80ed09e7..ab51d3cd39bd 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -24,6 +24,7 @@ struct shmem_inode_info { struct shared_policy policy; /* NUMA memory alloc policy */ struct simple_xattrs xattrs; /* list of xattrs */ atomic_t stop_eviction; /* hold when working on inode */ + struct timespec64 i_crtime; /* file creation time */ struct inode vfs_inode; }; diff --git a/include/linux/sizes.h b/include/linux/sizes.h index 1ac79bcee2bb..84aa448d8bb3 100644 --- a/include/linux/sizes.h +++ b/include/linux/sizes.h @@ -47,6 +47,8 @@ #define SZ_8G _AC(0x200000000, ULL) #define SZ_16G _AC(0x400000000, ULL) #define SZ_32G _AC(0x800000000, ULL) + +#define SZ_1T _AC(0x10000000000, ULL) #define SZ_64T _AC(0x400000000000, ULL) #endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8a636e678902..3a30cae8b0a5 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -314,12 +314,136 @@ struct sk_buff; * used to translate the reason to string. 
*/ enum skb_drop_reason { - SKB_DROP_REASON_NOT_SPECIFIED, - SKB_DROP_REASON_NO_SOCKET, - SKB_DROP_REASON_PKT_TOO_SMALL, - SKB_DROP_REASON_TCP_CSUM, - SKB_DROP_REASON_SOCKET_FILTER, - SKB_DROP_REASON_UDP_CSUM, + SKB_NOT_DROPPED_YET = 0, + SKB_DROP_REASON_NOT_SPECIFIED, /* drop reason is not specified */ + SKB_DROP_REASON_NO_SOCKET, /* socket not found */ + SKB_DROP_REASON_PKT_TOO_SMALL, /* packet size is too small */ + SKB_DROP_REASON_TCP_CSUM, /* TCP checksum error */ + SKB_DROP_REASON_SOCKET_FILTER, /* dropped by socket filter */ + SKB_DROP_REASON_UDP_CSUM, /* UDP checksum error */ + SKB_DROP_REASON_NETFILTER_DROP, /* dropped by netfilter */ + SKB_DROP_REASON_OTHERHOST, /* packet doesn't belong to the + * current host (interface is in + * promisc mode) + */ + SKB_DROP_REASON_IP_CSUM, /* IP checksum error */ + SKB_DROP_REASON_IP_INHDR, /* there is something wrong with + * the IP header (see + * IPSTATS_MIB_INHDRERRORS) + */ + SKB_DROP_REASON_IP_RPFILTER, /* IP rpfilter validation failed; + * see the rp_filter documentation + * in ip-sysctl.rst for more + * information + */ + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, /* destination address of L2 + * is multicast, but L3 is + * unicast. + */ + SKB_DROP_REASON_XFRM_POLICY, /* xfrm policy check failed */ + SKB_DROP_REASON_IP_NOPROTO, /* no support for IP protocol */ + SKB_DROP_REASON_SOCKET_RCVBUFF, /* socket receive buffer is full */ + SKB_DROP_REASON_PROTO_MEM, /* proto memory limitation, such as + * UDP packets dropped once + * udp_memory_allocated is exceeded + */ + SKB_DROP_REASON_TCP_MD5NOTFOUND, /* no MD5 hash and one + * expected, corresponding + * to LINUX_MIB_TCPMD5NOTFOUND + */ + SKB_DROP_REASON_TCP_MD5UNEXPECTED, /* MD5 hash and we're not + * expecting one, corresponding + * to LINUX_MIB_TCPMD5UNEXPECTED + */ + SKB_DROP_REASON_TCP_MD5FAILURE, /* MD5 hash and it's wrong, + * corresponding to + * LINUX_MIB_TCPMD5FAILURE + */ + SKB_DROP_REASON_SOCKET_BACKLOG, /* failed to add skb to socket + * backlog (see + * LINUX_MIB_TCPBACKLOGDROP) + */ + SKB_DROP_REASON_TCP_FLAGS, /* TCP flags invalid */ + SKB_DROP_REASON_TCP_ZEROWINDOW, /* TCP receive window size is zero, + * see LINUX_MIB_TCPZEROWINDOWDROP + */ + SKB_DROP_REASON_TCP_OLD_DATA, /* the TCP data received was already + * received before (a spurious + * retransmission may have + * happened), see + * LINUX_MIB_DELAYEDACKLOST + */ + SKB_DROP_REASON_TCP_OVERWINDOW, /* the TCP data is out of window, + * the seq of the first byte exceeds + * the right edge of the receive + * window + */ + SKB_DROP_REASON_TCP_OFOMERGE, /* the data of the skb is already in + * the ofo queue, corresponding to + * LINUX_MIB_TCPOFOMERGE + */ + SKB_DROP_REASON_IP_OUTNOROUTES, /* route lookup failed */ + SKB_DROP_REASON_BPF_CGROUP_EGRESS, /* dropped by + * BPF_PROG_TYPE_CGROUP_SKB + * eBPF program + */ + SKB_DROP_REASON_IPV6DISABLED, /* IPv6 is disabled on the device */ + SKB_DROP_REASON_NEIGH_CREATEFAIL, /* failed to create neigh + * entry + */ + SKB_DROP_REASON_NEIGH_FAILED, /* neigh entry in failed state */ + SKB_DROP_REASON_NEIGH_QUEUEFULL, /* arp_queue for neigh + * entry is full + */ + SKB_DROP_REASON_NEIGH_DEAD, /* neigh entry is dead */ + SKB_DROP_REASON_TC_EGRESS, /* dropped in TC egress HOOK */ + SKB_DROP_REASON_QDISC_DROP, /* dropped by the qdisc on packet + * output (failed to enqueue to + * the current qdisc) + */ + SKB_DROP_REASON_CPU_BACKLOG, /* failed to enqueue the skb to + * the per CPU backlog queue.
This + * can be caused by backlog queue + * full (see netdev_max_backlog in + * net.rst) or RPS flow limit + */ + SKB_DROP_REASON_XDP, /* dropped by XDP in input path */ + SKB_DROP_REASON_TC_INGRESS, /* dropped in TC ingress HOOK */ + SKB_DROP_REASON_PTYPE_ABSENT, /* not packet_type found to handle + * the skb. For an etner packet, + * this means that L3 protocol is + * not supported + */ + SKB_DROP_REASON_SKB_CSUM, /* sk_buff checksum computation + * error + */ + SKB_DROP_REASON_SKB_GSO_SEG, /* gso segmentation error */ + SKB_DROP_REASON_SKB_UCOPY_FAULT, /* failed to copy data from + * user space, e.g., via + * zerocopy_sg_from_iter() + * or skb_orphan_frags_rx() + */ + SKB_DROP_REASON_DEV_HDR, /* device driver specific + * header/metadata is invalid + */ + /* the device is not ready to xmit/recv due to any of its data + * structure that is not up/ready/initialized, e.g., the IFF_UP is + * not set, or driver specific tun->tfiles[txq] is not initialized + */ + SKB_DROP_REASON_DEV_READY, + SKB_DROP_REASON_FULL_RING, /* ring buffer is full */ + SKB_DROP_REASON_NOMEM, /* error due to OOM */ + SKB_DROP_REASON_HDR_TRUNC, /* failed to trunc/extract the header + * from networking data, e.g., failed + * to pull the protocol header from + * frags via pskb_may_pull() + */ + SKB_DROP_REASON_TAP_FILTER, /* dropped by (ebpf) filter directly + * attached to tun/tap, e.g., via + * TUNSETFILTEREBPF + */ + SKB_DROP_REASON_TAP_TXFILTER, /* dropped by tx filter implemented + * at tun/tap, e.g., check_filter() + */ SKB_DROP_REASON_MAX, }; @@ -557,6 +681,7 @@ struct skb_shared_info { * Warning : all fields before dataref are cleared in __alloc_skb() */ atomic_t dataref; + unsigned int xdp_frags_size; /* Intermediate layers must ensure that destructor_arg * remains valid until skb destructor */ @@ -720,6 +845,10 @@ typedef unsigned char *sk_buff_data_t; * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @slow_gro: state present at GRO time, slower prepare step required + * @mono_delivery_time: When set, skb->tstamp has the + * delivery_time in mono clock base (i.e. EDT). Otherwise, the + * skb->tstamp has the (rcv) timestamp at ingress and + * delivery_time at egress. * @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @secmark: security marking @@ -862,8 +991,12 @@ struct sk_buff { __u8 vlan_present:1; /* See PKT_VLAN_PRESENT_BIT */ __u8 csum_complete_sw:1; __u8 csum_level:2; - __u8 csum_not_inet:1; __u8 dst_pending_confirm:1; + __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ +#ifdef CONFIG_NET_CLS_ACT + __u8 tc_skip_classify:1; + __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ +#endif #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif @@ -875,10 +1008,6 @@ struct sk_buff { __u8 offload_fwd_mark:1; __u8 offload_l3_fwd_mark:1; #endif -#ifdef CONFIG_NET_CLS_ACT - __u8 tc_skip_classify:1; - __u8 tc_at_ingress:1; -#endif __u8 redirected:1; #ifdef CONFIG_NET_REDIRECT __u8 from_ingress:1; @@ -890,6 +1019,7 @@ struct sk_buff { __u8 decrypted:1; #endif __u8 slow_gro:1; + __u8 csum_not_inet:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -964,11 +1094,17 @@ struct sk_buff { #endif #define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset) -/* if you move pkt_vlan_present around you also must adapt these constants */ +/* if you move pkt_vlan_present, tc_at_ingress, or mono_delivery_time + * around, you also must adapt these constants. 
+ */ #ifdef __BIG_ENDIAN_BITFIELD #define PKT_VLAN_PRESENT_BIT 7 +#define TC_AT_INGRESS_MASK (1 << 0) +#define SKB_MONO_DELIVERY_TIME_MASK (1 << 2) #else #define PKT_VLAN_PRESENT_BIT 0 +#define TC_AT_INGRESS_MASK (1 << 7) +#define SKB_MONO_DELIVERY_TIME_MASK (1 << 5) #endif #define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset) @@ -1115,10 +1251,16 @@ static inline void kfree_skb(struct sk_buff *skb) } void skb_release_head_state(struct sk_buff *skb); -void kfree_skb_list(struct sk_buff *segs); +void kfree_skb_list_reason(struct sk_buff *segs, + enum skb_drop_reason reason); void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); +static inline void kfree_skb_list(struct sk_buff *segs) +{ + kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED); +} + #ifdef CONFIG_TRACEPOINTS void consume_skb(struct sk_buff *skb); #else @@ -1475,6 +1617,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end; } + +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) +{ + skb->end = offset; +} #else static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) { @@ -1485,6 +1632,11 @@ static inline unsigned int skb_end_offset(const struct sk_buff *skb) { return skb->end - skb->head; } + +static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) +{ + skb->end = skb->head + offset; +} #endif /* Internal */ @@ -1724,19 +1876,19 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) return 0; } -/* This variant of skb_unclone() makes sure skb->truesize is not changed */ +/* This variant of skb_unclone() makes sure skb->truesize + * and skb_end_offset() are not changed, whenever a new skb->head is needed. + * + * Indeed there is no guarantee that ksize(kmalloc(X)) == ksize(kmalloc(X)) + * when various debugging features are in place. + */ +int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri); static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) { might_sleep_if(gfpflags_allow_blocking(pri)); - if (skb_cloned(skb)) { - unsigned int save = skb->truesize; - int res; - - res = pskb_expand_head(skb, 0, 0, pri); - skb->truesize = save; - return res; - } + if (skb_cloned(skb)) + return __skb_unclone_keeptruesize(skb, pri); return 0; } @@ -3891,6 +4043,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb, static inline void __net_timestamp(struct sk_buff *skb) { skb->tstamp = ktime_get_real(); + skb->mono_delivery_time = 0; } static inline ktime_t net_timedelta(ktime_t t) @@ -3898,8 +4051,53 @@ static inline ktime_t net_timedelta(ktime_t t) return ktime_sub(ktime_get_real(), t); } -static inline ktime_t net_invalid_timestamp(void) +static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, + bool mono) +{ + skb->tstamp = kt; + skb->mono_delivery_time = kt && mono; +} + +DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); + +/* It is used in the ingress path to clear the delivery_time. + * If needed, set the skb->tstamp to the (rcv) timestamp. 
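In practice, an egress path stamps the EDT with skb_set_delivery_time() and the ingress path clears any leftover value; a minimal sketch of both sides (the calling contexts are assumed):

#include <linux/skbuff.h>

static void demo_stamp_edt(struct sk_buff *skb, ktime_t edt)
{
        /* skb->tstamp now holds the EDT; mono_delivery_time records the base */
        skb_set_delivery_time(skb, edt, true);
}

static void demo_rx(struct sk_buff *skb)
{
        /* A stale EDT must not be mistaken for a receive timestamp */
        skb_clear_delivery_time(skb);
}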
+ */ +static inline void skb_clear_delivery_time(struct sk_buff *skb) +{ + if (skb->mono_delivery_time) { + skb->mono_delivery_time = 0; + if (static_branch_unlikely(&netstamp_needed_key)) + skb->tstamp = ktime_get_real(); + else + skb->tstamp = 0; + } +} + +static inline void skb_clear_tstamp(struct sk_buff *skb) +{ + if (skb->mono_delivery_time) + return; + + skb->tstamp = 0; +} + +static inline ktime_t skb_tstamp(const struct sk_buff *skb) +{ + if (skb->mono_delivery_time) + return 0; + + return skb->tstamp; +} + +static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) { + if (!skb->mono_delivery_time && skb->tstamp) + return skb->tstamp; + + if (static_branch_unlikely(&netstamp_needed_key) || cond) + return ktime_get_real(); + return 0; } @@ -4759,7 +4957,7 @@ static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) #ifdef CONFIG_NET_REDIRECT skb->from_ingress = from_ingress; if (skb->from_ingress) - skb->tstamp = 0; + skb_clear_tstamp(skb); #endif } diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 18a717fe62eb..c5a2d6f50f25 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -29,7 +29,7 @@ struct sk_msg_sg { u32 end; u32 size; u32 copybreak; - unsigned long copy; + DECLARE_BITMAP(copy, MAX_MSG_FRAGS + 2); /* The extra two elements: * 1) used for chaining the front and sections when the list becomes * partitioned (e.g. end < start). The crypto APIs require the @@ -38,7 +38,6 @@ struct sk_msg_sg { */ struct scatterlist data[MAX_MSG_FRAGS + 2]; }; -static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS); /* UAPI in filter.c depends on struct sk_msg_sg being first element. */ struct sk_msg { @@ -171,11 +170,6 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end) #define sk_msg_iter_next(msg, which) \ sk_msg_iter_var_next(msg->sg.which) -static inline void sk_msg_clear_meta(struct sk_msg *msg) -{ - memset(&msg->sg, 0, offsetofend(struct sk_msg_sg, copy)); -} - static inline void sk_msg_init(struct sk_msg *msg) { BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS); @@ -234,7 +228,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg) { struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start); - if (test_bit(msg->sg.start, &msg->sg.copy)) { + if (test_bit(msg->sg.start, msg->sg.copy)) { msg->data = NULL; msg->data_end = NULL; } else { @@ -253,7 +247,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page, sg_set_page(sge, page, len, offset); sg_unmark_end(sge); - __set_bit(msg->sg.end, &msg->sg.copy); + __set_bit(msg->sg.end, msg->sg.copy); msg->sg.size += len; sk_msg_iter_next(msg, end); } @@ -262,9 +256,9 @@ static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state) { do { if (copy_state) - __set_bit(i, &msg->sg.copy); + __set_bit(i, msg->sg.copy); else - __clear_bit(i, &msg->sg.copy); + __clear_bit(i, msg->sg.copy); sk_msg_iter_var_next(i); if (i == msg->sg.end) break; @@ -310,21 +304,16 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } -static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg) -{ - if (msg->skb) - sock_drop(psock->sk, msg->skb); - kfree(msg); -} - static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { spin_lock_bh(&psock->ingress_lock); if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) list_add_tail(&msg->list, &psock->ingress_msg); - else - drop_sk_msg(psock, msg); + else { + sk_msg_free(psock->sk, msg); + kfree(msg); + } 
spin_unlock_bh(&psock->ingress_lock); } diff --git a/include/linux/slab.h b/include/linux/slab.h index 5b6193fd8bd9..373b3ef99f4e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -117,9 +117,6 @@ #define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U) #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ -/* Slab deactivation flag */ -#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U) - /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * @@ -135,6 +132,7 @@ #include <linux/kasan.h> +struct list_lru; struct mem_cgroup; /* * struct kmem_cache related prototypes @@ -416,6 +414,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size, void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc; +void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, + gfp_t gfpflags) __assume_slab_alignment __malloc; void kmem_cache_free(struct kmem_cache *s, void *objp); /* diff --git a/include/linux/soc/ixp4xx/cpu.h b/include/linux/soc/ixp4xx/cpu.h index 88bd8de0e803..f526ac33afea 100644 --- a/include/linux/soc/ixp4xx/cpu.h +++ b/include/linux/soc/ixp4xx/cpu.h @@ -9,6 +9,7 @@ #define __SOC_IXP4XX_CPU_H__ #include <linux/io.h> +#include <linux/regmap.h> #ifdef CONFIG_ARM #include <asm/cputype.h> #endif @@ -23,6 +24,9 @@ #define IXP46X_PROCESSOR_ID_VALUE 0x69054200 /* including IXP455 */ #define IXP46X_PROCESSOR_ID_MASK 0xfffffff0 +/* Feature register in the expansion bus controller */ +#define IXP4XX_EXP_CNFG2 0x2c + /* "fuse" bits of IXP_EXP_CFG2 */ /* All IXP4xx CPUs */ #define IXP4XX_FEATURE_RCOMP (1 << 0) @@ -86,21 +90,31 @@ IXP43X_PROCESSOR_ID_VALUE) #define cpu_is_ixp46x() ((read_cpuid_id() & IXP46X_PROCESSOR_ID_MASK) == \ IXP46X_PROCESSOR_ID_VALUE) +static inline u32 cpu_ixp4xx_features(struct regmap *rmap) +{ + u32 val; -u32 ixp4xx_read_feature_bits(void); -void ixp4xx_write_feature_bits(u32 value); + regmap_read(rmap, IXP4XX_EXP_CNFG2, &val); + /* For some reason this register is inverted */ + val = ~val; + if (cpu_is_ixp42x_rev_a0()) + return IXP42X_FEATURE_MASK & ~(IXP4XX_FEATURE_RCOMP | + IXP4XX_FEATURE_AES); + if (cpu_is_ixp42x()) + return val & IXP42X_FEATURE_MASK; + if (cpu_is_ixp43x()) + return val & IXP43X_FEATURE_MASK; + return val & IXP46X_FEATURE_MASK; +} #else #define cpu_is_ixp42x_rev_a0() 0 #define cpu_is_ixp42x() 0 #define cpu_is_ixp43x() 0 #define cpu_is_ixp46x() 0 -static inline u32 ixp4xx_read_feature_bits(void) +static inline u32 cpu_ixp4xx_features(struct regmap *rmap) { return 0; } -static inline void ixp4xx_write_feature_bits(u32 value) -{ -} #endif #endif /* _ASM_ARCH_CPU_H */ diff --git a/include/linux/soc/ixp4xx/npe.h b/include/linux/soc/ixp4xx/npe.h index 2a91f465d456..9efeac777da1 100644 --- a/include/linux/soc/ixp4xx/npe.h +++ b/include/linux/soc/ixp4xx/npe.h @@ -3,6 +3,7 @@ #define __IXP4XX_NPE_H #include <linux/kernel.h> +#include <linux/regmap.h> extern const char *npe_names[]; @@ -17,6 +18,7 @@ struct npe_regs { struct npe { struct npe_regs __iomem *regs; + struct regmap *rmap; int id; int valid; }; diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 4615a228da51..50804ac748bd 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -2,6 +2,88 @@ #ifndef __SOC_MEDIATEK_INFRACFG_H #define __SOC_MEDIATEK_INFRACFG_H +#define MT8195_TOP_AXI_PROT_EN_STA1 0x228 +#define 
MT8195_TOP_AXI_PROT_EN_1_STA1 0x258 +#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0 +#define MT8195_TOP_AXI_PROT_EN_CLR 0x2a4 +#define MT8195_TOP_AXI_PROT_EN_1_SET 0x2a8 +#define MT8195_TOP_AXI_PROT_EN_1_CLR 0x2ac +#define MT8195_TOP_AXI_PROT_EN_MM_SET 0x2d4 +#define MT8195_TOP_AXI_PROT_EN_MM_CLR 0x2d8 +#define MT8195_TOP_AXI_PROT_EN_MM_STA1 0x2ec +#define MT8195_TOP_AXI_PROT_EN_2_SET 0x714 +#define MT8195_TOP_AXI_PROT_EN_2_CLR 0x718 +#define MT8195_TOP_AXI_PROT_EN_2_STA1 0x724 +#define MT8195_TOP_AXI_PROT_EN_VDNR_SET 0xb84 +#define MT8195_TOP_AXI_PROT_EN_VDNR_CLR 0xb88 +#define MT8195_TOP_AXI_PROT_EN_VDNR_STA1 0xb90 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_SET 0xba4 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_CLR 0xba8 +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_STA1 0xbb0 +#define MT8195_TOP_AXI_PROT_EN_VDNR_2_SET 0xbb8 +#define MT8195_TOP_AXI_PROT_EN_VDNR_2_CLR 0xbbc +#define MT8195_TOP_AXI_PROT_EN_VDNR_2_STA1 0xbc4 +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_SET 0xbcc +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR 0xbd0 +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1 0xbd8 +#define MT8195_TOP_AXI_PROT_EN_MM_2_SET 0xdcc +#define MT8195_TOP_AXI_PROT_EN_MM_2_CLR 0xdd0 +#define MT8195_TOP_AXI_PROT_EN_MM_2_STA1 0xdd8 + +#define MT8195_TOP_AXI_PROT_EN_VDOSYS0 BIT(6) +#define MT8195_TOP_AXI_PROT_EN_VPPSYS0 BIT(10) +#define MT8195_TOP_AXI_PROT_EN_MFG1 BIT(11) +#define MT8195_TOP_AXI_PROT_EN_MFG1_2ND GENMASK(22, 21) +#define MT8195_TOP_AXI_PROT_EN_VPPSYS0_2ND BIT(23) +#define MT8195_TOP_AXI_PROT_EN_1_MFG1 GENMASK(20, 19) +#define MT8195_TOP_AXI_PROT_EN_1_CAM BIT(22) +#define MT8195_TOP_AXI_PROT_EN_2_CAM BIT(0) +#define MT8195_TOP_AXI_PROT_EN_2_MFG1_2ND GENMASK(6, 5) +#define MT8195_TOP_AXI_PROT_EN_2_MFG1 BIT(7) +#define MT8195_TOP_AXI_PROT_EN_2_AUDIO (BIT(9) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_2_ADSP (BIT(12) | GENMASK(16, 14)) +#define MT8195_TOP_AXI_PROT_EN_MM_CAM (BIT(0) | BIT(2) | BIT(4)) +#define MT8195_TOP_AXI_PROT_EN_MM_IPE BIT(1) +#define MT8195_TOP_AXI_PROT_EN_MM_IMG BIT(3) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS0 GENMASK(21, 17) +#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1 GENMASK(8, 5) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC (BIT(9) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC_CORE1 (BIT(10) | BIT(12)) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0 BIT(13) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1 BIT(14) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1_2ND BIT(22) +#define MT8195_TOP_AXI_PROT_EN_MM_VPPSYS1_2ND BIT(23) +#define MT8195_TOP_AXI_PROT_EN_MM_CAM_2ND BIT(24) +#define MT8195_TOP_AXI_PROT_EN_MM_IMG_2ND BIT(25) +#define MT8195_TOP_AXI_PROT_EN_MM_VENC_2ND BIT(26) +#define MT8195_TOP_AXI_PROT_EN_MM_WPESYS BIT(27) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC0_2ND BIT(28) +#define MT8195_TOP_AXI_PROT_EN_MM_VDEC1_2ND BIT(29) +#define MT8195_TOP_AXI_PROT_EN_MM_VDOSYS1 GENMASK(31, 30) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0_2ND (GENMASK(1, 0) | BIT(4) | BIT(11)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC BIT(2) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VENC_CORE1 (BIT(3) | BIT(15)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_CAM (BIT(5) | BIT(17)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS1 (GENMASK(7, 6) | BIT(18)) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VPPSYS0 GENMASK(9, 8) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDOSYS1 BIT(10) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2_2ND BIT(12) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0_2ND BIT(13) +#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS_2ND BIT(14) +#define MT8195_TOP_AXI_PROT_EN_MM_2_IPE BIT(16) +#define MT8195_TOP_AXI_PROT_EN_MM_2_VDEC2 BIT(21) +#define 
MT8195_TOP_AXI_PROT_EN_MM_2_VDEC0 BIT(22) +#define MT8195_TOP_AXI_PROT_EN_MM_2_WPESYS GENMASK(24, 23) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_EPD_TX BIT(1) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_DP_TX BIT(2) +#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P0 (BIT(11) | BIT(28)) +#define MT8195_TOP_AXI_PROT_EN_VDNR_PCIE_MAC_P1 (BIT(12) | BIT(29)) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P0 BIT(13) +#define MT8195_TOP_AXI_PROT_EN_VDNR_1_PCIE_MAC_P1 BIT(14) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_MFG1 (BIT(17) | BIT(19)) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VPPSYS0 BIT(20) +#define MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_VDOSYS0 BIT(21) + #define MT8192_TOP_AXI_PROT_EN_STA1 0x228 #define MT8192_TOP_AXI_PROT_EN_1_STA1 0x258 #define MT8192_TOP_AXI_PROT_EN_SET 0x2a0 @@ -58,6 +140,54 @@ #define MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND BIT(13) #define MT8192_TOP_AXI_PROT_EN_VDNR_CAM BIT(21) +#define MT8186_TOP_AXI_PROT_EN_SET (0x2A0) +#define MT8186_TOP_AXI_PROT_EN_CLR (0x2A4) +#define MT8186_TOP_AXI_PROT_EN_STA (0x228) +#define MT8186_TOP_AXI_PROT_EN_1_SET (0x2A8) +#define MT8186_TOP_AXI_PROT_EN_1_CLR (0x2AC) +#define MT8186_TOP_AXI_PROT_EN_1_STA (0x258) +#define MT8186_TOP_AXI_PROT_EN_2_SET (0x2B0) +#define MT8186_TOP_AXI_PROT_EN_2_CLR (0x2B4) +#define MT8186_TOP_AXI_PROT_EN_2_STA (0x26C) +#define MT8186_TOP_AXI_PROT_EN_3_SET (0x2B8) +#define MT8186_TOP_AXI_PROT_EN_3_CLR (0x2BC) +#define MT8186_TOP_AXI_PROT_EN_3_STA (0x2C8) + +/* MFG1 */ +#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP1 (GENMASK(28, 27)) +#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP2 (GENMASK(22, 21)) +#define MT8186_TOP_AXI_PROT_EN_MFG1_STEP3 (BIT(25)) +#define MT8186_TOP_AXI_PROT_EN_1_MFG1_STEP4 (BIT(29)) +/* DIS */ +#define MT8186_TOP_AXI_PROT_EN_1_DIS_STEP1 (GENMASK(12, 11)) +#define MT8186_TOP_AXI_PROT_EN_DIS_STEP2 (GENMASK(2, 1) | GENMASK(11, 10)) +/* IMG */ +#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP1 (BIT(23)) +#define MT8186_TOP_AXI_PROT_EN_1_IMG_STEP2 (BIT(15)) +/* IPE */ +#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP1 (BIT(24)) +#define MT8186_TOP_AXI_PROT_EN_1_IPE_STEP2 (BIT(16)) +/* CAM */ +#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP1 (GENMASK(22, 21)) +#define MT8186_TOP_AXI_PROT_EN_1_CAM_STEP2 (GENMASK(14, 13)) +/* VENC */ +#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP1 (BIT(31)) +#define MT8186_TOP_AXI_PROT_EN_1_VENC_STEP2 (BIT(19)) +/* VDEC */ +#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP1 (BIT(30)) +#define MT8186_TOP_AXI_PROT_EN_1_VDEC_STEP2 (BIT(17)) +/* WPE */ +#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP1 (BIT(17)) +#define MT8186_TOP_AXI_PROT_EN_2_WPE_STEP2 (BIT(16)) +/* CONN_ON */ +#define MT8186_TOP_AXI_PROT_EN_1_CONN_ON_STEP1 (BIT(18)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP2 (BIT(14)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP3 (BIT(13)) +#define MT8186_TOP_AXI_PROT_EN_CONN_ON_STEP4 (BIT(16)) +/* ADSP_TOP */ +#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP1 (GENMASK(12, 11)) +#define MT8186_TOP_AXI_PROT_EN_3_ADSP_TOP_STEP2 (GENMASK(1, 0)) + #define MT8183_TOP_AXI_PROT_EN_STA1 0x228 #define MT8183_TOP_AXI_PROT_EN_STA1_1 0x258 #define MT8183_TOP_AXI_PROT_EN_SET 0x2a0 @@ -147,6 +277,9 @@ #define INFRA_TOPAXI_PROTECTEN_SET 0x0260 #define INFRA_TOPAXI_PROTECTEN_CLR 0x0264 +#define MT8192_INFRA_CTRL 0x290 +#define MT8192_INFRA_CTRL_DISABLE_MFG2ACP BIT(9) + #define REG_INFRA_MISC 0xf00 #define F_DDR_4GB_SUPPORT_EN BIT(13) diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 9e8fd92c96b7..0bc21ee58fac 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ 
b/include/linux/soc/qcom/llcc-qcom.h @@ -35,7 +35,12 @@ #define LLCC_WRCACHE 31 #define LLCC_CVPFW 32 #define LLCC_CPUSS1 33 +#define LLCC_CAMEXP0 34 +#define LLCC_CPUMTE 35 #define LLCC_CPUHWT 36 +#define LLCC_MDMCLAD2 37 +#define LLCC_CAMEXP1 38 +#define LLCC_AENPU 45 /** * struct llcc_slice_desc - Cache slice descriptor @@ -83,7 +88,7 @@ struct llcc_edac_reg_data { * @bitmap: Bit map to track the active slice ids * @offsets: Pointer to the bank offsets array * @ecc_irq: interrupt for llcc cache error detection and reporting - * @major_version: Indicates the LLCC major version + * @version: Indicates the LLCC version */ struct llcc_drv_data { struct regmap *regmap; @@ -96,7 +101,7 @@ struct llcc_drv_data { unsigned long *bitmap; u32 *offsets; int ecc_irq; - u32 major_version; + u32 version; }; #if IS_ENABLED(CONFIG_QCOM_LLCC) diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h index afd47217996b..9e8e60421192 100644 --- a/include/linux/soc/qcom/mdt_loader.h +++ b/include/linux/soc/qcom/mdt_loader.h @@ -10,10 +10,14 @@ struct device; struct firmware; +struct qcom_scm_pas_metadata; #if IS_ENABLED(CONFIG_QCOM_MDT_LOADER) ssize_t qcom_mdt_get_size(const struct firmware *fw); +int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *pas_metadata_ctx); int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, @@ -23,7 +27,8 @@ int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, size_t mem_size, phys_addr_t *reloc_base); -void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len); +void *qcom_mdt_read_metadata(const struct firmware *fw, size_t *data_len, + const char *fw_name, struct device *dev); #else /* !IS_ENABLED(CONFIG_QCOM_MDT_LOADER) */ @@ -32,6 +37,13 @@ static inline ssize_t qcom_mdt_get_size(const struct firmware *fw) return -ENODEV; } +static inline int qcom_mdt_pas_init(struct device *dev, const struct firmware *fw, + const char *fw_name, int pas_id, phys_addr_t mem_phys, + struct qcom_scm_pas_metadata *pas_metadata_ctx) +{ + return -ENODEV; +} + static inline int qcom_mdt_load(struct device *dev, const struct firmware *fw, const char *fw_name, int pas_id, void *mem_region, phys_addr_t mem_phys, @@ -51,7 +63,8 @@ static inline int qcom_mdt_load_no_init(struct device *dev, } static inline void *qcom_mdt_read_metadata(const struct firmware *fw, - size_t *data_len) + size_t *data_len, const char *fw_name, + struct device *dev) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 0aad7009b50e..bd0d11af76c5 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -645,7 +645,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, static inline struct ti_sci_resource * devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, - u32 dev_id, u32 sub_type); + u32 dev_id, u32 sub_type) { return ERR_PTR(-EINVAL); } diff --git a/include/linux/socket.h b/include/linux/socket.h index 8ef26d89ef49..6f85f5d957ef 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -366,6 +366,7 @@ struct ucred { #define SOL_XDP 283 #define SOL_MPTCP 284 #define SOL_MCTP 285 +#define SOL_SMC 286 /* IPX options */ 
#define IPX_TYPE 1 diff --git a/include/linux/softirq.h b/include/linux/softirq.h new file mode 100644 index 000000000000..c73d7dcb4cb5 --- /dev/null +++ b/include/linux/softirq.h @@ -0,0 +1 @@ +#include <linux/interrupt.h> diff --git a/include/linux/sort.h b/include/linux/sort.h index b5898725fe9d..e163287ac6c1 100644 --- a/include/linux/sort.h +++ b/include/linux/sort.h @@ -6,7 +6,7 @@ void sort_r(void *base, size_t num, size_t size, cmp_r_func_t cmp_func, - swap_func_t swap_func, + swap_r_func_t swap_func, const void *priv); void sort(void *base, size_t num, size_t size, diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index ca74dce36706..4658e7801b42 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -42,7 +42,6 @@ struct pxa2xx_spi_chip { u8 rx_threshold; u8 dma_burst_size; u32 timeout; - int gpio_cs; }; #if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) diff --git a/include/linux/spi/s3c24xx.h b/include/linux/spi/s3c24xx.h index 440a71593162..9b8bb22d5b0c 100644 --- a/include/linux/spi/s3c24xx.h +++ b/include/linux/spi/s3c24xx.h @@ -10,14 +10,9 @@ #define __LINUX_SPI_S3C24XX_H __FILE__ struct s3c2410_spi_info { - int pin_cs; /* simple gpio cs */ unsigned int num_cs; /* total chipselects */ int bus_num; /* bus number to use. */ - unsigned int use_fiq:1; /* use fiq */ - - void (*gpio_setup)(struct s3c2410_spi_info *spi, int enable); - void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); }; extern int s3c24xx_set_fiq(unsigned int irq, u32 *ack_ptr, bool on); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 85e2ff7b840d..2ba044d0d5e5 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -89,6 +89,7 @@ enum spi_mem_data_dir { * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not * @data.buswidth: number of IO lanes used to send/receive the data * @data.dtr: whether the data should be sent in DTR mode or not + * @data.ecc: whether error correction is required or not * @data.dir: direction of the transfer * @data.nbytes: number of data bytes to send/receive. Can be zero if the * operation does not involve transferring data @@ -119,6 +120,7 @@ struct spi_mem_op { struct { u8 buswidth; u8 dtr : 1; + u8 ecc : 1; enum spi_mem_data_dir dir; unsigned int nbytes; union { @@ -286,6 +288,19 @@ struct spi_controller_mem_ops { }; /** + * struct spi_controller_mem_caps - SPI memory controller capabilities + * @dtr: Supports DTR operations + * @ecc: Supports operations with error correction + */ +struct spi_controller_mem_caps { + bool dtr; + bool ecc; +}; + +#define spi_mem_controller_is_capable(ctlr, cap) \ + ((ctlr)->mem_caps && (ctlr)->mem_caps->cap) + +/** * struct spi_mem_driver - SPI memory driver * @spidrv: inherit from a SPI driver * @probe: probe a SPI memory. 
Usually where detection/initialization takes @@ -319,10 +334,6 @@ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, bool spi_mem_default_supports_op(struct spi_mem *mem, const struct spi_mem_op *op); - -bool spi_mem_dtr_supports_op(struct spi_mem *mem, - const struct spi_mem_op *op); - #else static inline int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, @@ -345,13 +356,6 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, { return false; } - -static inline -bool spi_mem_dtr_supports_op(struct spi_mem *mem, - const struct spi_mem_op *op) -{ - return false; -} #endif /* CONFIG_SPI_MEM */ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 7ab3fed7b804..5f8c063ddff4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -16,6 +16,7 @@ #include <linux/gpio/consumer.h> #include <uapi/linux/spi/spi.h> +#include <linux/acpi.h> struct dma_chan; struct software_node; @@ -23,6 +24,7 @@ struct ptp_system_timestamp; struct spi_controller; struct spi_transfer; struct spi_controller_mem_ops; +struct spi_controller_mem_caps; /* * INTERFACES between SPI master-side drivers and SPI slave protocol handlers, @@ -136,9 +138,6 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * for driver coldplugging, and in uevents used for hotplugging * @driver_override: If the name of a driver is written to this attribute, then * the device will bind to the named driver and only the named driver. - * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when - * not using a GPIO line) use cs_gpiod in new drivers by opting in on - * the spi_master. * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when * not using a GPIO line) * @word_delay: delay to be inserted between consecutive @@ -185,7 +184,6 @@ struct spi_device { void *controller_data; char modalias[SPI_NAME_SIZE]; const char *driver_override; - int cs_gpio; /* LEGACY: chip select gpio */ struct gpio_desc *cs_gpiod; /* chip select gpio desc */ struct spi_delay word_delay; /* inter-word delay */ /* CS delays */ @@ -280,7 +278,7 @@ struct spi_message; struct spi_driver { const struct spi_device_id *id_table; int (*probe)(struct spi_device *spi); - int (*remove)(struct spi_device *spi); + void (*remove)(struct spi_device *spi); void (*shutdown)(struct spi_device *spi); struct device_driver driver; }; @@ -373,7 +371,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @cur_msg_prepared: spi_prepare_message was called for the currently * in-flight message * @cur_msg_mapped: message has been mapped for DMA - * @last_cs_enable: was enable true on the last call to set_cs. + * @last_cs: the last chip_select recorded by set_cs; -1 when no chip is + * selected * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy @@ -415,19 +414,15 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @mem_ops: optimized/dedicated operations for interactions with SPI memory. * This field is optional and should only be implemented if the * controller has native support for memory like operations. + * @mem_caps: controller capabilities for the handling of memory operations. * @unprepare_message: undo any work done by prepare_message().
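A controller driver advertises these capabilities once, and a supports_op-style hook can then test them with the spi_mem_controller_is_capable() macro added above; a minimal sketch with hypothetical names:

#include <linux/spi/spi-mem.h>

static const struct spi_controller_mem_caps demo_mem_caps = {
        .dtr = true,
        .ecc = false,
};

static bool demo_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
        struct spi_controller *ctlr = mem->spi->controller;

        /* Reject ECC-requiring ops when the controller lacks the capability */
        if (op->data.ecc && !spi_mem_controller_is_capable(ctlr, ecc))
                return false;

        return spi_mem_default_supports_op(mem, op);
}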
* @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per - * CS number. Any individual value may be -ENOENT for CS lines that - * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods - * in new drivers. * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS * number. Any individual value may be NULL for CS lines that * are not GPIOs (driven by the SPI controller itself). * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab - * GPIO descriptors rather than using global GPIO numbers grabbed by the - * driver. This will fill in @cs_gpiods and @cs_gpios should not be used, - * and SPI devices will have the cs_gpiod assigned rather than cs_gpio. + * GPIO descriptors. This will fill in @cs_gpiods and SPI devices will have + * the cs_gpiod assigned if a GPIO line is found for the chipselect. * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will * fill in this field with the first unused native CS, to be used by SPI * controller drivers that need to drive a native CS when using GPIO CS. @@ -611,7 +606,7 @@ struct spi_controller { bool auto_runtime_pm; bool cur_msg_prepared; bool cur_msg_mapped; - bool last_cs_enable; + char last_cs; bool last_cs_mode_high; bool fallback; struct completion xfer_completion; @@ -639,9 +634,9 @@ struct spi_controller { /* Optimized handlers for SPI memory-like operations. */ const struct spi_controller_mem_ops *mem_ops; + const struct spi_controller_mem_caps *mem_caps; /* gpio chip select */ - int *cs_gpios; struct gpio_desc **cs_gpiods; bool use_gpio_descriptors; s8 unused_native_cs; @@ -759,6 +754,13 @@ extern int devm_spi_register_controller(struct device *dev, struct spi_controller *ctlr); extern void spi_unregister_controller(struct spi_controller *ctlr); +#if IS_ENABLED(CONFIG_ACPI) +extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr, + struct acpi_device *adev, + int index); +int acpi_spi_count_resources(struct acpi_device *adev); +#endif + /* * SPI resource management while processing a SPI message */ @@ -1452,8 +1454,20 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n) * use spi_new_device() to describe each device. You can also call * spi_unregister_device() to start making that device vanish, but * normally that would be handled by spi_unregister_controller(). + * + * You can also use spi_alloc_device() and spi_add_device() to use a two-stage + * registration sequence for each spi_device, as sketched below. This gives + * the caller some more control over the spi_device structure before it is + * registered, but requires the caller to initialize fields that would + * otherwise be defined using the board info.
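A minimal sketch of that two-stage sequence (the chip-select, speed, and modalias values are made up):

#include <linux/spi/spi.h>
#include <linux/string.h>

static struct spi_device *demo_register(struct spi_controller *ctlr)
{
        struct spi_device *spi;

        spi = spi_alloc_device(ctlr);
        if (!spi)
                return NULL;

        /* Fields that board info would otherwise have supplied */
        spi->chip_select = 0;
        spi->max_speed_hz = 1000000;
        strscpy(spi->modalias, "demo-dev", sizeof(spi->modalias));

        if (spi_add_device(spi)) {
                spi_dev_put(spi);
                return NULL;
        }
        return spi;
}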
*/ extern struct spi_device * +spi_alloc_device(struct spi_controller *ctlr); + +extern int +spi_add_device(struct spi_device *spi); + +extern struct spi_device * spi_new_device(struct spi_controller *, struct spi_board_info *); extern void spi_unregister_device(struct spi_device *spi); diff --git a/include/linux/spinlock_api.h b/include/linux/spinlock_api.h new file mode 100644 index 000000000000..6338b27f98df --- /dev/null +++ b/include/linux/spinlock_api.h @@ -0,0 +1 @@ +#include <linux/spinlock.h> diff --git a/include/linux/ssb/ssb_driver_gige.h b/include/linux/ssb/ssb_driver_gige.h index 15ba0df1ee0d..28c145a51e57 100644 --- a/include/linux/ssb/ssb_driver_gige.h +++ b/include/linux/ssb/ssb_driver_gige.h @@ -95,7 +95,7 @@ static inline bool ssb_gige_must_flush_posted_writes(struct pci_dev *pdev) struct ssb_gige *dev = pdev_to_ssb_gige(pdev); if (dev) return (dev->dev->bus->chip_id == 0x4785); - return 0; + return false; } /* Get the device MAC address */ diff --git a/include/linux/stddef.h b/include/linux/stddef.h index ca507bd5f808..929d67710cc5 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -13,11 +13,7 @@ enum { }; #undef offsetof -#ifdef __compiler_offsetof -#define offsetof(TYPE, MEMBER) __compiler_offsetof(TYPE, MEMBER) -#else -#define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) -#endif +#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER) /** * sizeof_field() - Report the size of a struct field in bytes diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 7a22921c9db7..4d72258d42fd 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -106,4 +106,24 @@ void kfree_strarray(char **array, size_t n); char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n); +static inline const char *str_yes_no(bool v) +{ + return v ? "yes" : "no"; +} + +static inline const char *str_on_off(bool v) +{ + return v ? "on" : "off"; +} + +static inline const char *str_enable_disable(bool v) +{ + return v ? "enable" : "disable"; +} + +static inline const char *str_enabled_disabled(bool v) +{ + return v ? "enabled" : "disabled"; +} + #endif diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index f35c22b3355f..a5dda4987e8b 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -52,24 +52,6 @@ struct svc_pool { unsigned long sp_flags; } ____cacheline_aligned_in_smp; -struct svc_serv; - -struct svc_serv_ops { - /* Callback to use when last thread exits. */ - void (*svo_shutdown)(struct svc_serv *, struct net *); - - /* function for service threads to run */ - int (*svo_function)(void *); - - /* queue up a transport for servicing */ - void (*svo_enqueue_xprt)(struct svc_xprt *); - - /* optional module to count when adding threads. - * Thread function must call module_put_and_kthread_exit() to exit. - */ - struct module *svo_module; -}; - /* * RPC service. 
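With svc_serv_ops gone, a service now passes its thread function directly to svc_create(); a rough sketch modeled on the lockd/nfsd pattern (the loop details and buffer size are assumptions):

#include <linux/sunrpc/svc.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int demo_svc_thread(void *vrqstp)
{
        struct svc_rqst *rqstp = vrqstp;

        while (!kthread_should_stop()) {
                int err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);

                if (err == -EAGAIN || err == -EINTR)
                        continue;
                svc_process(rqstp);
        }
        svc_exit_thread(rqstp);
        return 0;
}

/* serv = svc_create(&demo_program, 64 * 1024, demo_svc_thread); */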
* @@ -102,7 +84,8 @@ struct svc_serv { unsigned int sv_nrpools; /* number of thread pools */ struct svc_pool * sv_pools; /* array of thread pools */ - const struct svc_serv_ops *sv_ops; /* server operations */ + int (*sv_threadfn)(void *data); + #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct list_head sv_cb_list; /* queue for callback requests * that arrive over the same @@ -503,7 +486,7 @@ int svc_rpcb_setup(struct svc_serv *serv, struct net *net); void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); int svc_bind(struct svc_serv *serv, struct net *net); struct svc_serv *svc_create(struct svc_program *, unsigned int, - const struct svc_serv_ops *); + int (*threadfn)(void *data)); struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node); void svc_rqst_replace_page(struct svc_rqst *rqstp, @@ -511,10 +494,9 @@ void svc_rqst_replace_page(struct svc_rqst *rqstp, void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, - const struct svc_serv_ops *); + int (*threadfn)(void *data)); int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); int svc_pool_stats_open(struct svc_serv *serv, struct file *file); -void svc_shutdown_net(struct svc_serv *, struct net *); int svc_process(struct svc_rqst *); int bc_svc_process(struct svc_serv *, struct rpc_rqst *, struct svc_rqst *); diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 571f605bc91e..d42a75b3be10 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -28,6 +28,7 @@ struct svc_xprt_ops { void (*xpo_free)(struct svc_xprt *); void (*xpo_secure_port)(struct svc_rqst *rqstp); void (*xpo_kill_temp_xprt)(struct svc_xprt *); + void (*xpo_start_tls)(struct svc_xprt *); }; struct svc_xprt_class { @@ -88,6 +89,7 @@ struct svc_xprt { struct list_head xpt_users; /* callbacks on free */ struct net *xpt_net; + netns_tracker ns_tracker; const struct cred *xpt_cred; struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ @@ -127,15 +129,16 @@ int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, struct svc_serv *); -int svc_create_xprt(struct svc_serv *, const char *, struct net *, - const int, const unsigned short, int, - const struct cred *); +int svc_xprt_create(struct svc_serv *serv, const char *xprt_name, + struct net *net, const int family, + const unsigned short port, int flags, + const struct cred *cred); +void svc_xprt_destroy_all(struct svc_serv *serv, struct net *net); void svc_xprt_received(struct svc_xprt *xprt); -void svc_xprt_do_enqueue(struct svc_xprt *xprt); void svc_xprt_enqueue(struct svc_xprt *xprt); void svc_xprt_put(struct svc_xprt *xprt); void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt); -void svc_close_xprt(struct svc_xprt *xprt); +void svc_xprt_close(struct svc_xprt *xprt); int svc_port_is_privileged(struct sockaddr *sin); int svc_print_xprts(char *buf, int maxlen); struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index b519609af1d0..4417f667c757 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -731,6 +731,8 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr, if (unlikely(xdr_stream_decode_u32(xdr, 
&len) < 0)) return -EBADMSG; + if (len > SIZE_MAX / sizeof(*p)) + return -EBADMSG; p = xdr_inline_decode(xdr, len * sizeof(*p)); if (unlikely(!p)) return -EBADMSG; diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 955ea4d7af0b..3cdc8d878d81 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -284,6 +284,7 @@ struct rpc_xprt { } stat; struct net *xprt_net; + netns_tracker ns_tracker; const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) diff --git a/include/linux/swait_api.h b/include/linux/swait_api.h new file mode 100644 index 000000000000..1eeaaaaa5ea7 --- /dev/null +++ b/include/linux/swait_api.h @@ -0,0 +1 @@ +#include <linux/swait.h> diff --git a/include/linux/swap.h b/include/linux/swap.h index 1d38d9475c4d..27093b477c5f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -328,15 +328,18 @@ static inline swp_entry_t folio_swap_entry(struct folio *folio) /* linux/mm/workingset.c */ void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); -void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg); +void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); void workingset_activation(struct folio *folio); /* Only track the nodes of mappings with shadow entries */ void workingset_update_node(struct xa_node *node); +extern struct list_lru shadow_nodes; #define mapping_set_update(xas, mapping) do { \ - if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \ + if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \ xas_set_update(xas, workingset_update_node); \ + xas_set_lru(xas, &shadow_nodes); \ + } \ } while (0) /* linux/mm/page_alloc.c */ @@ -372,7 +375,6 @@ extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); -extern void deactivate_file_page(struct page *page); extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); @@ -384,7 +386,6 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page, extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); -extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode); extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, @@ -395,7 +396,7 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, unsigned long *nr_scanned); extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; -extern int remove_mapping(struct address_space *mapping, struct page *page); +long remove_mapping(struct address_space *mapping, struct folio *folio); extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA @@ -427,7 +428,7 @@ extern int swap_writepage(struct page *page, struct writeback_control *wbc); extern void end_swap_bio_write(struct bio *bio); extern int __swap_writepage(struct page *page, struct writeback_control *wbc, bio_end_io_t end_write_func); -extern int swap_set_page_dirty(struct page *page); +bool swap_dirty_folio(struct address_space *mapping, struct folio *folio); int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned 
long nr_pages, sector_t start_block); @@ -514,7 +515,6 @@ extern int __swp_swapcount(swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); extern struct swap_info_struct *swp_swap_info(swp_entry_t entry); -extern bool reuse_swap_page(struct page *); extern int try_to_free_swap(struct page *); struct backing_dev_info; extern int init_swap_address_space(unsigned int type, unsigned long nr_pages); @@ -680,9 +680,6 @@ static inline int swp_swapcount(swp_entry_t entry) return 0; } -#define reuse_swap_page(page) \ - (page_trans_huge_mapcount(page) == 1) - static inline int try_to_free_swap(struct page *page) { return 0; @@ -741,7 +738,7 @@ static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) #endif #ifdef CONFIG_MEMCG_SWAP -extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry); +void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry); extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) { @@ -761,7 +758,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_p extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct page *page); #else -static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry) +static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) { } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 819c0cb00b6d..a34b0f9a9972 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -290,10 +290,6 @@ static inline void addr_limit_user_check(void) return; #endif - if (CHECK_DATA_CORRUPTION(uaccess_kernel(), - "Invalid address limit on user-mode return")) - force_sig(SIGKILL); - #ifdef TIF_FSCHECK clear_thread_flag(TIF_FSCHECK); #endif diff --git a/include/linux/syscalls_api.h b/include/linux/syscalls_api.h new file mode 100644 index 000000000000..23e012b04db4 --- /dev/null +++ b/include/linux/syscalls_api.h @@ -0,0 +1 @@ +#include <linux/syscalls.h> diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 78b91bb92f0d..1168302b7927 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -394,6 +394,7 @@ struct tcp_sock { bool is_mptcp; #endif #if IS_ENABLED(CONFIG_SMC) + bool (*smc_hs_congested)(const struct sock *sk); bool syn_smc; /* SYN includes SMC */ #endif diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 5e1533ee3785..911cad324acc 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2016, Linaro Limited + * Copyright (c) 2015-2022 Linaro Limited */ #ifndef __TEE_DRV_H @@ -20,14 +20,11 @@ * specific TEE driver. 
*/ -#define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */ -#define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */ -#define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */ -#define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ -#define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ -#define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ -#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ -#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ +#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */ + /* in secure world */ +#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ +#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ +#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ struct device; struct tee_device; @@ -221,92 +218,39 @@ struct tee_shm { }; /** - * struct tee_shm_pool_mgr - shared memory manager + * struct tee_shm_pool - shared memory pool * @ops: operations * @private_data: private data for the shared memory manager */ -struct tee_shm_pool_mgr { - const struct tee_shm_pool_mgr_ops *ops; +struct tee_shm_pool { + const struct tee_shm_pool_ops *ops; void *private_data; }; /** - * struct tee_shm_pool_mgr_ops - shared memory pool manager operations + * struct tee_shm_pool_ops - shared memory pool operations * @alloc: called when allocating shared memory * @free: called when freeing shared memory - * @destroy_poolmgr: called when destroying the pool manager + * @destroy_pool: called when destroying the pool */ -struct tee_shm_pool_mgr_ops { - int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, - size_t size); - void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); - void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); +struct tee_shm_pool_ops { + int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align); + void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm); + void (*destroy_pool)(struct tee_shm_pool *pool); }; -/** - * tee_shm_pool_alloc() - Create a shared memory pool from shm managers - * @priv_mgr: manager for driver private shared memory allocations - * @dmabuf_mgr: manager for dma-buf shared memory allocations - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. - */ -struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, - struct tee_shm_pool_mgr *dmabuf_mgr); - /* - * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved - * memory + * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory * @vaddr: Virtual address of start of pool * @paddr: Physical address of start of pool * @size: Size in bytes of the pool * - * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. 
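A backend now implements just the three hooks of tee_shm_pool_ops above; a minimal sketch of a kmalloc-backed pool (purely illustrative — real pools are backed by reserved or dynamically registered memory):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

static int demo_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
                              size_t size, size_t align)
{
        shm->kaddr = kmalloc(ALIGN(size, align), GFP_KERNEL);
        if (!shm->kaddr)
                return -ENOMEM;
        shm->size = size;
        return 0;
}

static void demo_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
        kfree(shm->kaddr);
        shm->kaddr = NULL;
}

static void demo_pool_op_destroy(struct tee_shm_pool *pool)
{
        kfree(pool);
}

static const struct tee_shm_pool_ops demo_pool_ops = {
        .alloc = demo_pool_op_alloc,
        .free = demo_pool_op_free,
        .destroy_pool = demo_pool_op_destroy,
};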
- */ -struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, - phys_addr_t paddr, - size_t size, - int min_alloc_order); - -/** - * tee_shm_pool_mgr_destroy() - Free a shared memory manager - */ -static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) -{ - poolm->ops->destroy_poolmgr(poolm); -} - -/** - * struct tee_shm_pool_mem_info - holds information needed to create a shared - * memory pool - * @vaddr: Virtual address of start of pool - * @paddr: Physical address of start of pool - * @size: Size in bytes of the pool - */ -struct tee_shm_pool_mem_info { - unsigned long vaddr; - phys_addr_t paddr; - size_t size; -}; - -/** - * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved - * memory range - * @priv_info: Information for driver private shared memory pool - * @dmabuf_info: Information for dma-buf shared memory pool - * - * Start and end of pools will must be page aligned. - * - * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied - * in @dmabuf, others will use the range provided by @priv. - * * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. */ -struct tee_shm_pool * -tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, - struct tee_shm_pool_mem_info *dmabuf_info); +struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, size_t size, + int min_alloc_order); /** * tee_shm_pool_free() - Free a shared memory pool @@ -315,7 +259,10 @@ tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info, * There must be no remaining shared memory allocated from this pool when * this function is called. */ -void tee_shm_pool_free(struct tee_shm_pool *pool); +static inline void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->ops->destroy_pool(pool); +} /** * tee_get_drvdata() - Return driver_data pointer @@ -323,43 +270,20 @@ void tee_shm_pool_free(struct tee_shm_pool *pool); */ void *tee_get_drvdata(struct tee_device *teedev); -/** - * tee_shm_alloc() - Allocate shared memory - * @ctx: Context that allocates the shared memory - * @size: Requested size of shared memory - * @flags: Flags setting properties for the requested shared memory. - * - * Memory allocated as global shared memory is automatically freed when the - * TEE file pointer is closed. The @flags field uses the bits defined by - * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If - * TEE_SHM_DMA_BUF global shared memory will be allocated and associated - * with a dma-buf handle, else driver private memory. - * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); -/** - * tee_shm_register() - Register shared memory buffer - * @ctx: Context that registers the shared memory - * @addr: Address is userspace of the shared buffer - * @length: Length of the shared buffer - * @flags: Flags setting properties for the requested shared memory.
- * - * @returns a pointer to 'struct tee_shm' - */ -struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, - size_t length, u32 flags); +struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, + void *addr, size_t length); /** - * tee_shm_is_registered() - Check if shared memory object in registered in TEE + * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind * @shm: Shared memory handle - * @returns true if object is registered in TEE + * @returns true if object is dynamic shared memory */ -static inline bool tee_shm_is_registered(struct tee_shm *shm) +static inline bool tee_shm_is_dynamic(struct tee_shm *shm) { - return shm && (shm->flags & TEE_SHM_REGISTER); + return shm && (shm->flags & TEE_SHM_DYNAMIC); } /** diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 73a6f34b3847..9f392ec76f2b 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -209,9 +209,12 @@ __bad_copy_from(void); extern void __compiletime_error("copy destination size is too small") __bad_copy_to(void); +void __copy_overflow(int size, unsigned long count); + static inline void copy_overflow(int size, unsigned long count) { - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); + if (IS_ENABLED(CONFIG_BUG)) + __copy_overflow(size, count); } static __always_inline __must_check bool diff --git a/include/linux/topology.h b/include/linux/topology.h index a6e201758ae9..f19bc3626297 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -211,6 +211,9 @@ static inline int cpu_to_mem(int cpu) #ifndef topology_drawer_id #define topology_drawer_id(cpu) ((void)(cpu), -1) #endif +#ifndef topology_ppin +#define topology_ppin(cpu) ((void)(cpu), 0ull) +#endif #ifndef topology_sibling_cpumask #define topology_sibling_cpumask(cpu) cpumask_of(cpu) #endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index dcea51fb60e2..e6e95a9f07a5 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -15,6 +15,7 @@ struct array_buffer; struct tracer; struct dentry; struct bpf_prog; +union bpf_attr; const char *trace_print_flags_seq(struct trace_seq *p, const char *delim, unsigned long flags, @@ -315,6 +316,7 @@ enum { TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, TRACE_EVENT_FL_EPROBE_BIT, + TRACE_EVENT_FL_CUSTOM_BIT, }; /* @@ -328,6 +330,9 @@ enum { * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe * EPROBE - Event is an event probe + * CUSTOM - Event is a custom event (to be attached to an existing tracepoint) + * This is set while the custom event has not yet been attached + * to a tracepoint, and cleared once it has been. */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), @@ -339,6 +344,7 @@ enum { TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), + TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -440,7 +446,9 @@ static inline bool bpf_prog_array_valid(struct trace_event_call *call) static inline const char * trace_event_name(struct trace_event_call *call) { - if (call->flags & TRACE_EVENT_FL_TRACEPOINT) + if (call->flags & TRACE_EVENT_FL_CUSTOM) + return call->name; + else if (call->flags & TRACE_EVENT_FL_TRACEPOINT) return call->tp ?
call->tp->name : NULL; else return call->name; @@ -738,6 +746,7 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); +int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); #else static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) { @@ -779,6 +788,11 @@ static inline int bpf_get_perf_event_info(const struct perf_event *event, { return -EOPNOTSUPP; } +static inline int +bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif enum { @@ -903,3 +917,18 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, #endif #endif /* _LINUX_TRACE_EVENT_H */ + +/* + * Note: we keep the TRACE_CUSTOM_EVENT outside the include file ifdef protection. + * This is due to the way trace custom events work. If a file includes two + * trace event headers under one "CREATE_CUSTOM_TRACE_EVENTS" the first include + * will override the TRACE_CUSTOM_EVENT and break the second include. + */ + +#ifndef TRACE_CUSTOM_EVENT + +#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) +#define DEFINE_CUSTOM_EVENT(template, name, proto, args) +#define TRACE_CUSTOM_EVENT(name, proto, args, struct, assign, print) + +#endif /* ifdef TRACE_CUSTOM_EVENT (see note above) */ diff --git a/include/linux/types.h b/include/linux/types.h index ac825ad90e44..ea8cf60a8a79 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -226,6 +226,7 @@ struct callback_head { typedef void (*rcu_callback_t)(struct rcu_head *head); typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func); +typedef void (*swap_r_func_t)(void *a, void *b, int size, const void *priv); typedef void (*swap_func_t)(void *a, void *b, int size); typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); diff --git a/include/linux/u64_stats_sync_api.h b/include/linux/u64_stats_sync_api.h new file mode 100644 index 000000000000..c72ca63da44b --- /dev/null +++ b/include/linux/u64_stats_sync_api.h @@ -0,0 +1 @@ +#include <linux/u64_stats_sync.h> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index ac0394087f7d..546179418ffa 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -10,46 +10,6 @@ #include <asm/uaccess.h> -#ifdef CONFIG_SET_FS -/* - * Force the uaccess routines to be wired up for actual userspace access, - * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone - * using force_uaccess_end below. 
- */ -static inline mm_segment_t force_uaccess_begin(void) -{ - mm_segment_t fs = get_fs(); - - set_fs(USER_DS); - return fs; -} - -static inline void force_uaccess_end(mm_segment_t oldfs) -{ - set_fs(oldfs); -} -#else /* CONFIG_SET_FS */ -typedef struct { - /* empty dummy */ -} mm_segment_t; - -#ifndef TASK_SIZE_MAX -#define TASK_SIZE_MAX TASK_SIZE -#endif - -#define uaccess_kernel() (false) -#define user_addr_max() (TASK_SIZE_MAX) - -static inline mm_segment_t force_uaccess_begin(void) -{ - return (mm_segment_t) { }; -} - -static inline void force_uaccess_end(mm_segment_t oldfs) -{ -} -#endif /* CONFIG_SET_FS */ - /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and @@ -368,6 +328,25 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, long count); long strnlen_user_nofault(const void __user *unsafe_addr, long count); +#ifndef __get_kernel_nofault +#define __get_kernel_nofault(dst, src, type, label) \ +do { \ + type __user *p = (type __force __user *)(src); \ + type data; \ + if (__get_user(data, p)) \ + goto label; \ + *(type *)dst = data; \ +} while (0) + +#define __put_kernel_nofault(dst, src, type, label) \ +do { \ + type __user *p = (type __force __user *)(dst); \ + type data = *(type *)src; \ + if (__put_user(data, p)) \ + goto label; \ +} while (0) +#endif + /** * get_kernel_nofault(): safely attempt to read from a location * @val: read into this variable @@ -401,8 +380,6 @@ static inline void user_access_restore(unsigned long flags) { } #endif #ifdef CONFIG_HARDENED_USERCOPY -void usercopy_warn(const char *name, const char *detail, bool to_user, - unsigned long offset, unsigned long len); void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len);
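With set_fs() gone, the generic __get_kernel_nofault()/__put_kernel_nofault() fallbacks above back the get_kernel_nofault() interface documented next to them. A minimal illustrative caller, with a hypothetical wrapper name and an arbitrary probed address:

static bool example_peek(const unsigned long *addr, unsigned long *out)
{
        unsigned long val;

        /* evaluates to 0 on success, -EFAULT if the address is not mapped */
        if (get_kernel_nofault(val, addr))
                return false;

        *out = val;
        return true;
}

diff --git a/include/linux/udp.h b/include/linux/udp.h index ae66dadd8543..254a2654400f 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -23,11 +23,6 @@ static inline struct udphdr *udp_hdr(const struct sk_buff *skb) return (struct udphdr *)skb_transport_header(skb); } -static inline struct udphdr *inner_udp_hdr(const struct sk_buff *skb) -{ - return (struct udphdr *)skb_inner_transport_header(skb); -} - #define UDP_HTABLE_SIZE_MIN (CONFIG_BASE_SMALL ? 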
128 : 256) static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask) diff --git a/include/linux/uio.h b/include/linux/uio.h index 1198a2bfc9bf..739285fe5a2f 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -273,6 +273,23 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) i->count = count; } +static inline int +iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes) +{ + size_t shorted = 0; + int npages; + + if (iov_iter_count(i) > max_bytes) { + shorted = iov_iter_count(i) - max_bytes; + iov_iter_truncate(i, max_bytes); + } + npages = iov_iter_npages(i, INT_MAX); + if (shorted) + iov_iter_reexpand(i, iov_iter_count(i) + shorted); + + return npages; +} + struct csum_state { __wsum csum; size_t off; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 2de442ececae..721089bb4c84 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -401,18 +401,24 @@ static inline int vdpa_reset(struct vdpa_device *vdev) return ret; } -static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features, bool locked) +static inline int vdpa_set_features_unlocked(struct vdpa_device *vdev, u64 features) { const struct vdpa_config_ops *ops = vdev->config; int ret; - if (!locked) - mutex_lock(&vdev->cf_mutex); - vdev->features_valid = true; ret = ops->set_driver_features(vdev, features); - if (!locked) - mutex_unlock(&vdev->cf_mutex); + + return ret; +} + +static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) +{ + int ret; + + mutex_lock(&vdev->cf_mutex); + ret = vdpa_set_features_unlocked(vdev, features); + mutex_unlock(&vdev->cf_mutex); return ret; } diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 76191d7abed1..66dda06ec42d 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -33,6 +33,7 @@ struct vfio_device { struct vfio_group *group; struct vfio_device_set *dev_set; struct list_head dev_set_list; + unsigned int migration_flags; /* Members below here are private, not for driver use */ refcount_t refcount; @@ -55,6 +56,17 @@ struct vfio_device { * @match: Optional device name match callback (return: 0 for no-match, >0 for * match, -errno for abort (ex. match with insufficient or incorrect * additional args) + * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl + * @migration_set_state: Optional callback to change the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. + * The returned FD is used for data transfer according to the FSM + * definition. The driver is responsible to ensure that FD reaches end + * of stream or error whenever the migration FSM leaves a data transfer + * state or before close_device() returns. + * @migration_get_state: Optional callback to get the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. 
*/ struct vfio_device_ops { char *name; @@ -69,8 +81,44 @@ struct vfio_device_ops { int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma); void (*request)(struct vfio_device *vdev, unsigned int count); int (*match)(struct vfio_device *vdev, char *buf); + int (*device_feature)(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz); + struct file *(*migration_set_state)( + struct vfio_device *device, + enum vfio_device_mig_state new_state); + int (*migration_get_state)(struct vfio_device *device, + enum vfio_device_mig_state *curr_state); }; +/** + * vfio_check_feature - Validate user input for the VFIO_DEVICE_FEATURE ioctl + * @flags: Arg from the device_feature op + * @argsz: Arg from the device_feature op + * @supported_ops: Combination of VFIO_DEVICE_FEATURE_GET and SET the driver + * supports + * @minsz: Minimum data size the driver accepts + * + * For use in a driver's device_feature op. Checks that the inputs to the + * VFIO_DEVICE_FEATURE ioctl are correct for the driver's feature. Returns 1 if + * the driver should execute the get or set, otherwise the relevant + * value should be returned. + */ +static inline int vfio_check_feature(u32 flags, size_t argsz, u32 supported_ops, + size_t minsz) +{ + if ((flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET)) & + ~supported_ops) + return -EINVAL; + if (flags & VFIO_DEVICE_FEATURE_PROBE) + return 0; + /* Without PROBE one of GET or SET must be requested */ + if (!(flags & (VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_SET))) + return -EINVAL; + if (argsz < minsz) + return -EINVAL; + return 1; +} + void vfio_init_group_dev(struct vfio_device *device, struct device *dev, const struct vfio_device_ops *ops); void vfio_uninit_group_dev(struct vfio_device *device); @@ -82,6 +130,11 @@ extern void vfio_device_put(struct vfio_device *device); int vfio_assign_device_set(struct vfio_device *device, void *set_id); +int vfio_mig_get_next_state(struct vfio_device *device, + enum vfio_device_mig_state cur_fsm, + enum vfio_device_mig_state new_fsm, + enum vfio_device_mig_state *next_fsm); + /* * External user API */
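vfio_check_feature() above is meant to be called at the top of a driver's new device_feature op: per its kernel-doc, a return of 1 means "go ahead with the get or set", while anything else is handed straight back to the caller. A hedged sketch of such an op, where the one-byte payload and all names are illustrative:

static int example_device_feature(struct vfio_device *device, u32 flags,
                                  void __user *arg, size_t argsz)
{
        u8 val;
        int ret;

        ret = vfio_check_feature(flags, argsz,
                                 VFIO_DEVICE_FEATURE_GET |
                                 VFIO_DEVICE_FEATURE_SET, sizeof(val));
        if (ret != 1)
                return ret;     /* probe was answered, or -errno */

        if (flags & VFIO_DEVICE_FEATURE_GET) {
                val = 0;        /* fill from device state */
                if (copy_to_user(arg, &val, sizeof(val)))
                        return -EFAULT;
                return 0;
        }

        if (copy_from_user(&val, arg, sizeof(val)))
                return -EFAULT;
        /* ... apply val to the device ... */
        return 0;
}

diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index ef9a44b6cf5d..74a4a0f17b28 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -159,8 +159,17 @@ extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite); +#ifdef CONFIG_VFIO_PCI_VGA extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, loff_t *ppos, bool iswrite); +#else +static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite) +{ + return -EINVAL; +} +#endif extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, uint64_t data, int count, int fd); @@ -220,6 +229,8 @@ int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn); extern const struct pci_error_handlers vfio_pci_core_err_handlers; long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, unsigned long arg); +int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags, + void __user *arg, size_t argsz); ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf, size_t count, loff_t *ppos); ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, @@ -230,6 +241,8 @@ int 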
vfio_pci_core_match(struct vfio_device *core_vdev, char *buf); int vfio_pci_core_enable(struct vfio_pci_core_device *vdev); void vfio_pci_core_disable(struct vfio_pci_core_device *vdev); void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); +pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev, + pci_channel_state_t state); static inline bool vfio_pci_is_vga(struct pci_dev *pdev) { diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 72292a62cd90..5464f398912a 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -133,7 +133,6 @@ bool is_virtio_device(struct device *dev); void virtio_break_device(struct virtio_device *dev); void virtio_config_changed(struct virtio_device *dev); -int virtio_finalize_features(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 4d107ad31149..dafdc7f48c01 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -64,8 +64,9 @@ struct virtio_shm_region { * Returns the first 64 feature bits (all we currently need). * @finalize_features: confirm what device features we'll be using. * vdev: the virtio_device - * This gives the final feature bits for the device: it can change + * This sends the driver feature bits to the device: it can change * the dev->feature bits if it wants. + * Note: despite the name this can be called any number of times. * Returns 0 on success or error status * @bus_name: return the bus name associated with the device (optional) * vdev: the virtio_device diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 7b2363388bfa..16a0a4fd000b 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -129,6 +129,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_SWAP SWAP_RA, SWAP_RA_HIT, +#ifdef CONFIG_KSM + KSM_SWPIN_COPY, +#endif #endif #ifdef CONFIG_X86 DIRECT_MAP_LEVEL2_SPLIT, diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 880227b9f044..3b1df7da402d 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -35,17 +35,6 @@ struct notifier_block; /* in notifier.h */ #define VM_DEFER_KMEMLEAK 0 #endif -/* - * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC. - * - * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after - * shadow memory has been mapped. It's used to handle allocation errors so that - * we don't try to poison shadow on free if it was never allocated. - * - * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to - * determine which allocations need the module shadow freed. 
- */ - /* bits [20..32] reserved for arch specific ioremap internals */ /* @@ -80,8 +69,8 @@ struct vmap_area { /* * The following two variables can be packed, because * a vmap_area object can be either: - * 1) in "free" tree (root is vmap_area_root) - * 2) or "busy" tree (root is free_vmap_area_root) + * 1) in "free" tree (root is free_vmap_area_root) + * 2) or "busy" tree (root is vmap_area_root) */ union { unsigned long subtree_max_size; /* in "free" tree */ @@ -126,6 +115,13 @@ static inline int arch_vmap_pte_supported_shift(unsigned long size) } #endif +#ifndef arch_vmap_pgprot_tagged +static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot) +{ + return prot; +} +#endif + /* * Highlevel APIs for driver use */ @@ -159,6 +155,11 @@ void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) __alloc_size(1); void *vmalloc_no_huge(unsigned long size) __alloc_size(1); +extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2); +extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2); + extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); diff --git a/include/linux/wait_api.h b/include/linux/wait_api.h new file mode 100644 index 000000000000..4e930548935a --- /dev/null +++ b/include/linux/wait_api.h @@ -0,0 +1 @@ +#include <linux/wait.h> diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h index c994d1b2cdba..3b9a40ae8bdb 100644 --- a/include/linux/watch_queue.h +++ b/include/linux/watch_queue.h @@ -28,7 +28,8 @@ struct watch_type_filter { struct watch_filter { union { struct rcu_head rcu; - unsigned long type_filter[2]; /* Bitmask of accepted types */ + /* Bitmask of accepted types */ + DECLARE_BITMAP(type_filter, WATCH_TYPE__NR); }; u32 nr_filters; /* Number of filters */ struct watch_type_filter filters[]; diff --git a/include/linux/workqueue_api.h b/include/linux/workqueue_api.h new file mode 100644 index 000000000000..77debb5d2760 --- /dev/null +++ b/include/linux/workqueue_api.h @@ -0,0 +1 @@ +#include <linux/workqueue.h> diff --git a/include/linux/wwan.h b/include/linux/wwan.h index afb3334ec8c5..5ce2acf444fb 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -174,11 +174,13 @@ void wwan_unregister_ops(struct device *parent); #ifdef CONFIG_WWAN_DEBUGFS struct dentry *wwan_get_debugfs_dir(struct device *parent); +void wwan_put_debugfs_dir(struct dentry *dir); #else static inline struct dentry *wwan_get_debugfs_dir(struct device *parent) { return ERR_PTR(-ENODEV); } +static inline void wwan_put_debugfs_dir(struct dentry *dir) {} #endif #endif /* __WWAN_H */ diff --git a/include/linux/xarray.h b/include/linux/xarray.h index d6d5da6ed735..bb52b786be1b 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1317,6 +1317,7 @@ struct xa_state { struct xa_node *xa_node; struct xa_node *xa_alloc; xa_update_node_t xa_update; + struct list_lru *xa_lru; }; /* @@ -1336,7 +1337,8 @@ struct xa_state { .xa_pad = 0, \ .xa_node = XAS_RESTART, \ .xa_alloc = NULL, \ - .xa_update = NULL \ + .xa_update = NULL, \ + .xa_lru = NULL, \ } /** @@ -1631,6 +1633,11 @@ static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update) xas->xa_update = update; } +static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru) +{ + xas->xa_lru = lru; +} + 
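Among the hunks above, vmalloc.h gains overflow-checked array allocators that mirror kmalloc_array()/kcalloc() semantics in vmalloc space. A small illustrative use, with a hypothetical element type and vfree() as the usual release path:

struct example_entry {
        u64 key;
        u64 val;
};

static struct example_entry *example_alloc_table(size_t n)
{
        /* NULL if n * sizeof(...) overflows or the allocation fails */
        return vcalloc(n, sizeof(struct example_entry));
}

/** * xas_next_entry() - Advance 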
iterator to next present entry. * @xas: XArray operation state. diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h index ef63bc205756..01ccda48d8c5 100644 --- a/include/media/hevc-ctrls.h +++ b/include/media/hevc-ctrls.h @@ -127,15 +127,13 @@ struct v4l2_ctrl_hevc_pps { __u64 flags; }; -#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE 0x01 -#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER 0x02 -#define V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR 0x03 +#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01 #define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16 struct v4l2_hevc_dpb_entry { __u64 timestamp; - __u8 rps; + __u8 flags; __u8 field_pic; __u16 pic_order_cnt[2]; __u8 padding[2]; diff --git a/include/media/i2c/m5mols.h b/include/media/i2c/m5mols.h index 9cec5a09e125..a56ae353c891 100644 --- a/include/media/i2c/m5mols.h +++ b/include/media/i2c/m5mols.h @@ -14,15 +14,11 @@ /** * struct m5mols_platform_data - platform data for M-5MOLS driver - * @gpio_reset: GPIO driving the reset pin of M-5MOLS - * @reset_polarity: active state for gpio_reset pin, 0 or 1 * @set_power: an additional callback to the board setup code * to be called after enabling and before disabling * the sensor's supply regulators */ struct m5mols_platform_data { - int gpio_reset; - u8 reset_polarity; int (*set_power)(struct device *dev, int on); }; diff --git a/include/media/i2c/noon010pc30.h b/include/media/i2c/noon010pc30.h index d1b2e06a1de0..1880dad25cf0 100644 --- a/include/media/i2c/noon010pc30.h +++ b/include/media/i2c/noon010pc30.h @@ -12,14 +12,10 @@ /** * struct noon010pc30_platform_data - platform data * @clk_rate: the clock frequency in Hz - * @gpio_nreset: GPIO driving nRESET pin - * @gpio_nstby: GPIO driving nSTBY pin */ struct noon010pc30_platform_data { unsigned long clk_rate; - int gpio_nreset; - int gpio_nstby; }; #endif /* NOON010PC30_H */ diff --git a/include/media/media-entity.h b/include/media/media-entity.h index fea489f03d57..742918962d46 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -268,7 +268,6 @@ enum media_entity_type { * @pads: Pads array with the size defined by @num_pads. * @links: List of data links. * @ops: Entity operations. - * @stream_count: Stream count for the entity. * @use_count: Use count for the entity. * @pipe: Pipeline this entity belongs to. * @info: Union with devnode information. Kept just for backward @@ -283,10 +282,9 @@ enum media_entity_type { * * .. note:: * - * @stream_count and @use_count reference counts must never be - * negative, but are signed integers on purpose: a simple ``WARN_ON(<0)`` - * check can be used to detect reference count bugs that would make them - * negative. + * The @use_count reference count must never be negative, but is a signed + * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect + * reference count bugs that would make it negative. */ struct media_entity { struct media_gobj graph_obj; /* must be first field in struct */ @@ -305,7 +303,6 @@ struct media_entity { const struct media_entity_operations *ops; - int stream_count; int use_count; struct media_pipeline *pipe; @@ -657,6 +654,10 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, * * This function must be called during the cleanup phase after unregistering * the entity (currently, it does nothing). + * + * Calling media_entity_cleanup() on a media_entity whose memory has been + * zeroed but that has not been initialized with media_entity_pads_init() is + * valid and is a no-op. 
*/ #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) static inline void media_entity_cleanup(struct media_entity *entity) {} @@ -859,6 +860,18 @@ struct media_link *media_entity_find_link(struct media_pad *source, struct media_pad *media_entity_remote_pad(const struct media_pad *pad); /** + * media_entity_is_streaming - Test if an entity is part of a streaming pipeline + * @entity: The entity + * + * Return: True if the entity is part of a pipeline started with the + * media_pipeline_start() function, false otherwise. + */ +static inline bool media_entity_is_streaming(const struct media_entity *entity) +{ + return entity->pipe; +} + +/** * media_entity_get_fwnode_pad - Get pad number from fwnode * * @entity: The entity diff --git a/include/media/mipi-csi2.h b/include/media/mipi-csi2.h new file mode 100644 index 000000000000..392794e5badd --- /dev/null +++ b/include/media/mipi-csi2.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * MIPI CSI-2 Data Types + * + * Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@ideasonboard.com> + */ + +#ifndef _MEDIA_MIPI_CSI2_H +#define _MEDIA_MIPI_CSI2_H + +/* Short packet data types */ +#define MIPI_CSI2_DT_FS 0x00 +#define MIPI_CSI2_DT_FE 0x01 +#define MIPI_CSI2_DT_LS 0x02 +#define MIPI_CSI2_DT_LE 0x03 +#define MIPI_CSI2_DT_GENERIC_SHORT(n) (0x08 + (n)) /* 0..7 */ + +/* Long packet data types */ +#define MIPI_CSI2_DT_NULL 0x10 +#define MIPI_CSI2_DT_BLANKING 0x11 +#define MIPI_CSI2_DT_EMBEDDED_8B 0x12 +#define MIPI_CSI2_DT_YUV420_8B 0x18 +#define MIPI_CSI2_DT_YUV420_10B 0x19 +#define MIPI_CSI2_DT_YUV420_8B_LEGACY 0x1a +#define MIPI_CSI2_DT_YUV420_8B_CS 0x1c +#define MIPI_CSI2_DT_YUV420_10B_CS 0x1d +#define MIPI_CSI2_DT_YUV422_8B 0x1e +#define MIPI_CSI2_DT_YUV422_10B 0x1f +#define MIPI_CSI2_DT_RGB444 0x20 +#define MIPI_CSI2_DT_RGB555 0x21 +#define MIPI_CSI2_DT_RGB565 0x22 +#define MIPI_CSI2_DT_RGB666 0x23 +#define MIPI_CSI2_DT_RGB888 0x24 +#define MIPI_CSI2_DT_RAW24 0x27 +#define MIPI_CSI2_DT_RAW6 0x28 +#define MIPI_CSI2_DT_RAW7 0x29 +#define MIPI_CSI2_DT_RAW8 0x2a +#define MIPI_CSI2_DT_RAW10 0x2b +#define MIPI_CSI2_DT_RAW12 0x2c +#define MIPI_CSI2_DT_RAW14 0x2d +#define MIPI_CSI2_DT_RAW16 0x2e +#define MIPI_CSI2_DT_RAW20 0x2f +#define MIPI_CSI2_DT_USER_DEFINED(n) (0x30 + (n)) /* 0..7 */ + +#endif /* _MEDIA_MIPI_CSI2_H */ diff --git a/include/media/rc-core.h b/include/media/rc-core.h index ab9d3b7cd799..803349599c27 100644 --- a/include/media/rc-core.h +++ b/include/media/rc-core.h @@ -130,9 +130,7 @@ struct lirc_fh { * @tx_resolution: resolution (in us) of output sampler * @lirc_dev: lirc device * @lirc_cdev: lirc char cdev - * @gap_start: time when gap starts - * @gap_duration: duration of initial gap - * @gap: true if we're in a gap + * @gap_start: start time for gap after timeout if non-zero * @lirc_fh_lock: protects lirc_fh list * @lirc_fh: list of open files * @registered: set to true by rc_register_device(), false by @@ -201,8 +199,6 @@ struct rc_dev { struct device lirc_dev; struct cdev lirc_cdev; ktime_t gap_start; - u64 gap_duration; - bool gap; spinlock_t lirc_fh_lock; struct list_head lirc_fh; #endif @@ -302,7 +298,7 @@ struct ir_raw_event { u8 duty_cycle; unsigned pulse:1; - unsigned reset:1; + unsigned overflow:1; unsigned timeout:1; unsigned carrier_report:1; }; @@ -325,9 +321,9 @@ int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode, struct ir_raw_event *events, unsigned int max); int ir_raw_encode_carrier(enum rc_proto protocol); -static inline void ir_raw_event_reset(struct rc_dev *dev) +static inline void 
ir_raw_event_overflow(struct rc_dev *dev) { - ir_raw_event_store(dev, &((struct ir_raw_event) { .reset = true })); + ir_raw_event_store(dev, &((struct ir_raw_event) { .overflow = true })); dev->idle = true; ir_raw_event_handle(dev); } diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h index 9c97f1dbd1c6..feb132df45a3 100644 --- a/include/media/v4l2-fwnode.h +++ b/include/media/v4l2-fwnode.h @@ -25,68 +25,19 @@ struct fwnode_handle; struct v4l2_async_notifier; struct v4l2_async_subdev; -#define V4L2_FWNODE_CSI2_MAX_DATA_LANES 8 - -/** - * struct v4l2_fwnode_bus_mipi_csi2 - MIPI CSI-2 bus data structure - * @flags: media bus (V4L2_MBUS_*) flags - * @data_lanes: an array of physical data lane indexes - * @clock_lane: physical lane index of the clock lane - * @num_data_lanes: number of data lanes - * @lane_polarities: polarity of the lanes. The order is the same of - * the physical lanes. - */ -struct v4l2_fwnode_bus_mipi_csi2 { - unsigned int flags; - unsigned char data_lanes[V4L2_FWNODE_CSI2_MAX_DATA_LANES]; - unsigned char clock_lane; - unsigned char num_data_lanes; - bool lane_polarities[1 + V4L2_FWNODE_CSI2_MAX_DATA_LANES]; -}; - -/** - * struct v4l2_fwnode_bus_parallel - parallel data bus data structure - * @flags: media bus (V4L2_MBUS_*) flags - * @bus_width: bus width in bits - * @data_shift: data shift in bits - */ -struct v4l2_fwnode_bus_parallel { - unsigned int flags; - unsigned char bus_width; - unsigned char data_shift; -}; - -/** - * struct v4l2_fwnode_bus_mipi_csi1 - CSI-1/CCP2 data bus structure - * @clock_inv: polarity of clock/strobe signal - * false - not inverted, true - inverted - * @strobe: false - data/clock, true - data/strobe - * @lane_polarity: the polarities of the clock (index 0) and data lanes - * index (1) - * @data_lane: the number of the data lane - * @clock_lane: the number of the clock lane - */ -struct v4l2_fwnode_bus_mipi_csi1 { - unsigned char clock_inv:1; - unsigned char strobe:1; - bool lane_polarity[2]; - unsigned char data_lane; - unsigned char clock_lane; -}; - /** * struct v4l2_fwnode_endpoint - the endpoint data structure * @base: fwnode endpoint of the v4l2_fwnode * @bus_type: bus type * @bus: bus configuration data structure - * @bus.parallel: embedded &struct v4l2_fwnode_bus_parallel. + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. * Used if the bus is parallel. - * @bus.mipi_csi1: embedded &struct v4l2_fwnode_bus_mipi_csi1. + * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. * Used if the bus is MIPI Alliance's Camera Serial * Interface version 1 (MIPI CSI1) or Standard * Mobile Imaging Architecture's Compact Camera Port 2 * (SMIA CCP2). - * @bus.mipi_csi2: embedded &struct v4l2_fwnode_bus_mipi_csi2. + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. * Used if the bus is MIPI Alliance's Camera Serial * Interface version 2 (MIPI CSI2). 
* @link_frequencies: array of supported link frequencies @@ -100,9 +51,9 @@ struct v4l2_fwnode_endpoint { */ enum v4l2_mbus_type bus_type; struct { - struct v4l2_fwnode_bus_parallel parallel; - struct v4l2_fwnode_bus_mipi_csi1 mipi_csi1; - struct v4l2_fwnode_bus_mipi_csi2 mipi_csi2; + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; } bus; u64 *link_frequencies; unsigned int nr_of_link_frequencies; diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h index 841e190aedd9..e0db3bcff9ed 100644 --- a/include/media/v4l2-mediabus.h +++ b/include/media/v4l2-mediabus.h @@ -15,15 +15,12 @@ * How to use the V4L2_MBUS_* flags: * Flags are defined for each of the possible states and values of a media * bus configuration parameter. One and only one bit of each group of flags - * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config and - * v4l2_subdev_pad_ops.set_mbus_config operations to ensure that no - * conflicting settings are specified when reporting and setting the media bus - * configuration with the two operations respectively. For example, it is - * invalid to set or clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the + * shall be set by the users of the v4l2_subdev_pad_ops.get_mbus_config + * operation to ensure that no conflicting settings are specified when + * reporting the media bus configuration. For example, it is invalid to set or + * clear both the V4L2_MBUS_HSYNC_ACTIVE_HIGH and the * V4L2_MBUS_HSYNC_ACTIVE_LOW flag at the same time. Instead either flag - * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be - * set. The same is true for the V4L2_MBUS_CSI2_1/2/3/4_LANE flags group: only - * one of these four bits shall be set. + * V4L2_MBUS_HSYNC_ACTIVE_HIGH or flag V4L2_MBUS_HSYNC_ACTIVE_LOW shall be set. * * TODO: replace the existing V4L2_MBUS_* flags with structures of fields * to avoid conflicting settings. @@ -70,28 +67,57 @@ #define V4L2_MBUS_DATA_ENABLE_LOW BIT(15) /* Serial flags */ -/* CSI-2 D-PHY number of data lanes. */ -#define V4L2_MBUS_CSI2_1_LANE BIT(0) -#define V4L2_MBUS_CSI2_2_LANE BIT(1) -#define V4L2_MBUS_CSI2_3_LANE BIT(2) -#define V4L2_MBUS_CSI2_4_LANE BIT(3) -/* CSI-2 Virtual Channel identifiers. */ -#define V4L2_MBUS_CSI2_CHANNEL_0 BIT(4) -#define V4L2_MBUS_CSI2_CHANNEL_1 BIT(5) -#define V4L2_MBUS_CSI2_CHANNEL_2 BIT(6) -#define V4L2_MBUS_CSI2_CHANNEL_3 BIT(7) /* Clock non-continuous mode support. */ -#define V4L2_MBUS_CSI2_CONTINUOUS_CLOCK BIT(8) -#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(9) - -#define V4L2_MBUS_CSI2_LANES (V4L2_MBUS_CSI2_1_LANE | \ - V4L2_MBUS_CSI2_2_LANE | \ - V4L2_MBUS_CSI2_3_LANE | \ - V4L2_MBUS_CSI2_4_LANE) -#define V4L2_MBUS_CSI2_CHANNELS (V4L2_MBUS_CSI2_CHANNEL_0 | \ - V4L2_MBUS_CSI2_CHANNEL_1 | \ - V4L2_MBUS_CSI2_CHANNEL_2 | \ - V4L2_MBUS_CSI2_CHANNEL_3) +#define V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK BIT(0) + +#define V4L2_MBUS_CSI2_MAX_DATA_LANES 8 + +/** + * struct v4l2_mbus_config_mipi_csi2 - MIPI CSI-2 data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @data_lanes: an array of physical data lane indexes + * @clock_lane: physical lane index of the clock lane + * @num_data_lanes: number of data lanes + * @lane_polarities: polarity of the lanes. The order is the same of + * the physical lanes. 
+ */ +struct v4l2_mbus_config_mipi_csi2 { + unsigned int flags; + unsigned char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES]; + unsigned char clock_lane; + unsigned char num_data_lanes; + bool lane_polarities[1 + V4L2_MBUS_CSI2_MAX_DATA_LANES]; +}; + +/** + * struct v4l2_mbus_config_parallel - parallel data bus configuration + * @flags: media bus (V4L2_MBUS_*) flags + * @bus_width: bus width in bits + * @data_shift: data shift in bits + */ +struct v4l2_mbus_config_parallel { + unsigned int flags; + unsigned char bus_width; + unsigned char data_shift; +}; + +/** + * struct v4l2_mbus_config_mipi_csi1 - CSI-1/CCP2 data bus configuration + * @clock_inv: polarity of clock/strobe signal + * false - not inverted, true - inverted + * @strobe: false - data/clock, true - data/strobe + * @lane_polarity: the polarities of the clock (index 0) and data lanes + * index (1) + * @data_lane: the number of the data lane + * @clock_lane: the number of the clock lane + */ +struct v4l2_mbus_config_mipi_csi1 { + unsigned char clock_inv:1; + unsigned char strobe:1; + bool lane_polarity[2]; + unsigned char data_lane; + unsigned char clock_lane; +}; /** * enum v4l2_mbus_type - media bus type @@ -118,12 +144,26 @@ enum v4l2_mbus_type { /** * struct v4l2_mbus_config - media bus configuration - * @type: in: interface type - * @flags: in / out: configuration flags, depending on @type + * @type: interface type + * @bus: bus configuration data structure + * @bus.parallel: embedded &struct v4l2_mbus_config_parallel. + * Used if the bus is parallel or BT.656. + * @bus.mipi_csi1: embedded &struct v4l2_mbus_config_mipi_csi1. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 1 (MIPI CSI1) or Standard + * Mobile Imaging Architecture's Compact Camera Port 2 + * (SMIA CCP2). + * @bus.mipi_csi2: embedded &struct v4l2_mbus_config_mipi_csi2. + * Used if the bus is MIPI Alliance's Camera Serial + * Interface version 2 (MIPI CSI2). */ struct v4l2_mbus_config { enum v4l2_mbus_type type; - unsigned int flags; + union { + struct v4l2_mbus_config_parallel parallel; + struct v4l2_mbus_config_mipi_csi1 mipi_csi1; + struct v4l2_mbus_config_mipi_csi2 mipi_csi2; + } bus; }; /** diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 95ec18c2f49c..6c153b33bb04 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -715,17 +715,6 @@ struct v4l2_subdev_state { * this operation as close as possible to stream on time. The * operation shall fail if the pad index it has been called on * is not valid or in case of unrecoverable failures. - * - * @set_mbus_config: set the media bus configuration of a remote sub-device. - * This operations is intended to allow, in combination with - * the get_mbus_config operation, the negotiation of media bus - * configuration parameters between media sub-devices. The - * operation shall not fail if the requested configuration is - * not supported, but the driver shall update the content of - * the %config argument to reflect what has been actually - * applied to the hardware. The operation shall fail if the - * pad index it has been called on is not valid or in case of - * unrecoverable failures. 
*/ struct v4l2_subdev_pad_ops { int (*init_cfg)(struct v4l2_subdev *sd, @@ -768,8 +757,6 @@ struct v4l2_subdev_pad_ops { struct v4l2_mbus_frame_desc *fd); int (*get_mbus_config)(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_mbus_config *config); - int (*set_mbus_config)(struct v4l2_subdev *sd, unsigned int pad, - struct v4l2_mbus_config *config); }; /** diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 59940e230b78..f7506f08e505 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -64,6 +64,8 @@ struct ifa6_config { const struct in6_addr *pfx; unsigned int plen; + u8 ifa_proto; + const struct in6_addr *peer_pfx; u32 rt_priority; diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index ab207677e0a8..f742e50207fb 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -205,7 +205,8 @@ struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr); struct sock *vsock_find_connected_socket(struct sockaddr_vm *src, struct sockaddr_vm *dst); void vsock_remove_sock(struct vsock_sock *vsk); -void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)); +void vsock_for_each_connected_socket(struct vsock_transport *transport, + void (*fn)(struct sock *sk)); int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk); bool vsock_find_cid(unsigned int cid); diff --git a/include/net/arp.h b/include/net/arp.h index 031374ac2f22..d7ef4ec71dfe 100644 --- a/include/net/arp.h +++ b/include/net/arp.h @@ -65,6 +65,7 @@ void arp_send(int type, int ptype, __be32 dest_ip, const unsigned char *src_hw, const unsigned char *th); int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir); void arp_ifdown(struct net_device *dev); +int arp_invalidate(struct net_device *dev, __be32 ip, bool force); struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, struct net_device *dev, __be32 src_ip, diff --git a/include/net/ax25.h b/include/net/ax25.h index 8221af1811df..0f9790c455bb 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -187,18 +187,12 @@ typedef struct { typedef struct ax25_route { struct ax25_route *next; - refcount_t refcount; ax25_address callsign; struct net_device *dev; ax25_digi *digipeat; char ip_mode; } ax25_route; -static inline void ax25_hold_route(ax25_route *ax25_rt) -{ - refcount_inc(&ax25_rt->refcount); -} - void __ax25_put_route(ax25_route *ax25_rt); extern rwlock_t ax25_route_lock; @@ -213,12 +207,6 @@ static inline void ax25_route_lock_unuse(void) read_unlock(&ax25_route_lock); } -static inline void ax25_put_route(ax25_route *ax25_rt) -{ - if (refcount_dec_and_test(&ax25_rt->refcount)) - __ax25_put_route(ax25_rt); -} - typedef struct { char slave; /* slave_mode? */ struct timer_list slave_timer; /* timeout timer */ diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index a647e5fabdbd..6b48d9e2aab9 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -204,19 +204,21 @@ void bt_err_ratelimited(const char *fmt, ...); #define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__) #endif +#define bt_dev_name(hdev) ((hdev) ? (hdev)->name : "null") + #define bt_dev_info(hdev, fmt, ...) \ - BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_INFO("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_warn(hdev, fmt, ...) \ - BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_WARN("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_err(hdev, fmt, ...) 
\ - BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_ERR("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_dbg(hdev, fmt, ...) \ - BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + BT_DBG("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_warn_ratelimited(hdev, fmt, ...) \ - bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + bt_warn_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) #define bt_dev_err_ratelimited(hdev, fmt, ...) \ - bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__) + bt_err_ratelimited("%s: " fmt, bt_dev_name(hdev), ##__VA_ARGS__) /* Connection and socket states */ enum { @@ -343,7 +345,7 @@ int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, __poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); -int bt_sock_wait_ready(struct sock *sk, unsigned long flags); +int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags); void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh); void bt_accept_unlink(struct sock *sk); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 35c073d44ec5..5cb095b09a94 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -255,6 +255,16 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, + + /* When this quirk is set, HCI_OP_SET_EVENT_FLT requests with + * HCI_FLT_CLEAR_ALL are ignored and event filtering is + * completely avoided. A subset of the CSR controller + * clones struggle with this and instantly lock up. + * + * Note that devices using this must (separately) disable + * runtime suspend, because event filtering takes place there. 
+ */ + HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, }; /* HCI device flags */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index e336e9c1dda4..d5377740e99c 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -258,6 +258,15 @@ struct adv_info { #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F +struct monitored_device { + struct list_head list; + + bdaddr_t bdaddr; + __u8 addr_type; + __u16 handle; + bool notified; +}; + struct adv_pattern { struct list_head list; __u8 ad_type; @@ -294,6 +303,9 @@ struct adv_monitor { #define HCI_MAX_SHORT_NAME_LENGTH 10 +#define HCI_CONN_HANDLE_UNSET 0xffff +#define HCI_CONN_HANDLE_MAX 0x0eff + /* Min encryption key size to match with SMP */ #define HCI_MIN_ENC_KEY_SIZE 7 @@ -591,6 +603,9 @@ struct hci_dev { struct delayed_work interleave_scan; + struct list_head monitored_devices; + bool advmon_pend_notify; + #if IS_ENABLED(CONFIG_BT_LEDS) struct led_trigger *power_led; #endif @@ -1855,6 +1870,8 @@ void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status); int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status); +void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, + bdaddr_t *bdaddr, u8 addr_type); u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, u16 to_multiplier); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 107b25deae68..7c1ad0f6fcec 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -696,7 +696,7 @@ struct mgmt_cp_set_blocked_keys { #define MGMT_READ_CONTROLLER_CAP_SIZE 0 struct mgmt_rp_read_controller_cap { __le16 cap_len; - __u8 cap[0]; + __u8 cap[]; } __packed; #define MGMT_OP_READ_EXP_FEATURES_INFO 0x0049 @@ -1104,3 +1104,19 @@ struct mgmt_ev_controller_resume { #define MGMT_WAKE_REASON_NON_BT_WAKE 0x0 #define MGMT_WAKE_REASON_UNEXPECTED 0x1 #define MGMT_WAKE_REASON_REMOTE_WAKE 0x2 + +#define MGMT_EV_ADV_MONITOR_DEVICE_FOUND 0x002f +struct mgmt_ev_adv_monitor_device_found { + __le16 monitor_handle; + struct mgmt_addr_info addr; + __s8 rssi; + __le32 flags; + __le16 eir_len; + __u8 eir[]; +} __packed; + +#define MGMT_EV_ADV_MONITOR_DEVICE_LOST 0x0030 +struct mgmt_ev_adv_monitor_device_lost { + __le16 monitor_handle; + struct mgmt_addr_info addr; +} __packed; diff --git a/include/net/bond_options.h b/include/net/bond_options.h index dd75c071f67e..61b49063791c 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -66,19 +66,24 @@ enum { BOND_OPT_PEER_NOTIF_DELAY, BOND_OPT_LACP_ACTIVE, BOND_OPT_MISSED_MAX, + BOND_OPT_NS_TARGETS, BOND_OPT_LAST }; /* This structure is used for storing option values and for passing option * values when changing an option. 
The logic when used as an arg is as follows: - * - if string != NULL -> parse it, if the opt is RAW type then return it, else - * return the parse result - * - if string == NULL -> parse value + * - if value != ULLONG_MAX -> parse value + * - if string != NULL -> parse string + * - if the opt is RAW data and length less than maxlen, + * copy the data to extra storage */ + +#define BOND_OPT_EXTRA_MAXLEN 16 struct bond_opt_value { char *string; u64 value; u32 flags; + char extra[BOND_OPT_EXTRA_MAXLEN]; }; struct bonding; @@ -118,18 +123,26 @@ const struct bond_opt_value *bond_opt_get_val(unsigned int option, u64 val); * When value is ULLONG_MAX then string will be used. */ static inline void __bond_opt_init(struct bond_opt_value *optval, - char *string, u64 value) + char *string, u64 value, + void *extra, size_t extra_len) { memset(optval, 0, sizeof(*optval)); optval->value = ULLONG_MAX; - if (value == ULLONG_MAX) - optval->string = string; - else + if (value != ULLONG_MAX) optval->value = value; + else if (string) + optval->string = string; + else if (extra_len <= BOND_OPT_EXTRA_MAXLEN) + memcpy(optval->extra, extra, extra_len); } -#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value) -#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX) +#define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value, NULL, 0) +#define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX, NULL, 0) +#define bond_opt_initextra(optval, extra, extra_len) \ + __bond_opt_init(optval, NULL, ULLONG_MAX, extra, extra_len) void bond_option_arp_ip_targets_clear(struct bonding *bond); +#if IS_ENABLED(CONFIG_IPV6) +void bond_option_ns_ip6_targets_clear(struct bonding *bond); +#endif #endif /* _NET_BOND_OPTIONS_H */
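__bond_opt_init() above now dispatches on all three input kinds: a numeric value wins, then a string, then raw extra data that fits in BOND_OPT_EXTRA_MAXLEN (which is exactly sizeof(struct in6_addr), matching the new NS targets). An illustrative initialization of each flavour; the wrapper function is hypothetical:

static void example_init_opt_values(void)
{
        struct bond_opt_value numval, strval, rawval;
        struct in6_addr target = IN6ADDR_ANY_INIT;

        bond_opt_initval(&numval, 100);                 /* numeric value */
        bond_opt_initstr(&strval, "active-backup");     /* string to parse */
        bond_opt_initextra(&rawval, &target, sizeof(target)); /* raw copy */
}

diff --git a/include/net/bonding.h b/include/net/bonding.h index 83cfd2d70247..b14f4c0b4e9e 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -29,8 +29,11 @@ #include <net/bond_3ad.h> #include <net/bond_alb.h> #include <net/bond_options.h> +#include <net/ipv6.h> +#include <net/addrconf.h> #define BOND_MAX_ARP_TARGETS 16 +#define BOND_MAX_NS_TARGETS BOND_MAX_ARP_TARGETS #define BOND_DEFAULT_MIIMON 100 @@ -146,6 +149,7 @@ struct bond_params { struct reciprocal_value reciprocal_packets_per_slave; u16 ad_actor_sys_prio; u16 ad_user_port_key; + struct in6_addr ns_targets[BOND_MAX_NS_TARGETS]; /* 2 bytes of padding : see ether_addr_equal_64bits() */ u8 ad_actor_system[ETH_ALEN + 2]; @@ -499,6 +503,13 @@ static inline int bond_is_ip_target_ok(__be32 addr) return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr); } +static inline int bond_is_ip6_target_ok(struct in6_addr *addr) +{ + return !ipv6_addr_any(addr) && + !ipv6_addr_loopback(addr) && + !ipv6_addr_is_multicast(addr); +} + /* Get the oldest arp which we've received on this slave for bond's * arp_targets. 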
*/ @@ -628,7 +639,7 @@ struct bond_net { struct class_attribute class_attr_bonding_masters; }; -int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); +int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); int bond_create(struct net *net, const char *name); int bond_create_sysfs(struct bond_net *net); @@ -699,20 +710,6 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond, } /* Caller must hold rcu_read_lock() for read */ -static inline struct slave *bond_slave_has_mac_rcu(struct bonding *bond, - const u8 *mac) -{ - struct list_head *iter; - struct slave *tmp; - - bond_for_each_slave_rcu(bond, tmp, iter) - if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) - return tmp; - - return NULL; -} - -/* Caller must hold rcu_read_lock() for read */ static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) { struct list_head *iter; @@ -749,6 +746,19 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip) return -1; } +static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip) +{ + int i; + + for (i = 0; i < BOND_MAX_NS_TARGETS; i++) + if (ipv6_addr_equal(&targets[i], ip)) + return i; + else if (ipv6_addr_any(&targets[i])) + break; + + return -1; +} + /* exported from bond_main.c */ extern unsigned int bond_net_id; @@ -760,7 +770,7 @@ extern const struct sysfs_ops slave_sysfs_ops; static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { - atomic_long_inc(&dev->tx_dropped); + dev_core_stats_tx_dropped_inc(dev); dev_kfree_skb_any(skb); return NET_XMIT_DROP; } diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index d19e48f9fdc6..68713388b617 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -109,7 +109,12 @@ struct wiphy; * on this channel. * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted * on this channel. - * + * @IEEE80211_CHAN_NO_320MHZ: If the driver supports 320 MHz on the band, + * this flag indicates that a 320 MHz channel cannot use this + * channel as the control or any of the secondary channels. + * This may be due to the driver or due to regulatory bandwidth + * restrictions. + * @IEEE80211_CHAN_NO_EHT: EHT operation is not permitted on this channel. */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, @@ -131,6 +136,8 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_4MHZ = 1<<16, IEEE80211_CHAN_8MHZ = 1<<17, IEEE80211_CHAN_16MHZ = 1<<18, + IEEE80211_CHAN_NO_320MHZ = 1<<19, + IEEE80211_CHAN_NO_EHT = 1<<20, }; #define IEEE80211_CHAN_NO_HT40 \ @@ -361,6 +368,48 @@ struct ieee80211_sta_he_cap { }; /** + * struct ieee80211_eht_mcs_nss_supp - EHT max supported NSS per MCS + * + * See P802.11be_D1.3 Table 9-401k - "Subfields of the Supported EHT-MCS + * and NSS Set field" + * + * @only_20mhz: MCS/NSS support for 20 MHz-only STA. 
+ * @bw._80: MCS/NSS support for BW <= 80 MHz + * @bw._160: MCS/NSS support for BW = 160 MHz + * @bw._320: MCS/NSS support for BW = 320 MHz + */ +struct ieee80211_eht_mcs_nss_supp { + union { + struct ieee80211_eht_mcs_nss_supp_20mhz_only only_20mhz; + struct { + struct ieee80211_eht_mcs_nss_supp_bw _80; + struct ieee80211_eht_mcs_nss_supp_bw _160; + struct ieee80211_eht_mcs_nss_supp_bw _320; + } __packed bw; + } __packed; +} __packed; + +#define IEEE80211_EHT_PPE_THRES_MAX_LEN 32 + +/** + * struct ieee80211_sta_eht_cap - STA's EHT capabilities + * + * This structure describes most essential parameters needed + * to describe 802.11be EHT capabilities for a STA. + * + * @has_eht: true iff EHT data is valid. + * @eht_cap_elem: Fixed portion of the eht capabilities element. + * @eht_mcs_nss_supp: The supported NSS/MCS combinations. + * @eht_ppe_thres: Holds the PPE Thresholds data. + */ +struct ieee80211_sta_eht_cap { + bool has_eht; + struct ieee80211_eht_cap_elem_fixed eht_cap_elem; + struct ieee80211_eht_mcs_nss_supp eht_mcs_nss_supp; + u8 eht_ppe_thres[IEEE80211_EHT_PPE_THRES_MAX_LEN]; +}; + +/** * struct ieee80211_sband_iftype_data - sband data per interface type * * This structure encapsulates sband data that is relevant for the @@ -379,6 +428,7 @@ struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct ieee80211_sta_eht_cap eht_cap; struct { const u8 *data; unsigned int len; @@ -562,6 +612,26 @@ ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband, } /** + * ieee80211_get_eht_iftype_cap - return EHT capabilities for an sband's iftype + * @sband: the sband to search for the iftype on + * @iftype: enum nl80211_iftype + * + * Return: pointer to the struct ieee80211_sta_eht_cap, or NULL if none found + */ +static inline const struct ieee80211_sta_eht_cap * +ieee80211_get_eht_iftype_cap(const struct ieee80211_supported_band *sband, + enum nl80211_iftype iftype) +{ + const struct ieee80211_sband_iftype_data *data = + ieee80211_get_sband_iftype_data(sband, iftype); + + if (data && data->eht_cap.has_eht) + return &data->eht_cap; + + return NULL; +} + +/** * wiphy_read_of_freq_limits - read frequency limits from device tree * * @wiphy: the wireless device to get extra limits for @@ -1417,6 +1487,8 @@ struct sta_txpwr { * @airtime_weight: airtime scheduler weight for this station * @txpwr: transmit power for an associated station * @he_6ghz_capa: HE 6 GHz Band capabilities of station + * @eht_capa: EHT capabilities of station + * @eht_capa_len: the length of the EHT capabilities */ struct station_parameters { const u8 *supported_rates; @@ -1450,6 +1522,8 @@ struct station_parameters { u16 airtime_weight; struct sta_txpwr txpwr; const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_eht_cap_elem *eht_capa; + u8 eht_capa_len; }; /** @@ -1527,6 +1601,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_HE_MCS: HE MCS information * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode * @RATE_INFO_FLAGS_EXTENDED_SC_DMG: 60GHz extended SC MCS + * @RATE_INFO_FLAGS_EHT_MCS: EHT MCS information */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), @@ -1536,6 +1611,7 @@ enum rate_info_flags { RATE_INFO_FLAGS_HE_MCS = BIT(4), RATE_INFO_FLAGS_EDMG = BIT(5), RATE_INFO_FLAGS_EXTENDED_SC_DMG = BIT(6), + RATE_INFO_FLAGS_EHT_MCS = BIT(7), };
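The new ieee80211_get_eht_iftype_cap() helper above mirrors its HE counterpart: it returns non-NULL only when iftype data exists and has_eht is set. A minimal illustrative check; the wrapper and the choice of interface type are assumptions:

static bool example_band_has_eht(const struct ieee80211_supported_band *sband)
{
        const struct ieee80211_sta_eht_cap *eht_cap;

        eht_cap = ieee80211_get_eht_iftype_cap(sband, NL80211_IFTYPE_STATION);

        return eht_cap != NULL;
}

/** @@ -1550,6 +1626,8 @@ enum rate_info_flags { * @RATE_INFO_BW_80: 80 MHz bandwidth * @RATE_INFO_BW_160: 160 MHz bandwidth * 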
@RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation + * @RATE_INFO_BW_320: 320 MHz bandwidth + * @RATE_INFO_BW_EHT_RU: bandwidth determined by EHT RU allocation */ enum rate_info_bw { RATE_INFO_BW_20 = 0, @@ -1559,6 +1637,8 @@ enum rate_info_bw { RATE_INFO_BW_80, RATE_INFO_BW_160, RATE_INFO_BW_HE_RU, + RATE_INFO_BW_320, + RATE_INFO_BW_EHT_RU, }; /** @@ -1576,6 +1656,9 @@ enum rate_info_bw { * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, * only valid if bw is %RATE_INFO_BW_HE_RU) * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4) + * @eht_gi: EHT guard interval (from &enum nl80211_eht_gi) + * @eht_ru_alloc: EHT RU allocation (from &enum nl80211_eht_ru_alloc, + * only valid if bw is %RATE_INFO_BW_EHT_RU) */ struct rate_info { u8 flags; @@ -1587,6 +1670,8 @@ struct rate_info { u8 he_dcm; u8 he_ru_alloc; u8 n_bonded_ch; + u8 eht_gi; + u8 eht_ru_alloc; }; /** @@ -2604,7 +2689,7 @@ const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id); */ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id) { - return (void *)ieee80211_bss_get_elem(bss, id); + return (const void *)ieee80211_bss_get_elem(bss, id); } @@ -5970,9 +6055,9 @@ cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len, (!match_len && match_offset))) return NULL; - return (void *)cfg80211_find_elem_match(eid, ies, len, - match, match_len, - match_offset ? + return (const void *)cfg80211_find_elem_match(eid, ies, len, + match, match_len, + match_offset ? match_offset - 2 : 0); } @@ -6099,7 +6184,7 @@ static inline const u8 * cfg80211_find_vendor_ie(unsigned int oui, int oui_type, const u8 *ies, unsigned int len) { - return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len); + return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len); } /** diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index 6ed07844eb24..833672d6fbe4 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -227,6 +227,16 @@ static inline void wpan_phy_net_set(struct wpan_phy *wpan_phy, struct net *net) write_pnet(&wpan_phy->_net, net); } +/** + * struct ieee802154_addr - IEEE802.15.4 device address + * @mode: Address mode from frame header. 
Can be one of: + * - @IEEE802154_ADDR_NONE + * - @IEEE802154_ADDR_SHORT + * - @IEEE802154_ADDR_LONG + * @pan_id: The PAN ID this address belongs to + * @short_addr: address if @mode is @IEEE802154_ADDR_SHORT + * @extended_addr: address if @mode is @IEEE802154_ADDR_LONG + */ struct ieee802154_addr { u8 mode; __le16 pan_id; diff --git a/include/net/checksum.h b/include/net/checksum.h index 79c67f14c448..6bc783b7a06c 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -80,6 +80,7 @@ static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) return csum16_add(csum, ~addend); } +#ifndef HAVE_ARCH_CSUM_SHIFT static __always_inline __wsum csum_shift(__wsum sum, int offset) { /* rotate sum to align it with a 16b boundary */ @@ -87,6 +88,7 @@ static __always_inline __wsum csum_shift(__wsum sum, int offset) return (__force __wsum)ror32((__force u32)sum, 8); return sum; } +#endif static __always_inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) diff --git a/include/net/devlink.h b/include/net/devlink.h index 8d5349d2fb68..a30180c0988a 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1197,9 +1197,9 @@ struct devlink_ops { struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, enum devlink_port_type port_type); - int (*port_split)(struct devlink *devlink, unsigned int port_index, + int (*port_split)(struct devlink *devlink, struct devlink_port *port, unsigned int count, struct netlink_ext_ack *extack); - int (*port_unsplit)(struct devlink *devlink, unsigned int port_index, + int (*port_unsplit)(struct devlink *devlink, struct devlink_port *port, struct netlink_ext_ack *extack); int (*sb_pool_get)(struct devlink *devlink, unsigned int sb_index, u16 pool_index, @@ -1479,6 +1479,21 @@ void *devlink_priv(struct devlink *devlink); struct devlink *priv_to_devlink(void *priv); struct device *devlink_to_dev(const struct devlink *devlink); +/* Devlink instance explicit locking */ +void devl_lock(struct devlink *devlink); +void devl_unlock(struct devlink *devlink); +void devl_assert_locked(struct devlink *devlink); +bool devl_lock_is_held(struct devlink *devlink); + +int devl_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index); +void devl_port_unregister(struct devlink_port *devlink_port); + +int devl_rate_leaf_create(struct devlink_port *port, void *priv); +void devl_rate_leaf_destroy(struct devlink_port *devlink_port); +void devl_rate_nodes_destroy(struct devlink *devlink); + struct ib_device; struct net *devlink_net(const struct devlink *devlink); diff --git a/include/net/dsa.h b/include/net/dsa.h index 85a5ba3772f5..934958fda962 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -52,6 +52,7 @@ struct phylink_link_state; #define DSA_TAG_PROTO_BRCM_LEGACY_VALUE 22 #define DSA_TAG_PROTO_SJA1110_VALUE 23 #define DSA_TAG_PROTO_RTL8_4_VALUE 24 +#define DSA_TAG_PROTO_RTL8_4T_VALUE 25 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -79,6 +80,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_SEVILLE = DSA_TAG_PROTO_SEVILLE_VALUE, DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE, DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE, + DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE, }; struct dsa_switch; @@ -116,6 +118,14 @@ struct dsa_netdevice_ops { #define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \ MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE)) +struct dsa_lag { + struct net_device *dev; + unsigned int id; + struct mutex 
fdb_lock; + struct list_head fdbs; + refcount_t refcount; +}; + struct dsa_switch_tree { struct list_head list; @@ -134,7 +144,7 @@ struct dsa_switch_tree { /* Maps offloaded LAG netdevs to a zero-based linear ID for * drivers that need it. */ - struct net_device **lags; + struct dsa_lag **lags; /* Tagging protocol operations */ const struct dsa_device_ops *tag_ops; @@ -163,32 +173,36 @@ struct dsa_switch_tree { unsigned int last_switch; }; +/* LAG IDs are one-based, the dst->lags array is zero-based */ #define dsa_lags_foreach_id(_id, _dst) \ - for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++) \ - if ((_dst)->lags[(_id)]) + for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++) \ + if ((_dst)->lags[(_id) - 1]) #define dsa_lag_foreach_port(_dp, _dst, _lag) \ list_for_each_entry((_dp), &(_dst)->ports, list) \ - if ((_dp)->lag_dev == (_lag)) + if (dsa_port_offloads_lag((_dp), (_lag))) #define dsa_hsr_foreach_port(_dp, _ds, _hsr) \ list_for_each_entry((_dp), &(_ds)->dst->ports, list) \ if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr)) -static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst, - unsigned int id) +static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst, + unsigned int id) { - return dst->lags[id]; + /* DSA LAG IDs are one-based, dst->lags is zero-based */ + return dst->lags[id - 1]; } static inline int dsa_lag_id(struct dsa_switch_tree *dst, - struct net_device *lag) + struct net_device *lag_dev) { unsigned int id; dsa_lags_foreach_id(id, dst) { - if (dsa_lag_dev(dst, id) == lag) - return id; + struct dsa_lag *lag = dsa_lag_by_id(dst, id); + + if (lag->dev == lag_dev) + return lag->id; } return -ENODEV; @@ -278,6 +292,10 @@ struct dsa_port { u8 devlink_port_setup:1; + /* Master state bits, valid only on CPU ports */ + u8 master_admin_up:1; + u8 master_oper_up:1; + u8 setup:1; struct device_node *dn; @@ -287,7 +305,7 @@ struct dsa_port { struct devlink_port devlink_port; struct phylink *pl; struct phylink_config pl_config; - struct net_device *lag_dev; + struct dsa_lag *lag; struct net_device *hsr_dev; struct list_head list; @@ -308,6 +326,10 @@ struct dsa_port { struct mutex addr_lists_lock; struct list_head fdbs; struct list_head mdbs; + + /* List of VLANs that CPU and DSA ports are members of. */ + struct mutex vlans_lock; + struct list_head vlans; }; /* TODO: ideally DSA ports would have a single dp->link_dp member, @@ -321,11 +343,34 @@ struct dsa_link { struct list_head list; }; +enum dsa_db_type { + DSA_DB_PORT, + DSA_DB_LAG, + DSA_DB_BRIDGE, +}; + +struct dsa_db { + enum dsa_db_type type; + + union { + const struct dsa_port *dp; + struct dsa_lag lag; + struct dsa_bridge bridge; + }; +}; + struct dsa_mac_addr { unsigned char addr[ETH_ALEN]; u16 vid; refcount_t refcount; struct list_head list; + struct dsa_db db; +}; + +struct dsa_vlan { + u16 vid; + refcount_t refcount; + struct list_head list; }; struct dsa_switch { @@ -377,17 +422,19 @@ struct dsa_switch { */ u32 vlan_filtering:1; - /* MAC PCS does not provide link state change interrupt, and requires - * polling. Flag passed on to PHYLINK. - */ - u32 pcs_poll:1; - /* For switches that only have the MRU configurable. To ensure the * configured MTU is not exceeded, normalization of MRU on all bridged * interfaces is needed. */ u32 mtu_enforcement_ingress:1; + /* Drivers that isolate the FDBs of multiple bridges must set this + * to true to receive the bridge as an argument in .port_fdb_{add,del} + * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be + * passed as zero. 
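+ *
+ * Editorial sketch, not part of the patch: a driver opting in with
+ * ds->fdb_isolation = true might implement .port_fdb_add() roughly as
+ * below, where foo_fdb_write() and its FID argument are hypothetical:
+ *
+ *	static int foo_port_fdb_add(struct dsa_switch *ds, int port,
+ *				    const unsigned char *addr, u16 vid,
+ *				    struct dsa_db db)
+ *	{
+ *		switch (db.type) {
+ *		case DSA_DB_BRIDGE:
+ *			return foo_fdb_write(ds, port, addr, vid,
+ *					     db.bridge.num);
+ *		case DSA_DB_PORT:
+ *		case DSA_DB_LAG:
+ *			return foo_fdb_write(ds, port, addr, vid, 0);
+ *		default:
+ *			return -EOPNOTSUPP;
+ *		}
+ *	}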
+ */ + u32 fdb_isolation:1; + /* Listener for switch fabric events */ struct notifier_block nb; @@ -478,6 +525,12 @@ static inline bool dsa_port_is_unused(struct dsa_port *dp) return dp->type == DSA_PORT_TYPE_UNUSED; } +static inline bool dsa_port_master_is_operational(struct dsa_port *dp) +{ + return dsa_port_is_cpu(dp) && dp->master_admin_up && + dp->master_oper_up; +} + static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p) { return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED; @@ -581,6 +634,24 @@ static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port) return port == dsa_upstream_port(ds, port); } +/* Return true if this is a DSA port leading away from the CPU */ +static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port) +{ + return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port); +} + +/* Return the local port used to reach the CPU port */ +static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds) +{ + struct dsa_port *dp; + + dsa_switch_for_each_available_port(dp, ds) { + return dsa_upstream_port(ds, dp->index); + } + + return ds->num_ports; +} + /* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning * that the routing port from @downstream_ds to @upstream_ds is also the port * which @downstream_ds uses to reach its dedicated CPU. @@ -608,14 +679,30 @@ static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp) return dp->vlan_filtering; } +static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp) +{ + return dp->lag ? dp->lag->id : 0; +} + +static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp) +{ + return dp->lag ? dp->lag->dev : NULL; +} + +static inline bool dsa_port_offloads_lag(struct dsa_port *dp, + const struct dsa_lag *lag) +{ + return dsa_port_lag_dev_get(dp) == lag->dev; +} + static inline struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp) { if (!dp->bridge) return NULL; - if (dp->lag_dev) - return dp->lag_dev; + if (dp->lag) + return dp->lag->dev; else if (dp->hsr_dev) return dp->hsr_dev; @@ -750,6 +837,9 @@ struct dsa_switch_ops { void (*phylink_validate)(struct dsa_switch *ds, int port, unsigned long *supported, struct phylink_link_state *state); + struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds, + int port, + phy_interface_t iface); int (*phylink_mac_link_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); void (*phylink_mac_config)(struct dsa_switch *ds, int port, @@ -803,6 +893,18 @@ struct dsa_switch_ops { struct ethtool_ts_info *ts); /* + * DCB ops + */ + int (*port_get_default_prio)(struct dsa_switch *ds, int port); + int (*port_set_default_prio)(struct dsa_switch *ds, int port, + u8 prio); + int (*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp); + int (*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp, + u8 prio); + int (*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp, + u8 prio); + + /* * Suspend and resume */ int (*suspend)(struct dsa_switch *ds); @@ -849,12 +951,16 @@ struct dsa_switch_ops { int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs); int (*port_bridge_join)(struct dsa_switch *ds, int port, struct dsa_bridge bridge, - bool *tx_fwd_offload); + bool *tx_fwd_offload, + struct netlink_ext_ack *extack); void (*port_bridge_leave)(struct dsa_switch *ds, int port, struct dsa_bridge bridge); void (*port_stp_state_set)(struct dsa_switch *ds, int port, u8 state); + int (*port_mst_state_set)(struct 
dsa_switch *ds, int port, + const struct switchdev_mst_state *state); void (*port_fast_age)(struct dsa_switch *ds, int port); + int (*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid); int (*port_pre_bridge_flags)(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack); @@ -873,23 +979,36 @@ struct dsa_switch_ops { struct netlink_ext_ack *extack); int (*port_vlan_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); + int (*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge, + const struct switchdev_vlan_msti *msti); + /* * Forwarding database */ int (*port_fdb_add)(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); + const unsigned char *addr, u16 vid, + struct dsa_db db); int (*port_fdb_del)(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); + const unsigned char *addr, u16 vid, + struct dsa_db db); int (*port_fdb_dump)(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data); + int (*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db); + int (*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag, + const unsigned char *addr, u16 vid, + struct dsa_db db); /* * Multicast database */ int (*port_mdb_add)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db); int (*port_mdb_del)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db); /* * RXNFC */ @@ -909,7 +1028,7 @@ struct dsa_switch_ops { struct flow_cls_offload *cls, bool ingress); int (*port_mirror_add)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, - bool ingress); + bool ingress, struct netlink_ext_ack *extack); void (*port_mirror_del)(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror); int (*port_policer_add)(struct dsa_switch *ds, int port, @@ -923,17 +1042,18 @@ struct dsa_switch_ops { */ int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index, int sw_index, int port, - struct dsa_bridge bridge); + struct dsa_bridge bridge, + struct netlink_ext_ack *extack); void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index, int sw_index, int port, struct dsa_bridge bridge); int (*crosschip_lag_change)(struct dsa_switch *ds, int sw_index, int port); int (*crosschip_lag_join)(struct dsa_switch *ds, int sw_index, - int port, struct net_device *lag, + int port, struct dsa_lag lag, struct netdev_lag_upper_info *info); int (*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index, - int port, struct net_device *lag); + int port, struct dsa_lag lag); /* * PTP functionality @@ -1005,10 +1125,10 @@ struct dsa_switch_ops { */ int (*port_lag_change)(struct dsa_switch *ds, int port); int (*port_lag_join)(struct dsa_switch *ds, int port, - struct net_device *lag, + struct dsa_lag lag, struct netdev_lag_upper_info *info); int (*port_lag_leave)(struct dsa_switch *ds, int port, - struct net_device *lag); + struct dsa_lag lag); /* * HSR integration @@ -1036,6 +1156,13 @@ struct dsa_switch_ops { int (*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags); int (*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid); + + /* + * DSA master tracking operations + */ + void (*master_state_change)(struct dsa_switch *ds, + const struct net_device *master, + bool operational); }; 
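/* Editorial sketch, not part of the patch: the new DCB ops let DSA drivers
 * offload default port priorities and DSCP-to-priority maps. A driver with a
 * per-port default-priority register might wire up .port_set_default_prio()
 * as below; foo_write() and FOO_PORT_DEF_PRIO() are hypothetical.
 */
static int foo_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
{
	if (prio > 7)	/* 802.1p priorities fit in 3 bits */
		return -ERANGE;

	return foo_write(ds, FOO_PORT_DEF_PRIO(port), prio);
}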
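/* Editorial sketch: the new .master_state_change op, together with the
 * dsa_port_master_is_operational() helper above, lets a driver react when
 * the DSA master becomes usable, e.g. by gating traffic towards the CPU
 * port; foo_cpu_port_enable() is hypothetical.
 */
static void foo_master_state_change(struct dsa_switch *ds,
				    const struct net_device *master,
				    bool operational)
{
	foo_cpu_port_enable(ds, dsa_switch_upstream_port(ds), operational);
}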
#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \ @@ -1112,6 +1239,19 @@ struct dsa_switch_driver { struct net_device *dsa_dev_to_net_device(struct device *dev); +typedef int dsa_fdb_walk_cb_t(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db); + +int dsa_port_walk_fdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb); +int dsa_port_walk_mdbs(struct dsa_switch *ds, int port, dsa_fdb_walk_cb_t cb); +bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid, + struct dsa_db db); +bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb, + struct dsa_db db); + /* Keep inline for faster access in hot path */ static inline bool netdev_uses_dsa(const struct net_device *dev) { @@ -1212,9 +1352,6 @@ static inline bool dsa_slave_dev_check(const struct net_device *dev) #endif netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev); -int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data); -int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data); -int dsa_port_get_phy_sset_count(struct dsa_port *dp); void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up); struct dsa_tag_driver { @@ -1247,7 +1384,7 @@ module_exit(dsa_tag_driver_module_exit) /** * module_dsa_tag_drivers() - Helper macro for registering DSA tag * drivers - * @__ops_array: Array of tag driver strucutres + * @__ops_array: Array of tag driver structures * * Helper macro for DSA tag drivers which do not do anything special * in module init/exit. Each module may only use this macro once, and diff --git a/include/net/esp.h b/include/net/esp.h index 9c5637d41d95..90cd02ff77ef 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -4,6 +4,8 @@ #include <linux/skbuff.h> +#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER) + struct ip_esp_hdr; static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) diff --git a/include/net/flow.h b/include/net/flow.h index 58beb16a49b8..987bd511d652 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -29,6 +29,7 @@ struct flowi_tunnel { struct flowi_common { int flowic_oif; int flowic_iif; + int flowic_l3mdev; __u32 flowic_mark; __u8 flowic_tos; __u8 flowic_scope; @@ -36,7 +37,6 @@ struct flowi_common { __u8 flowic_flags; #define FLOWI_FLAG_ANYSRC 0x01 #define FLOWI_FLAG_KNOWN_NH 0x02 -#define FLOWI_FLAG_SKIP_NH_OIF 0x04 __u32 flowic_secid; kuid_t flowic_uid; struct flowi_tunnel flowic_tun_key; @@ -70,6 +70,7 @@ struct flowi4 { struct flowi_common __fl_common; #define flowi4_oif __fl_common.flowic_oif #define flowi4_iif __fl_common.flowic_iif +#define flowi4_l3mdev __fl_common.flowic_l3mdev #define flowi4_mark __fl_common.flowic_mark #define flowi4_tos __fl_common.flowic_tos #define flowi4_scope __fl_common.flowic_scope @@ -102,6 +103,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, { fl4->flowi4_oif = oif; fl4->flowi4_iif = LOOPBACK_IFINDEX; + fl4->flowi4_l3mdev = 0; fl4->flowi4_mark = mark; fl4->flowi4_tos = tos; fl4->flowi4_scope = scope; @@ -132,6 +134,7 @@ struct flowi6 { struct flowi_common __fl_common; #define flowi6_oif __fl_common.flowic_oif #define flowi6_iif __fl_common.flowic_iif +#define flowi6_l3mdev __fl_common.flowic_l3mdev #define flowi6_mark __fl_common.flowic_mark #define flowi6_scope __fl_common.flowic_scope #define flowi6_proto __fl_common.flowic_proto @@ -177,6 +180,7 @@ struct flowi { } u; #define flowi_oif 
u.__fl_common.flowic_oif #define flowi_iif u.__fl_common.flowic_iif +#define flowi_l3mdev u.__fl_common.flowic_l3mdev #define flowi_mark u.__fl_common.flowic_mark #define flowi_tos u.__fl_common.flowic_tos #define flowi_scope u.__fl_common.flowic_scope diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 5b8c54eb7a6b..021778a7e1af 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -148,6 +148,10 @@ enum flow_action_id { FLOW_ACTION_MPLS_MANGLE, FLOW_ACTION_GATE, FLOW_ACTION_PPPOE_PUSH, + FLOW_ACTION_JUMP, + FLOW_ACTION_PIPE, + FLOW_ACTION_VLAN_PUSH_ETH, + FLOW_ACTION_VLAN_POP_ETH, NUM_FLOW_ACTIONS, }; @@ -209,6 +213,10 @@ struct flow_action_entry { __be16 proto; u8 prio; } vlan; + struct { /* FLOW_ACTION_VLAN_PUSH_ETH */ + unsigned char dst[ETH_ALEN]; + unsigned char src[ETH_ALEN]; + } vlan_push_eth; struct { /* FLOW_ACTION_MANGLE */ /* FLOW_ACTION_ADD */ enum flow_action_mangle_base htype; @@ -235,9 +243,16 @@ struct flow_action_entry { struct { /* FLOW_ACTION_POLICE */ u32 burst; u64 rate_bytes_ps; + u64 peakrate_bytes_ps; + u32 avrate; + u16 overhead; u64 burst_pkt; u64 rate_pkt_ps; u32 mtu; + struct { + enum flow_action_id act_id; + u32 extval; + } exceed, notexceed; } police; struct { /* FLOW_ACTION_CT */ int action; @@ -302,6 +317,12 @@ static inline bool flow_offload_has_one_action(const struct flow_action *action) return action->num_entries == 1; } +static inline bool flow_action_is_last_entry(const struct flow_action *action, + const struct flow_action_entry *entry) +{ + return entry == &action->entries[action->num_entries - 1]; +} + #define flow_action_for_each(__i, __act, __actions) \ for (__i = 0, __act = &(__actions)->entries[0]; \ __i < (__actions)->num_entries; \ diff --git a/include/net/gro.h b/include/net/gro.h index 8f75802d50fd..867656b0739c 100644 --- a/include/net/gro.h +++ b/include/net/gro.h @@ -29,46 +29,51 @@ struct napi_gro_cb { /* Number of segments aggregated. */ u16 count; - /* Start offset for remote checksum offload */ - u16 gro_remcsum_start; + /* Used in ipv6_gro_receive() and foo-over-udp */ + u16 proto; /* jiffies when first packet was created/queued */ unsigned long age; - /* Used in ipv6_gro_receive() and foo-over-udp */ - u16 proto; +/* Used in napi_gro_cb::free */ +#define NAPI_GRO_FREE 1 +#define NAPI_GRO_FREE_STOLEN_HEAD 2 + /* portion of the cb set to zero at every gro iteration */ + struct_group(zeroed, - /* This is non-zero if the packet may be of the same flow. */ - u8 same_flow:1; + /* Start offset for remote checksum offload */ + u16 gro_remcsum_start; - /* Used in tunnel GRO receive */ - u8 encap_mark:1; + /* This is non-zero if the packet may be of the same flow. */ + u8 same_flow:1; - /* GRO checksum is valid */ - u8 csum_valid:1; + /* Used in tunnel GRO receive */ + u8 encap_mark:1; - /* Number of checksums via CHECKSUM_UNNECESSARY */ - u8 csum_cnt:3; + /* GRO checksum is valid */ + u8 csum_valid:1; - /* Free the skb? */ - u8 free:2; -#define NAPI_GRO_FREE 1 -#define NAPI_GRO_FREE_STOLEN_HEAD 2 + /* Number of checksums via CHECKSUM_UNNECESSARY */ + u8 csum_cnt:3; + + /* Free the skb? 
*/ + u8 free:2; - /* Used in foo-over-udp, set in udp[46]_gro_receive */ - u8 is_ipv6:1; + /* Used in foo-over-udp, set in udp[46]_gro_receive */ + u8 is_ipv6:1; - /* Used in GRE, set in fou/gue_gro_receive */ - u8 is_fou:1; + /* Used in GRE, set in fou/gue_gro_receive */ + u8 is_fou:1; - /* Used to determine if flush_id can be ignored */ - u8 is_atomic:1; + /* Used to determine if flush_id can be ignored */ + u8 is_atomic:1; - /* Number of gro_receive callbacks this packet already went through */ - u8 recursion_counter:4; + /* Number of gro_receive callbacks this packet already went through */ + u8 recursion_counter:4; - /* GRO is done by frag_list pointer chaining. */ - u8 is_flist:1; + /* GRO is done by frag_list pointer chaining. */ + u8 is_flist:1; + ); /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; diff --git a/include/net/gtp.h b/include/net/gtp.h index 0e16ebb2a82d..c1d6169df331 100644 --- a/include/net/gtp.h +++ b/include/net/gtp.h @@ -7,8 +7,13 @@ #define GTP0_PORT 3386 #define GTP1U_PORT 2152 +/* GTP messages types */ +#define GTP_ECHO_REQ 1 /* Echo Request */ +#define GTP_ECHO_RSP 2 /* Echo Response */ #define GTP_TPDU 255 +#define GTPIE_RECOVERY 14 + struct gtp0_header { /* According to GSM TS 09.60. */ __u8 flags; __u8 type; @@ -27,6 +32,43 @@ struct gtp1_header { /* According to 3GPP TS 29.060. */ __be32 tid; } __attribute__ ((packed)); +struct gtp1_header_long { /* According to 3GPP TS 29.060. */ + __u8 flags; + __u8 type; + __be16 length; + __be32 tid; + __be16 seq; + __u8 npdu; + __u8 next; +} __packed; + +/* GTP Information Element */ +struct gtp_ie { + __u8 tag; + __u8 val; +} __packed; + +struct gtp0_packet { + struct gtp0_header gtp0_h; + struct gtp_ie ie; +} __packed; + +struct gtp1u_packet { + struct gtp1_header_long gtp1u_h; + struct gtp_ie ie; +} __packed; + +struct gtp_pdu_session_info { /* According to 3GPP TS 38.415. 
*/ + u8 pdu_type; + u8 qfi; +}; + +static inline bool netif_is_gtp(const struct net_device *dev) +{ + return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "gtp"); +} + #define GTP1_F_NPDU 0x01 #define GTP1_F_SEQ 0x02 #define GTP1_F_EXTHDR 0x04 diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 11630351c978..598f53d2a3a0 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -1,6 +1,6 @@ /* * Copyright (c) 2017 Intel Deutschland GmbH - * Copyright (c) 2018-2019 Intel Corporation + * Copyright (c) 2018-2019, 2021 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -365,7 +365,7 @@ enum ieee80211_radiotap_zero_len_psdu_type { */ static inline u16 ieee80211_get_radiotap_len(const char *data) { - struct ieee80211_radiotap_header *hdr = (void *)data; + const struct ieee80211_radiotap_header *hdr = (const void *)data; return get_unaligned_le16(&hdr->it_len); } diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index f026cf08a8e8..4cfdef6ca4f6 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -71,6 +71,8 @@ struct inet6_ifaddr { bool tokenized; + u8 ifa_proto; + struct rcu_head rcu; struct in6_addr peer_addr; }; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 4ad47d9f9d27..3908296d103f 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -285,6 +285,14 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req); +static inline unsigned long +reqsk_timeout(struct request_sock *req, unsigned long max_timeout) +{ + u64 timeout = (u64)req->timeout << req->num_timeout; + + return (unsigned long)min_t(u64, timeout, max_timeout); +} + static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk) { /* The below has to be done to allow calling inet_csk_destroy_sock */ diff --git a/include/net/inet_dscp.h b/include/net/inet_dscp.h new file mode 100644 index 000000000000..72f250dffada --- /dev/null +++ b/include/net/inet_dscp.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * inet_dscp.h: helpers for handling differentiated services codepoints (DSCP) + * + * DSCP is defined in RFC 2474: + * + * 0 1 2 3 4 5 6 7 + * +---+---+---+---+---+---+---+---+ + * | DSCP | CU | + * +---+---+---+---+---+---+---+---+ + * + * DSCP: differentiated services codepoint + * CU: currently unused + * + * The whole DSCP + CU bits form the DS field. + * The DS field is also commonly called TOS or Traffic Class (for IPv6). + * + * Note: the CU bits are now used for Explicit Congestion Notification + * (RFC 3168). + */ + +#ifndef _INET_DSCP_H +#define _INET_DSCP_H + +#include <linux/types.h> + +/* Special type for storing DSCP values. + * + * A dscp_t variable stores a DS field with the CU (ECN) bits cleared. + * Using dscp_t makes it possible to strictly separate DSCP and ECN bits, thus avoiding + * bugs where ECN bits are erroneously taken into account during FIB lookups + * or policy routing. + * + * Note: to get the real DSCP value contained in a dscp_t variable one would + * have to do a bit shift after calling inet_dscp_to_dsfield(). We could have + * a helper for that, but there are currently no users. 
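+ *
+ * Editorial sketch, not from the patch: a configuration path can use
+ * the helpers below to reject DS fields carrying ECN bits and keep
+ * only the DSCP part in a dscp_t:
+ *
+ *	static int foo_parse_dsfield(__u8 dsfield, dscp_t *dscp)
+ *	{
+ *		if (!inet_validate_dscp(dsfield))
+ *			return -EINVAL;
+ *
+ *		*dscp = inet_dsfield_to_dscp(dsfield);
+ *		return 0;
+ *	}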
+ */ +typedef u8 __bitwise dscp_t; + +#define INET_DSCP_MASK 0xfc + +static inline dscp_t inet_dsfield_to_dscp(__u8 dsfield) +{ + return (__force dscp_t)(dsfield & INET_DSCP_MASK); +} + +static inline __u8 inet_dscp_to_dsfield(dscp_t dscp) +{ + return (__force __u8)dscp; +} + +static inline bool inet_validate_dscp(__u8 val) +{ + return !(val & ~INET_DSCP_MASK); +} + +#endif /* _INET_DSCP_H */ diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 63540be0fc34..911ad930867d 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -70,6 +70,7 @@ struct frag_v6_compare_key { * @stamp: timestamp of the last received fragment * @len: total length of the original datagram * @meat: length of received fragments so far + * @mono_delivery_time: stamp has a mono delivery time (EDT) * @flags: fragment queue flags * @max_size: maximum received fragment size * @fqdir: pointer to struct fqdir @@ -90,6 +91,7 @@ struct inet_frag_queue { ktime_t stamp; int len; int meat; + u8 mono_delivery_time; __u8 flags; u16 max_size; struct fqdir *fqdir; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index dfd919b3119e..463ae5d33eb0 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -65,13 +65,13 @@ struct inet_timewait_sock { /* these three are in inet_sock */ __be16 tw_sport; /* And these are ours. */ - unsigned int tw_kill : 1, - tw_transparent : 1, + unsigned int tw_transparent : 1, tw_flowlabel : 20, - tw_pad : 2, /* 2 bits hole */ + tw_pad : 3, /* 3 bits hole */ tw_tos : 8; u32 tw_txhash; u32 tw_priority; + u32 tw_bslot; /* bind bucket slot */ struct timer_list tw_timer; struct inet_bind_bucket *tw_tb; }; @@ -110,8 +110,6 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo void inet_twsk_deschedule_put(struct inet_timewait_sock *tw); -void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family); - static inline struct net *twsk_net(const struct inet_timewait_sock *twsk) { diff --git a/include/net/ioam6.h b/include/net/ioam6.h index 3f45ba37a2c6..781d2d8b2f29 100644 --- a/include/net/ioam6.h +++ b/include/net/ioam6.h @@ -35,7 +35,7 @@ struct ioam6_schema { int len; __be32 hdr; - u8 data[0]; + u8 data[]; }; struct ioam6_pernet_data { diff --git a/include/net/ip.h b/include/net/ip.h index b51bae43b0dd..3984f2c39c4b 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -517,7 +517,6 @@ void ip_dst_metrics_put(struct dst_entry *dst) kfree(p); } -u32 ip_idents_reserve(u32 hash, int segs); void __ip_select_ident(struct net *net, struct iphdr *iph, int segs); static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, @@ -712,7 +711,7 @@ int ip_forward(struct sk_buff *skb); */ void ip_options_build(struct sk_buff *skb, struct ip_options *opt, - __be32 daddr, struct rtable *rt, int is_frag); + __be32 daddr, struct rtable *rt); int __ip_options_echo(struct net *net, struct ip_options *dopt, struct sk_buff *skb, const struct ip_options *sopt); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 2048bc8748cb..6268963d9599 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -369,9 +369,8 @@ struct rt6_statistics { __u32 fib_rt_cache; /* cached rt entries in exception table */ __u32 fib_discarded_routes; /* total number of routes delete */ - /* The following stats are not protected by any lock */ + /* The following stat is not protected by any lock */ atomic_t fib_rt_alloc; /* total number of routes alloced */ - atomic_t fib_rt_uncache; /* rt entries 
in uncached list */ }; #define RTN_TL_ROOT 0x0001 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index c4297704bbcb..6a82bcb8813b 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -17,6 +17,7 @@ #include <linux/rcupdate.h> #include <net/fib_notifier.h> #include <net/fib_rules.h> +#include <net/inet_dscp.h> #include <net/inetpeer.h> #include <linux/percpu.h> #include <linux/notifier.h> @@ -24,7 +25,7 @@ struct fib_config { u8 fc_dst_len; - u8 fc_tos; + dscp_t fc_dscp; u8 fc_protocol; u8 fc_scope; u8 fc_type; diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 92eec13d1693..213612f1680c 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -15,9 +15,9 @@ #include <linux/refcount.h> #include <linux/jump_label_ratelimit.h> #include <net/if_inet6.h> -#include <net/ndisc.h> #include <net/flow.h> #include <net/flow_dissector.h> +#include <net/inet_dscp.h> #include <net/snmp.h> #include <net/netns/hash.h> @@ -440,8 +440,16 @@ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, int newtype, struct ipv6_opt_hdr *newopt); -struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, - struct ipv6_txoptions *opt); +struct ipv6_txoptions *__ipv6_fixup_options(struct ipv6_txoptions *opt_space, + struct ipv6_txoptions *opt); + +static inline struct ipv6_txoptions * +ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt) +{ + if (!opt) + return NULL; + return __ipv6_fixup_options(opt_space, opt); +} bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb, const struct inet6_skb_parm *opt); @@ -970,6 +978,11 @@ static inline u8 ip6_tclass(__be32 flowinfo) return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT; } +static inline dscp_t ip6_dscp(__be32 flowinfo) +{ + return inet_dsfield_to_dscp(ip6_tclass(flowinfo)); +} + static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel) { return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel; @@ -1023,7 +1036,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - struct ipcm6_cookie *ipc6, struct flowi6 *fl6, + struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork); diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h index 0a4779175a52..5052c66e22d2 100644 --- a/include/net/ipv6_frag.h +++ b/include/net/ipv6_frag.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _IPV6_FRAG_H #define _IPV6_FRAG_H +#include <linux/icmpv6.h> #include <linux/kernel.h> #include <net/addrconf.h> #include <net/ipv6.h> diff --git a/include/net/mac80211.h b/include/net/mac80211.h index c50221d7e82c..382ebb862ea8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7,7 +7,7 @@ * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2021 Intel Corporation + * Copyright (C) 2018 - 2022 Intel Corporation */ #ifndef MAC80211_H @@ -636,6 +636,7 @@ struct ieee80211_fils_discovery { * @tx_pwr_env: transmit power envelope array of BSS. * @tx_pwr_env_num: number of @tx_pwr_env. * @pwr_reduction: power constraint of BSS. 
+ * @eht_support: does this BSS support EHT */ struct ieee80211_bss_conf { const u8 *bssid; @@ -710,6 +711,7 @@ struct ieee80211_bss_conf { struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT]; u8 tx_pwr_env_num; u8 pwr_reduction; + bool eht_support; }; /** @@ -883,6 +885,17 @@ enum mac80211_tx_control_flags { IEEE80211_TX_CTRL_DONT_REORDER = BIT(8), }; +/** + * enum mac80211_tx_status_flags - flags to describe transmit status + * + * @IEEE80211_TX_STATUS_ACK_SIGNAL_VALID: ACK signal is valid + * + * These flags are used in tx_info->status.flags. + */ +enum mac80211_tx_status_flags { + IEEE80211_TX_STATUS_ACK_SIGNAL_VALID = BIT(0), +}; + /* * This definition is used as a mask to clear all temporary flags, which are * set by the tx handlers for each transmission attempt by the mac80211 stack. @@ -1046,7 +1059,7 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate) * @status.antenna: (legacy, kept only for iwlegacy) * @status.tx_time: airtime consumed for transmission; note this is only * used for WMM AC, not for airtime fairness - * @status.is_valid_ack_signal: ACK signal is valid + * @status.flags: status flags, see &enum mac80211_tx_status_flags * @status.status_driver_data: driver use area * @ack: union part for pure ACK data * @ack.cookie: cookie for the ACK @@ -1099,8 +1112,8 @@ struct ieee80211_tx_info { u8 ampdu_len; u8 antenna; u16 tx_time; - bool is_valid_ack_signal; - void *status_driver_data[19 / sizeof(void *)]; + u8 flags; + void *status_driver_data[18 / sizeof(void *)]; } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -1994,6 +2007,7 @@ enum ieee80211_sta_state { * @IEEE80211_STA_RX_BW_80: station can receive up to 80 MHz * @IEEE80211_STA_RX_BW_160: station can receive up to 160 MHz * (including 80+80 MHz) + * @IEEE80211_STA_RX_BW_320: station can receive up to 320 MHz * * Implementation note: 20 must be zero to be initialized * correctly, the values must be sorted. @@ -2003,6 +2017,7 @@ enum ieee80211_sta_rx_bandwidth { IEEE80211_STA_RX_BW_40, IEEE80211_STA_RX_BW_80, IEEE80211_STA_RX_BW_160, + IEEE80211_STA_RX_BW_320, }; /** @@ -2058,6 +2073,7 @@ struct ieee80211_sta_txpwr { * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities * @he_cap: HE capabilities of this STA * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities + * @eht_cap: EHT capabilities of this STA * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU * that this station is allowed to transmit to us. * Can be modified by driver. @@ -2098,6 +2114,7 @@ struct ieee80211_sta { struct ieee80211_sta_vht_cap vht_cap; struct ieee80211_sta_he_cap he_cap; struct ieee80211_he_6ghz_capa he_6ghz_capa; + struct ieee80211_sta_eht_cap eht_cap; u16 max_rx_aggregation_subframes; bool wme; u8 uapsd_queues; @@ -4931,12 +4948,14 @@ void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets); * @cntdwn_counter_offs: array of IEEE80211_MAX_CNTDWN_COUNTERS_NUM offsets * to countdown counters. This array can contain zero values which * should be ignored. 
+ * @mbssid_off: position of the multiple bssid element */ struct ieee80211_mutable_offsets { u16 tim_offset; u16 tim_length; u16 cntdwn_counter_offs[IEEE80211_MAX_CNTDWN_COUNTERS_NUM]; + u16 mbssid_off; }; /** @@ -6054,6 +6073,16 @@ void ieee80211_disconnect(struct ieee80211_vif *vif, bool reconnect); void ieee80211_resume_disconnect(struct ieee80211_vif *vif); /** + * ieee80211_hw_restart_disconnect - disconnect from AP after + * hardware restart + * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * + * Instructs mac80211 to disconnect from the AP after + * hardware restart. + */ +void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif); + +/** * ieee80211_cqm_rssi_notify - inform a configured connection quality monitoring * rssi threshold triggered * diff --git a/include/net/mac802154.h b/include/net/mac802154.h index d524ffb9eb25..2c3bbc6645ba 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -464,6 +464,12 @@ void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, * ieee802154_wake_queue - wake ieee802154 queue * @hw: pointer as obtained from ieee802154_alloc_hw(). * + * Transceivers usually have either one transmit framebuffer or one framebuffer + * for both transmitting and receiving. Hence, the core currently only handles + * one frame at a time for each phy, which means we had to stop the queue to + * keep new skbs from coming in during the transmission. The queue then needs + * to be woken up after the operation. + * * Drivers should use this function instead of netif_wake_queue. */ void ieee802154_wake_queue(struct ieee802154_hw *hw); @@ -472,6 +478,12 @@ void ieee802154_wake_queue(struct ieee802154_hw *hw); * ieee802154_stop_queue - stop ieee802154 queue * @hw: pointer as obtained from ieee802154_alloc_hw(). * + * Transceivers usually have either one transmit framebuffer or one framebuffer + * for both transmitting and receiving. Hence, the core currently only handles + * one frame at a time for each phy, which means we need to tell upper layers to + * stop giving us new skbs while we are busy with the transmitted one. The queue + * must then be stopped before transmitting. + * * Drivers should use this function instead of netif_stop_queue. 
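+ *
+ * Editorial sketch of the pattern described above, for a hypothetical
+ * driver whose foo_hw_tx() raises a completion interrupt once the
+ * framebuffer is free again:
+ *
+ *	static int foo_xmit_async(struct ieee802154_hw *hw,
+ *				  struct sk_buff *skb)
+ *	{
+ *		ieee802154_stop_queue(hw);
+ *		return foo_hw_tx(hw->priv, skb);
+ *	}
+ *
+ *	static void foo_tx_complete_irq(struct ieee802154_hw *hw)
+ *	{
+ *		ieee802154_wake_queue(hw);
+ *	}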
*/ void ieee802154_stop_queue(struct ieee802154_hw *hw); diff --git a/include/net/mctp.h b/include/net/mctp.h index 7e35ec79b909..d37268fe6825 100644 --- a/include/net/mctp.h +++ b/include/net/mctp.h @@ -40,11 +40,26 @@ struct mctp_hdr { #define MCTP_INITIAL_DEFAULT_NET 1 -static inline bool mctp_address_ok(mctp_eid_t eid) +static inline bool mctp_address_unicast(mctp_eid_t eid) { return eid >= 8 && eid < 255; } +static inline bool mctp_address_broadcast(mctp_eid_t eid) +{ + return eid == 255; +} + +static inline bool mctp_address_null(mctp_eid_t eid) +{ + return eid == 0; +} + +static inline bool mctp_address_matches(mctp_eid_t match, mctp_eid_t eid) +{ + return match == eid || match == MCTP_ADDR_ANY; +} + static inline struct mctp_hdr *mctp_hdr(struct sk_buff *skb) { return (struct mctp_hdr *)skb_network_header(skb); @@ -121,7 +136,7 @@ struct mctp_sock { */ struct mctp_sk_key { mctp_eid_t peer_addr; - mctp_eid_t local_addr; + mctp_eid_t local_addr; /* MCTP_ADDR_ANY for local owned tags */ __u8 tag; /* incoming tag match; invert TO for local */ /* we hold a ref to sk when set */ @@ -158,6 +173,12 @@ struct mctp_sk_key { */ unsigned long dev_flow_state; struct mctp_dev *dev; + + /* a tag allocated with SIOCMCTPALLOCTAG ioctl will not expire + * automatically on timeout or response, instead SIOCMCTPDROPTAG + * is used. + */ + bool manual_alloc; }; struct mctp_skb_cb { @@ -234,6 +255,9 @@ int mctp_local_output(struct sock *sk, struct mctp_route *rt, struct sk_buff *skb, mctp_eid_t daddr, u8 req_tag); void mctp_key_unref(struct mctp_sk_key *key); +struct mctp_sk_key *mctp_alloc_local_tag(struct mctp_sock *msk, + mctp_eid_t daddr, mctp_eid_t saddr, + bool manual, u8 *tagp); /* routing <--> device interface */ unsigned int mctp_default_net(struct net *net); diff --git a/include/net/mptcp.h b/include/net/mptcp.h index a925349b4b89..0a3b0fb04a3b 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -217,12 +217,6 @@ static inline bool rsk_drop_req(const struct request_sock *req) return false; } -static inline void mptcp_parse_option(const struct sk_buff *skb, - const unsigned char *ptr, int opsize, - struct tcp_options_received *opt_rx) -{ -} - static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, unsigned int *size, struct mptcp_out_options *opts) diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 47ffb360ddfa..da7eec8669ec 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -447,10 +447,15 @@ void ndisc_cleanup(void); int ndisc_rcv(struct sk_buff *skb); +struct sk_buff *ndisc_ns_create(struct net_device *dev, const struct in6_addr *solicit, + const struct in6_addr *saddr, u64 nonce); void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, const struct in6_addr *daddr, const struct in6_addr *saddr, u64 nonce); +void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr, + const struct in6_addr *saddr); + void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr); void ndisc_send_na(struct net_device *dev, const struct in6_addr *daddr, diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 5b61c462e534..c4f5601f6e32 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -63,7 +63,7 @@ struct net { */ spinlock_t rules_mod_lock; - unsigned int dev_unreg_count; + atomic_t dev_unreg_count; unsigned int dev_base_seq; /* protected by rtnl_mutex */ int ifindex; @@ -513,4 +513,10 @@ static inline void fnhe_genid_bump(struct net 
*net) atomic_inc(&net->fnhe_genid); } +#ifdef CONFIG_NET +void net_ns_init(void); +#else +static inline void net_ns_init(void) {} +#endif + #endif /* __NET_NET_NAMESPACE_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 8731d5bcb47d..b08b70989d2c 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -97,7 +97,6 @@ struct nf_conn { unsigned long status; u16 cpu; - u16 local_origin:1; possible_net_t ct_net; #if IS_ENABLED(CONFIG_NF_NAT) diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h index 7f44a771530e..4b2b7f8914ea 100644 --- a/include/net/netfilter/nf_conntrack_acct.h +++ b/include/net/netfilter/nf_conntrack_acct.h @@ -78,7 +78,6 @@ static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir, void nf_conntrack_acct_pernet_init(struct net *net); -int nf_conntrack_acct_init(void); void nf_conntrack_acct_fini(void); #endif /* _NF_CONNTRACK_ACCT_H */ diff --git a/include/net/netfilter/nf_conntrack_bpf.h b/include/net/netfilter/nf_conntrack_bpf.h new file mode 100644 index 000000000000..a473b56842c5 --- /dev/null +++ b/include/net/netfilter/nf_conntrack_bpf.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _NF_CONNTRACK_BPF_H +#define _NF_CONNTRACK_BPF_H + +#include <linux/btf.h> +#include <linux/kconfig.h> + +#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \ + (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) + +extern int register_nf_conntrack_bpf(void); + +#else + +static inline int register_nf_conntrack_bpf(void) +{ + return 0; +} + +#endif + +#endif /* _NF_CONNTRACK_BPF_H */ diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index d932e22edcb4..6c4c490a3e34 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -21,10 +21,10 @@ enum nf_ct_ecache_state { struct nf_conntrack_ecache { unsigned long cache; /* bitops want long */ - u16 missed; /* missed events */ u16 ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ enum nf_ct_ecache_state state:8;/* ecache state */ + u32 missed; /* missed events */ u32 portid; /* netlink portid of destroyer */ }; @@ -166,9 +166,6 @@ void nf_conntrack_ecache_work(struct net *net, enum nf_ct_ecache_state state); void nf_conntrack_ecache_pernet_init(struct net *net); void nf_conntrack_ecache_pernet_fini(struct net *net); -int nf_conntrack_ecache_init(void); -void nf_conntrack_ecache_fini(void); - static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net) { return net->ct.ecache_dwork_pending; @@ -194,16 +191,6 @@ static inline void nf_conntrack_ecache_pernet_init(struct net *net) static inline void nf_conntrack_ecache_pernet_fini(struct net *net) { } - -static inline int nf_conntrack_ecache_init(void) -{ - return 0; -} - -static inline void nf_conntrack_ecache_fini(void) -{ -} - static inline bool nf_conntrack_ecache_dwork_pending(const struct net *net) { return false; } #endif /* CONFIG_NF_CONNTRACK_EVENTS */ #endif /*_NF_CONNTRACK_ECACHE_H*/ diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h index c7515d82ab06..96635ad2acc7 100644 --- a/include/net/netfilter/nf_conntrack_extend.h +++ b/include/net/netfilter/nf_conntrack_extend.h @@ -49,7 +49,7 @@ enum nf_ct_ext_id { struct nf_ct_ext { u8 
offset[NF_CT_EXT_NUM]; u8 len; - char data[]; + char data[] __aligned(8); }; static inline bool __nf_ct_ext_exist(const struct nf_ct_ext *ext, u8 id) @@ -72,23 +72,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id) #define nf_ct_ext_find(ext, id) \ ((id##_TYPE *)__nf_ct_ext_find((ext), (id))) -/* Destroy all relationships */ -void nf_ct_ext_destroy(struct nf_conn *ct); - /* Add this type, returns pointer to data or NULL. */ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp); -struct nf_ct_ext_type { - /* Destroys relationships (can be NULL). */ - void (*destroy)(struct nf_conn *ct); - - enum nf_ct_ext_id id; - - /* Length and min alignment. */ - u8 len; - u8 align; -}; - -int nf_ct_extend_register(const struct nf_ct_ext_type *type); -void nf_ct_extend_unregister(const struct nf_ct_ext_type *type); #endif /* _NF_CONNTRACK_EXTEND_H */ diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 37f0fbefb060..9939c366f720 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -177,4 +177,5 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat); int nf_nat_helper_try_module_get(const char *name, u16 l3num, u8 protonum); void nf_nat_helper_put(struct nf_conntrack_helper *helper); +void nf_ct_set_auto_assign_helper_warned(struct net *net); #endif /*_NF_CONNTRACK_HELPER_H*/ diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index ba916411c4e1..3c23298e68ca 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h @@ -45,12 +45,9 @@ int nf_connlabels_replace(struct nf_conn *ct, #ifdef CONFIG_NF_CONNTRACK_LABELS int nf_conntrack_labels_init(void); -void nf_conntrack_labels_fini(void); int nf_connlabels_get(struct net *net, unsigned int bit); void nf_connlabels_put(struct net *net); #else -static inline int nf_conntrack_labels_init(void) { return 0; } -static inline void nf_conntrack_labels_fini(void) {} static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; } static inline void nf_connlabels_put(struct net *net) {} #endif diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h index 0a10b50537ae..883c414b768e 100644 --- a/include/net/netfilter/nf_conntrack_seqadj.h +++ b/include/net/netfilter/nf_conntrack_seqadj.h @@ -42,7 +42,4 @@ int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int protoff); s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq); -int nf_conntrack_seqadj_init(void); -void nf_conntrack_seqadj_fini(void); - #endif /* _NF_CONNTRACK_SEQADJ_H */ diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index 659b0ea25b4d..3ea94f6f3844 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -89,23 +89,11 @@ static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct) } #ifdef CONFIG_NF_CONNTRACK_TIMEOUT -int nf_conntrack_timeout_init(void); -void nf_conntrack_timeout_fini(void); void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout); int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, u8 l3num, u8 l4num, const char *timeout_name); void nf_ct_destroy_timeout(struct nf_conn *ct); #else -static inline int nf_conntrack_timeout_init(void) -{ - 
return 0; -} - -static inline void nf_conntrack_timeout_fini(void) -{ - return; -} - static inline int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, u8 l3num, u8 l4num, const char *timeout_name) @@ -120,8 +108,12 @@ static inline void nf_ct_destroy_timeout(struct nf_conn *ct) #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */ #ifdef CONFIG_NF_CONNTRACK_TIMEOUT -extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name); -extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout); +struct nf_ct_timeout_hooks { + struct nf_ct_timeout *(*timeout_find_get)(struct net *net, const char *name); + void (*timeout_put)(struct nf_ct_timeout *timeout); +}; + +extern const struct nf_ct_timeout_hooks *nf_ct_timeout_hook; #endif #endif /* _NF_CONNTRACK_TIMEOUT_H */ diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h index 820ea34b6029..57138d974a9f 100644 --- a/include/net/netfilter/nf_conntrack_timestamp.h +++ b/include/net/netfilter/nf_conntrack_timestamp.h @@ -40,21 +40,8 @@ struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp) #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP void nf_conntrack_tstamp_pernet_init(struct net *net); - -int nf_conntrack_tstamp_init(void); -void nf_conntrack_tstamp_fini(void); #else static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {} - -static inline int nf_conntrack_tstamp_init(void) -{ - return 0; -} - -static inline void nf_conntrack_tstamp_fini(void) -{ - return; -} #endif /* CONFIG_NF_CONNTRACK_TIMESTAMP */ #endif /* _NF_CONNTRACK_TSTAMP_H */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index bd59e950f4d6..64daafd1fc41 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -10,6 +10,8 @@ #include <linux/netfilter/nf_conntrack_tuple_common.h> #include <net/flow_offload.h> #include <net/dst.h> +#include <linux/if_pppox.h> +#include <linux/ppp_defs.h> struct nf_flowtable; struct nf_flow_rule; @@ -317,4 +319,20 @@ int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow, int nf_flow_table_offload_init(void); void nf_flow_table_offload_exit(void); +static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb) +{ + __be16 proto; + + proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN + + sizeof(struct pppoe_hdr))); + switch (proto) { + case htons(PPP_IP): + return htons(ETH_P_IP); + case htons(PPP_IPV6): + return htons(ETH_P_IPV6); + } + + return 0; +} + #endif /* _NF_FLOW_TABLE_H */ diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index c4c0861deac1..20af9d3557b9 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -126,6 +126,7 @@ struct nft_regs_track { struct { const struct nft_expr *selector; const struct nft_expr *bitwise; + u8 num_reg; } regs[NFT_REG32_NUM]; const struct nft_expr *cur; @@ -1633,4 +1634,25 @@ static inline struct nftables_pernet *nft_pernet(const struct net *net) return net_generic(net, nf_tables_net_id); } +#define __NFT_REDUCE_READONLY 1UL +#define NFT_REDUCE_READONLY (void *)__NFT_REDUCE_READONLY + +static inline bool nft_reduce_is_readonly(const struct nft_expr *expr) +{ + return expr->ops->reduce == NFT_REDUCE_READONLY; +} + +void nft_reg_track_update(struct nft_regs_track *track, + const struct nft_expr *expr, u8 dreg, u8 len); +void nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg, u8 len); +void 
__nft_reg_track_cancel(struct nft_regs_track *track, u8 dreg); + +static inline bool nft_reg_track_cmp(struct nft_regs_track *track, + const struct nft_expr *expr, u8 dreg) +{ + return track->regs[dreg].selector && + track->regs[dreg].selector->ops == expr->ops && + track->regs[dreg].num_reg == 0; +} + #endif /* _NET_NF_TABLES_H */ diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index b6fb1fdff9b2..0ea7c55cea4d 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -42,6 +42,14 @@ struct nft_cmp_fast_expr { bool inv; }; +struct nft_cmp16_fast_expr { + struct nft_data data; + struct nft_data mask; + u8 sreg; + u8 len; + bool inv; +}; + struct nft_immediate_expr { struct nft_data data; u8 dreg; @@ -59,6 +67,7 @@ static inline u32 nft_cmp_fast_mask(unsigned int len) } extern const struct nft_expr_ops nft_cmp_fast_ops; +extern const struct nft_expr_ops nft_cmp16_fast_ops; struct nft_payload { enum nft_payload_bases base:8; diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index 237f3757637e..eed099eae672 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h @@ -37,4 +37,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, void nft_fib_store_result(void *reg, const struct nft_fib *priv, const struct net_device *dev); + +bool nft_fib_reduce(struct nft_regs_track *track, + const struct nft_expr *expr); #endif diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h index 2dce55c736f4..9b51cc67de54 100644 --- a/include/net/netfilter/nft_meta.h +++ b/include/net/netfilter/nft_meta.h @@ -6,6 +6,7 @@ struct nft_meta { enum nft_meta_keys key:8; + u8 len; union { u8 dreg; u8 sreg; @@ -43,4 +44,6 @@ int nft_meta_set_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data); +bool nft_meta_get_reduce(struct nft_regs_track *track, + const struct nft_expr *expr); #endif diff --git a/include/net/netns/core.h b/include/net/netns/core.h index 552bc25b1933..388244e315e7 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ -10,6 +10,7 @@ struct netns_core { struct ctl_table_header *sysctl_hdr; int sysctl_somaxconn; + u8 sysctl_txrehash; #ifdef CONFIG_PROC_FS struct prot_inuse __percpu *prot_inuse; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 78557643526e..ce0cc4e8d8c7 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -31,18 +31,16 @@ struct ping_group_range { struct inet_hashinfo; struct inet_timewait_death_row { - atomic_t tw_count; - char tw_pad[L1_CACHE_BYTES - sizeof(atomic_t)]; + refcount_t tw_refcount; - struct inet_hashinfo *hashinfo; + struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp; int sysctl_max_tw_buckets; }; struct tcp_fastopen_context; struct netns_ipv4 { - /* Please keep tcp_death_row at first field in netns_ipv4 */ - struct inet_timewait_death_row tcp_death_row ____cacheline_aligned_in_smp; + struct inet_timewait_death_row *tcp_death_row; #ifdef CONFIG_SYSCTL struct ctl_table_header *forw_hdr; @@ -70,11 +68,9 @@ struct netns_ipv4 { struct hlist_head *fib_table_hash; struct sock *fibnl; - struct sock * __percpu *icmp_sk; struct sock *mc_autojoin_sk; struct inet_peer_base *peers; - struct sock * __percpu *tcp_sk; struct fqdir *fqdir; u8 sysctl_icmp_echo_ignore_all; @@ -87,6 +83,7 @@ struct netns_ipv4 { u32 ip_rt_min_pmtu; int ip_rt_mtu_expires; + int ip_rt_min_advmss; struct local_ports 
ip_local_ports; @@ -130,6 +127,7 @@ struct netns_ipv4 { u8 sysctl_tcp_synack_retries; u8 sysctl_tcp_syncookies; u8 sysctl_tcp_migrate_req; + u8 sysctl_tcp_comp_sack_nr; int sysctl_tcp_reordering; u8 sysctl_tcp_retries1; u8 sysctl_tcp_retries2; @@ -163,9 +161,9 @@ struct netns_ipv4 { int sysctl_tcp_challenge_ack_limit; int sysctl_tcp_min_rtt_wlen; u8 sysctl_tcp_min_tso_segs; + u8 sysctl_tcp_tso_rtt_log; u8 sysctl_tcp_autocorking; u8 sysctl_tcp_reflect_tos; - u8 sysctl_tcp_comp_sack_nr; int sysctl_tcp_invalid_ratelimit; int sysctl_tcp_pacing_ss_ratio; int sysctl_tcp_pacing_ca_ratio; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 6bd7e5a85ce7..3d83b64471d3 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -89,11 +89,15 @@ struct netns_ipv6 { struct fib6_table *fib6_local_tbl; struct fib_rules_ops *fib6_rules_ops; #endif - struct sock * __percpu *icmp_sk; struct sock *ndisc_sk; struct sock *tcp_sk; struct sock *igmp_sk; struct sock *mc_autojoin_sk; + + struct hlist_head *inet6_addr_lst; + spinlock_t addrconf_hash_lock; + struct delayed_work addr_chk_work; + #ifdef CONFIG_IPV6_MROUTE #ifndef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES struct mr_table *mrt6; diff --git a/include/net/netns/smc.h b/include/net/netns/smc.h index ea8a9cf2619b..e5389eeaf8bd 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -12,5 +12,11 @@ struct netns_smc { /* protect fback_rsn */ struct mutex mutex_fback_rsn; struct smc_stats_rsn *fback_rsn; + + bool limit_smc_hs; /* constraint on handshake */ +#ifdef CONFIG_SYSCTL + struct ctl_table_header *smc_hdr; +#endif + unsigned int sysctl_autocorking_size; }; #endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 947733a639a6..bd7c3be4af5d 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -66,11 +66,7 @@ struct netns_xfrm { int sysctl_larval_drop; u32 sysctl_acq_expires; - u8 policy_default; -#define XFRM_POL_DEFAULT_IN 1 -#define XFRM_POL_DEFAULT_OUT 2 -#define XFRM_POL_DEFAULT_FWD 4 -#define XFRM_POL_DEFAULT_MASK 7 + u8 policy_default[XFRM_POLICY_MAX]; #ifdef CONFIG_SYSCTL struct ctl_table_header *sysctl_hdr; diff --git a/include/net/page_pool.h b/include/net/page_pool.h index 79a805542d0f..ea5fb70e5101 100644 --- a/include/net/page_pool.h +++ b/include/net/page_pool.h @@ -84,6 +84,48 @@ struct page_pool_params { void *init_arg; }; +#ifdef CONFIG_PAGE_POOL_STATS +struct page_pool_alloc_stats { + u64 fast; /* fast path allocations */ + u64 slow; /* slow-path order 0 allocations */ + u64 slow_high_order; /* slow-path high order allocations */ + u64 empty; /* failed refills due to empty ptr ring, forcing + * slow path allocation + */ + u64 refill; /* allocations via successful refill */ + u64 waive; /* failed refills due to numa zone mismatch */ +}; + +struct page_pool_recycle_stats { + u64 cached; /* recycling placed page in the cache. */ + u64 cache_full; /* cache was full */ + u64 ring; /* recycling placed page back into ptr ring */ + u64 ring_full; /* page was released from page-pool because + * PTR ring was full. + */ + u64 released_refcnt; /* page released because of elevated + * refcnt + */ +}; + +/* This struct wraps the above stats structs so users of the + * page_pool_get_stats API can pass a single argument when requesting the + * stats for the page pool. 
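+ *
+ * Editorial sketch, not part of the patch: a driver could harvest the
+ * stats declared below for a debugfs dump as follows, where pool and
+ * the seq_file s are assumed from context:
+ *
+ *	struct page_pool_stats stats = {};
+ *
+ *	if (page_pool_get_stats(pool, &stats))
+ *		seq_printf(s, "fast=%llu cached=%llu\n",
+ *			   stats.alloc_stats.fast,
+ *			   stats.recycle_stats.cached);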
+ */ +struct page_pool_stats { + struct page_pool_alloc_stats alloc_stats; + struct page_pool_recycle_stats recycle_stats; +}; + +/* + * Drivers that wish to harvest page pool stats and report them to users + * (perhaps via ethtool, debugfs, or another mechanism) can allocate a + * struct page_pool_stats and call page_pool_get_stats to get stats for the specified pool. + */ +bool page_pool_get_stats(struct page_pool *pool, + struct page_pool_stats *stats); +#endif + struct page_pool { struct page_pool_params p; @@ -96,6 +138,11 @@ struct page_pool { unsigned int frag_offset; struct page *frag_page; long frag_users; + +#ifdef CONFIG_PAGE_POOL_STATS + /* these stats are incremented while in softirq context */ + struct page_pool_alloc_stats alloc_stats; +#endif u32 xdp_mem_id; /* @@ -126,6 +173,10 @@ struct page_pool { */ struct ptr_ring ring; +#ifdef CONFIG_PAGE_POOL_STATS + /* recycle stats are per-cpu to avoid locking */ + struct page_pool_recycle_stats __percpu *recycle_stats; +#endif atomic_t pages_state_release_cnt; /* A page_pool is strictly tied to a single RX-queue being @@ -201,21 +252,67 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data, } #endif -void page_pool_put_page(struct page_pool *pool, struct page *page, - unsigned int dma_sync_size, bool allow_direct); +void page_pool_put_defragged_page(struct page_pool *pool, struct page *page, + unsigned int dma_sync_size, + bool allow_direct); -/* Same as above but will try to sync the entire area pool->max_len */ -static inline void page_pool_put_full_page(struct page_pool *pool, - struct page *page, bool allow_direct) +static inline void page_pool_fragment_page(struct page *page, long nr) +{ + atomic_long_set(&page->pp_frag_count, nr); +} + +static inline long page_pool_defrag_page(struct page *page, long nr) +{ + long ret; + + /* If nr == pp_frag_count then we have cleared all remaining + * references to the page. No need to actually overwrite it, instead + * we can leave this to be overwritten by the calling function. + * + * The main advantage to doing this is that an atomic_read is + * generally a much cheaper operation than an atomic update, + * especially when dealing with a page that may be partitioned + * into only 2 or 3 pieces. + */ + if (atomic_long_read(&page->pp_frag_count) == nr) + return 0; + + ret = atomic_long_sub_return(nr, &page->pp_frag_count); + WARN_ON(ret < 0); + return ret; +} + +static inline bool page_pool_is_last_frag(struct page_pool *pool, + struct page *page) +{ + /* If fragments aren't enabled or count is 0 we were the last user */ + return !(pool->p.flags & PP_FLAG_PAGE_FRAG) || + (page_pool_defrag_page(page, 1) == 0); +} + +static inline void page_pool_put_page(struct page_pool *pool, + struct page *page, + unsigned int dma_sync_size, + bool allow_direct) { /* When page_pool isn't compiled-in, net/core/xdp.c doesn't * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL - page_pool_put_page(pool, page, -1, allow_direct); + if (!page_pool_is_last_frag(pool, page)) + return; + + page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct); #endif } +/* Same as above but will try to sync the entire area pool->max_len */ +static inline void page_pool_put_full_page(struct page_pool *pool, + struct page *page, bool allow_direct) +{ + page_pool_put_page(pool, page, -1, allow_direct); +} + /* Same as above but the caller must guarantee safe context. 
e.g NAPI */ static inline void page_pool_recycle_direct(struct page_pool *pool, struct page *page) @@ -243,30 +340,6 @@ static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr) page->dma_addr_upper = upper_32_bits(addr); } -static inline void page_pool_set_frag_count(struct page *page, long nr) -{ - atomic_long_set(&page->pp_frag_count, nr); -} - -static inline long page_pool_atomic_sub_frag_count_return(struct page *page, - long nr) -{ - long ret; - - /* As suggested by Alexander, atomic_long_read() may cover up the - * reference count errors, so avoid calling atomic_long_read() in - * the cases of freeing or draining the page_frags, where we would - * not expect it to match or that are slowpath anyway. - */ - if (__builtin_constant_p(nr) && - atomic_long_read(&page->pp_frag_count) == nr) - return 0; - - ret = atomic_long_sub_return(nr, &page->pp_frag_count); - WARN_ON(ret < 0); - return ret; -} - static inline bool is_page_pool_compiled_in(void) { #ifdef CONFIG_PAGE_POOL diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 676cb8ea9e15..a3b57a93228a 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -1028,4 +1028,15 @@ struct tc_fifo_qopt_offload { }; }; +#ifdef CONFIG_NET_CLS_ACT +DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc); +void tc_skb_ext_tc_enable(void); +void tc_skb_ext_tc_disable(void); +#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc) +#else /* CONFIG_NET_CLS_ACT */ +static inline void tc_skb_ext_tc_enable(void) { } +static inline void tc_skb_ext_tc_disable(void) { } +#define tc_skb_ext_tc_enabled() false +#endif + #endif diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 9e7b21c0b3a6..44a35531952e 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -63,12 +63,6 @@ static inline psched_time_t psched_get_time(void) return PSCHED_NS2TICKS(ktime_get_ns()); } -static inline psched_tdiff_t -psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound) -{ - return min(tv1 - tv2, bound); -} - struct qdisc_watchdog { u64 last_expires; struct hrtimer timer; diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 29e41ff3ec93..144c39db9898 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -70,6 +70,7 @@ struct request_sock { struct saved_syn *saved_syn; u32 secid; u32 peer_secid; + u32 timeout; }; static inline struct request_sock *inet_reqsk(const struct sock *sk) @@ -104,6 +105,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, sk_node_init(&req_to_sk(req)->sk_node); sk_tx_queue_clear(req_to_sk(req)); req->saved_syn = NULL; + req->timeout = 0; req->num_timeout = 0; req->num_retrans = 0; req->sk = NULL; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 472843eedbae..9bab396c1f3b 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -518,11 +518,6 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) BUILD_BUG_ON(sizeof(qcb->data) < sz); } -static inline int qdisc_qlen_cpu(const struct Qdisc *q) -{ - return this_cpu_ptr(q->cpu_qstats)->qlen; -} - static inline int qdisc_qlen(const struct Qdisc *q) { return q->q.qlen; diff --git a/include/net/sock.h b/include/net/sock.h index 50aecd28b355..c4b91fc19b9c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -316,6 +316,7 @@ struct sk_filter; * @sk_rcvtimeo: %SO_RCVTIMEO setting * @sk_sndtimeo: %SO_SNDTIMEO setting * @sk_txhash: computed flow hash for use on transmit + * 
@sk_txrehash: enable TX hash rethink * @sk_filter: socket filtering instructions * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received @@ -491,6 +492,7 @@ struct sock { u32 sk_ack_backlog; u32 sk_max_ack_backlog; kuid_t sk_uid; + u8 sk_txrehash; #ifdef CONFIG_NET_RX_BUSY_POLL u8 sk_prefer_busy_poll; u16 sk_busy_poll_budget; @@ -587,6 +589,18 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk) __tmp | SK_USER_DATA_NOCOPY); \ }) +static inline +struct net *sock_net(const struct sock *sk) +{ + return read_pnet(&sk->sk_net); +} + +static inline +void sock_net_set(struct sock *sk, struct net *net) +{ + write_pnet(&sk->sk_net, net); +} + /* * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK * or not whether his port will be reused by someone else. SK_FORCE_REUSE @@ -2054,7 +2068,7 @@ static inline void sk_set_txhash(struct sock *sk) static inline bool sk_rethink_txhash(struct sock *sk) { - if (sk->sk_txhash) { + if (sk->sk_txhash && sk->sk_txrehash == SOCK_TXREHASH_ENABLED) { sk_set_txhash(sk); return true; } @@ -2704,18 +2718,6 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); } -static inline -struct net *sock_net(const struct sock *sk) -{ - return read_pnet(&sk->sk_net); -} - -static inline -void sock_net_set(struct sock *sk, struct net *net) -{ - write_pnet(&sk->sk_net, net); -} - static inline bool skb_sk_is_prefetched(struct sk_buff *skb) { diff --git a/include/net/switchdev.h b/include/net/switchdev.h index d353793dfeb5..aa0171d5786d 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -19,6 +19,7 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_UNDEFINED, SWITCHDEV_ATTR_ID_PORT_STP_STATE, + SWITCHDEV_ATTR_ID_PORT_MST_STATE, SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS, SWITCHDEV_ATTR_ID_PORT_MROUTER, @@ -27,7 +28,14 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL, SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, + SWITCHDEV_ATTR_ID_BRIDGE_MST, SWITCHDEV_ATTR_ID_MRP_PORT_ROLE, + SWITCHDEV_ATTR_ID_VLAN_MSTI, +}; + +struct switchdev_mst_state { + u16 msti; + u8 state; }; struct switchdev_brport_flags { @@ -35,6 +43,11 @@ struct switchdev_brport_flags { unsigned long mask; }; +struct switchdev_vlan_msti { + u16 vid; + u16 msti; +}; + struct switchdev_attr { struct net_device *orig_dev; enum switchdev_attr_id id; @@ -43,13 +56,16 @@ struct switchdev_attr { void (*complete)(struct net_device *dev, int err, void *priv); union { u8 stp_state; /* PORT_STP_STATE */ + struct switchdev_mst_state mst_state; /* PORT_MST_STATE */ struct switchdev_brport_flags brport_flags; /* PORT_BRIDGE_FLAGS */ bool mrouter; /* PORT_MROUTER */ clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */ + bool mst; /* BRIDGE_MST */ bool mc_disabled; /* MC_DISABLED */ u8 mrp_port_role; /* MRP_PORT_ROLE */ + struct switchdev_vlan_msti vlan_msti; /* VLAN_MSTI */ } u; }; @@ -81,6 +97,13 @@ struct switchdev_obj_port_vlan { struct switchdev_obj obj; u16 flags; u16 vid; + /* If set, the notifier signifies a change of one of the following + * flags for a VLAN that already exists: + * - BRIDGE_VLAN_INFO_PVID + * - BRIDGE_VLAN_INFO_UNTAGGED + * Entries with BRIDGE_VLAN_INFO_BRENTRY unset are not notified at all. 
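The comment above, together with the `bool changed` member added just below it, lets a switchdev driver tell a newly installed VLAN apart from a flags-only update on an existing one. A hedged sketch of how a port_vlan handler might branch on that bit; the struct is a cut-down model and the flag values are illustrative, not the bridge UAPI constants.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_INFO_PVID          (1 << 1)        /* illustrative values */
    #define VLAN_INFO_UNTAGGED      (1 << 2)

    struct port_vlan {              /* cut-down switchdev_obj_port_vlan */
            uint16_t flags;
            uint16_t vid;
            bool changed;
    };

    static int port_vlan_add(const struct port_vlan *v)
    {
            if (v->changed) {       /* VLAN exists; only PVID/UNTAGGED moved */
                    printf("update vid %u flags %#x\n", v->vid, v->flags);
                    return 0;
            }
            printf("install vid %u flags %#x\n", v->vid, v->flags);
            return 0;
    }

    int main(void)
    {
            struct port_vlan v = { .vid = 100, .flags = VLAN_INFO_PVID };

            port_vlan_add(&v);              /* brand-new entry */
            v.changed = true;
            v.flags |= VLAN_INFO_UNTAGGED;
            return port_vlan_add(&v);       /* flags-only change */
    }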
+ */ + bool changed; }; #define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \ @@ -306,10 +329,7 @@ int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long e const struct net_device *foreign_dev), int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, unsigned long event, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev, - unsigned long event, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info)); + const struct switchdev_notifier_fdb_info *fdb_info)); int switchdev_handle_port_obj_add(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, @@ -317,11 +337,26 @@ int switchdev_handle_port_obj_add(struct net_device *dev, int (*add_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj, struct netlink_ext_ack *extack)); +int switchdev_handle_port_obj_add_foreign(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct net_device *dev, + const struct net_device *foreign_dev), + int (*add_cb)(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack)); int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, bool (*check_cb)(const struct net_device *dev), int (*del_cb)(struct net_device *dev, const void *ctx, const struct switchdev_obj *obj)); +int switchdev_handle_port_obj_del_foreign(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct net_device *dev, + const struct net_device *foreign_dev), + int (*del_cb)(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj)); int switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, @@ -421,10 +456,7 @@ switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event const struct net_device *foreign_dev), int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev, unsigned long event, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info), - int (*lag_mod_cb)(struct net_device *dev, struct net_device *orig_dev, - unsigned long event, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info)) + const struct switchdev_notifier_fdb_info *fdb_info)) { return 0; } @@ -440,6 +472,18 @@ switchdev_handle_port_obj_add(struct net_device *dev, return 0; } +static inline int switchdev_handle_port_obj_add_foreign(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct net_device *dev, + const struct net_device *foreign_dev), + int (*add_cb)(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack)) +{ + return 0; +} + static inline int switchdev_handle_port_obj_del(struct net_device *dev, struct switchdev_notifier_port_obj_info *port_obj_info, @@ -451,6 +495,18 @@ switchdev_handle_port_obj_del(struct net_device *dev, } static inline int +switchdev_handle_port_obj_del_foreign(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + bool (*check_cb)(const struct net_device *dev), + bool (*foreign_dev_check_cb)(const struct 
net_device *dev, + const struct net_device *foreign_dev), + int (*del_cb)(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj)) +{ + return 0; +} + +static inline int switchdev_handle_port_attr_set(struct net_device *dev, struct switchdev_notifier_port_attr_info *port_attr_info, bool (*check_cb)(const struct net_device *dev), diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h index 72649512dcdd..283bde711a42 100644 --- a/include/net/tc_act/tc_police.h +++ b/include/net/tc_act/tc_police.h @@ -159,4 +159,34 @@ static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act) return params->tcfp_mtu; } +static inline u64 tcf_police_peakrate_bytes_ps(const struct tc_action *act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + return params->peak.rate_bytes_ps; +} + +static inline u32 tcf_police_tcfp_ewma_rate(const struct tc_action *act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + return params->tcfp_ewma_rate; +} + +static inline u16 tcf_police_rate_overhead(const struct tc_action *act) +{ + struct tcf_police *police = to_police(act); + struct tcf_police_params *params; + + params = rcu_dereference_protected(police->params, + lockdep_is_held(&police->tcf_lock)); + return params->rate.overhead; +} + #endif /* __NET_TC_POLICE_H */ diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h index f94b8bc26f9e..904eddfc1826 100644 --- a/include/net/tc_act/tc_vlan.h +++ b/include/net/tc_act/tc_vlan.h @@ -78,4 +78,14 @@ static inline u8 tcf_vlan_push_prio(const struct tc_action *a) return tcfv_push_prio; } + +static inline void tcf_vlan_push_eth(unsigned char *src, unsigned char *dest, + const struct tc_action *a) +{ + rcu_read_lock(); + memcpy(dest, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_dst, ETH_ALEN); + memcpy(src, rcu_dereference(to_vlan(a)->vlan_p)->tcfv_push_src, ETH_ALEN); + rcu_read_unlock(); +} + #endif /* __NET_TC_VLAN_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index b9fc978fb2ca..70ca4a5e330a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1367,7 +1367,8 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb) __skb_checksum_complete(skb); } -bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); +bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, + enum skb_drop_reason *reason); #ifdef CONFIG_INET void __sk_defer_free_flush(struct sock *sk); @@ -1674,6 +1675,12 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index, return __tcp_md5_do_lookup(sk, l3index, addr, family); } +enum skb_drop_reason +tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif); + + #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) #else static inline struct tcp_md5sig_key * @@ -1682,6 +1689,14 @@ tcp_md5_do_lookup(const struct sock *sk, int l3index, { return NULL; } + +static inline enum skb_drop_reason +tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif) +{ + return SKB_NOT_DROPPED_YET; +} #define tcp_twsk_md5_key(twsk) NULL #endif @@ -1817,11 +1832,6 @@ static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) return 
skb_rb_last(&sk->tcp_rtx_queue); } -static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) -{ - return skb_peek(&sk->sk_write_queue); -} - static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) { return skb_peek_tail(&sk->sk_write_queue); @@ -2358,7 +2368,7 @@ static inline u32 tcp_timeout_init(struct sock *sk) if (timeout <= 0) timeout = TCP_TIMEOUT_INIT; - return timeout; + return min_t(int, timeout, TCP_RTO_MAX); } static inline u32 tcp_rwnd_init_bpf(struct sock *sk) diff --git a/include/net/tls.h b/include/net/tls.h index 526cb2c3b724..b6968a5b5538 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -626,7 +626,6 @@ tls_offload_ctx_rx(const struct tls_context *tls_ctx) return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx; } -#if IS_ENABLED(CONFIG_TLS_DEVICE) static inline void *__tls_driver_ctx(struct tls_context *tls_ctx, enum tls_offload_ctx_dir direction) { @@ -641,7 +640,6 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) { return __tls_driver_ctx(tls_get_ctx(sk), direction); } -#endif #define RESYNC_REQ BIT(0) #define RESYNC_REQ_ASYNC BIT(1) diff --git a/include/net/udplite.h b/include/net/udplite.h index 9185e45b997f..a3c53110d30b 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -70,49 +70,6 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) return 0; } -/* Slow-path computation of checksum. Socket is locked. */ -static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) -{ - const struct udp_sock *up = udp_sk(skb->sk); - int cscov = up->len; - __wsum csum = 0; - - if (up->pcflag & UDPLITE_SEND_CC) { - /* - * Sender has set `partial coverage' option on UDP-Lite socket. - * The special case "up->pcslen == 0" signifies full coverage. - */ - if (up->pcslen < up->len) { - if (0 < up->pcslen) - cscov = up->pcslen; - udp_hdr(skb)->len = htons(up->pcslen); - } - /* - * NOTE: Causes for the error case `up->pcslen > up->len': - * (i) Application error (will not be penalized). - * (ii) Payload too big for send buffer: data is split - * into several packets, each with its own header. - * In this case (e.g. last segment), coverage may - * exceed packet length. - * Since packets with coverage length > packet length are - * illegal, we fall back to the defaults here. - */ - } - - skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ - - skb_queue_walk(&sk->sk_write_queue, skb) { - const int off = skb_transport_offset(skb); - const int len = skb->len - off; - - csum = skb_checksum(skb, off, (cscov > len)? len : cscov, csum); - - if ((cscov -= len) <= 0) - break; - } - return csum; -} - /* Fast-path computation of checksum. Socket may not be locked. 
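The slow-path helper removed above still documents the UDP-Lite coverage rules precisely: pcslen == 0 means full coverage, a positive pcslen below the datagram length means partial coverage, and anything out of range falls back to covering everything. A standalone model of just that selection step; udplite_coverage() is a stand-in name.

    #include <assert.h>
    #include <stdio.h>

    /* how many payload bytes the checksum must cover */
    static int udplite_coverage(int pcslen, int len)
    {
            if (pcslen > 0 && pcslen < len)
                    return pcslen;  /* sender requested partial coverage */
            return len;             /* 0 or out-of-range: full coverage */
    }

    int main(void)
    {
            assert(udplite_coverage(0, 1000) == 1000);    /* full */
            assert(udplite_coverage(20, 1000) == 20);     /* partial */
            assert(udplite_coverage(2000, 1000) == 1000); /* illegal: fall back */
            puts("ok");
            return 0;
    }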
*/ static inline __wsum udplite_csum(struct sk_buff *skb) { diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 5a934bebe630..bca5b01af247 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -227,11 +227,56 @@ struct vxlan_config { enum ifla_vxlan_df df; }; +enum { + VXLAN_VNI_STATS_RX, + VXLAN_VNI_STATS_RX_DROPS, + VXLAN_VNI_STATS_RX_ERRORS, + VXLAN_VNI_STATS_TX, + VXLAN_VNI_STATS_TX_DROPS, + VXLAN_VNI_STATS_TX_ERRORS, +}; + +struct vxlan_vni_stats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_drops; + u64 rx_errors; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; + u64 tx_errors; +}; + +struct vxlan_vni_stats_pcpu { + struct vxlan_vni_stats stats; + struct u64_stats_sync syncp; +}; + struct vxlan_dev_node { struct hlist_node hlist; struct vxlan_dev *vxlan; }; +struct vxlan_vni_node { + struct rhash_head vnode; + struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */ +#if IS_ENABLED(CONFIG_IPV6) + struct vxlan_dev_node hlist6; /* vni hash table for IPv6 socket */ +#endif + struct list_head vlist; + __be32 vni; + union vxlan_addr remote_ip; /* default remote ip for this vni */ + struct vxlan_vni_stats_pcpu __percpu *stats; + + struct rcu_head rcu; +}; + +struct vxlan_vni_group { + struct rhashtable vni_hash; + struct list_head vni_list; + u32 num_vnis; +}; + /* Pseudo network device */ struct vxlan_dev { struct vxlan_dev_node hlist4; /* vni hash table for IPv4 socket */ @@ -254,6 +299,8 @@ struct vxlan_dev { struct vxlan_config cfg; + struct vxlan_vni_group __rcu *vnigrp; + struct hlist_head fdb_head[FDB_HASH_SIZE]; }; @@ -274,6 +321,7 @@ struct vxlan_dev { #define VXLAN_F_GPE 0x4000 #define VXLAN_F_IPV6_LINKLOCAL 0x8000 #define VXLAN_F_TTL_INHERIT 0x10000 +#define VXLAN_F_VNIFILTER 0x20000 /* Flags that are used in the receive path. These flags must match in * order for a socket to be shareable @@ -283,7 +331,8 @@ struct vxlan_dev { VXLAN_F_UDP_ZERO_CSUM6_RX | \ VXLAN_F_REMCSUM_RX | \ VXLAN_F_REMCSUM_NOPARTIAL | \ - VXLAN_F_COLLECT_METADATA) + VXLAN_F_COLLECT_METADATA | \ + VXLAN_F_VNIFILTER) /* Flags that can be set together with VXLAN_F_GPE. 
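VXLAN_F_RCV_FLAGS above names the flags that "must match in order for a socket to be shareable"; that rule reduces to XOR-ing the two flag words and masking. A small model of the check; the helper name is ours, VXLAN_F_VNIFILTER's value comes from this header, and the other flag values are illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    #define VXLAN_F_LEARN                   0x00004 /* illustrative */
    #define VXLAN_F_COLLECT_METADATA        0x02000 /* illustrative */
    #define VXLAN_F_VNIFILTER               0x20000 /* as added above */

    /* flags that must be identical on devices sharing one UDP socket */
    #define VXLAN_F_RCV_FLAGS (VXLAN_F_COLLECT_METADATA | VXLAN_F_VNIFILTER)

    static bool vxlan_sock_shareable(unsigned int a, unsigned int b)
    {
            return ((a ^ b) & VXLAN_F_RCV_FLAGS) == 0;
    }

    int main(void)
    {
            unsigned int d1 = VXLAN_F_COLLECT_METADATA;
            unsigned int d2 = VXLAN_F_COLLECT_METADATA | VXLAN_F_LEARN;
            unsigned int d3 = VXLAN_F_VNIFILTER;

            printf("d1/d2 share: %d\n", vxlan_sock_shareable(d1, d2)); /* 1 */
            printf("d1/d3 share: %d\n", vxlan_sock_shareable(d1, d3)); /* 0 */
            return 0;
    }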
*/ #define VXLAN_F_ALLOWED_GPE (VXLAN_F_GPE | \ @@ -292,7 +341,8 @@ struct vxlan_dev { VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_UDP_ZERO_CSUM6_TX | \ VXLAN_F_UDP_ZERO_CSUM6_RX | \ - VXLAN_F_COLLECT_METADATA) + VXLAN_F_COLLECT_METADATA | \ + VXLAN_F_VNIFILTER) struct net_device *vxlan_dev_create(struct net *net, const char *name, u8 name_assign_type, struct vxlan_config *conf); diff --git a/include/net/xdp.h b/include/net/xdp.h index 8f0812e4996d..04c852c7a77f 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -60,12 +60,20 @@ struct xdp_rxq_info { u32 reg_state; struct xdp_mem_info mem; unsigned int napi_id; + u32 frag_size; } ____cacheline_aligned; /* perf critical, avoid false-sharing */ struct xdp_txq_info { struct net_device *dev; }; +enum xdp_buff_flags { + XDP_FLAGS_HAS_FRAGS = BIT(0), /* non-linear xdp buff */ + XDP_FLAGS_FRAGS_PF_MEMALLOC = BIT(1), /* xdp paged memory is under + * pressure + */ +}; + struct xdp_buff { void *data; void *data_end; @@ -74,13 +82,40 @@ struct xdp_buff { struct xdp_rxq_info *rxq; struct xdp_txq_info *txq; u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/ + u32 flags; /* supported values defined in xdp_buff_flags */ }; +static __always_inline bool xdp_buff_has_frags(struct xdp_buff *xdp) +{ + return !!(xdp->flags & XDP_FLAGS_HAS_FRAGS); +} + +static __always_inline void xdp_buff_set_frags_flag(struct xdp_buff *xdp) +{ + xdp->flags |= XDP_FLAGS_HAS_FRAGS; +} + +static __always_inline void xdp_buff_clear_frags_flag(struct xdp_buff *xdp) +{ + xdp->flags &= ~XDP_FLAGS_HAS_FRAGS; +} + +static __always_inline bool xdp_buff_is_frag_pfmemalloc(struct xdp_buff *xdp) +{ + return !!(xdp->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC); +} + +static __always_inline void xdp_buff_set_frag_pfmemalloc(struct xdp_buff *xdp) +{ + xdp->flags |= XDP_FLAGS_FRAGS_PF_MEMALLOC; +} + static __always_inline void xdp_init_buff(struct xdp_buff *xdp, u32 frame_sz, struct xdp_rxq_info *rxq) { xdp->frame_sz = frame_sz; xdp->rxq = rxq; + xdp->flags = 0; } static __always_inline void @@ -111,6 +146,20 @@ xdp_get_shared_info_from_buff(struct xdp_buff *xdp) return (struct skb_shared_info *)xdp_data_hard_end(xdp); } +static __always_inline unsigned int xdp_get_buff_len(struct xdp_buff *xdp) +{ + unsigned int len = xdp->data_end - xdp->data; + struct skb_shared_info *sinfo; + + if (likely(!xdp_buff_has_frags(xdp))) + goto out; + + sinfo = xdp_get_shared_info_from_buff(xdp); + len += sinfo->xdp_frags_size; +out: + return len; +} + struct xdp_frame { void *data; u16 len; @@ -122,8 +171,19 @@ struct xdp_frame { */ struct xdp_mem_info mem; struct net_device *dev_rx; /* used by cpumap */ + u32 flags; /* supported values defined in xdp_buff_flags */ }; +static __always_inline bool xdp_frame_has_frags(struct xdp_frame *frame) +{ + return !!(frame->flags & XDP_FLAGS_HAS_FRAGS); +} + +static __always_inline bool xdp_frame_is_frag_pfmemalloc(struct xdp_frame *frame) +{ + return !!(frame->flags & XDP_FLAGS_FRAGS_PF_MEMALLOC); +} + #define XDP_BULK_QUEUE_SIZE 16 struct xdp_frame_bulk { int count; @@ -159,6 +219,19 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame) frame->dev_rx = NULL; } +static inline void +xdp_update_skb_shared_info(struct sk_buff *skb, u8 nr_frags, + unsigned int size, unsigned int truesize, + bool pfmemalloc) +{ + skb_shinfo(skb)->nr_frags = nr_frags; + + skb->len += size; + skb->data_len += size; + skb->truesize += truesize; + skb->pfmemalloc |= pfmemalloc; +} + /* Avoids inlining WARN macro in fast-path */ void xdp_warn(const char *msg, const char 
*func, const int line); #define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__) @@ -180,6 +253,7 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) xdp->data_end = frame->data + frame->len; xdp->data_meta = frame->data - frame->metasize; xdp->frame_sz = frame->frame_sz; + xdp->flags = frame->flags; } static inline @@ -206,6 +280,7 @@ int xdp_update_frame_from_buff(struct xdp_buff *xdp, xdp_frame->headroom = headroom - sizeof(*xdp_frame); xdp_frame->metasize = metasize; xdp_frame->frame_sz = xdp->frame_sz; + xdp_frame->flags = xdp->flags; return 0; } @@ -230,6 +305,8 @@ struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) return xdp_frame; } +void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, + struct xdp_buff *xdp); void xdp_return_frame(struct xdp_frame *xdpf); void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); void xdp_return_buff(struct xdp_buff *xdp); @@ -246,14 +323,51 @@ void __xdp_release_frame(void *data, struct xdp_mem_info *mem); static inline void xdp_release_frame(struct xdp_frame *xdpf) { struct xdp_mem_info *mem = &xdpf->mem; + struct skb_shared_info *sinfo; + int i; /* Curr only page_pool needs this */ - if (mem->type == MEM_TYPE_PAGE_POOL) - __xdp_release_frame(xdpf->data, mem); + if (mem->type != MEM_TYPE_PAGE_POOL) + return; + + if (likely(!xdp_frame_has_frags(xdpf))) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + for (i = 0; i < sinfo->nr_frags; i++) { + struct page *page = skb_frag_page(&sinfo->frags[i]); + + __xdp_release_frame(page_address(page), mem); + } +out: + __xdp_release_frame(xdpf->data, mem); +} + +static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf) +{ + struct skb_shared_info *sinfo; + unsigned int len = xdpf->len; + + if (likely(!xdp_frame_has_frags(xdpf))) + goto out; + + sinfo = xdp_get_shared_info_from_frame(xdpf); + len += sinfo->xdp_frags_size; +out: + return len; +} + +int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index, + unsigned int napi_id, u32 frag_size); +static inline int +xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, + struct net_device *dev, u32 queue_index, + unsigned int napi_id) +{ + return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0); } -int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, - struct net_device *dev, u32 queue_index, unsigned int napi_id); void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 443d45951564..4aa031849668 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -13,7 +13,7 @@ void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries); bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc); -u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max); +u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max); void xsk_tx_release(struct xsk_buff_pool *pool); struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id); @@ -142,8 +142,7 @@ static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, return false; } -static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, - u32 max) +static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max) { return 0; } diff --git 
a/include/net/xfrm.h b/include/net/xfrm.h index 76aa6f11a540..6fb899ff5afc 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1081,25 +1081,18 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un } #ifdef CONFIG_XFRM -static inline bool -xfrm_default_allow(struct net *net, int dir) -{ - u8 def = net->xfrm.policy_default; - - switch (dir) { - case XFRM_POLICY_IN: - return def & XFRM_POL_DEFAULT_IN ? false : true; - case XFRM_POLICY_OUT: - return def & XFRM_POL_DEFAULT_OUT ? false : true; - case XFRM_POLICY_FWD: - return def & XFRM_POL_DEFAULT_FWD ? false : true; - } - return false; -} - int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family); +static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb, + int dir) +{ + if (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) + return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT; + + return false; +} + static inline int __xfrm_policy_check2(struct sock *sk, int dir, struct sk_buff *skb, unsigned int family, int reverse) @@ -1110,13 +1103,9 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir, if (sk && sk->sk_policy[XFRM_POLICY_IN]) return __xfrm_policy_check(sk, ndir, skb, family); - if (xfrm_default_allow(net, dir)) - return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) || - (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || - __xfrm_policy_check(sk, ndir, skb, family); - else - return (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || - __xfrm_policy_check(sk, ndir, skb, family); + return __xfrm_check_nopolicy(net, skb, dir) || + (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) || + __xfrm_policy_check(sk, ndir, skb, family); } static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) @@ -1168,13 +1157,12 @@ static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family) { struct net *net = dev_net(skb->dev); - if (xfrm_default_allow(net, XFRM_POLICY_OUT)) - return !net->xfrm.policy_count[XFRM_POLICY_OUT] || - (skb_dst(skb)->flags & DST_NOXFRM) || - __xfrm_route_forward(skb, family); - else - return (skb_dst(skb)->flags & DST_NOXFRM) || - __xfrm_route_forward(skb, family); + if (!net->xfrm.policy_count[XFRM_POLICY_OUT] && + net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT) + return true; + + return (skb_dst(skb)->flags & DST_NOXFRM) || + __xfrm_route_forward(skb, family); } static inline int xfrm4_route_forward(struct sk_buff *skb) diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index ddeefc4a1040..5554ee75e7da 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -60,6 +60,7 @@ struct xsk_buff_pool { */ dma_addr_t *dma_pages; struct xdp_buff_xsk *heads; + struct xdp_desc *tx_descs; u64 chunk_mask; u64 addrs_cnt; u32 free_list_cnt; diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h index d0337a41141c..1e694fd239b9 100644 --- a/include/ras/ras_event.h +++ b/include/ras/ras_event.h @@ -374,6 +374,7 @@ TRACE_EVENT(aer_event, EM ( MF_MSG_BUDDY, "free buddy page" ) \ EM ( MF_MSG_DAX, "dax page" ) \ EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \ + EM ( MF_MSG_DIFFERENT_PAGE_SIZE, "different page size" ) \ EMe ( MF_MSG_UNKNOWN, "unknown page" ) /* diff --git a/include/rdma/ib.h b/include/rdma/ib.h index 83139b9ce409..f7c185ff7a11 100644 --- a/include/rdma/ib.h +++ b/include/rdma/ib.h @@ -75,7 +75,7 @@ struct sockaddr_ib { */ static inline bool 
ib_safe_file_access(struct file *filp) { - return filp->f_cred == current_cred() && !uaccess_kernel(); + return filp->f_cred == current_cred(); } #endif /* _RDMA_IB_H */ diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h index 00191695233a..56a5d2b5a624 100644 --- a/include/scsi/fc/fc_ms.h +++ b/include/scsi/fc/fc_ms.h @@ -158,7 +158,7 @@ struct fc_fdmi_port_name { struct fc_fdmi_attr_entry { __be16 type; __be16 len; - __u8 value[1]; + __u8 value[]; } __attribute__((__packed__)); /* @@ -166,7 +166,7 @@ struct fc_fdmi_attr_entry { */ struct fs_fdmi_attrs { __be32 numattrs; - struct fc_fdmi_attr_entry attr[1]; + struct fc_fdmi_attr_entry attr[]; } __attribute__((__packed__)); /* diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index eeb8d689ff6b..6e29e1719db1 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h @@ -352,6 +352,15 @@ struct fc_fcp_pkt { } ____cacheline_aligned_in_smp; /* + * @fsp should be tested and set under the scsi_pkt_queue lock + */ +struct libfc_cmd_priv { + struct fc_fcp_pkt *fsp; + u32 resid_len; + u8 status; +}; + +/* * Structure and function definitions for managing Fibre Channel Exchanges * and Sequences * diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 4ee233e5a6ff..e76c94697c1b 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -19,6 +19,7 @@ #include <linux/refcount.h> #include <scsi/iscsi_proto.h> #include <scsi/iscsi_if.h> +#include <scsi/scsi_cmnd.h> #include <scsi/scsi_transport_iscsi.h> struct scsi_transport_template; @@ -152,6 +153,17 @@ static inline bool iscsi_task_is_completed(struct iscsi_task *task) task->state == ISCSI_TASK_ABRT_SESS_RECOV; } +/* Private data associated with struct scsi_cmnd. */ +struct iscsi_cmd { + struct iscsi_task *task; + int age; +}; + +static inline struct iscsi_cmd *iscsi_cmd(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + /* Connection's states */ enum { ISCSI_CONN_INITIAL_STAGE, @@ -371,7 +383,6 @@ struct iscsi_host { int state; struct workqueue_struct *workq; - char workq_name[20]; }; /* diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 698f2032807b..ff04eb6d250b 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -492,7 +492,6 @@ enum exec_status { SAS_INTERRUPTED, SAS_QUEUE_FULL, SAS_DEVICE_UNKNOWN, - SAS_SG_ERR, SAS_OPEN_REJECT, SAS_OPEN_TO, SAS_PROTO_RESPONSE, @@ -553,6 +552,21 @@ struct sas_ata_task { u8 stp_affil_pol:1; u8 device_control_reg_update:1; + + bool force_phy; + int force_phy_id; +}; + +/* LLDDs rely on these values */ +enum sas_internal_abort { + SAS_INTERNAL_ABORT_SINGLE = 0, + SAS_INTERNAL_ABORT_DEV = 1, +}; + +struct sas_internal_abort_task { + enum sas_internal_abort type; + unsigned int qid; + u16 tag; }; struct sas_smp_task { @@ -577,6 +591,11 @@ struct sas_ssp_task { struct scsi_cmnd *cmd; }; +struct sas_tmf_task { + u8 tmf; + u16 tag_of_task_to_be_managed; +}; + struct sas_task { struct domain_device *dev; @@ -589,6 +608,7 @@ struct sas_task { struct sas_ata_task ata_task; struct sas_smp_task smp_task; struct sas_ssp_task ssp_task; + struct sas_internal_abort_task abort_task; }; struct scatterlist *scatter; @@ -602,6 +622,7 @@ struct sas_task { void *lldd_task; /* for use by LLDDs */ void *uldd_task; struct sas_task_slow *slow_task; + struct sas_tmf_task *tmf; }; struct sas_task_slow { @@ -617,12 +638,16 @@ struct sas_task_slow { #define SAS_TASK_STATE_DONE 2 #define SAS_TASK_STATE_ABORTED 4 #define SAS_TASK_NEED_DEV_RESET 8 -#define SAS_TASK_AT_INITIATOR 16 extern struct sas_task 
*sas_alloc_task(gfp_t flags); extern struct sas_task *sas_alloc_slow_task(gfp_t flags); extern void sas_free_task(struct sas_task *task); +static inline bool sas_is_internal_abort(struct sas_task *task) +{ + return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT; +} + struct sas_domain_function_template { /* The class calls these to notify the LLDD of an event. */ void (*lldd_port_formed)(struct asd_sas_phy *); @@ -637,7 +662,6 @@ struct sas_domain_function_template { /* Task Management Functions. Must be called from process context. */ int (*lldd_abort_task)(struct sas_task *); int (*lldd_abort_task_set)(struct domain_device *, u8 *lun); - int (*lldd_clear_aca)(struct domain_device *, u8 *lun); int (*lldd_clear_task_set)(struct domain_device *, u8 *lun); int (*lldd_I_T_nexus_reset)(struct domain_device *); int (*lldd_ata_check_ready)(struct domain_device *); @@ -645,6 +669,11 @@ struct sas_domain_function_template { int (*lldd_lu_reset)(struct domain_device *, u8 *lun); int (*lldd_query_task)(struct sas_task *); + /* Special TMF callbacks */ + void (*lldd_tmf_exec_complete)(struct domain_device *dev); + void (*lldd_tmf_aborted)(struct sas_task *task); + bool (*lldd_abort_timeout)(struct sas_task *task, void *data); + /* Port and Adapter management */ int (*lldd_clear_nexus_port)(struct asd_sas_port *); int (*lldd_clear_nexus_ha)(struct sas_ha_struct *); @@ -673,6 +702,11 @@ extern int sas_slave_configure(struct scsi_device *); extern int sas_change_queue_depth(struct scsi_device *, int new_depth); extern int sas_bios_param(struct scsi_device *, struct block_device *, sector_t capacity, int *hsc); +int sas_execute_internal_abort_single(struct domain_device *device, + u16 tag, unsigned int qid, + void *data); +int sas_execute_internal_abort_dev(struct domain_device *device, + unsigned int qid, void *data); extern struct scsi_transport_template * sas_domain_attach_transport(struct sas_domain_function_template *); extern struct device_attribute dev_attr_phy_event_threshold; @@ -685,7 +719,7 @@ int sas_ex_revalidate_domain(struct domain_device *); void sas_unregister_domain_devices(struct asd_sas_port *port, int gone); void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *); -int sas_discover_event(struct asd_sas_port *, enum discover_event ev); +void sas_discover_event(struct asd_sas_port *, enum discover_event ev); int sas_discover_sata(struct domain_device *); int sas_discover_end_dev(struct domain_device *); @@ -711,9 +745,15 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev); int sas_request_addr(struct Scsi_Host *shost, u8 *addr); -int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, - gfp_t gfp_flags); -int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, - gfp_t gfp_flags); +int sas_abort_task_set(struct domain_device *dev, u8 *lun); +int sas_clear_task_set(struct domain_device *dev, u8 *lun); +int sas_lu_reset(struct domain_device *dev, u8 *lun); +int sas_query_task(struct sas_task *task, u16 tag); +int sas_abort_task(struct sas_task *task, u16 tag); + +void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, + gfp_t gfp_flags); +void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, + gfp_t gfp_flags); #endif /* _SASLIB_H_ */ diff --git a/include/scsi/sas.h b/include/scsi/sas.h index 64154c1fed02..acfc69fd72d0 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -95,6 +95,8 @@ enum sas_protocol { SAS_PROTOCOL_SSP = 0x08, SAS_PROTOCOL_ALL = 0x0E, SAS_PROTOCOL_STP_ALL = 
SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA, + /* these are internal to libsas */ + SAS_PROTOCOL_INTERNAL_ABORT = 0x10, }; /* From the spec; local phys only */ @@ -191,6 +193,13 @@ enum sas_gpio_reg_type { SAS_GPIO_REG_TX_GP = 4, }; +/* Response frame DATAPRES field */ +enum { + SAS_DATAPRES_NO_DATA = 0, + SAS_DATAPRES_RESPONSE_DATA = 1, + SAS_DATAPRES_SENSE_DATA = 2, +}; + struct dev_to_host_fis { u8 fis_type; /* 0x34 */ u8 flags; diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index 416c9c47d0e7..d47dea70855d 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h @@ -25,14 +25,15 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy); int sas_ata_init(struct domain_device *dev); void sas_ata_task_abort(struct sas_task *task); void sas_ata_strategy_handler(struct Scsi_Host *shost); -void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, - struct list_head *done_q); +void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q); void sas_ata_schedule_reset(struct domain_device *dev); void sas_ata_wait_eh(struct domain_device *dev); void sas_probe_sata(struct asd_sas_port *port); void sas_suspend_sata(struct asd_sas_port *port); void sas_resume_sata(struct asd_sas_port *port); void sas_ata_end_eh(struct ata_port *ap); +int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, + int force_phy_id); #else @@ -52,8 +53,7 @@ static inline void sas_ata_strategy_handler(struct Scsi_Host *shost) { } -static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, - struct list_head *done_q) +static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q) { } @@ -85,6 +85,12 @@ static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy static inline void sas_ata_end_eh(struct ata_port *ap) { } + +static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, + int force_phy_id) +{ + return 0; +} #endif #endif /* _SAS_ATA_H_ */ diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 6794d7322cbd..1e80e70dfa92 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -10,10 +10,8 @@ #include <linux/timer.h> #include <linux/scatterlist.h> #include <scsi/scsi_device.h> -#include <scsi/scsi_request.h> struct Scsi_Host; -struct scsi_driver; /* * MAX_COMMAND_SIZE is: @@ -28,9 +26,6 @@ struct scsi_driver; * supports without specifying a cmd_len by ULD's */ #define MAX_COMMAND_SIZE 16 -#if (MAX_COMMAND_SIZE > BLK_MAX_CDB) -# error MAX_COMMAND_SIZE can not be bigger than BLK_MAX_CDB -#endif struct scsi_data_buffer { struct sg_table table; @@ -71,7 +66,6 @@ enum scsi_cmnd_submitter { } __packed; struct scsi_cmnd { - struct scsi_request req; struct scsi_device *device; struct list_head eh_entry; /* entry for the host eh_abort_list/eh_cmd_q */ struct delayed_work abort_work; @@ -100,9 +94,7 @@ struct scsi_cmnd { unsigned short cmd_len; enum dma_data_direction sc_data_direction; - /* These elements define the operation we are about to perform */ - unsigned char *cmnd; - + unsigned char cmnd[32]; /* SCSI CDB */ /* These elements define the operation we ultimately want to perform */ struct scsi_data_buffer sdb; @@ -116,18 +108,23 @@ struct scsi_cmnd { (ie, between disconnect / reconnects. Probably == sector size */ - + unsigned resid_len; /* residual count */ + unsigned sense_len; unsigned char *sense_buffer; /* obtained by REQUEST SENSE when * CHECK CONDITION is received on original * command (auto-sense). Length must be * SCSI_SENSE_BUFFERSIZE bytes. 
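With the CDB now embedded as cmnd[32] and scsi_request gone, per-command driver data lives directly behind the command, which is what the scsi_cmd_priv() helper that follows ("return cmd + 1") relies on. A userspace model of that back-to-back layout; both struct names are stand-ins.

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cmd {                    /* stands in for struct scsi_cmnd */
            unsigned char cdb[32];
            int result;
    };

    struct drv_priv {               /* per-command LLD data, cf. iscsi_cmd */
            int age;
    };

    /* models scsi_cmd_priv(): private data sits right after the command */
    static void *cmd_priv(struct cmd *c)
    {
            return c + 1;
    }

    int main(void)
    {
            struct cmd *c = calloc(1, sizeof(*c) + sizeof(struct drv_priv));

            if (!c)
                    return 1;
            ((struct drv_priv *)cmd_priv(c))->age = 42;
            assert(((struct drv_priv *)cmd_priv(c))->age == 42);
            free(c);
            puts("ok");
            return 0;
    }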
*/ + int flags; /* Command flags */ + unsigned long state; /* Command completion state */ + + unsigned int extra_len; /* length of alignment and padding */ + /* - * The following fields can be written to by the host specific code. - * Everything else should be left alone. + * The fields below can be modified by the LLD but the fields above + * must not be modified. */ - struct scsi_pointer SCp; /* Scratchpad used by some host adapters */ unsigned char *host_scribble; /* The host adapter is allowed to * call scsi_malloc and get some memory @@ -138,10 +135,6 @@ struct scsi_cmnd { * to be at an address < 16Mb). */ int result; /* Status code from lower level driver */ - int flags; /* Command flags */ - unsigned long state; /* Command completion state */ - - unsigned int extra_len; /* length of alignment and padding */ }; /* Variant of blk_mq_rq_from_pdu() that verifies the type of its argument. */ @@ -159,15 +152,8 @@ static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd) return cmd + 1; } -/* make sure not to use it with passthrough commands */ -static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) -{ - struct request *rq = scsi_cmd_to_rq(cmd); - - return *(struct scsi_driver **)rq->q->disk->private_data; -} - void scsi_done(struct scsi_cmnd *cmd); +void scsi_done_direct(struct scsi_cmnd *cmd); extern void scsi_finish_command(struct scsi_cmnd *cmd); @@ -203,12 +189,12 @@ static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd) static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid) { - cmd->req.resid_len = resid; + cmd->resid_len = resid; } static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd) { - return cmd->req.resid_len; + return cmd->resid_len; } #define scsi_for_each_sg(cmd, sg, nseg, __i) \ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 647c53b26105..57e3e239a1fc 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -206,6 +206,7 @@ struct scsi_device { unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device * creation time */ unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */ + unsigned silence_suspend:1; /* Do not print runtime PM related messages */ unsigned int queue_stopped; /* request queue is quiesced */ bool offline_already; /* Device offline message logged */ diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h index 6dffa8555a39..4ce1988b2ba0 100644 --- a/include/scsi/scsi_driver.h +++ b/include/scsi/scsi_driver.h @@ -4,11 +4,10 @@ #include <linux/blk_types.h> #include <linux/device.h> +#include <scsi/scsi_cmnd.h> struct module; struct request; -struct scsi_cmnd; -struct scsi_device; struct scsi_driver { struct device_driver gendrv; @@ -31,4 +30,10 @@ extern int scsi_register_interface(struct class_interface *); #define scsi_unregister_interface(intf) \ class_interface_unregister(intf) +/* make sure not to use it with passthrough commands */ +static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) +{ + return to_scsi_driver(cmd->device->sdev_gendev.driver); +} + #endif /* _SCSI_SCSI_DRIVER_H */ diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h index 468094254b3c..1ae08e81339f 100644 --- a/include/scsi/scsi_eh.h +++ b/include/scsi/scsi_eh.h @@ -38,10 +38,8 @@ struct scsi_eh_save { unsigned underflow; unsigned char cmd_len; unsigned char prot_op; - unsigned char *cmnd; + unsigned char cmnd[32]; struct scsi_data_buffer sdb; - /* new command support */ - unsigned char eh_cmnd[BLK_MAX_CDB]; struct 
scatterlist sense_sgl; }; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 72e1a347baa6..667d889b92b5 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -16,7 +16,6 @@ struct completion; struct module; struct scsi_cmnd; struct scsi_device; -struct scsi_host_cmd_pool; struct scsi_target; struct Scsi_Host; struct scsi_transport_template; @@ -493,8 +492,6 @@ struct scsi_host_template { */ u64 vendor_id; - struct scsi_host_cmd_pool *cmd_pool; - /* Delay for runtime autosuspend */ int rpm_autosuspend_delay; }; diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h deleted file mode 100644 index 9129b23e12bc..000000000000 --- a/include/scsi/scsi_request.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _SCSI_SCSI_REQUEST_H -#define _SCSI_SCSI_REQUEST_H - -#include <linux/blk-mq.h> - -#define BLK_MAX_CDB 16 - -struct scsi_request { - unsigned char __cmd[BLK_MAX_CDB]; - unsigned char *cmd; - unsigned short cmd_len; - int result; - unsigned int sense_len; - unsigned int resid_len; /* residual count */ - int retries; - void *sense; -}; - -static inline struct scsi_request *scsi_req(struct request *rq) -{ - return blk_mq_rq_to_pdu(rq); -} - -static inline void scsi_req_free_cmd(struct scsi_request *req) -{ - if (req->cmd != req->__cmd) - kfree(req->cmd); -} - -#endif /* _SCSI_SCSI_REQUEST_H */ diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index c5d7810fd792..38e4a67f5922 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -251,6 +251,8 @@ struct iscsi_cls_session { bool recovery_tmo_sysfs_override; struct delayed_work recovery_work; + struct workqueue_struct *workq; + unsigned int target_id; bool ida_used; @@ -278,7 +280,6 @@ struct iscsi_cls_session { iscsi_dev_to_session(_stgt->dev.parent) struct iscsi_cls_host { - atomic_t nr_scans; struct mutex mutex; struct request_queue *bsg_q; uint32_t port_speed; @@ -441,14 +442,14 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost, unsigned int target_id); extern void iscsi_remove_session(struct iscsi_cls_session *session); extern void iscsi_free_session(struct iscsi_cls_session *session); -extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, +extern struct iscsi_cls_conn *iscsi_alloc_conn(struct iscsi_cls_session *sess, int dd_size, uint32_t cid); +extern int iscsi_add_conn(struct iscsi_cls_conn *conn); +extern void iscsi_remove_conn(struct iscsi_cls_conn *conn); extern void iscsi_put_conn(struct iscsi_cls_conn *conn); extern void iscsi_get_conn(struct iscsi_cls_conn *conn); -extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); extern void iscsi_unblock_session(struct iscsi_cls_session *session); extern void iscsi_block_session(struct iscsi_cls_session *session); -extern int iscsi_scan_finished(struct Scsi_Host *shost, unsigned long time); extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size); extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep); extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle); diff --git a/include/scsi/viosrp.h b/include/scsi/viosrp.h index c978133c83e3..6c5559d2b285 100644 --- a/include/scsi/viosrp.h +++ b/include/scsi/viosrp.h @@ -70,12 +70,17 @@ enum viosrp_crq_status { }; struct viosrp_crq { - u8 valid; /* used by RPA */ - u8 format; /* SCSI vs out-of-band */ - u8 reserved; - u8 status; /* non-scsi failure? (e.g. 
DMA failure) */ - __be16 timeout; /* in seconds */ - __be16 IU_length; /* in bytes */ + union { + __be64 high; /* High 64 bits */ + struct { + u8 valid; /* used by RPA */ + u8 format; /* SCSI vs out-of-band */ + u8 reserved; + u8 status; /* non-scsi failure? (e.g. DMA failure) */ + __be16 timeout; /* in seconds */ + __be16 IU_length; /* in bytes */ + }; + }; __be64 IU_data_ptr; /* the TCE for transferring data */ }; diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h index f6542584ca13..9e17247474fa 100644 --- a/include/soc/at91/sama7-ddr.h +++ b/include/soc/at91/sama7-ddr.h @@ -11,15 +11,13 @@ #ifndef __SAMA7_DDR_H__ #define __SAMA7_DDR_H__ -#ifdef CONFIG_SOC_SAMA7 - /* DDR3PHY */ #define DDR3PHY_PIR (0x04) /* DDR3PHY PHY Initialization Register */ -#define DDR3PHY_PIR_DLLBYP (1 << 17) /* DLL Bypass */ +#define DDR3PHY_PIR_DLLBYP (1 << 17) /* DLL Bypass */ #define DDR3PHY_PIR_ITMSRST (1 << 4) /* Interface Timing Module Soft Reset */ -#define DDR3PHY_PIR_DLLLOCK (1 << 2) /* DLL Lock */ +#define DDR3PHY_PIR_DLLLOCK (1 << 2) /* DLL Lock */ #define DDR3PHY_PIR_DLLSRST (1 << 1) /* DLL Soft Rest */ -#define DDR3PHY_PIR_INIT (1 << 0) /* Initialization Trigger */ +#define DDR3PHY_PIR_INIT (1 << 0) /* Initialization Trigger */ #define DDR3PHY_PGCR (0x08) /* DDR3PHY PHY General Configuration Register */ #define DDR3PHY_PGCR_CKDV1 (1 << 13) /* CK# Disable Value */ @@ -55,7 +53,8 @@ #define UDDRC_STAT_OPMODE_MSK (0x7 << 0) /* Operating mode mask */ #define UDDRC_PWRCTL (0x30) /* UDDRC Low Power Control Register */ -#define UDDRC_PWRCTRL_SELFREF_SW (1 << 5) /* Software self-refresh */ +#define UDDRC_PWRCTL_SELFREF_EN (1 << 0) /* Automatic self-refresh */ +#define UDDRC_PWRCTL_SELFREF_SW (1 << 5) /* Software self-refresh */ #define UDDRC_DFIMISC (0x1B0) /* UDDRC DFI Miscellaneous Control Register */ #define UDDRC_DFIMISC_DFI_INIT_COMPLETE_EN (1 << 0) /* PHY initialization complete enable signal */ @@ -67,7 +66,7 @@ #define UDDRC_SWSTAT_SW_DONE_ACK (1 << 0) /* Register programming done */ #define UDDRC_PSTAT (0x3FC) /* UDDRC Port Status Register */ -#define UDDRC_PSTAT_ALL_PORTS (0x1F001F) /* Read + writes outstanding transactions on all ports */ +#define UDDRC_PSTAT_ALL_PORTS (0x1F001F) /* Read + writes outstanding transactions on all ports */ #define UDDRC_PCTRL_0 (0x490) /* UDDRC Port 0 Control Register */ #define UDDRC_PCTRL_1 (0x540) /* UDDRC Port 1 Control Register */ @@ -75,6 +74,4 @@ #define UDDRC_PCTRL_3 (0x6A0) /* UDDRC Port 3 Control Register */ #define UDDRC_PCTRL_4 (0x750) /* UDDRC Port 4 Control Register */ -#endif /* CONFIG_SOC_SAMA7 */ - #endif /* __SAMA7_DDR_H__ */ diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index 73ad784fca96..811ea668c4a1 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -91,6 +91,7 @@ enum rpi_firmware_property_tag { RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049, RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050, RPI_FIRMWARE_NOTIFY_XHCI_RESET = 0x00030058, + RPI_FIRMWARE_NOTIFY_DISPLAY_DONE = 0x00030066, /* Dispmanx TAGS */ RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001, diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h index 15e3397cec58..11f7d6b59642 100644 --- a/include/soc/mediatek/smi.h +++ b/include/soc/mediatek/smi.h @@ -19,26 +19,6 @@ struct mtk_smi_larb_iommu { unsigned char bank[32]; }; -/* - * mtk_smi_larb_get: Enable the power domain and clocks for this local arbiter. 
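The viosrp_crq rework above overlays the first eight bytes of the CRQ with a single __be64 so the whole word can be moved at once without changing the byte layout. That invariant is easy to pin down with static assertions, as in this standalone model that substitutes fixed-width types for the __be ones.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct crq {                    /* models struct viosrp_crq */
            union {
                    uint64_t high;  /* high 64 bits as one word */
                    struct {
                            uint8_t valid;
                            uint8_t format;
                            uint8_t reserved;
                            uint8_t status;
                            uint16_t timeout;
                            uint16_t iu_length;
                    };
            };
            uint64_t iu_data_ptr;
    };

    _Static_assert(sizeof(struct crq) == 16, "union must not grow the CRQ");
    _Static_assert(offsetof(struct crq, iu_data_ptr) == 8, "data ptr stays put");

    int main(void)
    {
            struct crq c = { .valid = 0x80 };

            /* printed value is host-endian in this model */
            printf("crq word: %#llx\n", (unsigned long long)c.high);
            return 0;
    }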
- * It also initialize some basic setting(like iommu). - * mtk_smi_larb_put: Disable the power domain and clocks for this local arbiter. - * Both should be called in non-atomic context. - * - * Returns 0 if successful, negative on failure. - */ -int mtk_smi_larb_get(struct device *larbdev); -void mtk_smi_larb_put(struct device *larbdev); - -#else - -static inline int mtk_smi_larb_get(struct device *larbdev) -{ - return 0; -} - -static inline void mtk_smi_larb_put(struct device *larbdev) { } - #endif #endif diff --git a/include/soc/microchip/mpfs.h b/include/soc/microchip/mpfs.h index 2b64c95f3be5..6466515262bd 100644 --- a/include/soc/microchip/mpfs.h +++ b/include/soc/microchip/mpfs.h @@ -34,9 +34,9 @@ struct mpfs_mss_response { #if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) -int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, void *msg); +int mpfs_blocking_transaction(struct mpfs_sys_controller *mpfs_client, struct mpfs_mss_msg *msg); -struct mpfs_sys_controller *mpfs_sys_controller_get(struct device_node *mailbox_node); +struct mpfs_sys_controller *mpfs_sys_controller_get(struct device *dev); #endif /* if IS_ENABLED(CONFIG_POLARFIRE_SOC_SYS_CTRL) */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 5c3a3597f1d2..9b4e6c78d0f4 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -105,8 +105,6 @@ #define REG_RESERVED_ADDR 0xffffffff #define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR) -#define OCELOT_MRP_CPUQ 7 - enum ocelot_target { ANA = 1, QS, @@ -540,6 +538,13 @@ struct ocelot_stat_layout { char name[ETH_GSTRING_LEN]; }; +struct ocelot_stats_region { + struct list_head node; + u32 offset; + int count; + u32 *buf; +}; + enum ocelot_tag_prefix { OCELOT_TAG_PREFIX_DISABLED = 0, OCELOT_TAG_PREFIX_NONE, @@ -630,6 +635,18 @@ enum macaccess_entry_type { #define OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION BIT(0) #define OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP BIT(1) +struct ocelot_lag_fdb { + unsigned char addr[ETH_ALEN]; + u16 vid; + struct net_device *bond; + struct list_head list; +}; + +struct ocelot_mirror { + refcount_t refcount; + int to; +}; + struct ocelot_port { struct ocelot *ocelot; @@ -656,6 +673,7 @@ struct ocelot_port { u16 mrp_ring_id; struct net_device *bridge; + int bridge_num; u8 stp_state; int speed; @@ -671,6 +689,7 @@ struct ocelot { struct regmap_field *regfields[REGFIELD_MAX]; const u32 *const *map; const struct ocelot_stat_layout *stats_layout; + struct list_head stats_regions; unsigned int num_stats; u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM]; @@ -683,6 +702,8 @@ struct ocelot { u8 base_mac[ETH_ALEN]; struct list_head vlans; + struct list_head traps; + struct list_head lag_fdbs; /* Switches like VSC9959 have flooding per traffic class */ int num_flooding_pgids; @@ -698,6 +719,8 @@ struct ocelot { enum ocelot_tag_prefix npi_inj_prefix; enum ocelot_tag_prefix npi_xtr_prefix; + unsigned long bridges; + struct list_head multicast; struct list_head pgids; @@ -705,6 +728,7 @@ struct ocelot { struct ocelot_vcap_block block[3]; struct ocelot_vcap_policer vcap_pol; struct vcap_props *vcap; + struct ocelot_mirror *mirror; struct ocelot_psfp_list psfp; @@ -742,25 +766,42 @@ struct ocelot_policer { u32 burst; /* bytes */ }; -#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) -#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi)) -#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri)) 
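The stats_regions list above exists so that runs of consecutive counter registers can each be fetched with one ocelot_bulk_read_rix() call, added just below, instead of one read per counter. A standalone sketch of the coalescing step; build_regions() is a stand-in and the offsets are illustrative.

    #include <stdio.h>

    struct region { unsigned int offset; int count; };

    /* group consecutive register addresses into bulk-readable regions */
    static int build_regions(const unsigned int *addr, int n, struct region *out)
    {
            int nregions = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (nregions && addr[i] == out[nregions - 1].offset +
                                    (unsigned int)out[nregions - 1].count)
                            out[nregions - 1].count++;
                    else
                            out[nregions++] = (struct region){ addr[i], 1 };
            }
            return nregions;
    }

    int main(void)
    {
            unsigned int addr[] = { 0x10, 0x11, 0x12, 0x40, 0x41 };
            struct region r[5];
            int i, n = build_regions(addr, 5, r);

            for (i = 0; i < n; i++)         /* two regions: 3 + 2 counters */
                    printf("region @%#x count %d\n", r[i].offset, r[i].count);
            return 0;
    }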
-#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0) - -#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) -#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi)) -#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) +#define ocelot_bulk_read_rix(ocelot, reg, ri, buf, count) \ + __ocelot_bulk_read_ix(ocelot, reg, reg##_RSZ * (ri), buf, count) + +#define ocelot_read_ix(ocelot, reg, gi, ri) \ + __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_read_gix(ocelot, reg, gi) \ + __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi)) +#define ocelot_read_rix(ocelot, reg, ri) \ + __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri)) +#define ocelot_read(ocelot, reg) \ + __ocelot_read_ix(ocelot, reg, 0) + +#define ocelot_write_ix(ocelot, val, reg, gi, ri) \ + __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_write_gix(ocelot, val, reg, gi) \ + __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi)) +#define ocelot_write_rix(ocelot, val, reg, ri) \ + __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) #define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) -#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) -#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) -#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri)) +#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) \ + __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) +#define ocelot_rmw_gix(ocelot, val, m, reg, gi) \ + __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) +#define ocelot_rmw_rix(ocelot, val, m, reg, ri) \ + __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri)) #define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0) -#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val)) -#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val)) -#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val)) -#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val)) +#define ocelot_field_write(ocelot, reg, val) \ + regmap_field_write((ocelot)->regfields[(reg)], (val)) +#define ocelot_field_read(ocelot, reg, val) \ + regmap_field_read((ocelot)->regfields[(reg)], (val)) +#define ocelot_fields_write(ocelot, id, reg, val) \ + regmap_fields_write((ocelot)->regfields[(reg)], (id), (val)) +#define ocelot_fields_read(ocelot, id, reg, val) \ + regmap_fields_read((ocelot)->regfields[(reg)], (id), (val)) #define ocelot_target_read_ix(ocelot, target, reg, gi, ri) \ __ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) @@ -784,6 +825,8 @@ struct ocelot_policer { u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg); +int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf, + int count); u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset); void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, 
u32 offset); void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg, @@ -812,6 +855,9 @@ void ocelot_deinit(struct ocelot *ocelot); void ocelot_init_port(struct ocelot *ocelot, int port); void ocelot_deinit_port(struct ocelot *ocelot, int port); +void ocelot_port_set_dsa_8021q_cpu(struct ocelot *ocelot, int port); +void ocelot_port_unset_dsa_8021q_cpu(struct ocelot *ocelot, int port); + /* DSA callbacks */ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data); void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data); @@ -829,17 +875,29 @@ int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags val); void ocelot_port_bridge_flags(struct ocelot *ocelot, int port, struct switchdev_brport_flags val); -void ocelot_port_bridge_join(struct ocelot *ocelot, int port, - struct net_device *bridge); +int ocelot_port_get_default_prio(struct ocelot *ocelot, int port); +int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio); +int ocelot_port_get_dscp_prio(struct ocelot *ocelot, int port, u8 dscp); +int ocelot_port_add_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio); +int ocelot_port_del_dscp_prio(struct ocelot *ocelot, int port, u8 dscp, u8 prio); +int ocelot_port_bridge_join(struct ocelot *ocelot, int port, + struct net_device *bridge, int bridge_num, + struct netlink_ext_ack *extack); void ocelot_port_bridge_leave(struct ocelot *ocelot, int port, struct net_device *bridge); int ocelot_mact_flush(struct ocelot *ocelot, int port); int ocelot_fdb_dump(struct ocelot *ocelot, int port, dsa_fdb_dump_cb_t *cb, void *data); -int ocelot_fdb_add(struct ocelot *ocelot, int port, - const unsigned char *addr, u16 vid); -int ocelot_fdb_del(struct ocelot *ocelot, int port, - const unsigned char *addr, u16 vid); +int ocelot_fdb_add(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge); +int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr, + u16 vid, const struct net_device *bridge); +int ocelot_lag_fdb_add(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge); +int ocelot_lag_fdb_del(struct ocelot *ocelot, struct net_device *bond, + const unsigned char *addr, u16 vid, + const struct net_device *bridge); int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged, struct netlink_ext_ack *extack); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, @@ -856,6 +914,9 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port); int ocelot_port_policer_add(struct ocelot *ocelot, int port, struct ocelot_policer *pol); int ocelot_port_policer_del(struct ocelot *ocelot, int port); +int ocelot_port_mirror_add(struct ocelot *ocelot, int from, int to, + bool ingress, struct netlink_ext_ack *extack); +void ocelot_port_mirror_del(struct ocelot *ocelot, int from, bool ingress); int ocelot_cls_flower_replace(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, @@ -863,9 +924,11 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port, int ocelot_cls_flower_stats(struct ocelot *ocelot, int port, struct flow_cls_offload *f, bool ingress); int ocelot_port_mdb_add(struct ocelot *ocelot, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge); int 
ocelot_port_mdb_del(struct ocelot *ocelot, int port, - const struct switchdev_obj_port_mdb *mdb); + const struct switchdev_obj_port_mdb *mdb, + const struct net_device *bridge); int ocelot_port_lag_join(struct ocelot *ocelot, int port, struct net_device *bond, struct netdev_lag_upper_info *info); diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h index 709cbc198fd2..7b2bf9b1fe69 100644 --- a/include/soc/mscc/ocelot_vcap.h +++ b/include/soc/mscc/ocelot_vcap.h @@ -8,6 +8,20 @@ #include <soc/mscc/ocelot.h> +/* Cookie definitions for private VCAP filters installed by the driver. + * Must be unique per VCAP block. + */ +#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port) (port) +#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port) +#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port) +#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port)) +#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2) +#define OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 1) +#define OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 2) +#define OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 3) +#define OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 4) +#define OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 5) + /* ================================================================= * VCAP Common * ================================================================= @@ -640,6 +654,7 @@ struct ocelot_vcap_action { enum ocelot_mask_mode mask_mode; unsigned long port_mask; bool police_ena; + bool mirror_ena; struct ocelot_policer pol; u32 pol_ix; }; @@ -666,6 +681,7 @@ struct ocelot_vcap_id { struct ocelot_vcap_filter { struct list_head list; + struct list_head trap_list; enum ocelot_vcap_filter_type type; int block_id; @@ -678,9 +694,11 @@ struct ocelot_vcap_filter { struct ocelot_vcap_action action; struct ocelot_vcap_stats stats; /* For VCAP IS1 and IS2 */ + bool take_ts; unsigned long ingress_port_mask; /* For VCAP ES0 */ struct ocelot_vcap_port ingress_port; + /* For VCAP IS2 mirrors and ES0 */ struct ocelot_vcap_port egress_port; enum ocelot_vcap_bit dmac_mc; diff --git a/include/soc/tegra/bpmp-abi.h b/include/soc/tegra/bpmp-abi.h index bff99f23860c..53171e324d1c 100644 --- a/include/soc/tegra/bpmp-abi.h +++ b/include/soc/tegra/bpmp-abi.h @@ -931,7 +931,7 @@ enum mrq_reset_commands { * @brief Request with MRQ_RESET * * Used by the sender of an #MRQ_RESET message to request BPMP to - * assert or or deassert a given reset line. + * assert or deassert a given reset line. 
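The new cookie macros in ocelot_vcap.h deserve a gloss: as the quoted comment says, cookies only need to be unique per VCAP block, so each block gets a flat integer namespace in which the per-port tag_8021q filters occupy [0, num_phys_ports), the MRP redirects take the next num_phys_ports slots, and the single-instance traps are stacked from 2 * num_phys_ports upward. A sketch that just prints the resulting IS2 layout; foo_dump_is2_cookies is a hypothetical debugging helper, not part of the patch:

#include <soc/mscc/ocelot_vcap.h>

static void foo_dump_is2_cookies(struct ocelot *ocelot)
{
	int port;

	for (port = 0; port < ocelot->num_phys_ports; port++)
		pr_info("port %d: txvlan cookie %d, mrp redirect cookie %d\n",
			port,
			OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port),
			OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));

	pr_info("mrp trap %d, l2 ptp trap %d, ipv6 ev ptp trap %d\n",
		OCELOT_VCAP_IS2_MRP_TRAP(ocelot),
		OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot),
		OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot));
}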
*/ struct mrq_reset_request { /** @brief Reset action to perform (@ref mrq_reset_commands) */ diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 82d9daa17851..77426ff58338 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -306,12 +306,19 @@ struct hda_codec { /* * constructors */ +__printf(3, 4) struct hda_codec * +snd_hda_codec_device_init(struct hda_bus *bus, unsigned int codec_addr, + const char *fmt, ...); int snd_hda_codec_new(struct hda_bus *bus, struct snd_card *card, unsigned int codec_addr, struct hda_codec **codecp); int snd_hda_codec_device_new(struct hda_bus *bus, struct snd_card *card, - unsigned int codec_addr, struct hda_codec *codec); + unsigned int codec_addr, struct hda_codec *codec, + bool snddev_managed); int snd_hda_codec_configure(struct hda_codec *codec); int snd_hda_codec_update_widgets(struct hda_codec *codec); +void snd_hda_codec_register(struct hda_codec *codec); +void snd_hda_codec_unregister(struct hda_codec *codec); +void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec); /* * low level functions @@ -490,9 +497,11 @@ int hda_call_check_power_status(struct hda_codec *codec, hda_nid_t nid) #define snd_hda_power_down(codec) snd_hdac_power_down(&(codec)->core) #define snd_hda_power_down_pm(codec) snd_hdac_power_down_pm(&(codec)->core) #ifdef CONFIG_PM +void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay); void snd_hda_set_power_save(struct hda_bus *bus, int delay); void snd_hda_update_power_acct(struct hda_codec *codec); #else +static inline void snd_hda_codec_set_power_save(struct hda_codec *codec, int delay) {} static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {} #endif diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h index e36b77531c5c..006d358acce2 100644 --- a/include/sound/hda_verbs.h +++ b/include/sound/hda_verbs.h @@ -461,7 +461,7 @@ enum { #define AC_DE_ELDV (1<<1) #define AC_DE_IA (1<<2) -/* device device types (0x0-0xf) */ +/* device types (0x0-0xf) */ enum { AC_JACK_LINE_OUT, AC_JACK_SPEAKER, diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 6a90ce405e60..15f15075238d 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -9,6 +9,7 @@ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/pm_runtime.h> #include <linux/timecounter.h> #include <sound/core.h> @@ -448,6 +449,8 @@ static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr) #define snd_hdac_reg_writel(bus, addr, val) writel(val, addr) #define snd_hdac_reg_readl(bus, addr) readl(addr) +#define snd_hdac_reg_writeq(bus, addr, val) writeq(val, addr) +#define snd_hdac_reg_readq(bus, addr) readq(addr) /* * macros for easy use diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 77123c3e4095..d26234f9ee46 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -2,6 +2,8 @@ #ifndef __SOUND_HDAUDIO_EXT_H #define __SOUND_HDAUDIO_EXT_H +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/iopoll.h> #include <sound/hdaudio.h> int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev, @@ -28,6 +30,7 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *chip, bool enable, int index); int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus); +struct hdac_ext_link *snd_hdac_ext_bus_link_at(struct hdac_bus *bus, int addr); struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus, const 
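Worth noting amid the hdaudio.h hunk above: the new snd_hdac_reg_writeq()/snd_hdac_reg_readq() wrappers only build everywhere because of the added linux/io-64-nonatomic-lo-hi.h include, which supplies readq()/writeq() on 32-bit platforms as two 32-bit accesses, low word first. A minimal sketch of that fallback in isolation; the mapping and register offset are hypothetical:

#include <linux/io-64-nonatomic-lo-hi.h>

static u64 foo_read_wallclock(void __iomem *base)
{
	/*
	 * A single readq() on 64-bit; on 32-bit the lo-hi header expands
	 * this to readl(base + 0x30) then readl(base + 0x34). The split is
	 * non-atomic, so it is only safe for registers that tolerate it.
	 */
	return readq(base + 0x30);	/* hypothetical 64-bit register */
}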
char *codec_name); @@ -143,6 +146,54 @@ void snd_hdac_ext_bus_link_power(struct hdac_device *codec, bool enable); writew(((readw(addr + reg) & ~(mask)) | (val)), \ addr + reg) +#define snd_hdac_adsp_writeb(chip, reg, value) \ + snd_hdac_reg_writeb(chip, (chip)->dsp_ba + (reg), value) +#define snd_hdac_adsp_readb(chip, reg) \ + snd_hdac_reg_readb(chip, (chip)->dsp_ba + (reg)) +#define snd_hdac_adsp_writew(chip, reg, value) \ + snd_hdac_reg_writew(chip, (chip)->dsp_ba + (reg), value) +#define snd_hdac_adsp_readw(chip, reg) \ + snd_hdac_reg_readw(chip, (chip)->dsp_ba + (reg)) +#define snd_hdac_adsp_writel(chip, reg, value) \ + snd_hdac_reg_writel(chip, (chip)->dsp_ba + (reg), value) +#define snd_hdac_adsp_readl(chip, reg) \ + snd_hdac_reg_readl(chip, (chip)->dsp_ba + (reg)) +#define snd_hdac_adsp_writeq(chip, reg, value) \ + snd_hdac_reg_writeq(chip, (chip)->dsp_ba + (reg), value) +#define snd_hdac_adsp_readq(chip, reg) \ + snd_hdac_reg_readq(chip, (chip)->dsp_ba + (reg)) + +#define snd_hdac_adsp_updateb(chip, reg, mask, val) \ + snd_hdac_adsp_writeb(chip, reg, \ + (snd_hdac_adsp_readb(chip, reg) & ~(mask)) | (val)) +#define snd_hdac_adsp_updatew(chip, reg, mask, val) \ + snd_hdac_adsp_writew(chip, reg, \ + (snd_hdac_adsp_readw(chip, reg) & ~(mask)) | (val)) +#define snd_hdac_adsp_updatel(chip, reg, mask, val) \ + snd_hdac_adsp_writel(chip, reg, \ + (snd_hdac_adsp_readl(chip, reg) & ~(mask)) | (val)) +#define snd_hdac_adsp_updateq(chip, reg, mask, val) \ + snd_hdac_adsp_writeq(chip, reg, \ + (snd_hdac_adsp_readq(chip, reg) & ~(mask)) | (val)) + +#define snd_hdac_adsp_readb_poll(chip, reg, val, cond, delay_us, timeout_us) \ + readb_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ + delay_us, timeout_us) +#define snd_hdac_adsp_readw_poll(chip, reg, val, cond, delay_us, timeout_us) \ + readw_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ + delay_us, timeout_us) +#define snd_hdac_adsp_readl_poll(chip, reg, val, cond, delay_us, timeout_us) \ + readl_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ + delay_us, timeout_us) +#define snd_hdac_adsp_readq_poll(chip, reg, val, cond, delay_us, timeout_us) \ + readq_poll_timeout((chip)->dsp_ba + (reg), val, cond, \ + delay_us, timeout_us) +#define snd_hdac_stream_readb_poll(strm, reg, val, cond, delay_us, timeout_us) \ + readb_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \ + delay_us, timeout_us) +#define snd_hdac_stream_readl_poll(strm, reg, val, cond, delay_us, timeout_us) \ + readl_poll_timeout((strm)->sd_addr + AZX_REG_ ## reg, val, cond, \ + delay_us, timeout_us) struct hdac_ext_device; diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h index 089a760d36eb..6fb2d5e378fd 100644 --- a/include/sound/intel-nhlt.h +++ b/include/sound/intel-nhlt.h @@ -18,6 +18,13 @@ enum nhlt_link_type { NHLT_LINK_INVALID }; +enum nhlt_device_type { + NHLT_DEVICE_BT = 0, + NHLT_DEVICE_DMIC = 1, + NHLT_DEVICE_I2S = 4, + NHLT_DEVICE_INVALID +}; + #if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT) struct wav_fmt { @@ -41,13 +48,6 @@ struct wav_fmt_ext { u8 sub_fmt[16]; } __packed; -enum nhlt_device_type { - NHLT_DEVICE_BT = 0, - NHLT_DEVICE_DMIC = 1, - NHLT_DEVICE_I2S = 4, - NHLT_DEVICE_INVALID -}; - struct nhlt_specific_cfg { u32 size; u8 caps[]; @@ -133,6 +133,9 @@ void intel_nhlt_free(struct nhlt_acpi_table *addr); int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt); bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type); + +int intel_nhlt_ssp_endpoint_mask(struct 
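The snd_hdac_adsp_*_poll() additions above are thin wrappers around the iopoll.h read*_poll_timeout() family, which re-reads a register until a condition holds or a timeout expires, returning 0 or -ETIMEDOUT. A sketch of waiting for a DSP ready bit; FOO_ADSP_STATUS, FOO_STATUS_READY and struct foo_dsp are invented for illustration (the macros only require that "chip" expose the dsp_ba base mapping they dereference):

#define FOO_ADSP_STATUS		0x04	/* hypothetical status register */
#define FOO_STATUS_READY	BIT(0)	/* hypothetical ready bit */

struct foo_dsp {
	void __iomem *dsp_ba;	/* DSP BAR mapping used by snd_hdac_adsp_*() */
};

static int foo_wait_dsp_ready(struct foo_dsp *chip)
{
	u32 reg;

	/*
	 * Re-read every 500 us, give up after 10 ms. The poll helpers may
	 * sleep between reads, so this is for process context only.
	 */
	return snd_hdac_adsp_readl_poll(chip, FOO_ADSP_STATUS, reg,
					reg & FOO_STATUS_READY, 500, 10000);
}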
nhlt_acpi_table *nhlt, u8 device_type); + struct nhlt_specific_cfg * intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt, u32 bus_id, u8 link_type, u8 vbps, u8 bps, @@ -163,6 +166,11 @@ static inline bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, return false; } +static inline int intel_nhlt_ssp_endpoint_mask(struct nhlt_acpi_table *nhlt, u8 device_type) +{ + return 0; +} + static inline struct nhlt_specific_cfg * intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt, u32 bus_id, u8 link_type, u8 vbps, u8 bps, diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 36da42cd0774..314f2779cab5 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -401,6 +401,7 @@ struct snd_pcm_runtime { wait_queue_head_t tsleep; /* transfer sleep */ struct fasync_struct *fasync; bool stop_operating; /* sync_stop will be called */ + struct mutex buffer_mutex; /* protect for buffer changes */ /* -- private section -- */ void *private_data; diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index df430f1c2a10..8faa649f712b 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -16,6 +16,12 @@ #define asoc_simple_init_mic(card, sjack, prefix) \ asoc_simple_init_jack(card, sjack, 0, prefix, NULL) +struct asoc_simple_tdm_width_map { + u8 sample_bits; + u8 slot_count; + u16 slot_width; +}; + struct asoc_simple_dai { const char *name; unsigned int sysclk; @@ -25,6 +31,9 @@ struct asoc_simple_dai { unsigned int tx_slot_mask; unsigned int rx_slot_mask; struct clk *clk; + bool clk_fixed; + struct asoc_simple_tdm_width_map *tdm_width_map; + int n_tdm_widths; }; struct asoc_simple_data { @@ -131,6 +140,9 @@ int asoc_simple_parse_daifmt(struct device *dev, struct device_node *codec, char *prefix, unsigned int *retfmt); +int asoc_simple_parse_tdm_width_map(struct device *dev, struct device_node *np, + struct asoc_simple_dai *dai); + __printf(3, 4) int asoc_simple_set_dailink_name(struct device *dev, struct snd_soc_dai_link *dai_link, diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index ac0893df9c76..d33cf8df14b1 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -60,9 +60,11 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg) * @acpi_ipc_irq_index: used for BYT-CR detection * @platform: string used for HDAudio codec support * @codec_mask: used for HDAudio support + * @dmic_num: number of SoC- or chipset-attached PDM digital microphones * @common_hdmi_codec_drv: use commom HDAudio HDMI codec driver - * @link_mask: links enabled on the board - * @links: array of link _ADR descriptors, null terminated + * @link_mask: SoundWire links enabled on the board + * @links: array of SoundWire link _ADR descriptors, null terminated + * @i2s_link_mask: I2S/TDM links enabled on the board * @num_dai_drivers: number of elements in @dai_drivers * @dai_drivers: pointer to dai_drivers, used e.g. 
in nocodec mode */ @@ -74,6 +76,7 @@ struct snd_soc_acpi_mach_params { bool common_hdmi_codec_drv; u32 link_mask; const struct snd_soc_acpi_link_adr *links; + u32 i2s_link_mask; u32 num_dai_drivers; struct snd_soc_dai_driver *dai_drivers; }; @@ -122,6 +125,24 @@ struct snd_soc_acpi_link_adr { const struct snd_soc_acpi_adr_device *adr_d; }; +/* + * when set the topology uses the -ssp<N> suffix, where N is determined based on + * BIOS or DMI information + */ +#define SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER BIT(0) + +/* + * when more than one SSP is reported in the link mask, use the most significant. + * This choice was found to be valid on platforms with ES8336 codecs. + */ +#define SND_SOC_ACPI_TPLG_INTEL_SSP_MSB BIT(1) + +/* + * when set the topology uses the -dmic<N>ch suffix, where N is determined based on + * BIOS or DMI information + */ +#define SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER BIT(2) + /** * snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are * related to the hardware, except for the firmware and topology file names. @@ -142,8 +163,8 @@ struct snd_soc_acpi_link_adr { * audio codecs whose presence if checked with ACPI * @pdata: intended for platform data or machine specific-ops. This structure * is not constant since this field may be updated at run-time - * @sof_fw_filename: Sound Open Firmware file name, if enabled * @sof_tplg_filename: Sound Open Firmware topology file name, if enabled + * @tplg_quirk_mask: quirks to select different topology files dynamically */ /* Descriptor for SST ASoC machine driver */ struct snd_soc_acpi_mach { @@ -158,8 +179,8 @@ struct snd_soc_acpi_mach { const void *quirk_data; void *pdata; struct snd_soc_acpi_mach_params mach_params; - const char *sof_fw_filename; const char *sof_tplg_filename; + const u32 tplg_quirk_mask; }; #define SND_SOC_ACPI_MAX_CODECS 3 diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index c3039e97929a..ebb8e7a7fc29 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -429,6 +429,7 @@ struct snd_soc_dapm_widget *snd_soc_dapm_new_control_unlocked( const struct snd_soc_dapm_widget *widget); int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm, struct snd_soc_dai *dai); +void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w); int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card); void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card); diff --git a/include/sound/sof.h b/include/sound/sof.h index 813680ab9aad..7cdfc954df12 100644 --- a/include/sound/sof.h +++ b/include/sound/sof.h @@ -39,6 +39,14 @@ enum sof_fw_state { SOF_FW_CRASHED, }; +/* DSP power states */ +enum sof_dsp_power_states { + SOF_DSP_PM_D0, + SOF_DSP_PM_D1, + SOF_DSP_PM_D2, + SOF_DSP_PM_D3, +}; + /* * SOF Platform data. 
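The three SND_SOC_ACPI_TPLG_INTEL_* bits above are consumed by whoever loads the topology, not by this header; per the quoted comments they switch the topology filename to a -ssp<N> or -dmic<N>ch suffixed variant chosen from BIOS/DMI information. A sketch of the kind of filename rewriting they enable, with the base name and the probe-time SSP/DMIC numbers all hypothetical:

#include <sound/soc-acpi.h>

static void foo_pick_tplg_name(const struct snd_soc_acpi_mach *mach,
			       int ssp_port, int dmic_num,
			       char *buf, size_t len)
{
	if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER)
		snprintf(buf, len, "sof-foo-ssp%d.tplg", ssp_port);
	else if (mach->tplg_quirk_mask & SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER)
		snprintf(buf, len, "sof-foo-dmic%dch.tplg", dmic_num);
	else
		snprintf(buf, len, "%s", mach->sof_tplg_filename);
}

Keeping the quirks as a mask on the machine descriptor, rather than hardcoding names per board, is what lets one snd_soc_acpi_mach entry cover several BIOS variants.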
*/ diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h index fd3a30fcf756..d363f0ca6979 100644 --- a/include/sound/sof/channel_map.h +++ b/include/sound/sof/channel_map.h @@ -39,7 +39,7 @@ struct sof_ipc_channel_map { uint32_t ext_id; uint32_t ch_mask; uint32_t reserved; - int32_t ch_coeffs[0]; + int32_t ch_coeffs[]; } __packed; /** @@ -55,7 +55,7 @@ struct sof_ipc_stream_map { struct sof_ipc_cmd_hdr hdr; uint32_t num_ch_map; uint32_t reserved[3]; - struct sof_ipc_channel_map ch_map[0]; + struct sof_ipc_channel_map ch_map[]; } __packed; #endif /* __IPC_CHANNEL_MAP_H__ */ diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index 59ee50ac7705..a818a0f0a226 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -116,4 +116,9 @@ struct sof_ipc_dai_config { }; } __packed; +struct sof_dai_private_data { + struct sof_ipc_comp_dai *comp_dai; + struct sof_ipc_dai_config *dai_config; +}; + #endif diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h index b97a76bcb655..b22e925c70e2 100644 --- a/include/sound/sof/header.h +++ b/include/sound/sof/header.h @@ -31,7 +31,7 @@ /* Global Message - Generic */ #define SOF_GLB_TYPE_SHIFT 28 -#define SOF_GLB_TYPE_MASK (0xfL << SOF_GLB_TYPE_SHIFT) +#define SOF_GLB_TYPE_MASK (0xfUL << SOF_GLB_TYPE_SHIFT) #define SOF_GLB_TYPE(x) ((x) << SOF_GLB_TYPE_SHIFT) /* Command Message - Generic */ diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h index 0b7101aef596..65e86e4e9fd8 100644 --- a/include/sound/sof/info.h +++ b/include/sound/sof/info.h @@ -25,6 +25,7 @@ #define SOF_IPC_INFO_LOCKS BIT(1) #define SOF_IPC_INFO_LOCKSV BIT(2) #define SOF_IPC_INFO_GDB BIT(3) +#define SOF_IPC_INFO_D3_PERSISTENT BIT(4) /* extended data types that can be appended onto end of sof_ipc_fw_ready */ enum sof_ipc_ext_data { diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h index d12736e14b69..88560281d420 100644 --- a/include/sound/sof/topology.h +++ b/include/sound/sof/topology.h @@ -87,9 +87,6 @@ struct sof_ipc_comp { */ #define SOF_BUF_UNDERRUN_PERMITTED BIT(1) -/* the UUID size in bytes, shared between FW and host */ -#define SOF_UUID_SIZE 16 - /* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */ struct sof_ipc_buffer { struct sof_ipc_comp comp; @@ -237,7 +234,7 @@ struct sof_ipc_comp_process { /* reserved for future use */ uint32_t reserved[7]; - uint8_t data[0]; + uint8_t data[]; } __packed; /* frees components, buffers and pipelines @@ -303,9 +300,4 @@ enum sof_event_types { SOF_KEYWORD_DETECT_DAPM_EVENT, }; -/* extended data struct for UUID components */ -struct sof_ipc_comp_ext { - uint8_t uuid[SOF_UUID_SIZE]; -} __packed; - #endif diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 1eccb2ac7d02..adc87de0362b 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -580,6 +580,7 @@ struct iscsi_conn { struct ahash_request *conn_tx_hash; /* Used for scheduling TX and RX connection kthreads */ cpumask_var_t conn_cpumask; + cpumask_var_t allowed_cpumask; unsigned int conn_rx_reset_cpumask:1; unsigned int conn_tx_reset_cpumask:1; /* list_head of struct iscsi_cmd for this connection */ @@ -878,6 +879,7 @@ struct iscsit_global { /* Thread Set bitmap pointer */ unsigned long *ts_bitmap; spinlock_t ts_bitmap_lock; + cpumask_var_t allowed_cpumask; /* Used for iSCSI discovery session authentication */ struct iscsi_node_acl discovery_acl; struct iscsi_portal_group *discovery_tpg; @@ 
-898,29 +900,8 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session) extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t); -static inline void iscsit_thread_check_cpumask( - struct iscsi_conn *conn, - struct task_struct *p, - int mode) -{ - /* - * mode == 1 signals iscsi_target_tx_thread() usage. - * mode == 0 signals iscsi_target_rx_thread() usage. - */ - if (mode == 1) { - if (!conn->conn_tx_reset_cpumask) - return; - conn->conn_tx_reset_cpumask = 0; - } else { - if (!conn->conn_rx_reset_cpumask) - return; - conn->conn_rx_reset_cpumask = 0; - } - /* - * Update the CPU mask for this single kthread so that - * both TX and RX kthreads are scheduled to run on the - * same CPU. - */ - set_cpus_allowed_ptr(p, conn->conn_cpumask); -} +extern void iscsit_thread_check_cpumask(struct iscsi_conn *conn, + struct task_struct *p, + int mode); + #endif /* ISCSI_TARGET_CORE_H */ diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index 7660a7846586..6a13220d2d27 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -21,6 +21,9 @@ #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) +#undef __get_sockaddr +#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) + #undef __get_rel_dynamic_array #define __get_rel_dynamic_array(field) \ ((void *)(&__entry->__rel_loc_##field) + \ @@ -37,6 +40,9 @@ #undef __get_rel_bitmask #define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field) +#undef __get_rel_sockaddr +#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field)) + #undef __perf_count #define __perf_count(c) (c) diff --git a/include/trace/define_custom_trace.h b/include/trace/define_custom_trace.h new file mode 100644 index 000000000000..5827a4c92c74 --- /dev/null +++ b/include/trace/define_custom_trace.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Trace files that want to automate creation of all tracepoints defined + * in their file should include this file. The following are macros that the + * trace file may define: + * + * TRACE_SYSTEM defines the system the tracepoint is for + * + * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h + * This macro may be defined to tell define_trace.h what file to include. + * Note, leave off the ".h". + * + * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace + * then this macro can define the path to use. Note, the path is relative to + * define_trace.h, not the file including it. Full path names for out of tree + * modules must be used. 
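The new define_custom_trace.h mirrors the long-standing define_trace.h dance: a trace header declares TRACE_CUSTOM_EVENT()s inside a guard that tolerates TRACE_CUSTOM_MULTI_READ and includes define_custom_trace.h outside that guard, while exactly one .c file defines CREATE_CUSTOM_TRACE_EVENTS before including the header. A compressed sketch of that convention with entirely invented names (trace_custom_foo.h, foo_event):

/* trace_custom_foo.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM foo

#if !defined(_TRACE_CUSTOM_FOO_H) || defined(TRACE_CUSTOM_MULTI_READ)
#define _TRACE_CUSTOM_FOO_H

#include <linux/tracepoint.h>

TRACE_CUSTOM_EVENT(foo_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_CUSTOM_FOO_H */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_custom_foo
#include <trace/define_custom_trace.h>

/* ...and in exactly one foo.c: */
#define CREATE_CUSTOM_TRACE_EVENTS
#include "trace_custom_foo.h"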
+ */ + +#ifdef CREATE_CUSTOM_TRACE_EVENTS + +/* Prevent recursion */ +#undef CREATE_CUSTOM_TRACE_EVENTS + +#include <linux/stringify.h> + +#undef TRACE_CUSTOM_EVENT +#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print) + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, name, proto, args) + +#undef TRACE_INCLUDE +#undef __TRACE_INCLUDE + +#ifndef TRACE_INCLUDE_FILE +# define TRACE_INCLUDE_FILE TRACE_SYSTEM +# define UNDEF_TRACE_INCLUDE_FILE +#endif + +#ifndef TRACE_INCLUDE_PATH +# define __TRACE_INCLUDE(system) <trace/events/system.h> +# define UNDEF_TRACE_INCLUDE_PATH +#else +# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h) +#endif + +# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system) + +/* Let the trace headers be reread */ +#define TRACE_CUSTOM_MULTI_READ + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#ifdef TRACEPOINTS_ENABLED +#include <trace/trace_custom_events.h> +#endif + +#undef TRACE_CUSTOM_EVENT +#undef DECLARE_CUSTOM_EVENT_CLASS +#undef DEFINE_CUSTOM_EVENT +#undef TRACE_CUSTOM_MULTI_READ + +/* Only undef what we defined in this file */ +#ifdef UNDEF_TRACE_INCLUDE_FILE +# undef TRACE_INCLUDE_FILE +# undef UNDEF_TRACE_INCLUDE_FILE +#endif + +#ifdef UNDEF_TRACE_INCLUDE_PATH +# undef TRACE_INCLUDE_PATH +# undef UNDEF_TRACE_INCLUDE_PATH +#endif + +/* We may be processing more files */ +#define CREATE_CUSTOM_TRACE_POINTS + +#endif /* CREATE_CUSTOM_TRACE_POINTS */ diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 0d729664b4b4..f068ff30d654 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -53,6 +53,7 @@ struct btrfs_space_info; { BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \ { BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, \ { BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, \ + { BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },\ { BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" }) #define show_root_type(obj) \ diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h index c6f5aa74db89..2c530637e10a 100644 --- a/include/trace/events/cachefiles.h +++ b/include/trace/events/cachefiles.h @@ -56,6 +56,7 @@ enum cachefiles_coherency_trace { cachefiles_coherency_set_ok, cachefiles_coherency_vol_check_cmp, cachefiles_coherency_vol_check_ok, + cachefiles_coherency_vol_check_resv, cachefiles_coherency_vol_check_xattr, cachefiles_coherency_vol_set_fail, cachefiles_coherency_vol_set_ok, @@ -139,6 +140,7 @@ enum cachefiles_error_trace { EM(cachefiles_coherency_set_ok, "SET ok ") \ EM(cachefiles_coherency_vol_check_cmp, "VOL BAD cmp ") \ EM(cachefiles_coherency_vol_check_ok, "VOL OK ") \ + EM(cachefiles_coherency_vol_check_resv, "VOL BAD resv") \ EM(cachefiles_coherency_vol_check_xattr,"VOL BAD xatt") \ EM(cachefiles_coherency_vol_set_fail, "VOL SET fail") \ E_(cachefiles_coherency_vol_set_ok, "VOL SET ok ") diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index 7d48e7079e48..c6d5d70dc7a5 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -67,10 +67,10 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages, #ifdef CONFIG_COMPACTION TRACE_EVENT(mm_compaction_migratepages, - TP_PROTO(unsigned long nr_all, + TP_PROTO(struct compact_control *cc, unsigned int nr_succeeded), - TP_ARGS(nr_all, nr_succeeded), + TP_ARGS(cc, nr_succeeded), TP_STRUCT__entry( __field(unsigned long, nr_migrated) @@ -79,7 +79,7 @@ TRACE_EVENT(mm_compaction_migratepages, 
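The compaction events below switch from a list of scalar arguments to passing struct compact_control itself. Since TP_fast_assign() only executes when the tracepoint is enabled, the extra dereferences cost nothing when tracing is off, and future fields come for free at every call site. The same pattern in miniature, with hypothetical foo_ctx/foo_begin names:

struct foo_ctx {
	unsigned long migrate_pfn;
	unsigned long free_pfn;
};

TRACE_EVENT(foo_begin,
	TP_PROTO(struct foo_ctx *c, bool sync),
	TP_ARGS(c, sync),
	TP_STRUCT__entry(
		__field(unsigned long, migrate_pfn)
		__field(unsigned long, free_pfn)
		__field(bool, sync)
	),
	/* runs only for enabled events, so the pointer chase is free otherwise */
	TP_fast_assign(
		__entry->migrate_pfn = c->migrate_pfn;
		__entry->free_pfn = c->free_pfn;
		__entry->sync = sync;
	),
	TP_printk("migrate_pfn=0x%lx free_pfn=0x%lx sync=%d",
		  __entry->migrate_pfn, __entry->free_pfn, __entry->sync)
);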
TP_fast_assign( __entry->nr_migrated = nr_succeeded; - __entry->nr_failed = nr_all - nr_succeeded; + __entry->nr_failed = cc->nr_migratepages - nr_succeeded; ), TP_printk("nr_migrated=%lu nr_failed=%lu", @@ -88,10 +88,10 @@ TRACE_EVENT(mm_compaction_migratepages, ); TRACE_EVENT(mm_compaction_begin, - TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn, - unsigned long free_pfn, unsigned long zone_end, bool sync), + TP_PROTO(struct compact_control *cc, unsigned long zone_start, + unsigned long zone_end, bool sync), - TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync), + TP_ARGS(cc, zone_start, zone_end, sync), TP_STRUCT__entry( __field(unsigned long, zone_start) @@ -103,8 +103,8 @@ TRACE_EVENT(mm_compaction_begin, TP_fast_assign( __entry->zone_start = zone_start; - __entry->migrate_pfn = migrate_pfn; - __entry->free_pfn = free_pfn; + __entry->migrate_pfn = cc->migrate_pfn; + __entry->free_pfn = cc->free_pfn; __entry->zone_end = zone_end; __entry->sync = sync; ), @@ -118,11 +118,11 @@ TRACE_EVENT(mm_compaction_begin, ); TRACE_EVENT(mm_compaction_end, - TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn, - unsigned long free_pfn, unsigned long zone_end, bool sync, + TP_PROTO(struct compact_control *cc, unsigned long zone_start, + unsigned long zone_end, bool sync, int status), - TP_ARGS(zone_start, migrate_pfn, free_pfn, zone_end, sync, status), + TP_ARGS(cc, zone_start, zone_end, sync, status), TP_STRUCT__entry( __field(unsigned long, zone_start) @@ -135,8 +135,8 @@ TRACE_EVENT(mm_compaction_end, TP_fast_assign( __entry->zone_start = zone_start; - __entry->migrate_pfn = migrate_pfn; - __entry->free_pfn = free_pfn; + __entry->migrate_pfn = cc->migrate_pfn; + __entry->free_pfn = cc->free_pfn; __entry->zone_end = zone_end; __entry->sync = sync; __entry->status = status; diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 19e957b7f941..d06ffffad434 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -95,6 +95,17 @@ TRACE_DEFINE_ENUM(ES_REFERENCED_B); { FALLOC_FL_COLLAPSE_RANGE, "COLLAPSE_RANGE"}, \ { FALLOC_FL_ZERO_RANGE, "ZERO_RANGE"}) +TRACE_DEFINE_ENUM(EXT4_FC_REASON_XATTR); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_CROSS_RENAME); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_NOMEM); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_SWAP_BOOT); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_RESIZE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_RENAME_DIR); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_FALLOC_RANGE); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_INODE_JOURNAL_DATA); +TRACE_DEFINE_ENUM(EXT4_FC_REASON_MAX); + #define show_fc_reason(reason) \ __print_symbolic(reason, \ { EXT4_FC_REASON_XATTR, "XATTR"}, \ @@ -597,44 +608,44 @@ DEFINE_EVENT(ext4__page_op, ext4_releasepage, TP_ARGS(page) ); -DECLARE_EVENT_CLASS(ext4_invalidatepage_op, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), +DECLARE_EVENT_CLASS(ext4_invalidate_folio_op, + TP_PROTO(struct folio *folio, size_t offset, size_t length), - TP_ARGS(page, offset, length), + TP_ARGS(folio, offset, length), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) __field( pgoff_t, index ) - __field( unsigned int, offset ) - __field( unsigned int, length ) + __field( size_t, offset ) + __field( size_t, length ) ), TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->ino = page->mapping->host->i_ino; - __entry->index = page->index; + __entry->dev = folio->mapping->host->i_sb->s_dev; + __entry->ino = folio->mapping->host->i_ino; + 
__entry->index = folio->index; __entry->offset = offset; __entry->length = length; ), - TP_printk("dev %d,%d ino %lu page_index %lu offset %u length %u", + TP_printk("dev %d,%d ino %lu folio_index %lu offset %zu length %zu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, (unsigned long) __entry->index, __entry->offset, __entry->length) ); -DEFINE_EVENT(ext4_invalidatepage_op, ext4_invalidatepage, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), +DEFINE_EVENT(ext4_invalidate_folio_op, ext4_invalidate_folio, + TP_PROTO(struct folio *folio, size_t offset, size_t length), - TP_ARGS(page, offset, length) + TP_ARGS(folio, offset, length) ); -DEFINE_EVENT(ext4_invalidatepage_op, ext4_journalled_invalidatepage, - TP_PROTO(struct page *page, unsigned int offset, unsigned int length), +DEFINE_EVENT(ext4_invalidate_folio_op, ext4_journalled_invalidate_folio, + TP_PROTO(struct folio *folio, size_t offset, size_t length), - TP_ARGS(page, offset, length) + TP_ARGS(folio, offset, length) ); TRACE_EVENT(ext4_discard_blocks, @@ -2643,7 +2654,7 @@ TRACE_EVENT(ext4_fc_replay_scan, __entry->off = off; ), - TP_printk("FC scan pass on dev %d,%d: error %d, off %d", + TP_printk("dev %d,%d error %d, off %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->error, __entry->off) ); @@ -2669,32 +2680,35 @@ TRACE_EVENT(ext4_fc_replay, __entry->priv2 = priv2; ), - TP_printk("FC Replay %d,%d: tag %d, ino %d, data1 %d, data2 %d", + TP_printk("dev %d,%d: tag %d, ino %d, data1 %d, data2 %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tag, __entry->ino, __entry->priv1, __entry->priv2) ); TRACE_EVENT(ext4_fc_commit_start, - TP_PROTO(struct super_block *sb), + TP_PROTO(struct super_block *sb, tid_t commit_tid), - TP_ARGS(sb), + TP_ARGS(sb, commit_tid), TP_STRUCT__entry( __field(dev_t, dev) + __field(tid_t, tid) ), TP_fast_assign( __entry->dev = sb->s_dev; + __entry->tid = commit_tid; ), - TP_printk("fast_commit started on dev %d,%d", - MAJOR(__entry->dev), MINOR(__entry->dev)) + TP_printk("dev %d,%d tid %u", MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->tid) ); TRACE_EVENT(ext4_fc_commit_stop, - TP_PROTO(struct super_block *sb, int nblks, int reason), + TP_PROTO(struct super_block *sb, int nblks, int reason, + tid_t commit_tid), - TP_ARGS(sb, nblks, reason), + TP_ARGS(sb, nblks, reason, commit_tid), TP_STRUCT__entry( __field(dev_t, dev) @@ -2703,6 +2717,7 @@ TRACE_EVENT(ext4_fc_commit_stop, __field(int, num_fc) __field(int, num_fc_ineligible) __field(int, nblks_agg) + __field(tid_t, tid) ), TP_fast_assign( @@ -2713,128 +2728,193 @@ TRACE_EVENT(ext4_fc_commit_stop, __entry->num_fc_ineligible = EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits; __entry->nblks_agg = EXT4_SB(sb)->s_fc_stats.fc_numblks; + __entry->tid = commit_tid; ), - TP_printk("fc on [%d,%d] nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d", + TP_printk("dev %d,%d nblks %d, reason %d, fc = %d, ineligible = %d, agg_nblks %d, tid %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nblks, __entry->reason, __entry->num_fc, - __entry->num_fc_ineligible, __entry->nblks_agg) + __entry->num_fc_ineligible, __entry->nblks_agg, __entry->tid) ); #define FC_REASON_NAME_STAT(reason) \ show_fc_reason(reason), \ - __entry->sbi->s_fc_stats.fc_ineligible_reason_count[reason] + __entry->fc_ineligible_rc[reason] TRACE_EVENT(ext4_fc_stats, - TP_PROTO(struct super_block *sb), - - TP_ARGS(sb), - - TP_STRUCT__entry( - __field(dev_t, dev) - __field(struct ext4_sb_info *, sbi) - __field(int, count) - ), - - 
TP_fast_assign( - __entry->dev = sb->s_dev; - __entry->sbi = EXT4_SB(sb); - ), - - TP_printk("dev %d:%d fc ineligible reasons:\n" - "%s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d, %s:%d; " - "num_commits:%ld, ineligible: %ld, numblks: %ld", - MAJOR(__entry->dev), MINOR(__entry->dev), - FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), - FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), - FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM), - FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), - FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), - FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), - FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), - __entry->sbi->s_fc_stats.fc_num_commits, - __entry->sbi->s_fc_stats.fc_ineligible_commits, - __entry->sbi->s_fc_stats.fc_numblks) - -); - -#define DEFINE_TRACE_DENTRY_EVENT(__type) \ - TRACE_EVENT(ext4_fc_track_##__type, \ - TP_PROTO(struct inode *inode, struct dentry *dentry, int ret), \ - \ - TP_ARGS(inode, dentry, ret), \ - \ - TP_STRUCT__entry( \ - __field(dev_t, dev) \ - __field(int, ino) \ - __field(int, error) \ - ), \ - \ - TP_fast_assign( \ - __entry->dev = inode->i_sb->s_dev; \ - __entry->ino = inode->i_ino; \ - __entry->error = ret; \ - ), \ - \ - TP_printk("dev %d:%d, inode %d, error %d, fc_%s", \ - MAJOR(__entry->dev), MINOR(__entry->dev), \ - __entry->ino, __entry->error, \ - #__type) \ + TP_PROTO(struct super_block *sb), + + TP_ARGS(sb), + + TP_STRUCT__entry( + __field(dev_t, dev) + __array(unsigned int, fc_ineligible_rc, EXT4_FC_REASON_MAX) + __field(unsigned long, fc_commits) + __field(unsigned long, fc_ineligible_commits) + __field(unsigned long, fc_numblks) + ), + + TP_fast_assign( + int i; + + __entry->dev = sb->s_dev; + for (i = 0; i < EXT4_FC_REASON_MAX; i++) { + __entry->fc_ineligible_rc[i] = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_reason_count[i]; + } + __entry->fc_commits = EXT4_SB(sb)->s_fc_stats.fc_num_commits; + __entry->fc_ineligible_commits = + EXT4_SB(sb)->s_fc_stats.fc_ineligible_commits; + __entry->fc_numblks = EXT4_SB(sb)->s_fc_stats.fc_numblks; + ), + + TP_printk("dev %d,%d fc ineligible reasons:\n" + "%s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u, %s:%u " + "num_commits:%lu, ineligible: %lu, numblks: %lu", + MAJOR(__entry->dev), MINOR(__entry->dev), + FC_REASON_NAME_STAT(EXT4_FC_REASON_XATTR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_CROSS_RENAME), + FC_REASON_NAME_STAT(EXT4_FC_REASON_JOURNAL_FLAG_CHANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_NOMEM), + FC_REASON_NAME_STAT(EXT4_FC_REASON_SWAP_BOOT), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RESIZE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_RENAME_DIR), + FC_REASON_NAME_STAT(EXT4_FC_REASON_FALLOC_RANGE), + FC_REASON_NAME_STAT(EXT4_FC_REASON_INODE_JOURNAL_DATA), + __entry->fc_commits, __entry->fc_ineligible_commits, + __entry->fc_numblks) +); + +DECLARE_EVENT_CLASS(ext4_fc_track_dentry, + + TP_PROTO(handle_t *handle, struct inode *inode, + struct dentry *dentry, int ret), + + TP_ARGS(handle, inode, dentry, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(tid_t, t_tid) + __field(ino_t, i_ino) + __field(tid_t, i_sync_tid) + __field(int, error) + ), + + TP_fast_assign( + struct ext4_inode_info *ei = EXT4_I(inode); + + __entry->dev = inode->i_sb->s_dev; + __entry->t_tid = handle->h_transaction->t_tid; + __entry->i_ino = inode->i_ino; + __entry->i_sync_tid = ei->i_sync_tid; + __entry->error = ret; + ), + + TP_printk("dev %d,%d, t_tid %u, ino %lu, i_sync_tid %u, error 
%d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->t_tid, __entry->i_ino, __entry->i_sync_tid, + __entry->error ) +); -DEFINE_TRACE_DENTRY_EVENT(create); -DEFINE_TRACE_DENTRY_EVENT(link); -DEFINE_TRACE_DENTRY_EVENT(unlink); +#define DEFINE_EVENT_CLASS_DENTRY(__type) \ +DEFINE_EVENT(ext4_fc_track_dentry, ext4_fc_track_##__type, \ + TP_PROTO(handle_t *handle, struct inode *inode, \ + struct dentry *dentry, int ret), \ + TP_ARGS(handle, inode, dentry, ret) \ +) + +DEFINE_EVENT_CLASS_DENTRY(create); +DEFINE_EVENT_CLASS_DENTRY(link); +DEFINE_EVENT_CLASS_DENTRY(unlink); TRACE_EVENT(ext4_fc_track_inode, - TP_PROTO(struct inode *inode, int ret), + TP_PROTO(handle_t *handle, struct inode *inode, int ret), - TP_ARGS(inode, ret), + TP_ARGS(handle, inode, ret), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(int, ino) - __field(int, error) - ), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(tid_t, t_tid) + __field(ino_t, i_ino) + __field(tid_t, i_sync_tid) + __field(int, error) + ), + + TP_fast_assign( + struct ext4_inode_info *ei = EXT4_I(inode); - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->error = ret; - ), + __entry->dev = inode->i_sb->s_dev; + __entry->t_tid = handle->h_transaction->t_tid; + __entry->i_ino = inode->i_ino; + __entry->i_sync_tid = ei->i_sync_tid; + __entry->error = ret; + ), - TP_printk("dev %d:%d, inode %d, error %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, __entry->error) + TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->t_tid, __entry->i_ino, __entry->i_sync_tid, + __entry->error) ); TRACE_EVENT(ext4_fc_track_range, - TP_PROTO(struct inode *inode, long start, long end, int ret), - - TP_ARGS(inode, start, end, ret), - - TP_STRUCT__entry( - __field(dev_t, dev) - __field(int, ino) - __field(long, start) - __field(long, end) - __field(int, error) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->start = start; - __entry->end = end; - __entry->error = ret; - ), - - TP_printk("dev %d:%d, inode %d, error %d, start %ld, end %ld", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, __entry->error, __entry->start, - __entry->end) + TP_PROTO(handle_t *handle, struct inode *inode, + long start, long end, int ret), + + TP_ARGS(handle, inode, start, end, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(tid_t, t_tid) + __field(ino_t, i_ino) + __field(tid_t, i_sync_tid) + __field(long, start) + __field(long, end) + __field(int, error) + ), + + TP_fast_assign( + struct ext4_inode_info *ei = EXT4_I(inode); + + __entry->dev = inode->i_sb->s_dev; + __entry->t_tid = handle->h_transaction->t_tid; + __entry->i_ino = inode->i_ino; + __entry->i_sync_tid = ei->i_sync_tid; + __entry->start = start; + __entry->end = end; + __entry->error = ret; + ), + + TP_printk("dev %d:%d, t_tid %u, inode %lu, i_sync_tid %u, error %d, start %ld, end %ld", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->t_tid, __entry->i_ino, __entry->i_sync_tid, + __entry->error, __entry->start, __entry->end) + ); + +TRACE_EVENT(ext4_fc_cleanup, + TP_PROTO(journal_t *journal, int full, tid_t tid), + + TP_ARGS(journal, full, tid), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(int, j_fc_off) + __field(int, full) + __field(tid_t, tid) + ), + + TP_fast_assign( + struct super_block *sb = journal->j_private; + + __entry->dev = sb->s_dev; + __entry->j_fc_off = journal->j_fc_off; + __entry->full = 
full; + __entry->tid = tid; + ), + + TP_printk("dev %d,%d, j_fc_off %d, full %d, tid %u", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->j_fc_off, __entry->full, __entry->tid) ); TRACE_EVENT(ext4_update_sb, diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index f701bb23f83c..1779e133cea0 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -956,12 +956,11 @@ TRACE_EVENT(f2fs_direct_IO_enter, __entry->rw = rw; ), - TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_hint = %x ki_ioprio = %x rw = %d", + TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d", show_dev_ino(__entry), __entry->iocb->ki_pos, __entry->len, __entry->iocb->ki_flags, - __entry->iocb->ki_hint, __entry->iocb->ki_ioprio, __entry->rw) ); diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 4fdb14a81108..d651f3437367 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -29,7 +29,6 @@ EM( SCAN_VMA_NULL, "vma_null") \ EM( SCAN_VMA_CHECK, "vma_check_failed") \ EM( SCAN_ADDRESS_RANGE, "not_suitable_address_range") \ - EM( SCAN_SWAP_CACHE_PAGE, "page_swap_cache") \ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 7346f0164cf4..cddf5b6fbeb4 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -29,22 +29,22 @@ TRACE_EVENT(io_uring_create, TP_ARGS(fd, ctx, sq_entries, cq_entries, flags), TP_STRUCT__entry ( - __field( int, fd ) - __field( void *, ctx ) + __field( int, fd ) + __field( void *, ctx ) __field( u32, sq_entries ) __field( u32, cq_entries ) __field( u32, flags ) ), TP_fast_assign( - __entry->fd = fd; + __entry->fd = fd; __entry->ctx = ctx; __entry->sq_entries = sq_entries; __entry->cq_entries = cq_entries; __entry->flags = flags; ), - TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d", + TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x", __entry->ctx, __entry->fd, __entry->sq_entries, __entry->cq_entries, __entry->flags) ); @@ -57,10 +57,9 @@ TRACE_EVENT(io_uring_create, * @opcode: describes which operation to perform * @nr_user_files: number of registered files * @nr_user_bufs: number of registered buffers - * @cq_ev_fd: whether eventfs registered or not * @ret: return code * - * Allows to trace fixed files/buffers/eventfds, that could be registered to + * Allows to trace fixed files/buffers, that could be registered to * avoid an overhead of getting references to them for every operation. This * event, together with io_uring_file_get, can provide a full picture of how * much overhead one can reduce via fixing. 
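A unifying theme of the io_uring tracepoint rework that follows is that nearly every event now carries the request pointer, user_data and opcode, so a single request can be followed from submission through completion across trace lines. A userspace-side sketch (liburing) of tagging a request so those kernel trace lines correlate back to an application object; struct foo_req and foo_submit_read are invented:

#include <errno.h>
#include <liburing.h>

struct foo_req {
	int id;		/* application bookkeeping */
};

static int foo_submit_read(struct io_uring *ring, int fd, void *buf,
			   unsigned int len, struct foo_req *req)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;
	io_uring_prep_read(sqe, fd, buf, len, 0);
	/*
	 * The pointer stored here is what shows up verbatim as
	 * "user_data 0x..." in io_uring_submit_sqe, io_uring_defer,
	 * io_uring_complete and friends.
	 */
	io_uring_sqe_set_data(sqe, req);
	return io_uring_submit(ring);
}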
@@ -68,17 +67,16 @@ TRACE_EVENT(io_uring_create, TRACE_EVENT(io_uring_register, TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files, - unsigned nr_bufs, bool eventfd, long ret), + unsigned nr_bufs, long ret), - TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret), + TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( unsigned, opcode ) - __field( unsigned, nr_files ) - __field( unsigned, nr_bufs ) - __field( bool, eventfd ) - __field( long, ret ) + __field( void *, ctx ) + __field( unsigned, opcode ) + __field( unsigned, nr_files) + __field( unsigned, nr_bufs ) + __field( long, ret ) ), TP_fast_assign( @@ -86,20 +84,21 @@ TRACE_EVENT(io_uring_register, __entry->opcode = opcode; __entry->nr_files = nr_files; __entry->nr_bufs = nr_bufs; - __entry->eventfd = eventfd; __entry->ret = ret; ), TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, " - "eventfd %d, ret %ld", + "ret %ld", __entry->ctx, __entry->opcode, __entry->nr_files, - __entry->nr_bufs, __entry->eventfd, __entry->ret) + __entry->nr_bufs, __entry->ret) ); /** * io_uring_file_get - called before getting references to an SQE file * * @ctx: pointer to a ring context structure + * @req: pointer to a submitted request + * @user_data: user data associated with the request * @fd: SQE file descriptor * * Allows to trace out how often an SQE file reference is obtained, which can @@ -108,59 +107,71 @@ TRACE_EVENT(io_uring_register, */ TRACE_EVENT(io_uring_file_get, - TP_PROTO(void *ctx, int fd), + TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd), - TP_ARGS(ctx, fd), + TP_ARGS(ctx, req, user_data, fd), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( int, fd ) + __field( void *, ctx ) + __field( void *, req ) + __field( u64, user_data ) + __field( int, fd ) ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = ctx; + __entry->req = req; + __entry->user_data = user_data; __entry->fd = fd; ), - TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd) + TP_printk("ring %p, req %p, user_data 0x%llx, fd %d", + __entry->ctx, __entry->req, __entry->user_data, __entry->fd) ); /** * io_uring_queue_async_work - called before submitting a new async work * * @ctx: pointer to a ring context structure - * @hashed: type of workqueue, hashed or normal * @req: pointer to a submitted request + * @user_data: user data associated with the request + * @opcode: opcode of request + * @flags request flags * @work: pointer to a submitted io_wq_work + * @rw: type of workqueue, hashed or normal * * Allows to trace asynchronous work submission. 
*/ TRACE_EVENT(io_uring_queue_async_work, - TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work, - unsigned int flags), + TP_PROTO(void *ctx, void * req, unsigned long long user_data, u8 opcode, + unsigned int flags, struct io_wq_work *work, int rw), - TP_ARGS(ctx, rw, req, work, flags), + TP_ARGS(ctx, req, user_data, flags, opcode, work, rw), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( int, rw ) - __field( void *, req ) - __field( struct io_wq_work *, work ) - __field( unsigned int, flags ) + __field( void *, ctx ) + __field( void *, req ) + __field( u64, user_data ) + __field( u8, opcode ) + __field( unsigned int, flags ) + __field( struct io_wq_work *, work ) + __field( int, rw ) ), TP_fast_assign( - __entry->ctx = ctx; - __entry->rw = rw; - __entry->req = req; - __entry->work = work; - __entry->flags = flags; + __entry->ctx = ctx; + __entry->req = req; + __entry->user_data = user_data; + __entry->flags = flags; + __entry->opcode = opcode; + __entry->work = work; + __entry->rw = rw; ), - TP_printk("ring %p, request %p, flags %d, %s queue, work %p", - __entry->ctx, __entry->req, __entry->flags, - __entry->rw ? "hashed" : "normal", __entry->work) + TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d, flags 0x%x, %s queue, work %p", + __entry->ctx, __entry->req, __entry->user_data, __entry->opcode, + __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work) ); /** @@ -169,30 +180,33 @@ TRACE_EVENT(io_uring_queue_async_work, * @ctx: pointer to a ring context structure * @req: pointer to a deferred request * @user_data: user data associated with the request + * @opcode: opcode of request * * Allows to track deferred requests, to get an insight about what requests are * not started immediately. */ TRACE_EVENT(io_uring_defer, - TP_PROTO(void *ctx, void *req, unsigned long long user_data), + TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode), - TP_ARGS(ctx, req, user_data), + TP_ARGS(ctx, req, user_data, opcode), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) - __field( unsigned long long, data ) + __field( void *, ctx ) + __field( void *, req ) + __field( unsigned long long, data ) + __field( u8, opcode ) ), TP_fast_assign( __entry->ctx = ctx; __entry->req = req; __entry->data = user_data; + __entry->opcode = opcode; ), - TP_printk("ring %p, request %p user_data %llu", __entry->ctx, - __entry->req, __entry->data) + TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d", + __entry->ctx, __entry->req, __entry->data, __entry->opcode) ); /** @@ -250,7 +264,7 @@ TRACE_EVENT(io_uring_cqring_wait, ), TP_fast_assign( - __entry->ctx = ctx; + __entry->ctx = ctx; __entry->min_events = min_events; ), @@ -260,7 +274,10 @@ TRACE_EVENT(io_uring_cqring_wait, /** * io_uring_fail_link - called before failing a linked request * + * @ctx: pointer to a ring context structure * @req: request, which links were cancelled + * @user_data: user data associated with the request + * @opcode: opcode of request * @link: cancelled link * * Allows to track linked requests cancellation, to see not only that some work @@ -268,27 +285,36 @@ TRACE_EVENT(io_uring_cqring_wait, */ TRACE_EVENT(io_uring_fail_link, - TP_PROTO(void *req, void *link), + TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link), - TP_ARGS(req, link), + TP_ARGS(ctx, req, user_data, opcode, link), TP_STRUCT__entry ( - __field( void *, req ) - __field( void *, link ) + __field( void *, ctx ) + __field( void *, req ) + __field( unsigned long long, 
user_data ) + __field( u8, opcode ) + __field( void *, link ) ), TP_fast_assign( - __entry->req = req; - __entry->link = link; + __entry->ctx = ctx; + __entry->req = req; + __entry->user_data = user_data; + __entry->opcode = opcode; + __entry->link = link; ), - TP_printk("request %p, link %p", __entry->req, __entry->link) + TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d, link %p", + __entry->ctx, __entry->req, __entry->user_data, __entry->opcode, + __entry->link) ); /** * io_uring_complete - called when completing an SQE * * @ctx: pointer to a ring context structure + * @req: pointer to a submitted request * @user_data: user data associated with the request * @res: result of the request * @cflags: completion flags @@ -296,12 +322,13 @@ TRACE_EVENT(io_uring_fail_link, */ TRACE_EVENT(io_uring_complete, - TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags), + TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags), - TP_ARGS(ctx, user_data, res, cflags), + TP_ARGS(ctx, req, user_data, res, cflags), TP_STRUCT__entry ( __field( void *, ctx ) + __field( void *, req ) __field( u64, user_data ) __field( int, res ) __field( unsigned, cflags ) @@ -309,14 +336,16 @@ TRACE_EVENT(io_uring_complete, TP_fast_assign( __entry->ctx = ctx; + __entry->req = req; __entry->user_data = user_data; __entry->res = res; __entry->cflags = cflags; ), - TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x", - __entry->ctx, (unsigned long long)__entry->user_data, - __entry->res, __entry->cflags) + TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x", + __entry->ctx, __entry->req, + __entry->user_data, + __entry->res, __entry->cflags) ); /** @@ -324,8 +353,8 @@ TRACE_EVENT(io_uring_complete, * * @ctx: pointer to a ring context structure * @req: pointer to a submitted request - * @opcode: opcode of request * @user_data: user data associated with the request + * @opcode: opcode of request * @flags request flags * @force_nonblock: whether a context blocking or not * @sq_thread: true if sq_thread has submitted this SQE @@ -335,34 +364,34 @@ TRACE_EVENT(io_uring_complete, */ TRACE_EVENT(io_uring_submit_sqe, - TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags, + TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags, bool force_nonblock, bool sq_thread), - TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread), + TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) - __field( u8, opcode ) - __field( u64, user_data ) - __field( u32, flags ) - __field( bool, force_nonblock ) - __field( bool, sq_thread ) + __field( void *, ctx ) + __field( void *, req ) + __field( unsigned long long, user_data ) + __field( u8, opcode ) + __field( u32, flags ) + __field( bool, force_nonblock ) + __field( bool, sq_thread ) ), TP_fast_assign( __entry->ctx = ctx; __entry->req = req; - __entry->opcode = opcode; __entry->user_data = user_data; + __entry->opcode = opcode; __entry->flags = flags; __entry->force_nonblock = force_nonblock; __entry->sq_thread = sq_thread; ), - TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, " + TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, flags 0x%x, " "non block %d, sq_thread %d", __entry->ctx, __entry->req, - __entry->opcode, (unsigned long long)__entry->user_data, + __entry->user_data, __entry->opcode, __entry->flags, __entry->force_nonblock, __entry->sq_thread) ); @@ -371,8 +400,8 @@ 
TRACE_EVENT(io_uring_submit_sqe, * * @ctx: pointer to a ring context structure * @req: pointer to the armed request - * @opcode: opcode of request * @user_data: user data associated with the request + * @opcode: opcode of request * @mask: request poll events mask * @events: registered events of interest * @@ -381,155 +410,110 @@ TRACE_EVENT(io_uring_submit_sqe, */ TRACE_EVENT(io_uring_poll_arm, - TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, + TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode, int mask, int events), - TP_ARGS(ctx, req, opcode, user_data, mask, events), + TP_ARGS(ctx, req, user_data, opcode, mask, events), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) - __field( u8, opcode ) - __field( u64, user_data ) - __field( int, mask ) - __field( int, events ) + __field( void *, ctx ) + __field( void *, req ) + __field( unsigned long long, user_data ) + __field( u8, opcode ) + __field( int, mask ) + __field( int, events ) ), TP_fast_assign( __entry->ctx = ctx; __entry->req = req; - __entry->opcode = opcode; __entry->user_data = user_data; + __entry->opcode = opcode; __entry->mask = mask; __entry->events = events; ), - TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x", - __entry->ctx, __entry->req, __entry->opcode, - (unsigned long long) __entry->user_data, + TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, mask 0x%x, events 0x%x", + __entry->ctx, __entry->req, __entry->user_data, __entry->opcode, __entry->mask, __entry->events) ); -TRACE_EVENT(io_uring_poll_wake, - - TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask), - - TP_ARGS(ctx, opcode, user_data, mask), - - TP_STRUCT__entry ( - __field( void *, ctx ) - __field( u8, opcode ) - __field( u64, user_data ) - __field( int, mask ) - ), - - TP_fast_assign( - __entry->ctx = ctx; - __entry->opcode = opcode; - __entry->user_data = user_data; - __entry->mask = mask; - ), - - TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->mask) -); - -TRACE_EVENT(io_uring_task_add, - - TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask), - - TP_ARGS(ctx, opcode, user_data, mask), - - TP_STRUCT__entry ( - __field( void *, ctx ) - __field( u8, opcode ) - __field( u64, user_data ) - __field( int, mask ) - ), - - TP_fast_assign( - __entry->ctx = ctx; - __entry->opcode = opcode; - __entry->user_data = user_data; - __entry->mask = mask; - ), - - TP_printk("ring %p, op %d, data 0x%llx, mask %x", - __entry->ctx, __entry->opcode, - (unsigned long long) __entry->user_data, - __entry->mask) -); - /* - * io_uring_task_run - called when task_work_run() executes the poll events - * notification callbacks + * io_uring_task_add - called after adding a task * * @ctx: pointer to a ring context structure - * @req: pointer to the armed request - * @opcode: opcode of request + * @req: pointer to request * @user_data: user data associated with the request + * @opcode: opcode of request + * @mask: request poll events mask * - * Allows to track when notified poll events are processed */ -TRACE_EVENT(io_uring_task_run, +TRACE_EVENT(io_uring_task_add, - TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data), + TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask), - TP_ARGS(ctx, req, opcode, user_data), + TP_ARGS(ctx, req, user_data, opcode, mask), TP_STRUCT__entry ( - __field( void *, ctx ) - __field( void *, req ) - __field( u8, opcode ) - __field( u64, user_data ) + __field( void *, 
ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( int, mask )
 ),
 TP_fast_assign(
 __entry->ctx = ctx;
 __entry->req = req;
- __entry->opcode = opcode;
 __entry->user_data = user_data;
+ __entry->opcode = opcode;
+ __entry->mask = mask;
 ),
- TP_printk("ring %p, req %p, op %d, data 0x%llx",
- __entry->ctx, __entry->req, __entry->opcode,
- (unsigned long long) __entry->user_data)
+ TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, mask %x",
+ __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
+ __entry->mask)
 );
 /*
 * io_uring_req_failed - called when an sqe is errored during submission
 *
 * @sqe: pointer to the io_uring_sqe that failed
+ * @ctx: pointer to a ring context structure
+ * @req: pointer to request
 * @error: error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
 TRACE_EVENT(io_uring_req_failed,
- TP_PROTO(const struct io_uring_sqe *sqe, int error),
+ TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error),
- TP_ARGS(sqe, error),
+ TP_ARGS(sqe, ctx, req, error),
 TP_STRUCT__entry (
- __field( u8, opcode )
- __field( u8, flags )
- __field( u8, ioprio )
- __field( u64, off )
- __field( u64, addr )
- __field( u32, len )
- __field( u32, op_flags )
- __field( u64, user_data )
- __field( u16, buf_index )
- __field( u16, personality )
- __field( u32, file_index )
- __field( u64, pad1 )
- __field( u64, pad2 )
- __field( int, error )
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, user_data )
+ __field( u8, opcode )
+ __field( u8, flags )
+ __field( u8, ioprio )
+ __field( u64, off )
+ __field( u64, addr )
+ __field( u32, len )
+ __field( u32, op_flags )
+ __field( u16, buf_index )
+ __field( u16, personality )
+ __field( u32, file_index )
+ __field( u64, pad1 )
+ __field( u64, pad2 )
+ __field( int, error )
 ),
 TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->user_data = sqe->user_data;
 __entry->opcode = sqe->opcode;
 __entry->flags = sqe->flags;
 __entry->ioprio = sqe->ioprio;
@@ -537,7 +521,6 @@ TRACE_EVENT(io_uring_req_failed,
 __entry->addr = sqe->addr;
 __entry->len = sqe->len;
 __entry->op_flags = sqe->rw_flags;
- __entry->user_data = sqe->user_data;
 __entry->buf_index = sqe->buf_index;
 __entry->personality = sqe->personality;
 __entry->file_index = sqe->file_index;
@@ -546,13 +529,15 @@ TRACE_EVENT(io_uring_req_failed,
 __entry->error = error;
 ),
- TP_printk("op %d, flags=0x%x, prio=%d, off=%llu, addr=%llu, "
- "len=%u, rw_flags=0x%x, user_data=0x%llx, buf_index=%d, "
+ TP_printk("ring %p, req %p, user_data 0x%llx, "
+ "op %d, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
+ "len=%u, rw_flags=0x%x, buf_index=%d, "
 "personality=%d, file_index=%d, pad=0x%llx/%llx, error=%d",
+ __entry->ctx, __entry->req, __entry->user_data,
 __entry->opcode, __entry->flags, __entry->ioprio,
 (unsigned long long)__entry->off, (unsigned long long) __entry->addr, __entry->len,
- __entry->op_flags, (unsigned long long) __entry->user_data,
+ __entry->op_flags,
 __entry->buf_index, __entry->personality, __entry->file_index,
 (unsigned long long) __entry->pad1, (unsigned long long) __entry->pad2, __entry->error)
diff --git a/include/trace/events/mctp.h b/include/trace/events/mctp.h
index 175b057c507f..165cf25f77a7 100644
--- a/include/trace/events/mctp.h
+++ b/include/trace/events/mctp.h
@@ -15,6 +15,7 @@ enum {
 MCTP_TRACE_KEY_REPLIED,
 MCTP_TRACE_KEY_INVALIDATED,
 MCTP_TRACE_KEY_CLOSED,
+ MCTP_TRACE_KEY_DROPPED,
 };
 #endif /*
__TRACE_MCTP_ENUMS */ @@ -22,6 +23,7 @@ TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_TIMEOUT); TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_REPLIED); TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_INVALIDATED); TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_CLOSED); +TRACE_DEFINE_ENUM(MCTP_TRACE_KEY_DROPPED); TRACE_EVENT(mctp_key_acquire, TP_PROTO(const struct mctp_sk_key *key), @@ -66,7 +68,8 @@ TRACE_EVENT(mctp_key_release, { MCTP_TRACE_KEY_TIMEOUT, "timeout" }, { MCTP_TRACE_KEY_REPLIED, "replied" }, { MCTP_TRACE_KEY_INVALIDATED, "invalidated" }, - { MCTP_TRACE_KEY_CLOSED, "closed" }) + { MCTP_TRACE_KEY_CLOSED, "closed" }, + { MCTP_TRACE_KEY_DROPPED, "dropped" }) ) ); diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h index 779f3fad9ecd..061b5128f335 100644 --- a/include/trace/events/migrate.h +++ b/include/trace/events/migrate.h @@ -105,6 +105,37 @@ TRACE_EVENT(mm_migrate_pages_start, __print_symbolic(__entry->reason, MIGRATE_REASON)) ); +DECLARE_EVENT_CLASS(migration_pte, + + TP_PROTO(unsigned long addr, unsigned long pte, int order), + + TP_ARGS(addr, pte, order), + + TP_STRUCT__entry( + __field(unsigned long, addr) + __field(unsigned long, pte) + __field(int, order) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->pte = pte; + __entry->order = order; + ), + + TP_printk("addr=%lx, pte=%lx order=%d", __entry->addr, __entry->pte, __entry->order) +); + +DEFINE_EVENT(migration_pte, set_migration_pte, + TP_PROTO(unsigned long addr, unsigned long pte, int order), + TP_ARGS(addr, pte, order) +); + +DEFINE_EVENT(migration_pte, remove_migration_pte, + TP_PROTO(unsigned long addr, unsigned long pte, int order), + TP_ARGS(addr, pte, order) +); + #endif /* _TRACE_MIGRATE_H */ /* This part must be outside protection */ diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 116ed4d5d0f8..6532119a6bf1 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -49,12 +49,20 @@ {(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \ {(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\ {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\ - {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"}, \ - {(unsigned long)__GFP_SKIP_KASAN_POISON,"__GFP_SKIP_KASAN_POISON"}\ + {(unsigned long)__GFP_ZEROTAGS, "__GFP_ZEROTAGS"} \ + +#ifdef CONFIG_KASAN_HW_TAGS +#define __def_gfpflag_names_kasan , \ + {(unsigned long)__GFP_SKIP_ZERO, "__GFP_SKIP_ZERO"}, \ + {(unsigned long)__GFP_SKIP_KASAN_POISON, "__GFP_SKIP_KASAN_POISON"}, \ + {(unsigned long)__GFP_SKIP_KASAN_UNPOISON, "__GFP_SKIP_KASAN_UNPOISON"} +#else +#define __def_gfpflag_names_kasan +#endif #define show_gfp_flags(flags) \ (flags) ? 
__print_flags(flags, "|", \ - __def_gfpflag_names \ + __def_gfpflag_names __def_gfpflag_names_kasan \ ) : "none" #ifdef CONFIG_MMU diff --git a/include/trace/events/mptcp.h b/include/trace/events/mptcp.h index 6bf43176f14c..f8e28e686c65 100644 --- a/include/trace/events/mptcp.h +++ b/include/trace/events/mptcp.h @@ -115,6 +115,10 @@ DECLARE_EVENT_CLASS(mptcp_dump_mpext, __entry->csum_reqd) ); +DEFINE_EVENT(mptcp_dump_mpext, mptcp_sendmsg_frag, + TP_PROTO(struct mptcp_ext *mpext), + TP_ARGS(mpext)); + DEFINE_EVENT(mptcp_dump_mpext, get_mapping_status, TP_PROTO(struct mptcp_ext *mpext), TP_ARGS(mpext)); diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 78c448c6ab4c..032b431b987b 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -260,13 +260,6 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry, TP_ARGS(skb) ); -DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry, - - TP_PROTO(const struct sk_buff *skb), - - TP_ARGS(skb) -); - DECLARE_EVENT_CLASS(net_dev_rx_exit_template, TP_PROTO(int ret), @@ -312,13 +305,6 @@ DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit, TP_ARGS(ret) ); -DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit, - - TP_PROTO(int ret), - - TP_ARGS(ret) -); - DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit, TP_PROTO(int ret), diff --git a/include/trace/events/random.h b/include/trace/events/random.h deleted file mode 100644 index a2d9aa16a5d7..000000000000 --- a/include/trace/events/random.h +++ /dev/null @@ -1,233 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM random - -#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_RANDOM_H - -#include <linux/writeback.h> -#include <linux/tracepoint.h> - -TRACE_EVENT(add_device_randomness, - TP_PROTO(int bytes, unsigned long IP), - - TP_ARGS(bytes, IP), - - TP_STRUCT__entry( - __field( int, bytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->bytes = bytes; - __entry->IP = IP; - ), - - TP_printk("bytes %d caller %pS", - __entry->bytes, (void *)__entry->IP) -); - -DECLARE_EVENT_CLASS(random__mix_pool_bytes, - TP_PROTO(int bytes, unsigned long IP), - - TP_ARGS(bytes, IP), - - TP_STRUCT__entry( - __field( int, bytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->bytes = bytes; - __entry->IP = IP; - ), - - TP_printk("input pool: bytes %d caller %pS", - __entry->bytes, (void *)__entry->IP) -); - -DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes, - TP_PROTO(int bytes, unsigned long IP), - - TP_ARGS(bytes, IP) -); - -DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock, - TP_PROTO(int bytes, unsigned long IP), - - TP_ARGS(bytes, IP) -); - -TRACE_EVENT(credit_entropy_bits, - TP_PROTO(int bits, int entropy_count, unsigned long IP), - - TP_ARGS(bits, entropy_count, IP), - - TP_STRUCT__entry( - __field( int, bits ) - __field( int, entropy_count ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->bits = bits; - __entry->entropy_count = entropy_count; - __entry->IP = IP; - ), - - TP_printk("input pool: bits %d entropy_count %d caller %pS", - __entry->bits, __entry->entropy_count, (void *)__entry->IP) -); - -TRACE_EVENT(debit_entropy, - TP_PROTO(int debit_bits), - - TP_ARGS( debit_bits), - - TP_STRUCT__entry( - __field( int, debit_bits ) - ), - - TP_fast_assign( - __entry->debit_bits = debit_bits; - ), - - TP_printk("input pool: debit_bits %d", __entry->debit_bits) -); - -TRACE_EVENT(add_input_randomness, - TP_PROTO(int 
input_bits), - - TP_ARGS(input_bits), - - TP_STRUCT__entry( - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->input_bits = input_bits; - ), - - TP_printk("input_pool_bits %d", __entry->input_bits) -); - -TRACE_EVENT(add_disk_randomness, - TP_PROTO(dev_t dev, int input_bits), - - TP_ARGS(dev, input_bits), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( int, input_bits ) - ), - - TP_fast_assign( - __entry->dev = dev; - __entry->input_bits = input_bits; - ), - - TP_printk("dev %d,%d input_pool_bits %d", MAJOR(__entry->dev), - MINOR(__entry->dev), __entry->input_bits) -); - -DECLARE_EVENT_CLASS(random__get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP), - - TP_STRUCT__entry( - __field( int, nbytes ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->nbytes = nbytes; - __entry->IP = IP; - ), - - TP_printk("nbytes %d caller %pS", __entry->nbytes, (void *)__entry->IP) -); - -DEFINE_EVENT(random__get_random_bytes, get_random_bytes, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP) -); - -DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch, - TP_PROTO(int nbytes, unsigned long IP), - - TP_ARGS(nbytes, IP) -); - -DECLARE_EVENT_CLASS(random__extract_entropy, - TP_PROTO(int nbytes, int entropy_count, unsigned long IP), - - TP_ARGS(nbytes, entropy_count, IP), - - TP_STRUCT__entry( - __field( int, nbytes ) - __field( int, entropy_count ) - __field(unsigned long, IP ) - ), - - TP_fast_assign( - __entry->nbytes = nbytes; - __entry->entropy_count = entropy_count; - __entry->IP = IP; - ), - - TP_printk("input pool: nbytes %d entropy_count %d caller %pS", - __entry->nbytes, __entry->entropy_count, (void *)__entry->IP) -); - - -DEFINE_EVENT(random__extract_entropy, extract_entropy, - TP_PROTO(int nbytes, int entropy_count, unsigned long IP), - - TP_ARGS(nbytes, entropy_count, IP) -); - -TRACE_EVENT(urandom_read, - TP_PROTO(int got_bits, int pool_left, int input_left), - - TP_ARGS(got_bits, pool_left, input_left), - - TP_STRUCT__entry( - __field( int, got_bits ) - __field( int, pool_left ) - __field( int, input_left ) - ), - - TP_fast_assign( - __entry->got_bits = got_bits; - __entry->pool_left = pool_left; - __entry->input_left = input_left; - ), - - TP_printk("got_bits %d nonblocking_pool_entropy_left %d " - "input_entropy_left %d", __entry->got_bits, - __entry->pool_left, __entry->input_left) -); - -TRACE_EVENT(prandom_u32, - - TP_PROTO(unsigned int ret), - - TP_ARGS(ret), - - TP_STRUCT__entry( - __field( unsigned int, ret) - ), - - TP_fast_assign( - __entry->ret = ret; - ), - - TP_printk("ret=%u" , __entry->ret) -); - -#endif /* _TRACE_RANDOM_H */ - -/* This part must be outside protection */ -#include <trace/define_trace.h> diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h index 670e41783edd..90b2fb0292cb 100644 --- a/include/trace/events/rcu.h +++ b/include/trace/events/rcu.h @@ -794,16 +794,15 @@ TRACE_EVENT_RCU(rcu_torture_read, * Tracepoint for rcu_barrier() execution. The string "s" describes * the rcu_barrier phase: * "Begin": rcu_barrier() started. + * "CB": An rcu_barrier_callback() invoked a callback, not the last. * "EarlyExit": rcu_barrier() piggybacked, thus early exit. * "Inc1": rcu_barrier() piggyback check counter incremented. - * "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks. - * "OnlineQ": rcu_barrier() found online CPU with callbacks. - * "OnlineNQ": rcu_barrier() found online CPU, no callbacks. 
+ * "Inc2": rcu_barrier() piggyback check counter incremented. * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. * "IRQNQ": An rcu_barrier_callback() callback found no callbacks. - * "CB": An rcu_barrier_callback() invoked a callback, not the last. * "LastCB": An rcu_barrier_callback() invoked the last callback. - * "Inc2": rcu_barrier() piggyback check counter incremented. + * "NQ": rcu_barrier() found a CPU with no callbacks. + * "OnlineQ": rcu_barrier() found online CPU with callbacks. * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument * is the count of remaining callbacks, and "done" is the piggybacking count. */ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 94640482cfe7..65e786756321 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -187,7 +187,9 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, TP_ARGS(p)); #ifdef CREATE_TRACE_POINTS -static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) +static inline long __trace_sched_switch_state(bool preempt, + unsigned int prev_state, + struct task_struct *p) { unsigned int state; @@ -208,7 +210,7 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * * it for left shift operation to get the correct task->state * mapping. */ - state = task_state_index(p); + state = __task_state_index(prev_state, p->exit_state); return state ? (1 << (state - 1)) : state; } @@ -220,10 +222,11 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * TRACE_EVENT(sched_switch, TP_PROTO(bool preempt, + unsigned int prev_state, struct task_struct *prev, struct task_struct *next), - TP_ARGS(preempt, prev, next), + TP_ARGS(preempt, prev_state, prev, next), TP_STRUCT__entry( __array( char, prev_comm, TASK_COMM_LEN ) @@ -239,7 +242,7 @@ TRACE_EVENT(sched_switch, memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); __entry->prev_pid = prev->pid; __entry->prev_prio = prev->prio; - __entry->prev_state = __trace_sched_switch_state(preempt, prev); + __entry->prev_state = __trace_sched_switch_state(preempt, prev_state, prev); memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->next_pid = next->pid; __entry->next_prio = next->prio; diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h index f3a4b4d60714..cee4b2b64ae4 100644 --- a/include/trace/events/scmi.h +++ b/include/trace/events/scmi.h @@ -33,6 +33,34 @@ TRACE_EVENT(scmi_xfer_begin, __entry->seq, __entry->poll) ); +TRACE_EVENT(scmi_xfer_response_wait, + TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, + u32 timeout, bool poll), + TP_ARGS(transfer_id, msg_id, protocol_id, seq, timeout, poll), + + TP_STRUCT__entry( + __field(int, transfer_id) + __field(u8, msg_id) + __field(u8, protocol_id) + __field(u16, seq) + __field(u32, timeout) + __field(bool, poll) + ), + + TP_fast_assign( + __entry->transfer_id = transfer_id; + __entry->msg_id = msg_id; + __entry->protocol_id = protocol_id; + __entry->seq = seq; + __entry->timeout = timeout; + __entry->poll = poll; + ), + + TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u tmo_ms=%u poll=%u", + __entry->transfer_id, __entry->msg_id, __entry->protocol_id, + __entry->seq, __entry->timeout, __entry->poll) +); + TRACE_EVENT(scmi_xfer_end, TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq, int status), diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index a8a64b97504d..e1670e1e4934 100644 --- 
a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -16,6 +16,51 @@ EM(SKB_DROP_REASON_TCP_CSUM, TCP_CSUM) \ EM(SKB_DROP_REASON_SOCKET_FILTER, SOCKET_FILTER) \ EM(SKB_DROP_REASON_UDP_CSUM, UDP_CSUM) \ + EM(SKB_DROP_REASON_NETFILTER_DROP, NETFILTER_DROP) \ + EM(SKB_DROP_REASON_OTHERHOST, OTHERHOST) \ + EM(SKB_DROP_REASON_IP_CSUM, IP_CSUM) \ + EM(SKB_DROP_REASON_IP_INHDR, IP_INHDR) \ + EM(SKB_DROP_REASON_IP_RPFILTER, IP_RPFILTER) \ + EM(SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, \ + UNICAST_IN_L2_MULTICAST) \ + EM(SKB_DROP_REASON_XFRM_POLICY, XFRM_POLICY) \ + EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO) \ + EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF) \ + EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM) \ + EM(SKB_DROP_REASON_TCP_MD5NOTFOUND, TCP_MD5NOTFOUND) \ + EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED, \ + TCP_MD5UNEXPECTED) \ + EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \ + EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG) \ + EM(SKB_DROP_REASON_TCP_FLAGS, TCP_FLAGS) \ + EM(SKB_DROP_REASON_TCP_ZEROWINDOW, TCP_ZEROWINDOW) \ + EM(SKB_DROP_REASON_TCP_OLD_DATA, TCP_OLD_DATA) \ + EM(SKB_DROP_REASON_TCP_OVERWINDOW, TCP_OVERWINDOW) \ + EM(SKB_DROP_REASON_TCP_OFOMERGE, TCP_OFOMERGE) \ + EM(SKB_DROP_REASON_IP_OUTNOROUTES, IP_OUTNOROUTES) \ + EM(SKB_DROP_REASON_BPF_CGROUP_EGRESS, \ + BPF_CGROUP_EGRESS) \ + EM(SKB_DROP_REASON_IPV6DISABLED, IPV6DISABLED) \ + EM(SKB_DROP_REASON_NEIGH_CREATEFAIL, NEIGH_CREATEFAIL) \ + EM(SKB_DROP_REASON_NEIGH_FAILED, NEIGH_FAILED) \ + EM(SKB_DROP_REASON_NEIGH_QUEUEFULL, NEIGH_QUEUEFULL) \ + EM(SKB_DROP_REASON_NEIGH_DEAD, NEIGH_DEAD) \ + EM(SKB_DROP_REASON_TC_EGRESS, TC_EGRESS) \ + EM(SKB_DROP_REASON_QDISC_DROP, QDISC_DROP) \ + EM(SKB_DROP_REASON_CPU_BACKLOG, CPU_BACKLOG) \ + EM(SKB_DROP_REASON_XDP, XDP) \ + EM(SKB_DROP_REASON_TC_INGRESS, TC_INGRESS) \ + EM(SKB_DROP_REASON_PTYPE_ABSENT, PTYPE_ABSENT) \ + EM(SKB_DROP_REASON_SKB_CSUM, SKB_CSUM) \ + EM(SKB_DROP_REASON_SKB_GSO_SEG, SKB_GSO_SEG) \ + EM(SKB_DROP_REASON_SKB_UCOPY_FAULT, SKB_UCOPY_FAULT) \ + EM(SKB_DROP_REASON_DEV_HDR, DEV_HDR) \ + EM(SKB_DROP_REASON_DEV_READY, DEV_READY) \ + EM(SKB_DROP_REASON_FULL_RING, FULL_RING) \ + EM(SKB_DROP_REASON_NOMEM, NOMEM) \ + EM(SKB_DROP_REASON_HDR_TRUNC, HDR_TRUNC) \ + EM(SKB_DROP_REASON_TAP_FILTER, TAP_FILTER) \ + EM(SKB_DROP_REASON_TAP_TXFILTER, TAP_TXFILTER) \ EMe(SKB_DROP_REASON_MAX, MAX) #undef EM diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 29982d60b68a..ab8ae1f6ba84 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1625,26 +1625,53 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE); { SVC_PENDING, "SVC_PENDING" }, \ { SVC_COMPLETE, "SVC_COMPLETE" }) +#define SVC_RQST_ENDPOINT_FIELDS(r) \ + __sockaddr(server, (r)->rq_xprt->xpt_locallen) \ + __sockaddr(client, (r)->rq_xprt->xpt_remotelen) \ + __field(unsigned int, netns_ino) \ + __field(u32, xid) + +#define SVC_RQST_ENDPOINT_ASSIGNMENTS(r) \ + do { \ + struct svc_xprt *xprt = (r)->rq_xprt; \ + __assign_sockaddr(server, &xprt->xpt_local, \ + xprt->xpt_locallen); \ + __assign_sockaddr(client, &xprt->xpt_remote, \ + xprt->xpt_remotelen); \ + __entry->netns_ino = xprt->xpt_net->ns.inum; \ + __entry->xid = be32_to_cpu((r)->rq_xid); \ + } while (0) + +#define SVC_RQST_ENDPOINT_FORMAT \ + "xid=0x%08x server=%pISpc client=%pISpc" + +#define SVC_RQST_ENDPOINT_VARARGS \ + __entry->xid, __get_sockaddr(server), __get_sockaddr(client) + TRACE_EVENT(svc_authenticate, TP_PROTO(const struct svc_rqst *rqst, int auth_res), TP_ARGS(rqst, auth_res), TP_STRUCT__entry( - 
__field(u32, xid) + SVC_RQST_ENDPOINT_FIELDS(rqst) + __field(unsigned long, svc_status) __field(unsigned long, auth_stat) ), TP_fast_assign( - __entry->xid = be32_to_cpu(rqst->rq_xid); + SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); + __entry->svc_status = auth_res; __entry->auth_stat = be32_to_cpu(rqst->rq_auth_stat); ), - TP_printk("xid=0x%08x auth_res=%s auth_stat=%s", - __entry->xid, svc_show_status(__entry->svc_status), - rpc_show_auth_stat(__entry->auth_stat)) + TP_printk(SVC_RQST_ENDPOINT_FORMAT + " auth_res=%s auth_stat=%s", + SVC_RQST_ENDPOINT_VARARGS, + svc_show_status(__entry->svc_status), + rpc_show_auth_stat(__entry->auth_stat)) ); TRACE_EVENT(svc_process, @@ -1680,7 +1707,6 @@ TRACE_EVENT(svc_process, ); DECLARE_EVENT_CLASS(svc_rqst_event, - TP_PROTO( const struct svc_rqst *rqst ), @@ -1688,20 +1714,20 @@ DECLARE_EVENT_CLASS(svc_rqst_event, TP_ARGS(rqst), TP_STRUCT__entry( - __field(u32, xid) + SVC_RQST_ENDPOINT_FIELDS(rqst) + __field(unsigned long, flags) - __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xid = be32_to_cpu(rqst->rq_xid); + SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); + __entry->flags = rqst->rq_flags; - __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("addr=%s xid=0x%08x flags=%s", - __get_str(addr), __entry->xid, - show_rqstp_flags(__entry->flags)) + TP_printk(SVC_RQST_ENDPOINT_FORMAT " flags=%s", + SVC_RQST_ENDPOINT_VARARGS, + show_rqstp_flags(__entry->flags)) ); #define DEFINE_SVC_RQST_EVENT(name) \ DEFINE_EVENT(svc_rqst_event, svc_##name, \ @@ -1714,34 +1740,63 @@ DEFINE_SVC_RQST_EVENT(defer); DEFINE_SVC_RQST_EVENT(drop); DECLARE_EVENT_CLASS(svc_rqst_status, - - TP_PROTO(struct svc_rqst *rqst, int status), + TP_PROTO( + const struct svc_rqst *rqst, + int status + ), TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(u32, xid) + SVC_RQST_ENDPOINT_FIELDS(rqst) + __field(int, status) __field(unsigned long, flags) - __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xid = be32_to_cpu(rqst->rq_xid); + SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); + __entry->status = status; __entry->flags = rqst->rq_flags; - __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("addr=%s xid=0x%08x status=%d flags=%s", - __get_str(addr), __entry->xid, - __entry->status, show_rqstp_flags(__entry->flags)) + TP_printk(SVC_RQST_ENDPOINT_FORMAT " status=%d flags=%s", + SVC_RQST_ENDPOINT_VARARGS, + __entry->status, show_rqstp_flags(__entry->flags)) ); DEFINE_EVENT(svc_rqst_status, svc_send, - TP_PROTO(struct svc_rqst *rqst, int status), + TP_PROTO(const struct svc_rqst *rqst, int status), TP_ARGS(rqst, status)); +TRACE_EVENT(svc_stats_latency, + TP_PROTO( + const struct svc_rqst *rqst + ), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + SVC_RQST_ENDPOINT_FIELDS(rqst) + + __field(unsigned long, execute) + __string(procedure, svc_proc_name(rqst)) + ), + + TP_fast_assign( + SVC_RQST_ENDPOINT_ASSIGNMENTS(rqst); + + __entry->execute = ktime_to_us(ktime_sub(ktime_get(), + rqst->rq_stime)); + __assign_str(procedure, svc_proc_name(rqst)); + ), + + TP_printk(SVC_RQST_ENDPOINT_FORMAT " proc=%s execute-us=%lu", + SVC_RQST_ENDPOINT_VARARGS, + __get_str(procedure), __entry->execute) +); + #define show_svc_xprt_flags(flags) \ __print_flags(flags, "|", \ { (1UL << XPT_BUSY), "XPT_BUSY"}, \ @@ -1774,65 +1829,114 @@ TRACE_EVENT(svc_xprt_create_err, __field(long, error) __string(program, program) __string(protocol, protocol) - __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + __sockaddr(addr, salen) ), TP_fast_assign( __entry->error = 
PTR_ERR(xprt); __assign_str(program, program); __assign_str(protocol, protocol); - memcpy(__entry->addr, sap, min(salen, sizeof(__entry->addr))); + __assign_sockaddr(addr, sap, salen); ), TP_printk("addr=%pISpc program=%s protocol=%s error=%ld", - __entry->addr, __get_str(program), __get_str(protocol), + __get_sockaddr(addr), __get_str(program), __get_str(protocol), __entry->error) ); +#define SVC_XPRT_ENDPOINT_FIELDS(x) \ + __sockaddr(server, (x)->xpt_locallen) \ + __sockaddr(client, (x)->xpt_remotelen) \ + __field(unsigned long, flags) \ + __field(unsigned int, netns_ino) + +#define SVC_XPRT_ENDPOINT_ASSIGNMENTS(x) \ + do { \ + __assign_sockaddr(server, &(x)->xpt_local, \ + (x)->xpt_locallen); \ + __assign_sockaddr(client, &(x)->xpt_remote, \ + (x)->xpt_remotelen); \ + __entry->flags = (x)->xpt_flags; \ + __entry->netns_ino = (x)->xpt_net->ns.inum; \ + } while (0) + +#define SVC_XPRT_ENDPOINT_FORMAT \ + "server=%pISpc client=%pISpc flags=%s" + +#define SVC_XPRT_ENDPOINT_VARARGS \ + __get_sockaddr(server), __get_sockaddr(client), \ + show_svc_xprt_flags(__entry->flags) + TRACE_EVENT(svc_xprt_enqueue, - TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst), + TP_PROTO( + const struct svc_xprt *xprt, + const struct svc_rqst *rqst + ), TP_ARGS(xprt, rqst), TP_STRUCT__entry( + SVC_XPRT_ENDPOINT_FIELDS(xprt) + __field(int, pid) - __field(unsigned long, flags) - __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( + SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt); + __entry->pid = rqst? rqst->rq_task->pid : 0; - __entry->flags = xprt->xpt_flags; - __assign_str(addr, xprt->xpt_remotebuf); ), - TP_printk("addr=%s pid=%d flags=%s", __get_str(addr), - __entry->pid, show_svc_xprt_flags(__entry->flags)) + TP_printk(SVC_XPRT_ENDPOINT_FORMAT " pid=%d", + SVC_XPRT_ENDPOINT_VARARGS, __entry->pid) +); + +TRACE_EVENT(svc_xprt_dequeue, + TP_PROTO( + const struct svc_rqst *rqst + ), + + TP_ARGS(rqst), + + TP_STRUCT__entry( + SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt) + + __field(unsigned long, wakeup) + ), + + TP_fast_assign( + SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt); + + __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(), + rqst->rq_qtime)); + ), + + TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu", + SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup) ); DECLARE_EVENT_CLASS(svc_xprt_event, - TP_PROTO(struct svc_xprt *xprt), + TP_PROTO( + const struct svc_xprt *xprt + ), TP_ARGS(xprt), TP_STRUCT__entry( - __field(unsigned long, flags) - __string(addr, xprt->xpt_remotebuf) + SVC_XPRT_ENDPOINT_FIELDS(xprt) ), TP_fast_assign( - __entry->flags = xprt->xpt_flags; - __assign_str(addr, xprt->xpt_remotebuf); + SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt); ), - TP_printk("addr=%s flags=%s", __get_str(addr), - show_svc_xprt_flags(__entry->flags)) + TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS) ); #define DEFINE_SVC_XPRT_EVENT(name) \ DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \ TP_PROTO( \ - struct svc_xprt *xprt \ + const struct svc_xprt *xprt \ ), \ TP_ARGS(xprt)) @@ -1850,44 +1954,25 @@ TRACE_EVENT(svc_xprt_accept, TP_ARGS(xprt, service), TP_STRUCT__entry( - __string(addr, xprt->xpt_remotebuf) + SVC_XPRT_ENDPOINT_FIELDS(xprt) + __string(protocol, xprt->xpt_class->xcl_name) __string(service, service) ), TP_fast_assign( - __assign_str(addr, xprt->xpt_remotebuf); + SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt); + __assign_str(protocol, xprt->xpt_class->xcl_name); __assign_str(service, service); ), - TP_printk("addr=%s protocol=%s service=%s", - __get_str(addr), __get_str(protocol), __get_str(service) + 
TP_printk(SVC_XPRT_ENDPOINT_FORMAT " protocol=%s service=%s", + SVC_XPRT_ENDPOINT_VARARGS, + __get_str(protocol), __get_str(service) ) ); -TRACE_EVENT(svc_xprt_dequeue, - TP_PROTO(struct svc_rqst *rqst), - - TP_ARGS(rqst), - - TP_STRUCT__entry( - __field(unsigned long, flags) - __field(unsigned long, wakeup) - __string(addr, rqst->rq_xprt->xpt_remotebuf) - ), - - TP_fast_assign( - __entry->flags = rqst->rq_xprt->xpt_flags; - __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(), - rqst->rq_qtime)); - __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); - ), - - TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr), - show_svc_xprt_flags(__entry->flags), __entry->wakeup) -); - TRACE_EVENT(svc_wake_up, TP_PROTO(int pid), @@ -1922,31 +2007,6 @@ TRACE_EVENT(svc_alloc_arg_err, TP_printk("pages=%u", __entry->pages) ); -TRACE_EVENT(svc_stats_latency, - TP_PROTO(const struct svc_rqst *rqst), - - TP_ARGS(rqst), - - TP_STRUCT__entry( - __field(u32, xid) - __field(unsigned long, execute) - __string(procedure, svc_proc_name(rqst)) - __string(addr, rqst->rq_xprt->xpt_remotebuf) - ), - - TP_fast_assign( - __entry->xid = be32_to_cpu(rqst->rq_xid); - __entry->execute = ktime_to_us(ktime_sub(ktime_get(), - rqst->rq_stime)); - __assign_str(procedure, svc_proc_name(rqst)); - __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); - ), - - TP_printk("addr=%s xid=0x%08x proc=%s execute-us=%lu", - __get_str(addr), __entry->xid, __get_str(procedure), - __entry->execute) -); - DECLARE_EVENT_CLASS(svc_deferred_event, TP_PROTO( const struct svc_deferred_req *dr diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h index ca3f2767828a..202b3e3e67ff 100644 --- a/include/trace/events/thp.h +++ b/include/trace/events/thp.h @@ -48,6 +48,33 @@ TRACE_EVENT(hugepage_update, TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set) ); +DECLARE_EVENT_CLASS(migration_pmd, + + TP_PROTO(unsigned long addr, unsigned long pmd), + + TP_ARGS(addr, pmd), + + TP_STRUCT__entry( + __field(unsigned long, addr) + __field(unsigned long, pmd) + ), + + TP_fast_assign( + __entry->addr = addr; + __entry->pmd = pmd; + ), + TP_printk("addr=%lx, pmd=%lx", __entry->addr, __entry->pmd) +); + +DEFINE_EVENT(migration_pmd, set_migration_pmd, + TP_PROTO(unsigned long addr, unsigned long pmd), + TP_ARGS(addr, pmd) +); + +DEFINE_EVENT(migration_pmd, remove_migration_pmd, + TP_PROTO(unsigned long addr, unsigned long pmd), + TP_ARGS(addr, pmd) +); #endif /* _TRACE_THP_H */ /* This part must be outside protection */ diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index ca2e9009a651..de136dbd623a 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -327,11 +327,11 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __print_symbolic(__entry->lru, LRU_NAMES)) ); -TRACE_EVENT(mm_vmscan_writepage, +TRACE_EVENT(mm_vmscan_write_folio, - TP_PROTO(struct page *page), + TP_PROTO(struct folio *folio), - TP_ARGS(page), + TP_ARGS(folio), TP_STRUCT__entry( __field(unsigned long, pfn) @@ -339,9 +339,9 @@ TRACE_EVENT(mm_vmscan_writepage, ), TP_fast_assign( - __entry->pfn = page_to_pfn(page); + __entry->pfn = folio_pfn(folio); __entry->reclaim_flags = trace_reclaim_flags( - page_is_file_lru(page)); + folio_is_file_lru(folio)); ), TP_printk("page=%p pfn=0x%lx flags=%s", diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index a345b1e12daf..86b2a82da546 100644 --- a/include/trace/events/writeback.h +++ 
b/include/trace/events/writeback.h @@ -735,34 +735,6 @@ TRACE_EVENT(writeback_sb_inodes_requeue, ) ); -DECLARE_EVENT_CLASS(writeback_congest_waited_template, - - TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), - - TP_ARGS(usec_timeout, usec_delayed), - - TP_STRUCT__entry( - __field( unsigned int, usec_timeout ) - __field( unsigned int, usec_delayed ) - ), - - TP_fast_assign( - __entry->usec_timeout = usec_timeout; - __entry->usec_delayed = usec_delayed; - ), - - TP_printk("usec_timeout=%u usec_delayed=%u", - __entry->usec_timeout, - __entry->usec_delayed) -); - -DEFINE_EVENT(writeback_congest_waited_template, writeback_congestion_wait, - - TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), - - TP_ARGS(usec_timeout, usec_delayed) -); - DECLARE_EVENT_CLASS(writeback_single_inode_template, TP_PROTO(struct inode *inode, diff --git a/include/trace/perf.h b/include/trace/perf.h index 5d48c46a3008..5800d13146c3 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -21,6 +21,9 @@ #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) +#undef __get_sockaddr +#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) + #undef __get_rel_dynamic_array #define __get_rel_dynamic_array(field) \ ((void *)__entry + \ @@ -38,6 +41,9 @@ #undef __get_rel_bitmask #define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field) +#undef __get_rel_sockaddr +#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field)) + #undef __perf_count #define __perf_count(c) (__count = (c)) diff --git a/include/trace/stages/init.h b/include/trace/stages/init.h new file mode 100644 index 000000000000..000bcfc8dd2e --- /dev/null +++ b/include/trace/stages/init.h @@ -0,0 +1,37 @@ + +#define __app__(x, y) str__##x##y +#define __app(x, y) __app__(x, y) + +#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name) + +#define TRACE_MAKE_SYSTEM_STR() \ + static const char TRACE_SYSTEM_STRING[] = \ + __stringify(TRACE_SYSTEM) + +TRACE_MAKE_SYSTEM_STR(); + +#undef TRACE_DEFINE_ENUM +#define TRACE_DEFINE_ENUM(a) \ + static struct trace_eval_map __used __initdata \ + __##TRACE_SYSTEM##_##a = \ + { \ + .system = TRACE_SYSTEM_STRING, \ + .eval_string = #a, \ + .eval_value = a \ + }; \ + static struct trace_eval_map __used \ + __section("_ftrace_eval_map") \ + *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a + +#undef TRACE_DEFINE_SIZEOF +#define TRACE_DEFINE_SIZEOF(a) \ + static struct trace_eval_map __used __initdata \ + __##TRACE_SYSTEM##_##a = \ + { \ + .system = TRACE_SYSTEM_STRING, \ + .eval_string = "sizeof(" #a ")", \ + .eval_value = sizeof(a) \ + }; \ + static struct trace_eval_map __used \ + __section("_ftrace_eval_map") \ + *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a diff --git a/include/trace/stages/stage1_defines.h b/include/trace/stages/stage1_defines.h new file mode 100644 index 000000000000..a16783419687 --- /dev/null +++ b/include/trace/stages/stage1_defines.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Stage 1 definitions for creating trace events */ + +#undef __field +#define __field(type, item) type item; + +#undef __field_ext +#define __field_ext(type, item, filter_type) type item; + +#undef __field_struct +#define __field_struct(type, item) type item; + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) type item; + +#undef __array +#define __array(type, item, len) type item[len]; + +#undef __dynamic_array +#define __dynamic_array(type, item, 
len) u32 __data_loc_##item; + +#undef __string +#define __string(item, src) __dynamic_array(char, item, -1) + +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) + +#undef __sockaddr +#define __sockaddr(field, len) __dynamic_array(u8, field, len) + +#undef __rel_dynamic_array +#define __rel_dynamic_array(type, item, len) u32 __rel_loc_##item; + +#undef __rel_string +#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) + +#undef __rel_string_len +#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) + +#undef __rel_bitmask +#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1) + +#undef __rel_sockaddr +#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) + +#undef TP_STRUCT__entry +#define TP_STRUCT__entry(args...) args diff --git a/include/trace/stages/stage2_defines.h b/include/trace/stages/stage2_defines.h new file mode 100644 index 000000000000..42fd1e8813ec --- /dev/null +++ b/include/trace/stages/stage2_defines.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Stage 2 definitions for creating trace events */ + +#undef TRACE_DEFINE_ENUM +#define TRACE_DEFINE_ENUM(a) + +#undef TRACE_DEFINE_SIZEOF +#define TRACE_DEFINE_SIZEOF(a) + +#undef __field +#define __field(type, item) + +#undef __field_ext +#define __field_ext(type, item, filter_type) + +#undef __field_struct +#define __field_struct(type, item) + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) + +#undef __array +#define __array(type, item, len) + +#undef __dynamic_array +#define __dynamic_array(type, item, len) u32 item; + +#undef __string +#define __string(item, src) __dynamic_array(char, item, -1) + +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) + +#undef __sockaddr +#define __sockaddr(field, len) __dynamic_array(u8, field, len) + +#undef __rel_dynamic_array +#define __rel_dynamic_array(type, item, len) u32 item; + +#undef __rel_string +#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) + +#undef __rel_string_len +#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) + +#undef __rel_bitmask +#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) + +#undef __rel_sockaddr +#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage3_defines.h b/include/trace/stages/stage3_defines.h new file mode 100644 index 000000000000..e3b183e9d18e --- /dev/null +++ b/include/trace/stages/stage3_defines.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Stage 3 definitions for creating trace events */ + +#undef __entry +#define __entry field + +#undef TP_printk +#define TP_printk(fmt, args...) 
fmt "\n", args + +#undef __get_dynamic_array +#define __get_dynamic_array(field) \ + ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) + +#undef __get_dynamic_array_len +#define __get_dynamic_array_len(field) \ + ((__entry->__data_loc_##field >> 16) & 0xffff) + +#undef __get_str +#define __get_str(field) ((char *)__get_dynamic_array(field)) + +#undef __get_rel_dynamic_array +#define __get_rel_dynamic_array(field) \ + ((void *)__entry + \ + offsetof(typeof(*__entry), __rel_loc_##field) + \ + sizeof(__entry->__rel_loc_##field) + \ + (__entry->__rel_loc_##field & 0xffff)) + +#undef __get_rel_dynamic_array_len +#define __get_rel_dynamic_array_len(field) \ + ((__entry->__rel_loc_##field >> 16) & 0xffff) + +#undef __get_rel_str +#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field)) + +#undef __get_bitmask +#define __get_bitmask(field) \ + ({ \ + void *__bitmask = __get_dynamic_array(field); \ + unsigned int __bitmask_size; \ + __bitmask_size = __get_dynamic_array_len(field); \ + trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ + }) + +#undef __get_rel_bitmask +#define __get_rel_bitmask(field) \ + ({ \ + void *__bitmask = __get_rel_dynamic_array(field); \ + unsigned int __bitmask_size; \ + __bitmask_size = __get_rel_dynamic_array_len(field); \ + trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ + }) + +#undef __get_sockaddr +#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) + +#undef __get_rel_sockaddr +#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field)) + +#undef __print_flags +#define __print_flags(flag, delim, flag_array...) \ + ({ \ + static const struct trace_print_flags __flags[] = \ + { flag_array, { -1, NULL }}; \ + trace_print_flags_seq(p, delim, flag, __flags); \ + }) + +#undef __print_symbolic +#define __print_symbolic(value, symbol_array...) \ + ({ \ + static const struct trace_print_flags symbols[] = \ + { symbol_array, { -1, NULL }}; \ + trace_print_symbols_seq(p, value, symbols); \ + }) + +#undef __print_flags_u64 +#undef __print_symbolic_u64 +#if BITS_PER_LONG == 32 +#define __print_flags_u64(flag, delim, flag_array...) \ + ({ \ + static const struct trace_print_flags_u64 __flags[] = \ + { flag_array, { -1, NULL } }; \ + trace_print_flags_seq_u64(p, delim, flag, __flags); \ + }) + +#define __print_symbolic_u64(value, symbol_array...) \ + ({ \ + static const struct trace_print_flags_u64 symbols[] = \ + { symbol_array, { -1, NULL } }; \ + trace_print_symbols_seq_u64(p, value, symbols); \ + }) +#else +#define __print_flags_u64(flag, delim, flag_array...) \ + __print_flags(flag, delim, flag_array) + +#define __print_symbolic_u64(value, symbol_array...) 
\ + __print_symbolic(value, symbol_array) +#endif + +#undef __print_hex +#define __print_hex(buf, buf_len) \ + trace_print_hex_seq(p, buf, buf_len, false) + +#undef __print_hex_str +#define __print_hex_str(buf, buf_len) \ + trace_print_hex_seq(p, buf, buf_len, true) + +#undef __print_array +#define __print_array(array, count, el_size) \ + ({ \ + BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ + el_size != 4 && el_size != 8); \ + trace_print_array_seq(p, array, count, el_size); \ + }) + +#undef __print_hex_dump +#define __print_hex_dump(prefix_str, prefix_type, \ + rowsize, groupsize, buf, len, ascii) \ + trace_print_hex_dump_seq(p, prefix_str, prefix_type, \ + rowsize, groupsize, buf, len, ascii) + +#undef __print_ns_to_secs +#define __print_ns_to_secs(value) \ + ({ \ + u64 ____val = (u64)(value); \ + do_div(____val, NSEC_PER_SEC); \ + ____val; \ + }) + +#undef __print_ns_without_secs +#define __print_ns_without_secs(value) \ + ({ \ + u64 ____val = (u64)(value); \ + (u32) do_div(____val, NSEC_PER_SEC); \ + }) diff --git a/include/trace/stages/stage4_defines.h b/include/trace/stages/stage4_defines.h new file mode 100644 index 000000000000..e80cdc397a43 --- /dev/null +++ b/include/trace/stages/stage4_defines.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Stage 4 definitions for creating trace events */ + +#undef __field_ext +#define __field_ext(_type, _item, _filter_type) { \ + .type = #_type, .name = #_item, \ + .size = sizeof(_type), .align = __alignof__(_type), \ + .is_signed = is_signed_type(_type), .filter_type = _filter_type }, + +#undef __field_struct_ext +#define __field_struct_ext(_type, _item, _filter_type) { \ + .type = #_type, .name = #_item, \ + .size = sizeof(_type), .align = __alignof__(_type), \ + 0, .filter_type = _filter_type }, + +#undef __field +#define __field(type, item) __field_ext(type, item, FILTER_OTHER) + +#undef __field_struct +#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER) + +#undef __array +#define __array(_type, _item, _len) { \ + .type = #_type"["__stringify(_len)"]", .name = #_item, \ + .size = sizeof(_type[_len]), .align = __alignof__(_type), \ + .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, + +#undef __dynamic_array +#define __dynamic_array(_type, _item, _len) { \ + .type = "__data_loc " #_type "[]", .name = #_item, \ + .size = 4, .align = 4, \ + .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, + +#undef __string +#define __string(item, src) __dynamic_array(char, item, -1) + +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) + +#undef __sockaddr +#define __sockaddr(field, len) __dynamic_array(u8, field, len) + +#undef __rel_dynamic_array +#define __rel_dynamic_array(_type, _item, _len) { \ + .type = "__rel_loc " #_type "[]", .name = #_item, \ + .size = 4, .align = 4, \ + .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, + +#undef __rel_string +#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) + +#undef __rel_string_len +#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) + +#undef __rel_bitmask +#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) + +#undef __rel_sockaddr +#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage5_defines.h b/include/trace/stages/stage5_defines.h new file mode 
100644 index 000000000000..7ee5931300e6 --- /dev/null +++ b/include/trace/stages/stage5_defines.h @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Stage 5 definitions for creating trace events */ + +/* + * remember the offset of each array from the beginning of the event. + */ + +#undef __entry +#define __entry entry + +#undef __field +#define __field(type, item) + +#undef __field_ext +#define __field_ext(type, item, filter_type) + +#undef __field_struct +#define __field_struct(type, item) + +#undef __field_struct_ext +#define __field_struct_ext(type, item, filter_type) + +#undef __array +#define __array(type, item, len) + +#undef __dynamic_array +#define __dynamic_array(type, item, len) \ + __item_length = (len) * sizeof(type); \ + __data_offsets->item = __data_size + \ + offsetof(typeof(*entry), __data); \ + __data_offsets->item |= __item_length << 16; \ + __data_size += __item_length; + +#undef __string +#define __string(item, src) __dynamic_array(char, item, \ + strlen((src) ? (const char *)(src) : "(null)") + 1) + +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1) + +#undef __rel_dynamic_array +#define __rel_dynamic_array(type, item, len) \ + __item_length = (len) * sizeof(type); \ + __data_offsets->item = __data_size + \ + offsetof(typeof(*entry), __data) - \ + offsetof(typeof(*entry), __rel_loc_##item) - \ + sizeof(u32); \ + __data_offsets->item |= __item_length << 16; \ + __data_size += __item_length; + +#undef __rel_string +#define __rel_string(item, src) __rel_dynamic_array(char, item, \ + strlen((src) ? (const char *)(src) : "(null)") + 1) + +#undef __rel_string_len +#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1) +/* + * __bitmask_size_in_bytes_raw is the number of bytes needed to hold + * num_possible_cpus(). + */ +#define __bitmask_size_in_bytes_raw(nr_bits) \ + (((nr_bits) + 7) / 8) + +#define __bitmask_size_in_longs(nr_bits) \ + ((__bitmask_size_in_bytes_raw(nr_bits) + \ + ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8)) + +/* + * __bitmask_size_in_bytes is the number of bytes needed to hold + * num_possible_cpus() padded out to the nearest long. This is what + * is saved in the buffer, just to be consistent. 
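+ *
+ * A worked example with illustrative numbers: for nr_bits = 36,
+ * __bitmask_size_in_bytes_raw(36) is (36 + 7) / 8 = 5 bytes; on a
+ * 64-bit kernel (BITS_PER_LONG == 64), __bitmask_size_in_longs(36)
+ * is then (5 + 7) / 8 = 1 long, so __bitmask_size_in_bytes(36)
+ * records 1 * 8 = 8 bytes in the buffer.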
+ */ +#define __bitmask_size_in_bytes(nr_bits) \ + (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8)) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \ + __bitmask_size_in_longs(nr_bits)) + +#undef __rel_bitmask +#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \ + __bitmask_size_in_longs(nr_bits)) + +#undef __sockaddr +#define __sockaddr(field, len) __dynamic_array(u8, field, len) + +#undef __rel_sockaddr +#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage6_defines.h b/include/trace/stages/stage6_defines.h new file mode 100644 index 000000000000..e1724f73594b --- /dev/null +++ b/include/trace/stages/stage6_defines.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Stage 6 definitions for creating trace events */ + +#undef __entry +#define __entry entry + +#undef __field +#define __field(type, item) + +#undef __field_struct +#define __field_struct(type, item) + +#undef __array +#define __array(type, item, len) + +#undef __dynamic_array +#define __dynamic_array(type, item, len) \ + __entry->__data_loc_##item = __data_offsets.item; + +#undef __string +#define __string(item, src) __dynamic_array(char, item, -1) + +#undef __string_len +#define __string_len(item, src, len) __dynamic_array(char, item, -1) + +#undef __assign_str +#define __assign_str(dst, src) \ + strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); + +#undef __assign_str_len +#define __assign_str_len(dst, src, len) \ + do { \ + memcpy(__get_str(dst), (src), (len)); \ + __get_str(dst)[len] = '\0'; \ + } while(0) + +#undef __bitmask +#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) + +#undef __get_bitmask +#define __get_bitmask(field) (char *)__get_dynamic_array(field) + +#undef __assign_bitmask +#define __assign_bitmask(dst, src, nr_bits) \ + memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) + +#undef __sockaddr +#define __sockaddr(field, len) __dynamic_array(u8, field, len) + +#undef __get_sockaddr +#define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) + +#undef __assign_sockaddr +#define __assign_sockaddr(dest, src, len) \ + memcpy(__get_dynamic_array(dest), src, len) + +#undef __rel_dynamic_array +#define __rel_dynamic_array(type, item, len) \ + __entry->__rel_loc_##item = __data_offsets.item; + +#undef __rel_string +#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) + +#undef __rel_string_len +#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) + +#undef __assign_rel_str +#define __assign_rel_str(dst, src) \ + strcpy(__get_rel_str(dst), (src) ? 
(const char *)(src) : "(null)");
+
+#undef __assign_rel_str_len
+#define __assign_rel_str_len(dst, src, len) \
+ do { \
+ memcpy(__get_rel_str(dst), (src), (len)); \
+ __get_rel_str(dst)[len] = '\0'; \
+ } while (0)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __assign_rel_bitmask
+#define __assign_rel_bitmask(dst, src, nr_bits) \
+ memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
+#undef __rel_sockaddr
+#define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len)
+
+#undef __get_rel_sockaddr
+#define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field))
+
+#undef __assign_rel_sockaddr
+#define __assign_rel_sockaddr(dest, src, len) \
+ memcpy(__get_rel_dynamic_array(dest), src, len)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
diff --git a/include/trace/stages/stage7_defines.h b/include/trace/stages/stage7_defines.h
new file mode 100644
index 000000000000..8a7ec24c246d
--- /dev/null
+++ b/include/trace/stages/stage7_defines.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Stage 7 definitions for creating trace events */
+
+#undef __entry
+#define __entry REC
+
+#undef __print_flags
+#undef __print_symbolic
+#undef __print_hex
+#undef __print_hex_str
+#undef __get_dynamic_array
+#undef __get_dynamic_array_len
+#undef __get_str
+#undef __get_bitmask
+#undef __get_sockaddr
+#undef __get_rel_dynamic_array
+#undef __get_rel_dynamic_array_len
+#undef __get_rel_str
+#undef __get_rel_bitmask
+#undef __get_rel_sockaddr
+#undef __print_array
+#undef __print_hex_dump
+
+/*
+ * The below is not executed in the kernel. It is only what is
+ * displayed in the print format for userspace to parse.
+ */
+#undef __print_ns_to_secs
+#define __print_ns_to_secs(val) (val) / 1000000000UL
+
+#undef __print_ns_without_secs
+#define __print_ns_without_secs(val) (val) % 1000000000UL
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
diff --git a/include/trace/trace_custom_events.h b/include/trace/trace_custom_events.h
new file mode 100644
index 000000000000..b567c7202339
--- /dev/null
+++ b/include/trace/trace_custom_events.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This is similar to the trace_events.h file, but is used only to
+ * create custom trace events to be attached to existing tracepoints.
+ * Whereas the TRACE_EVENT() macro (from trace_events.h) will create
+ * both the trace event and the tracepoint it will attach the event to,
+ * TRACE_CUSTOM_EVENT() creates only a custom version of an existing
+ * trace event (created by TRACE_EVENT() or DEFINE_EVENT()), and will
+ * be placed in the "custom" system.
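+ *
+ * A minimal usage sketch (the event body here is hypothetical and the
+ * usual TRACE_SYSTEM/TRACE_INCLUDE boilerplate is omitted; the prototype
+ * and arguments must match the existing tracepoint being attached to,
+ * here the sched_switch tracepoint as updated earlier in this series):
+ *
+ *	TRACE_CUSTOM_EVENT(sched_switch,
+ *		TP_PROTO(bool preempt, unsigned int prev_state,
+ *			 struct task_struct *prev, struct task_struct *next),
+ *		TP_ARGS(preempt, prev_state, prev, next),
+ *		TP_STRUCT__entry(
+ *			__field(int, prev_prio)
+ *			__field(int, next_prio)
+ *		),
+ *		TP_fast_assign(
+ *			__entry->prev_prio = prev->prio;
+ *			__entry->next_prio = next->prio;
+ *		),
+ *		TP_printk("prev_prio=%d next_prio=%d",
+ *			  __entry->prev_prio, __entry->next_prio)
+ *	);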
+ */
+
+#include <linux/trace_events.h>
+
+/* All custom events are placed in the custom group */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM custom
+
+#ifndef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR TRACE_SYSTEM
+#endif
+
+/* The init stage creates the system string and enum mappings */
+
+#include "stages/init.h"
+
+#undef TRACE_CUSTOM_EVENT
+#define TRACE_CUSTOM_EVENT(name, proto, args, tstruct, assign, print) \
+ DECLARE_CUSTOM_EVENT_CLASS(name, \
+ PARAMS(proto), \
+ PARAMS(args), \
+ PARAMS(tstruct), \
+ PARAMS(assign), \
+ PARAMS(print)); \
+ DEFINE_CUSTOM_EVENT(name, name, PARAMS(proto), PARAMS(args));
+
+/* Stage 1 creates the structure of the recorded event layout */
+
+#include "stages/stage1_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ char __data[]; \
+ }; \
+ \
+ static struct trace_event_class custom_event_class_##name;
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args) \
+ static struct trace_event_call __used \
+ __attribute__((__aligned__(4))) custom_event_##name
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 2 creates the custom class */
+
+#include "stages/stage2_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+ struct trace_custom_event_data_offsets_##call { \
+ tstruct; \
+ };
+
+#undef DEFINE_CUSTOM_EVENT
+#define DEFINE_CUSTOM_EVENT(template, name, proto, args)
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 3 creates the way to print the custom event */
+
+#include "stages/stage3_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static notrace enum print_line_t \
+trace_custom_raw_output_##call(struct trace_iterator *iter, int flags, \
+ struct trace_event *trace_event) \
+{ \
+ struct trace_seq *s = &iter->seq; \
+ struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
+ struct trace_custom_event_raw_##call *field; \
+ int ret; \
+ \
+ field = (typeof(field))iter->ent; \
+ \
+ ret = trace_raw_output_prep(iter, trace_event); \
+ if (ret != TRACE_TYPE_HANDLED) \
+ return ret; \
+ \
+ trace_event_printf(iter, print); \
+ \
+ return trace_handle_return(s); \
+} \
+static struct trace_event_functions trace_custom_event_type_funcs_##call = { \
+ .trace = trace_custom_raw_output_##call, \
+};
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 4 creates the offset layout for the fields */
+
+#include "stages/stage4_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, func, print) \
+static struct trace_event_fields trace_custom_event_fields_##call[] = { \
+ tstruct \
+ {} };
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 5 creates the helper function for dynamic fields */
+
+#include "stages/stage5_defines.h"
+
+#undef DECLARE_CUSTOM_EVENT_CLASS
+#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
+static inline notrace int trace_custom_event_get_offsets_##call( \
+ struct trace_custom_event_data_offsets_##call *__data_offsets, proto) \
+{ \
+ int __data_size = 0; \
+ int __maybe_unused __item_length; \
+ struct trace_custom_event_raw_##call __maybe_unused *entry; \
+ \
+ tstruct; \
+ \
+ return __data_size; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/* Stage 6 creates the
probe function that records the event */ + +#include "stages/stage6_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ + \ +static notrace void \ +trace_custom_event_raw_event_##call(void *__data, proto) \ +{ \ + struct trace_event_file *trace_file = __data; \ + struct trace_custom_event_data_offsets_##call __maybe_unused __data_offsets; \ + struct trace_event_buffer fbuffer; \ + struct trace_custom_event_raw_##call *entry; \ + int __data_size; \ + \ + if (trace_trigger_soft_disabled(trace_file)) \ + return; \ + \ + __data_size = trace_custom_event_get_offsets_##call(&__data_offsets, args); \ + \ + entry = trace_event_buffer_reserve(&fbuffer, trace_file, \ + sizeof(*entry) + __data_size); \ + \ + if (!entry) \ + return; \ + \ + tstruct \ + \ + { assign; } \ + \ + trace_event_buffer_commit(&fbuffer); \ +} +/* + * The ftrace_test_custom_probe is compiled out, it is only here as a build time check + * to make sure that if the tracepoint handling changes, the ftrace probe will + * fail to compile unless it too is updated. + */ + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \ +static inline void ftrace_test_custom_probe_##call(void) \ +{ \ + check_trace_callback_type_##call(trace_custom_event_raw_event_##template); \ +} + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* Stage 7 creates the actual class and event structure for the custom event */ + +#include "stages/stage7_defines.h" + +#undef DECLARE_CUSTOM_EVENT_CLASS +#define DECLARE_CUSTOM_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +static char custom_print_fmt_##call[] = print; \ +static struct trace_event_class __used __refdata custom_event_class_##call = { \ + .system = TRACE_SYSTEM_STRING, \ + .fields_array = trace_custom_event_fields_##call, \ + .fields = LIST_HEAD_INIT(custom_event_class_##call.fields),\ + .raw_init = trace_event_raw_init, \ + .probe = trace_custom_event_raw_event_##call, \ + .reg = trace_event_reg, \ +}; + +#undef DEFINE_CUSTOM_EVENT +#define DEFINE_CUSTOM_EVENT(template, call, proto, args) \ + \ +static struct trace_event_call __used custom_event_##call = { \ + .name = #call, \ + .class = &custom_event_class_##template, \ + .event.funcs = &trace_custom_event_type_funcs_##template, \ + .print_fmt = custom_print_fmt_##template, \ + .flags = TRACE_EVENT_FL_CUSTOM, \ +}; \ +static inline int trace_custom_event_##call##_update(struct tracepoint *tp) \ +{ \ + if (tp->name && strcmp(tp->name, #call) == 0) { \ + custom_event_##call.tp = tp; \ + custom_event_##call.flags = TRACE_EVENT_FL_TRACEPOINT; \ + return 1; \ + } \ + return 0; \ +} \ +static struct trace_event_call __used \ +__section("_ftrace_events") *__custom_event_##call = &custom_event_##call + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index 3d29919045af..8a8cd66cc6d5 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -24,42 +24,7 @@ #define TRACE_SYSTEM_VAR TRACE_SYSTEM #endif -#define __app__(x, y) str__##x##y -#define __app(x, y) __app__(x, y) - -#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name) - -#define TRACE_MAKE_SYSTEM_STR() \ - static const char TRACE_SYSTEM_STRING[] = \ - __stringify(TRACE_SYSTEM) - -TRACE_MAKE_SYSTEM_STR(); - -#undef TRACE_DEFINE_ENUM -#define TRACE_DEFINE_ENUM(a) \ - static struct trace_eval_map __used __initdata \ - __##TRACE_SYSTEM##_##a = \ - { \ - .system = TRACE_SYSTEM_STRING, 
\ - .eval_string = #a, \ - .eval_value = a \ - }; \ - static struct trace_eval_map __used \ - __section("_ftrace_eval_map") \ - *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a - -#undef TRACE_DEFINE_SIZEOF -#define TRACE_DEFINE_SIZEOF(a) \ - static struct trace_eval_map __used __initdata \ - __##TRACE_SYSTEM##_##a = \ - { \ - .system = TRACE_SYSTEM_STRING, \ - .eval_string = "sizeof(" #a ")", \ - .eval_value = sizeof(a) \ - }; \ - static struct trace_eval_map __used \ - __section("_ftrace_eval_map") \ - *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a +#include "stages/init.h" /* * DECLARE_EVENT_CLASS can be used to add a generic function @@ -80,48 +45,7 @@ TRACE_MAKE_SYSTEM_STR(); PARAMS(print)); \ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); - -#undef __field -#define __field(type, item) type item; - -#undef __field_ext -#define __field_ext(type, item, filter_type) type item; - -#undef __field_struct -#define __field_struct(type, item) type item; - -#undef __field_struct_ext -#define __field_struct_ext(type, item, filter_type) type item; - -#undef __array -#define __array(type, item, len) type item[len]; - -#undef __dynamic_array -#define __dynamic_array(type, item, len) u32 __data_loc_##item; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef __string_len -#define __string_len(item, src, len) __dynamic_array(char, item, -1) - -#undef __bitmask -#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) - -#undef __rel_dynamic_array -#define __rel_dynamic_array(type, item, len) u32 __rel_loc_##item; - -#undef __rel_string -#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) - -#undef __rel_string_len -#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) - -#undef __rel_bitmask -#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1) - -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args +#include "stages/stage1_defines.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ @@ -185,50 +109,7 @@ TRACE_MAKE_SYSTEM_STR(); * The size of an array is also encoded, in the higher 16 bits of <item>. 
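As a side note on the __data_loc encoding described above: the lower 16 bits of the stored u32 carry the offset of the dynamic array from the start of the entry, and the upper 16 bits carry its length. A minimal sketch of the decode, mirroring what the __get_dynamic_array()/__get_dynamic_array_len() helpers further down do (names and standalone form here are illustrative, not part of the patch):

#include <linux/types.h>

static inline void *data_loc_ptr(void *entry, __u32 data_loc)
{
	/* low 16 bits: offset from the beginning of the entry */
	return (char *)entry + (data_loc & 0xffff);
}

static inline __u32 data_loc_len(__u32 data_loc)
{
	/* high 16 bits: length of the dynamic array in bytes */
	return (data_loc >> 16) & 0xffff;
}
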
*/ -#undef TRACE_DEFINE_ENUM -#define TRACE_DEFINE_ENUM(a) - -#undef TRACE_DEFINE_SIZEOF -#define TRACE_DEFINE_SIZEOF(a) - -#undef __field -#define __field(type, item) - -#undef __field_ext -#define __field_ext(type, item, filter_type) - -#undef __field_struct -#define __field_struct(type, item) - -#undef __field_struct_ext -#define __field_struct_ext(type, item, filter_type) - -#undef __array -#define __array(type, item, len) - -#undef __dynamic_array -#define __dynamic_array(type, item, len) u32 item; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef __bitmask -#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) - -#undef __string_len -#define __string_len(item, src, len) __dynamic_array(char, item, -1) - -#undef __rel_dynamic_array -#define __rel_dynamic_array(type, item, len) u32 item; - -#undef __rel_string -#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) - -#undef __rel_string_len -#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) - -#undef __rel_bitmask -#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) +#include "stages/stage2_defines.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -300,131 +181,7 @@ TRACE_MAKE_SYSTEM_STR(); * in binary. */ -#undef __entry -#define __entry field - -#undef TP_printk -#define TP_printk(fmt, args...) fmt "\n", args - -#undef __get_dynamic_array -#define __get_dynamic_array(field) \ - ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) - -#undef __get_dynamic_array_len -#define __get_dynamic_array_len(field) \ - ((__entry->__data_loc_##field >> 16) & 0xffff) - -#undef __get_str -#define __get_str(field) ((char *)__get_dynamic_array(field)) - -#undef __get_rel_dynamic_array -#define __get_rel_dynamic_array(field) \ - ((void *)__entry + \ - offsetof(typeof(*__entry), __rel_loc_##field) + \ - sizeof(__entry->__rel_loc_##field) + \ - (__entry->__rel_loc_##field & 0xffff)) - -#undef __get_rel_dynamic_array_len -#define __get_rel_dynamic_array_len(field) \ - ((__entry->__rel_loc_##field >> 16) & 0xffff) - -#undef __get_rel_str -#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field)) - -#undef __get_bitmask -#define __get_bitmask(field) \ - ({ \ - void *__bitmask = __get_dynamic_array(field); \ - unsigned int __bitmask_size; \ - __bitmask_size = __get_dynamic_array_len(field); \ - trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ - }) - -#undef __get_rel_bitmask -#define __get_rel_bitmask(field) \ - ({ \ - void *__bitmask = __get_rel_dynamic_array(field); \ - unsigned int __bitmask_size; \ - __bitmask_size = __get_rel_dynamic_array_len(field); \ - trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ - }) - -#undef __print_flags -#define __print_flags(flag, delim, flag_array...) \ - ({ \ - static const struct trace_print_flags __flags[] = \ - { flag_array, { -1, NULL }}; \ - trace_print_flags_seq(p, delim, flag, __flags); \ - }) - -#undef __print_symbolic -#define __print_symbolic(value, symbol_array...) \ - ({ \ - static const struct trace_print_flags symbols[] = \ - { symbol_array, { -1, NULL }}; \ - trace_print_symbols_seq(p, value, symbols); \ - }) - -#undef __print_flags_u64 -#undef __print_symbolic_u64 -#if BITS_PER_LONG == 32 -#define __print_flags_u64(flag, delim, flag_array...) 
\ - ({ \ - static const struct trace_print_flags_u64 __flags[] = \ - { flag_array, { -1, NULL } }; \ - trace_print_flags_seq_u64(p, delim, flag, __flags); \ - }) - -#define __print_symbolic_u64(value, symbol_array...) \ - ({ \ - static const struct trace_print_flags_u64 symbols[] = \ - { symbol_array, { -1, NULL } }; \ - trace_print_symbols_seq_u64(p, value, symbols); \ - }) -#else -#define __print_flags_u64(flag, delim, flag_array...) \ - __print_flags(flag, delim, flag_array) - -#define __print_symbolic_u64(value, symbol_array...) \ - __print_symbolic(value, symbol_array) -#endif - -#undef __print_hex -#define __print_hex(buf, buf_len) \ - trace_print_hex_seq(p, buf, buf_len, false) - -#undef __print_hex_str -#define __print_hex_str(buf, buf_len) \ - trace_print_hex_seq(p, buf, buf_len, true) - -#undef __print_array -#define __print_array(array, count, el_size) \ - ({ \ - BUILD_BUG_ON(el_size != 1 && el_size != 2 && \ - el_size != 4 && el_size != 8); \ - trace_print_array_seq(p, array, count, el_size); \ - }) - -#undef __print_hex_dump -#define __print_hex_dump(prefix_str, prefix_type, \ - rowsize, groupsize, buf, len, ascii) \ - trace_print_hex_dump_seq(p, prefix_str, prefix_type, \ - rowsize, groupsize, buf, len, ascii) - -#undef __print_ns_to_secs -#define __print_ns_to_secs(value) \ - ({ \ - u64 ____val = (u64)(value); \ - do_div(____val, NSEC_PER_SEC); \ - ____val; \ - }) - -#undef __print_ns_without_secs -#define __print_ns_without_secs(value) \ - ({ \ - u64 ____val = (u64)(value); \ - (u32) do_div(____val, NSEC_PER_SEC); \ - }) +#include "stages/stage3_defines.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -479,59 +236,7 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#undef __field_ext -#define __field_ext(_type, _item, _filter_type) { \ - .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ - .is_signed = is_signed_type(_type), .filter_type = _filter_type }, - -#undef __field_struct_ext -#define __field_struct_ext(_type, _item, _filter_type) { \ - .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ - 0, .filter_type = _filter_type }, - -#undef __field -#define __field(type, item) __field_ext(type, item, FILTER_OTHER) - -#undef __field_struct -#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER) - -#undef __array -#define __array(_type, _item, _len) { \ - .type = #_type"["__stringify(_len)"]", .name = #_item, \ - .size = sizeof(_type[_len]), .align = __alignof__(_type), \ - .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, - -#undef __dynamic_array -#define __dynamic_array(_type, _item, _len) { \ - .type = "__data_loc " #_type "[]", .name = #_item, \ - .size = 4, .align = 4, \ - .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef __string_len -#define __string_len(item, src, len) __dynamic_array(char, item, -1) - -#undef __bitmask -#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) - -#undef __rel_dynamic_array -#define __rel_dynamic_array(_type, _item, _len) { \ - .type = "__rel_loc " #_type "[]", .name = #_item, \ - .size = 4, .align = 4, \ - .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, - -#undef __rel_string -#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) - -#undef 
__rel_string_len -#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) - -#undef __rel_bitmask -#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) +#include "stages/stage4_defines.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ @@ -544,85 +249,7 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -/* - * remember the offset of each array from the beginning of the event. - */ - -#undef __entry -#define __entry entry - -#undef __field -#define __field(type, item) - -#undef __field_ext -#define __field_ext(type, item, filter_type) - -#undef __field_struct -#define __field_struct(type, item) - -#undef __field_struct_ext -#define __field_struct_ext(type, item, filter_type) - -#undef __array -#define __array(type, item, len) - -#undef __dynamic_array -#define __dynamic_array(type, item, len) \ - __item_length = (len) * sizeof(type); \ - __data_offsets->item = __data_size + \ - offsetof(typeof(*entry), __data); \ - __data_offsets->item |= __item_length << 16; \ - __data_size += __item_length; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, \ - strlen((src) ? (const char *)(src) : "(null)") + 1) - -#undef __string_len -#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1) - -#undef __rel_dynamic_array -#define __rel_dynamic_array(type, item, len) \ - __item_length = (len) * sizeof(type); \ - __data_offsets->item = __data_size + \ - offsetof(typeof(*entry), __data) - \ - offsetof(typeof(*entry), __rel_loc_##item) - \ - sizeof(u32); \ - __data_offsets->item |= __item_length << 16; \ - __data_size += __item_length; - -#undef __rel_string -#define __rel_string(item, src) __rel_dynamic_array(char, item, \ - strlen((src) ? (const char *)(src) : "(null)") + 1) - -#undef __rel_string_len -#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1) -/* - * __bitmask_size_in_bytes_raw is the number of bytes needed to hold - * num_possible_cpus(). - */ -#define __bitmask_size_in_bytes_raw(nr_bits) \ - (((nr_bits) + 7) / 8) - -#define __bitmask_size_in_longs(nr_bits) \ - ((__bitmask_size_in_bytes_raw(nr_bits) + \ - ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8)) - -/* - * __bitmask_size_in_bytes is the number of bytes needed to hold - * num_possible_cpus() padded out to the nearest long. This is what - * is saved in the buffer, just to be consistent. 
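To make the bitmask sizing above concrete: the arithmetic rounds the raw byte count up to whole longs, so e.g. nr_bits = 9 needs 2 raw bytes, which pads out to one 8-byte long on an LP64 machine. A hedged restatement of the same computation in plain C (macro names are illustrative):

#define BITMASK_BYTES_RAW(nr_bits)	(((nr_bits) + 7) / 8)
#define BITMASK_LONGS(nr_bits) \
	((BITMASK_BYTES_RAW(nr_bits) + sizeof(long) - 1) / sizeof(long))
/* what actually lands in the ring buffer: padded to whole longs */
#define BITMASK_BYTES(nr_bits)		(BITMASK_LONGS(nr_bits) * sizeof(long))
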
- */ -#define __bitmask_size_in_bytes(nr_bits) \ - (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8)) - -#undef __bitmask -#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \ - __bitmask_size_in_longs(nr_bits)) - -#undef __rel_bitmask -#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \ - __bitmask_size_in_longs(nr_bits)) +#include "stages/stage5_defines.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -745,88 +372,7 @@ static inline notrace int trace_event_get_offsets_##call( \ #define _TRACE_PERF_INIT(call) #endif /* CONFIG_PERF_EVENTS */ -#undef __entry -#define __entry entry - -#undef __field -#define __field(type, item) - -#undef __field_struct -#define __field_struct(type, item) - -#undef __array -#define __array(type, item, len) - -#undef __dynamic_array -#define __dynamic_array(type, item, len) \ - __entry->__data_loc_##item = __data_offsets.item; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef __string_len -#define __string_len(item, src, len) __dynamic_array(char, item, -1) - -#undef __assign_str -#define __assign_str(dst, src) \ - strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)"); - -#undef __assign_str_len -#define __assign_str_len(dst, src, len) \ - do { \ - memcpy(__get_str(dst), (src), (len)); \ - __get_str(dst)[len] = '\0'; \ - } while(0) - -#undef __bitmask -#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) - -#undef __get_bitmask -#define __get_bitmask(field) (char *)__get_dynamic_array(field) - -#undef __assign_bitmask -#define __assign_bitmask(dst, src, nr_bits) \ - memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) - -#undef __rel_dynamic_array -#define __rel_dynamic_array(type, item, len) \ - __entry->__rel_loc_##item = __data_offsets.item; - -#undef __rel_string -#define __rel_string(item, src) __rel_dynamic_array(char, item, -1) - -#undef __rel_string_len -#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1) - -#undef __assign_rel_str -#define __assign_rel_str(dst, src) \ - strcpy(__get_rel_str(dst), (src) ? (const char *)(src) : "(null)"); - -#undef __assign_rel_str_len -#define __assign_rel_str_len(dst, src, len) \ - do { \ - memcpy(__get_rel_str(dst), (src), (len)); \ - __get_rel_str(dst)[len] = '\0'; \ - } while (0) - -#undef __rel_bitmask -#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) - -#undef __get_rel_bitmask -#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field) - -#undef __assign_rel_bitmask -#define __assign_rel_bitmask(dst, src, nr_bits) \ - memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) - -#undef TP_fast_assign -#define TP_fast_assign(args...) 
args - -#undef __perf_count -#define __perf_count(c) (c) - -#undef __perf_task -#define __perf_task(t) (t) +#include "stages/stage6_defines.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ @@ -872,36 +418,7 @@ static inline void ftrace_test_probe_##call(void) \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#undef __entry -#define __entry REC - -#undef __print_flags -#undef __print_symbolic -#undef __print_hex -#undef __print_hex_str -#undef __get_dynamic_array -#undef __get_dynamic_array_len -#undef __get_str -#undef __get_bitmask -#undef __get_rel_dynamic_array -#undef __get_rel_dynamic_array_len -#undef __get_rel_str -#undef __get_rel_bitmask -#undef __print_array -#undef __print_hex_dump - -/* - * The below is not executed in the kernel. It is only what is - * displayed in the print format for userspace to parse. - */ -#undef __print_ns_to_secs -#define __print_ns_to_secs(val) (val) / 1000000000UL - -#undef __print_ns_without_secs -#define __print_ns_without_secs(val) (val) % 1000000000UL - -#undef TP_printk -#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) +#include "stages/stage7_defines.h" #undef DECLARE_EVENT_CLASS #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index 1567a3294c3d..6c1aa92a92e4 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -75,6 +75,8 @@ #define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */ #define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */ +#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/uapi/asm-generic/shmbuf.h b/include/uapi/asm-generic/shmbuf.h index 2bab955e0fed..2979b6dd2c56 100644 --- a/include/uapi/asm-generic/shmbuf.h +++ b/include/uapi/asm-generic/shmbuf.h @@ -3,6 +3,8 @@ #define __ASM_GENERIC_SHMBUF_H #include <asm/bitsperlong.h> +#include <asm/ipcbuf.h> +#include <asm/posix_types.h> /* * The shmid64_ds structure for x86 architecture. 
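Returning to the MADV_DONTNEED_LOCKED addition in the mman-common.h hunk above: unlike plain MADV_DONTNEED, the new advice is also allowed to drop pages in mlock()ed ranges. A hedged user-space sketch (the fallback #define is only needed until libc headers pick up the value, which comes from this patch):

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_DONTNEED_LOCKED
#define MADV_DONTNEED_LOCKED 24	/* like DONTNEED, but drop locked pages too */
#endif

static int drop_locked_range(void *addr, size_t len)
{
	if (madvise(addr, len, MADV_DONTNEED_LOCKED) != 0) {
		perror("madvise(MADV_DONTNEED_LOCKED)");
		return -1;
	}
	return 0;
}
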
@@ -24,7 +26,7 @@ struct shmid64_ds { struct ipc64_perm shm_perm; /* operation perms */ - size_t shm_segsz; /* size of segment (bytes) */ + __kernel_size_t shm_segsz; /* size of segment (bytes) */ #if __BITS_PER_LONG == 64 long shm_atime; /* last attach time */ long shm_dtime; /* last detach time */ diff --git a/include/uapi/asm-generic/signal.h b/include/uapi/asm-generic/signal.h index f634822906e4..0eb69dc8e572 100644 --- a/include/uapi/asm-generic/signal.h +++ b/include/uapi/asm-generic/signal.h @@ -85,7 +85,7 @@ struct sigaction { typedef struct sigaltstack { void __user *ss_sp; int ss_flags; - size_t ss_size; + __kernel_size_t ss_size; } stack_t; #endif /* __ASSEMBLY__ */ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index c77a1313b3b0..467ca2f28760 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -128,6 +128,8 @@ #define SO_RESERVE_MEM 73 +#define SO_TXREHASH 74 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 0b94ec7b73e7..1d65c1fbc4ec 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -206,6 +206,8 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_OP_FREE_CTX 2 #define AMDGPU_CTX_OP_QUERY_STATE 3 #define AMDGPU_CTX_OP_QUERY_STATE2 4 +#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5 +#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6 /* GPU reset status */ #define AMDGPU_CTX_NO_RESET 0 @@ -238,10 +240,18 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_PRIORITY_HIGH 512 #define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023 +/* select a stable profiling pstate for perfmon tools */ +#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf +#define AMDGPU_CTX_STABLE_PSTATE_NONE 0 +#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1 +#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2 +#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3 +#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4 + struct drm_amdgpu_ctx_in { /** AMDGPU_CTX_OP_* */ __u32 op; - /** For future use, no flags defined so far */ + /** Flags */ __u32 flags; __u32 ctx_id; /** AMDGPU_CTX_PRIORITY_* */ @@ -262,6 +272,11 @@ union drm_amdgpu_ctx_out { /** Reset status since the last call of the ioctl. */ __u32 reset_status; } state; + + struct { + __u32 flags; + __u32 _pad; + } pstate; }; union drm_amdgpu_ctx { @@ -728,6 +743,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_FW_DMCUB 0x14 /* Subquery id: Query TOC firmware version */ #define AMDGPU_INFO_FW_TOC 0x15 + /* Subquery id: Query CAP firmware version */ + #define AMDGPU_INFO_FW_CAP 0x16 /* number of bytes moved for TTM migration */ #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f @@ -1134,6 +1151,8 @@ struct drm_amdgpu_info_video_caps { #define AMDGPU_FAMILY_NV 143 /* Navi10 */ #define AMDGPU_FAMILY_VGH 144 /* Van Gogh */ #define AMDGPU_FAMILY_YC 146 /* Yellow Carp */ +#define AMDGPU_FAMILY_GC_10_3_6 149 /* GC 10.3.6 */ +#define AMDGPU_FAMILY_GC_10_3_7 151 /* GC 10.3.7 */ #if defined(__cplusplus) } diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index e1e351682872..0a0d56a6158e 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -663,41 +663,73 @@ struct drm_mode_fb_cmd { #define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ #define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */ +/** + * struct drm_mode_fb_cmd2 - Frame-buffer metadata. + * + * This struct holds frame-buffer metadata. 
There are two ways to use it: + * + * - User-space can fill this struct and perform a &DRM_IOCTL_MODE_ADDFB2 + * ioctl to register a new frame-buffer. The new frame-buffer object ID will + * be set by the kernel in @fb_id. + * - User-space can set @fb_id and perform a &DRM_IOCTL_MODE_GETFB2 ioctl to + * fetch metadata about an existing frame-buffer. + * + * In case of planar formats, this struct allows up to 4 buffer objects with + * offsets and pitches per plane. The pitch and offset order is dictated by the + * format FourCC as defined by ``drm_fourcc.h``, e.g. NV12 is described as: + * + * YUV 4:2:0 image with a plane of 8 bit Y samples followed by an + * interleaved U/V plane containing 8 bit 2x2 subsampled colour difference + * samples. + * + * So it would consist of a Y plane at ``offsets[0]`` and a UV plane at + * ``offsets[1]``. + * + * To accommodate tiled, compressed, etc formats, a modifier can be specified. + * For more information see the "Format Modifiers" section. Note that even + * though it looks like we have a modifier per-plane, we in fact do not. The + * modifier for each plane must be identical. Thus all combinations of + * different data layouts for multi-plane formats must be enumerated as + * separate modifiers. + * + * All of the entries in @handles, @pitches, @offsets and @modifier must be + * zero when unused. Warning, for @offsets and @modifier zero can't be used to + * figure out whether the entry is used or not since it's a valid value (a zero + * offset is common, and a zero modifier is &DRM_FORMAT_MOD_LINEAR). + */ struct drm_mode_fb_cmd2 { + /** @fb_id: Object ID of the frame-buffer. */ __u32 fb_id; + /** @width: Width of the frame-buffer. */ __u32 width; + /** @height: Height of the frame-buffer. */ __u32 height; - __u32 pixel_format; /* fourcc code from drm_fourcc.h */ - __u32 flags; /* see above flags */ + /** + * @pixel_format: FourCC format code, see ``DRM_FORMAT_*`` constants in + * ``drm_fourcc.h``. + */ + __u32 pixel_format; + /** + * @flags: Frame-buffer flags (see &DRM_MODE_FB_INTERLACED and + * &DRM_MODE_FB_MODIFIERS). + */ + __u32 flags; - /* - * In case of planar formats, this ioctl allows up to 4 - * buffer objects with offsets and pitches per plane. - * The pitch and offset order is dictated by the fourcc, - * e.g. NV12 (https://fourcc.org/yuv.php#NV12) is described as: - * - * YUV 4:2:0 image with a plane of 8 bit Y samples - * followed by an interleaved U/V plane containing - * 8 bit 2x2 subsampled colour difference samples. - * - * So it would consist of Y as offsets[0] and UV as - * offsets[1]. Note that offsets[0] will generally - * be 0 (but this is not required). - * - * To accommodate tiled, compressed, etc formats, a - * modifier can be specified. The default value of zero - * indicates "native" format as specified by the fourcc. - * Vendor specific modifier token. Note that even though - * it looks like we have a modifier per-plane, we in fact - * do not. The modifier for each plane must be identical. - * Thus all combinations of different data layouts for - * multi plane formats must be enumerated as separate - * modifiers. + /** + * @handles: GEM buffer handle, one per plane. Set to 0 if the plane is + * unused. The same handle can be used for multiple planes. */ __u32 handles[4]; - __u32 pitches[4]; /* pitch for each plane */ - __u32 offsets[4]; /* offset of each plane */ - __u64 modifier[4]; /* ie, tiling, compress */ + /** @pitches: Pitch (aka. stride) in bytes, one per plane. 
*/ + __u32 pitches[4]; + /** @offsets: Offset into the buffer in bytes, one per plane. */ + __u32 offsets[4]; + /** + * @modifier: Format modifier, one per plane. See ``DRM_FORMAT_MOD_*`` + * constants in ``drm_fourcc.h``. All planes must use the same + * modifier. Ignored unless &DRM_MODE_FB_MODIFIERS is set in @flags. + */ + __u64 modifier[4]; }; #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 914ebd9290e5..05c3642aaece 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1118,10 +1118,16 @@ struct drm_i915_gem_exec_object2 { /** * When the EXEC_OBJECT_PINNED flag is specified this is populated by * the user with the GTT offset at which this object will be pinned. + * * When the I915_EXEC_NO_RELOC flag is specified this must contain the * presumed_offset of the object. + * * During execbuffer2 the kernel populates it with the value of the * current GTT offset of the object, for future presumed_offset writes. + * + * See struct drm_i915_gem_create_ext for the rules when dealing with + * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with + * minimum page sizes, like DG2. */ __u64 offset; @@ -3144,11 +3150,40 @@ struct drm_i915_gem_create_ext { * * The (page-aligned) allocated size for the object will be returned. * - * Note that for some devices we have might have further minimum - * page-size restrictions(larger than 4K), like for device local-memory. - * However in general the final size here should always reflect any - * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS - * extension to place the object in device local-memory. + * + * DG2 64K min page size implications: + * + * On discrete platforms, starting from DG2, we have to contend with GTT + * page size restrictions when dealing with I915_MEMORY_CLASS_DEVICE + * objects. Specifically the hardware only supports 64K or larger GTT + * page sizes for such memory. The kernel will already ensure that all + * I915_MEMORY_CLASS_DEVICE memory is allocated using 64K or larger page + * sizes underneath. + * + * Note that the returned size here will always reflect any required + * rounding up done by the kernel, i.e 4K will now become 64K on devices + * such as DG2. + * + * Special DG2 GTT address alignment requirement: + * + * The GTT alignment will also need to be at least 2M for such objects. + * + * Note that due to how the hardware implements 64K GTT page support, we + * have some further complications: + * + * 1) The entire PDE (which covers a 2MB virtual address range), must + * contain only 64K PTEs, i.e mixing 4K and 64K PTEs in the same + * PDE is forbidden by the hardware. + * + * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM + * objects. + * + * To keep things simple for userland, we mandate that any GTT mappings + * must be aligned to and rounded up to 2MB. The kernel will internally + * pad them out to the next 2MB boundary. As this only wastes virtual + * address space and avoids userland having to copy any needlessly + * complicated PDE sharing scheme (coloring) and only affects DG2, this + * is deemed to be a good compromise. 
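Two quick sketches for the uAPI documented above. First, filling struct drm_mode_fb_cmd2 for an NV12 buffer following the plane layout the new kernel-doc describes: Y plane at offsets[0], interleaved UV plane at offsets[1]. Dimensions, strides and the BO handle are illustrative, and the build is assumed to use the libdrm include paths; drmIoctl() is the usual libdrm wrapper:

#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <drm_fourcc.h>

static void add_nv12_fb(int drm_fd, unsigned int bo_handle)
{
	struct drm_mode_fb_cmd2 fb;

	memset(&fb, 0, sizeof(fb));
	fb.width = 1920;
	fb.height = 1080;
	fb.pixel_format = DRM_FORMAT_NV12;
	fb.handles[0] = bo_handle;		/* Y plane */
	fb.pitches[0] = 1920;
	fb.offsets[0] = 0;
	fb.handles[1] = bo_handle;		/* interleaved UV plane, same BO */
	fb.pitches[1] = 1920;
	fb.offsets[1] = 1920 * 1080;

	if (drmIoctl(drm_fd, DRM_IOCTL_MODE_ADDFB2, &fb) == 0)
		printf("new fb_id=%u\n", fb.fb_id);
}

Second, the DG2 rule spelled out in the i915 comment boils down to 2MiB alignment of GTT addresses for I915_MEMORY_CLASS_DEVICE objects, so a user-space allocator would round its softpin offsets accordingly (helper name is hypothetical):

#define SZ_2M	(2ull << 20)

static inline unsigned long long dg2_gtt_align(unsigned long long offset)
{
	/* round up to the 2MiB boundary the kernel mandates on DG2 */
	return (offset + SZ_2M - 1) & ~(SZ_2M - 1);
}
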
*/ __u64 size; /** diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 6b8fffc28a50..07efc8033492 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -67,16 +67,21 @@ struct drm_msm_timespec { __s64 tv_nsec; /* nanoseconds */ }; -#define MSM_PARAM_GPU_ID 0x01 -#define MSM_PARAM_GMEM_SIZE 0x02 -#define MSM_PARAM_CHIP_ID 0x03 -#define MSM_PARAM_MAX_FREQ 0x04 -#define MSM_PARAM_TIMESTAMP 0x05 -#define MSM_PARAM_GMEM_BASE 0x06 -#define MSM_PARAM_PRIORITIES 0x07 /* The # of priority levels */ -#define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */ -#define MSM_PARAM_FAULTS 0x09 -#define MSM_PARAM_SUSPENDS 0x0a +/* Below "RO" indicates a read-only param, "WO" indicates write-only, and + * "RW" indicates a param that can be both read (GET_PARAM) and written + * (SET_PARAM) + */ +#define MSM_PARAM_GPU_ID 0x01 /* RO */ +#define MSM_PARAM_GMEM_SIZE 0x02 /* RO */ +#define MSM_PARAM_CHIP_ID 0x03 /* RO */ +#define MSM_PARAM_MAX_FREQ 0x04 /* RO */ +#define MSM_PARAM_TIMESTAMP 0x05 /* RO */ +#define MSM_PARAM_GMEM_BASE 0x06 /* RO */ +#define MSM_PARAM_PRIORITIES 0x07 /* RO: The # of priority levels */ +#define MSM_PARAM_PP_PGTABLE 0x08 /* RO: Deprecated, always returns zero */ +#define MSM_PARAM_FAULTS 0x09 /* RO */ +#define MSM_PARAM_SUSPENDS 0x0a /* RO */ +#define MSM_PARAM_SYSPROF 0x0b /* WO: 1 preserves perfcntrs, 2 also disables suspend */ /* For backwards compat. The original support for preemption was based on * a single ring per priority level so # of priority levels equals the # @@ -227,6 +232,7 @@ struct drm_msm_gem_submit_bo { #define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */ #define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */ #define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */ +#define MSM_SUBMIT_FENCE_SN_IN 0x02000000 /* userspace passes in seqno fence */ #define MSM_SUBMIT_FLAGS ( \ MSM_SUBMIT_NO_IMPLICIT | \ MSM_SUBMIT_FENCE_FD_IN | \ @@ -234,6 +240,7 @@ struct drm_msm_gem_submit_bo { MSM_SUBMIT_SUDO | \ MSM_SUBMIT_SYNCOBJ_IN | \ MSM_SUBMIT_SYNCOBJ_OUT | \ + MSM_SUBMIT_FENCE_SN_IN | \ 0) #define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. 
*/ @@ -253,7 +260,7 @@ struct drm_msm_gem_submit_syncobj { */ struct drm_msm_gem_submit { __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */ - __u32 fence; /* out */ + __u32 fence; /* out (or in with MSM_SUBMIT_FENCE_SN_IN flag) */ __u32 nr_bos; /* in, number of submit_bo's */ __u32 nr_cmds; /* in, number of submit_cmd's */ __u64 bos; /* in, ptr to array of submit_bo's */ @@ -333,9 +340,7 @@ struct drm_msm_submitqueue_query { }; #define DRM_MSM_GET_PARAM 0x00 -/* placeholder: #define DRM_MSM_SET_PARAM 0x01 - */ #define DRM_MSM_GEM_NEW 0x02 #define DRM_MSM_GEM_INFO 0x03 #define DRM_MSM_GEM_CPU_PREP 0x04 @@ -351,6 +356,7 @@ struct drm_msm_submitqueue_query { #define DRM_MSM_SUBMITQUEUE_QUERY 0x0C #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) +#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param) #define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new) #define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info) #define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep) diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index 061e700dd06c..9e40277d8185 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -84,14 +84,14 @@ struct drm_panfrost_wait_bo { __s64 timeout_ns; /* absolute */ }; +/* Valid flags to pass to drm_panfrost_create_bo */ #define PANFROST_BO_NOEXEC 1 #define PANFROST_BO_HEAP 2 /** * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs. * - * There are currently no values for the flags argument, but it may be - * used in a future extension. + * The flags argument is a bit mask of PANFROST_BO_* flags. */ struct drm_panfrost_create_bo { __u32 size; diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 3246f2c74696..11157fae8a8e 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -288,8 +288,8 @@ struct binder_transaction_data { /* General information about the transaction. */ __u32 flags; - pid_t sender_pid; - uid_t sender_euid; + __kernel_pid_t sender_pid; + __kernel_uid_t sender_euid; binder_size_t data_size; /* number of bytes of data */ binder_size_t offsets_size; /* number of bytes of offsets */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b0383d371b9a..d14b10b85e51 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -330,6 +330,8 @@ union bpf_iter_link_info { * *ctx_out*, *data_in* and *data_out* must be NULL. * *repeat* must be zero. * + * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN. + * * Return * Returns zero on success. On error, -1 is returned and *errno* * is set appropriately. @@ -995,6 +997,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, + BPF_TRACE_KPROBE_MULTI, __MAX_BPF_ATTACH_TYPE }; @@ -1009,6 +1012,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETNS = 5, BPF_LINK_TYPE_XDP = 6, BPF_LINK_TYPE_PERF_EVENT = 7, + BPF_LINK_TYPE_KPROBE_MULTI = 8, MAX_BPF_LINK_TYPE, }; @@ -1111,6 +1115,16 @@ enum bpf_link_type { */ #define BPF_F_SLEEPABLE (1U << 4) +/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program + * fully support xdp frags. 
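Looping back to the Panfrost change above: the flags field of drm_panfrost_create_bo is now a documented bit mask rather than a reserved zero. A minimal sketch of creating a non-executable heap BO (error handling elided; struct and ioctl number per panfrost_drm.h):

#include <string.h>
#include <xf86drm.h>
#include <drm/panfrost_drm.h>

static unsigned int create_heap_bo(int fd, unsigned int size)
{
	struct drm_panfrost_create_bo create;

	memset(&create, 0, sizeof(create));
	create.size = size;
	create.flags = PANFROST_BO_NOEXEC | PANFROST_BO_HEAP;

	if (drmIoctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create) != 0)
		return 0;
	return create.handle;
}
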
+ */ +#define BPF_F_XDP_HAS_FRAGS (1U << 5) + +/* link_create.kprobe_multi.flags used in LINK_CREATE command for + * BPF_TRACE_KPROBE_MULTI attach type to create return probe. + */ +#define BPF_F_KPROBE_MULTI_RETURN (1U << 0) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * the following extensions: * @@ -1225,6 +1239,8 @@ enum { /* If set, run the test on the cpu specified by bpf_attr.test.cpu */ #define BPF_F_TEST_RUN_ON_CPU (1U << 0) +/* If set, XDP frames will be transmitted after processing */ +#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { @@ -1386,6 +1402,7 @@ union bpf_attr { __aligned_u64 ctx_out; __u32 flags; __u32 cpu; + __u32 batch_size; } test; struct { /* anonymous struct used by BPF_*_GET_*_ID */ @@ -1465,6 +1482,13 @@ union bpf_attr { */ __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + __aligned_u64 syms; + __aligned_u64 addrs; + __aligned_u64 cookies; + } kprobe_multi; }; } link_create; @@ -1775,6 +1799,8 @@ union bpf_attr { * 0 on success, or a negative error in case of failure. * * u64 bpf_get_current_pid_tgid(void) + * Description + * Get the current pid and tgid. * Return * A 64-bit integer containing the current tgid and pid, and * created as such: @@ -1782,6 +1808,8 @@ union bpf_attr { * *current_task*\ **->pid**. * * u64 bpf_get_current_uid_gid(void) + * Description + * Get the current uid and gid. * Return * A 64-bit integer containing the current GID and UID, and * created as such: *current_gid* **<< 32 \|** *current_uid*. @@ -2256,6 +2284,8 @@ union bpf_attr { * The 32-bit hash. * * u64 bpf_get_current_task(void) + * Description + * Get the current task. * Return * A pointer to the current task struct. * @@ -2286,8 +2316,8 @@ union bpf_attr { * Return * The return value depends on the result of the test, and can be: * - * * 0, if current task belongs to the cgroup2. - * * 1, if current task does not belong to the cgroup2. + * * 1, if current task belongs to the cgroup2. + * * 0, if current task does not belong to the cgroup2. * * A negative error code, if an error occurred. * * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) @@ -2369,6 +2399,8 @@ union bpf_attr { * indicate that the hash is outdated and to trigger a * recalculation the next time the kernel tries to access this * hash or when the **bpf_get_hash_recalc**\ () helper is called. + * Return + * void. * * long bpf_get_numa_node_id(void) * Description @@ -2466,6 +2498,8 @@ union bpf_attr { * A 8-byte long unique number or 0 if *sk* is NULL. * * u32 bpf_get_socket_uid(struct sk_buff *skb) + * Description + * Get the owner UID of the socked associated to *skb*. * Return * The owner UID of the socket associated to *skb*. If the socket * is **NULL**, or if it is not a full socket (i.e. if it is a @@ -2975,8 +3009,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack=<new value> * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header) * Description @@ -3240,6 +3274,9 @@ union bpf_attr { * The id is returned or 0 in case the id could not be retrieved. * * u64 bpf_get_current_cgroup_id(void) + * Description + * Get the current cgroup id based on the cgroup within which + * the current task is running. 
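For the BPF_TRACE_KPROBE_MULTI plumbing introduced above, attachment goes through the BPF_LINK_CREATE command using the new link_create.kprobe_multi fields. A hedged raw-syscall sketch — prog_fd is assumed to be an already-loaded kprobe.multi program, and libbpf provides a higher-level wrapper for the same operation:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int attach_kprobe_multi(int prog_fd, const char **syms, uint32_t cnt,
			       int retprobe)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.attach_type = BPF_TRACE_KPROBE_MULTI;
	attr.link_create.kprobe_multi.cnt = cnt;
	attr.link_create.kprobe_multi.syms = (uint64_t)(uintptr_t)syms;
	attr.link_create.kprobe_multi.flags =
		retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;

	/* returns a link fd on success, -1 on error */
	return syscall(SYS_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}
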
* Return * A 64-bit integer containing the current cgroup id based * on the cgroup within which the current task is running. @@ -4279,8 +4316,8 @@ union bpf_attr { * * # sysctl kernel.perf_event_max_stack=<new value> * Return - * A non-negative value equal to or less than *size* on success, - * or a negative error in case of failure. + * The non-negative copied *buf* length equal to or less than + * *size* on success, or a negative error in case of failure. * * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags) * Description @@ -5018,6 +5055,94 @@ union bpf_attr { * * Return * The number of arguments of the traced function. + * + * int bpf_get_retval(void) + * Description + * Get the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * The syscall's return value. + * + * int bpf_set_retval(int retval) + * Description + * Set the syscall's return value that will be returned to userspace. + * + * This helper is currently supported by cgroup programs only. + * Return + * 0 on success, or a negative error in case of failure. + * + * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md) + * Description + * Get the total size of a given xdp buff (linear and paged area) + * Return + * The total size of a given xdp buffer. + * + * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * This helper is provided as an easy way to load data from a + * xdp buffer. It can be used to load *len* bytes from *offset* from + * the frame associated to *xdp_md*, into the buffer pointed by + * *buf*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len) + * Description + * Store *len* bytes from buffer *buf* into the frame + * associated to *xdp_md*, at *offset*. + * Return + * 0 on success, or a negative error in case of failure. + * + * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags) + * Description + * Read *size* bytes from user space address *user_ptr* in *tsk*'s + * address space, and stores the data in *dst*. *flags* is not + * used yet and is provided for future extensibility. This helper + * can only be used by sleepable programs. + * Return + * 0 on success, or a negative error in case of failure. On error + * *dst* buffer is zeroed out. + * + * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type) + * Description + * Change the __sk_buff->tstamp_type to *tstamp_type* + * and set *tstamp* to the __sk_buff->tstamp together. + * + * If there is no need to change the __sk_buff->tstamp_type, + * the tstamp value can be directly written to __sk_buff->tstamp + * instead. + * + * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that + * will be kept during bpf_redirect_*(). A non zero + * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO + * *tstamp_type*. + * + * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used + * with a zero *tstamp*. + * + * Only IPv4 and IPv6 skb->protocol are supported. + * + * This function is most useful when it needs to set a + * mono delivery time to __sk_buff->tstamp and then + * bpf_redirect_*() to the egress of an iface. For example, + * changing the (rcv) timestamp in __sk_buff->tstamp at + * ingress to a mono delivery time and then bpf_redirect_*() + * to sch_fq@phy-dev. + * Return + * 0 on success. 
+ * **-EINVAL** for invalid input + * **-EOPNOTSUPP** for unsupported protocol + * + * long bpf_ima_file_hash(struct file *file, void *dst, u32 size) + * Description + * Returns a calculated IMA hash of the *file*. + * If the hash is larger than *size*, then only *size* + * bytes will be copied to *dst* + * Return + * The **hash_algo** is returned on success, + * **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if + * invalid arguments are passed. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5206,6 +5331,14 @@ union bpf_attr { FN(get_func_arg), \ FN(get_func_ret), \ FN(get_func_arg_cnt), \ + FN(get_retval), \ + FN(set_retval), \ + FN(xdp_get_buff_len), \ + FN(xdp_load_bytes), \ + FN(xdp_store_bytes), \ + FN(copy_from_user_task), \ + FN(skb_set_tstamp), \ + FN(ima_file_hash), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5395,6 +5528,15 @@ union { \ __u64 :64; \ } __attribute__((aligned(8))) +enum { + BPF_SKB_TSTAMP_UNSPEC, + BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ + /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, + * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC + * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + */ +}; + /* user accessible mirror of in-kernel sk_buff. * new fields can only be added to the end of this structure */ @@ -5435,7 +5577,8 @@ struct __sk_buff { __u32 gso_segs; __bpf_md_ptr(struct bpf_sock *, sk); __u32 gso_size; - __u32 :32; /* Padding, future use. */ + __u8 tstamp_type; + __u32 :24; /* Padding, future use. */ __u64 hwtstamp; }; @@ -5500,7 +5643,8 @@ struct bpf_sock { __u32 src_ip4; __u32 src_ip6[4]; __u32 src_port; /* host byte order */ - __u32 dst_port; /* network byte order */ + __be16 dst_port; /* network byte order */ + __u16 :16; /* zero padding */ __u32 dst_ip4; __u32 dst_ip6[4]; __u32 state; @@ -6378,7 +6522,8 @@ struct bpf_sk_lookup { __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */ __u32 remote_ip4; /* Network byte order */ __u32 remote_ip6[4]; /* Network byte order */ - __u32 remote_port; /* Network byte order */ + __be16 remote_port; /* Network byte order */ + __u16 :16; /* Zero padding */ __u32 local_ip4; /* Network byte order */ __u32 local_ip6[4]; /* Network byte order */ __u32 local_port; /* Host byte order */ diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 738619994e26..d956b2993970 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -309,6 +309,7 @@ struct btrfs_ioctl_fs_info_args { #define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10) #define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11) #define BTRFS_FEATURE_INCOMPAT_ZONED (1ULL << 12) +#define BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2 (1ULL << 13) struct btrfs_ioctl_feature_flags { __u64 compat_flags; @@ -868,6 +869,134 @@ struct btrfs_ioctl_get_subvol_rootref_args { __u8 align[7]; }; +/* + * Data and metadata for an encoded read or write. + * + * Encoded I/O bypasses any encoding automatically done by the filesystem (e.g., + * compression). This can be used to read the compressed contents of a file or + * write pre-compressed data directly to a file. + * + * BTRFS_IOC_ENCODED_READ and BTRFS_IOC_ENCODED_WRITE are essentially + * preadv/pwritev with additional metadata about how the data is encoded and the + * size of the unencoded data. + * + * BTRFS_IOC_ENCODED_READ fills the given iovecs with the encoded data, fills + * the metadata fields, and returns the size of the encoded data. 
It reads one + * extent per call. It can also read data which is not encoded. + * + * BTRFS_IOC_ENCODED_WRITE uses the metadata fields, writes the encoded data + * from the iovecs, and returns the size of the encoded data. Note that the + * encoded data is not validated when it is written; if it is not valid (e.g., + * it cannot be decompressed), then a subsequent read may return an error. + * + * Since the filesystem page cache contains decoded data, encoded I/O bypasses + * the page cache. Encoded I/O requires CAP_SYS_ADMIN. + */ +struct btrfs_ioctl_encoded_io_args { + /* Input parameters for both reads and writes. */ + + /* + * iovecs containing encoded data. + * + * For reads, if the size of the encoded data is larger than the sum of + * iov[n].iov_len for 0 <= n < iovcnt, then the ioctl fails with + * ENOBUFS. + * + * For writes, the size of the encoded data is the sum of iov[n].iov_len + * for 0 <= n < iovcnt. This must be less than 128 KiB (this limit may + * increase in the future). This must also be less than or equal to + * unencoded_len. + */ + const struct iovec __user *iov; + /* Number of iovecs. */ + unsigned long iovcnt; + /* + * Offset in file. + * + * For writes, must be aligned to the sector size of the filesystem. + */ + __s64 offset; + /* Currently must be zero. */ + __u64 flags; + + /* + * For reads, the following members are output parameters that will + * contain the returned metadata for the encoded data. + * For writes, the following members must be set to the metadata for the + * encoded data. + */ + + /* + * Length of the data in the file. + * + * Must be less than or equal to unencoded_len - unencoded_offset. For + * writes, must be aligned to the sector size of the filesystem unless + * the data ends at or beyond the current end of the file. + */ + __u64 len; + /* + * Length of the unencoded (i.e., decrypted and decompressed) data. + * + * For writes, must be no more than 128 KiB (this limit may increase in + * the future). If the unencoded data is actually longer than + * unencoded_len, then it is truncated; if it is shorter, then it is + * extended with zeroes. + */ + __u64 unencoded_len; + /* + * Offset from the first byte of the unencoded data to the first byte of + * logical data in the file. + * + * Must be less than unencoded_len. + */ + __u64 unencoded_offset; + /* + * BTRFS_ENCODED_IO_COMPRESSION_* type. + * + * For writes, must not be BTRFS_ENCODED_IO_COMPRESSION_NONE. + */ + __u32 compression; + /* Currently always BTRFS_ENCODED_IO_ENCRYPTION_NONE. */ + __u32 encryption; + /* + * Reserved for future expansion. + * + * For reads, always returned as zero. Users should check for non-zero + * bytes. If there are any, then the kernel has a newer version of this + * structure with additional information that the user definition is + * missing. + * + * For writes, must be zeroed. + */ + __u8 reserved[64]; +}; + +/* Data is not compressed. */ +#define BTRFS_ENCODED_IO_COMPRESSION_NONE 0 +/* Data is compressed as a single zlib stream. */ +#define BTRFS_ENCODED_IO_COMPRESSION_ZLIB 1 +/* + * Data is compressed as a single zstd frame with the windowLog compression + * parameter set to no more than 17. + */ +#define BTRFS_ENCODED_IO_COMPRESSION_ZSTD 2 +/* + * Data is compressed sector by sector (using the sector size indicated by the + * name of the constant) with LZO1X and wrapped in the format documented in + * fs/btrfs/lzo.c. For writes, the compression sector size must match the + * filesystem sector size. 
+ */ +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_4K 3 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_8K 4 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_16K 5 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_32K 6 +#define BTRFS_ENCODED_IO_COMPRESSION_LZO_64K 7 +#define BTRFS_ENCODED_IO_COMPRESSION_TYPES 8 + +/* Data is not encrypted. */ +#define BTRFS_ENCODED_IO_ENCRYPTION_NONE 0 +#define BTRFS_ENCODED_IO_ENCRYPTION_TYPES 1 + /* Error codes as returned by the kernel */ enum btrfs_err_code { BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, @@ -996,5 +1125,9 @@ enum btrfs_err_code { struct btrfs_ioctl_ino_lookup_user_args) #define BTRFS_IOC_SNAP_DESTROY_V2 _IOW(BTRFS_IOCTL_MAGIC, 63, \ struct btrfs_ioctl_vol_args_v2) +#define BTRFS_IOC_ENCODED_READ _IOR(BTRFS_IOCTL_MAGIC, 64, \ + struct btrfs_ioctl_encoded_io_args) +#define BTRFS_IOC_ENCODED_WRITE _IOW(BTRFS_IOCTL_MAGIC, 64, \ + struct btrfs_ioctl_encoded_io_args) #endif /* _UAPI_LINUX_BTRFS_H */ diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 5416f1f1a77a..b069752a8ecf 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -53,6 +53,9 @@ /* tracks free space in block groups. */ #define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL +/* Holds the block group items for extent tree v2. */ +#define BTRFS_BLOCK_GROUP_TREE_OBJECTID 11ULL + /* device stats in the device tree */ #define BTRFS_DEV_STATS_OBJECTID 0ULL diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h index c55935b64ccc..590f8aea2b6d 100644 --- a/include/uapi/linux/can/isotp.h +++ b/include/uapi/linux/can/isotp.h @@ -137,20 +137,16 @@ struct can_isotp_ll_options { #define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */ #define CAN_ISOTP_SF_BROADCAST 0x800 /* 1-to-N functional addressing */ -/* default values */ +/* protocol machine default values */ #define CAN_ISOTP_DEFAULT_FLAGS 0 #define CAN_ISOTP_DEFAULT_EXT_ADDRESS 0x00 #define CAN_ISOTP_DEFAULT_PAD_CONTENT 0xCC /* prevent bit-stuffing */ -#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 0 +#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 50000 /* 50 micro seconds */ #define CAN_ISOTP_DEFAULT_RECV_BS 0 #define CAN_ISOTP_DEFAULT_RECV_STMIN 0x00 #define CAN_ISOTP_DEFAULT_RECV_WFTMAX 0 -#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU -#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN -#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0 - /* * Remark on CAN_ISOTP_DEFAULT_RECV_* values: * @@ -162,4 +158,24 @@ struct can_isotp_ll_options { * consistency and copied directly into the flow control (FC) frame. */ +/* link layer default values => make use of Classical CAN frames */ + +#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU +#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN +#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0 + +/* + * The CAN_ISOTP_DEFAULT_FRAME_TXTIME has become a non-zero value as + * it only makes sense for isotp implementation tests to run without + * a N_As value. As user space applications usually do not set the + * frame_txtime element of struct can_isotp_options the new in-kernel + * default is very likely overwritten with zero when the sockopt() + * CAN_ISOTP_OPTS is invoked. + * To make sure that a N_As value of zero is only set intentional the + * value '0' is now interpreted as 'do not change the current value'. + * When a frame_txtime of zero is required for testing purposes this + * CAN_ISOTP_FRAME_TXTIME_ZERO u32 value has to be set in frame_txtime. 
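A short sketch of the frame_txtime rule spelled out above: after this change, writing 0 via CAN_ISOTP_OPTS keeps the current N_As value, so a genuine zero has to be requested with the new magic constant. Socket setup is elided, and note that CAN_ISOTP_OPTS replaces the whole option block, so a real caller would read-modify-write it:

#include <string.h>
#include <sys/socket.h>
#include <linux/can/isotp.h>

static void request_zero_txtime(int sk)
{
	struct can_isotp_options opts;

	memset(&opts, 0, sizeof(opts));
	/* frame_txtime = 0 would now mean "keep the in-kernel 50 us default" */
	opts.frame_txtime = CAN_ISOTP_FRAME_TXTIME_ZERO;
	setsockopt(sk, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));
}
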
+ */ +#define CAN_ISOTP_FRAME_TXTIME_ZERO 0xFFFFFFFF + #endif /* !_UAPI_CAN_ISOTP_H */ diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index c12ce30b52df..2e9550fef90f 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -286,9 +286,9 @@ enum { #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 45 +#define DM_VERSION_MINOR 46 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2021-03-22)" +#define DM_VERSION_EXTRA "-ioctl (2022-02-22)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 61bf4774b8f2..787c657bfae8 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -35,10 +35,14 @@ typedef __s64 Elf64_Sxword; #define PT_HIOS 0x6fffffff /* OS-specific */ #define PT_LOPROC 0x70000000 #define PT_HIPROC 0x7fffffff -#define PT_GNU_EH_FRAME 0x6474e550 -#define PT_GNU_PROPERTY 0x6474e553 - +#define PT_GNU_EH_FRAME (PT_LOOS + 0x474e550) #define PT_GNU_STACK (PT_LOOS + 0x474e551) +#define PT_GNU_RELRO (PT_LOOS + 0x474e552) +#define PT_GNU_PROPERTY (PT_LOOS + 0x474e553) + + +/* ARM MTE memory tag segment type */ +#define PT_ARM_MEMTAG_MTE (PT_LOPROC + 0x1) /* * Extended Numbering diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index cca6e474a085..979850221b8d 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -319,6 +319,12 @@ enum { /* RINGS */ enum { + ETHTOOL_TCP_DATA_SPLIT_UNKNOWN = 0, + ETHTOOL_TCP_DATA_SPLIT_DISABLED, + ETHTOOL_TCP_DATA_SPLIT_ENABLED, +}; + +enum { ETHTOOL_A_RINGS_UNSPEC, ETHTOOL_A_RINGS_HEADER, /* nest - _A_HEADER_* */ ETHTOOL_A_RINGS_RX_MAX, /* u32 */ @@ -330,6 +336,8 @@ enum { ETHTOOL_A_RINGS_RX_JUMBO, /* u32 */ ETHTOOL_A_RINGS_TX, /* u32 */ ETHTOOL_A_RINGS_RX_BUF_LEN, /* u32 */ + ETHTOOL_A_RINGS_TCP_DATA_SPLIT, /* u8 */ + ETHTOOL_A_RINGS_CQE_SIZE, /* u32 */ /* add new constants above here */ __ETHTOOL_A_RINGS_CNT, diff --git a/include/uapi/linux/fsmap.h b/include/uapi/linux/fsmap.h index 91fd519a3f7d..c690d17f1d07 100644 --- a/include/uapi/linux/fsmap.h +++ b/include/uapi/linux/fsmap.h @@ -69,7 +69,7 @@ struct fsmap_head { }; /* Size of an fsmap_head with room for nr records. */ -static inline size_t +static inline __kernel_size_t fsmap_sizeof( unsigned int nr) { diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index 79f9191bbb24..2f61298a7b77 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h @@ -8,6 +8,7 @@ enum gtp_genl_cmds { GTP_CMD_NEWPDP, GTP_CMD_DELPDP, GTP_CMD_GETPDP, + GTP_CMD_ECHOREQ, GTP_CMD_MAX, }; diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index daf82a230c0e..aaa502a7bff4 100644 --- a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -90,6 +90,17 @@ struct hv_vss_check_dm_info { __u32 flags; } __attribute__((packed)); +/* + * struct hv_vss_msg encodes the fields that the Linux VSS + * driver accesses. However, FREEZE messages from Hyper-V contain + * additional LUN information that Linux doesn't use and are not + * represented in struct hv_vss_msg. A received FREEZE message may + * be as large as 6,260 bytes, so the driver must allocate at least + * that much space, not sizeof(struct hv_vss_msg). Other messages + * such as AUTO_RECOVER may be as large as 12,500 bytes. 
However, + * because the Linux VSS driver responds that it doesn't support + * auto-recovery, it should not receive such messages. + */ struct hv_vss_msg { union { struct hv_vss_hdr vss_hdr; diff --git a/include/uapi/linux/if_addr.h b/include/uapi/linux/if_addr.h index dfcf3ce0097f..1c392dd95a5e 100644 --- a/include/uapi/linux/if_addr.h +++ b/include/uapi/linux/if_addr.h @@ -33,8 +33,9 @@ enum { IFA_CACHEINFO, IFA_MULTICAST, IFA_FLAGS, - IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ + IFA_RT_PRIORITY, /* u32, priority/metric for prefix route */ IFA_TARGET_NETNSID, + IFA_PROTO, /* u8, address protocol */ __IFA_MAX, }; @@ -69,4 +70,10 @@ struct ifa_cacheinfo { #define IFA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifaddrmsg)) #endif +/* ifa_proto */ +#define IFAPROT_UNSPEC 0 +#define IFAPROT_KERNEL_LO 1 /* loopback */ +#define IFAPROT_KERNEL_RA 2 /* set by kernel from router announcement */ +#define IFAPROT_KERNEL_LL 3 /* link-local set by kernel */ + #endif diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 2711c3522010..a86a7e7b811f 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -122,6 +122,7 @@ enum { IFLA_BRIDGE_VLAN_TUNNEL_INFO, IFLA_BRIDGE_MRP, IFLA_BRIDGE_CFM, + IFLA_BRIDGE_MST, __IFLA_BRIDGE_MAX, }; #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1) @@ -453,6 +454,21 @@ enum { #define IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX (__IFLA_BRIDGE_CFM_CC_PEER_STATUS_MAX - 1) +enum { + IFLA_BRIDGE_MST_UNSPEC, + IFLA_BRIDGE_MST_ENTRY, + __IFLA_BRIDGE_MST_MAX, +}; +#define IFLA_BRIDGE_MST_MAX (__IFLA_BRIDGE_MST_MAX - 1) + +enum { + IFLA_BRIDGE_MST_ENTRY_UNSPEC, + IFLA_BRIDGE_MST_ENTRY_MSTI, + IFLA_BRIDGE_MST_ENTRY_STATE, + __IFLA_BRIDGE_MST_ENTRY_MAX, +}; +#define IFLA_BRIDGE_MST_ENTRY_MAX (__IFLA_BRIDGE_MST_ENTRY_MAX - 1) + struct bridge_stp_xstats { __u64 transition_blk; __u64 transition_fwd; @@ -564,6 +580,7 @@ enum { BRIDGE_VLANDB_GOPTS_MCAST_QUERIER, BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE, + BRIDGE_VLANDB_GOPTS_MSTI, __BRIDGE_VLANDB_GOPTS_MAX }; #define BRIDGE_VLANDB_GOPTS_MAX (__BRIDGE_VLANDB_GOPTS_MAX - 1) @@ -759,6 +776,7 @@ struct br_mcast_stats { enum br_boolopt_id { BR_BOOLOPT_NO_LL_LEARN, BR_BOOLOPT_MCAST_VLAN_SNOOPING, + BR_BOOLOPT_MST_ENABLE, BR_BOOLOPT_MAX }; diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index c0c2f3ed5729..1d0bccc3fa54 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -86,8 +86,10 @@ * over Ethernet */ #define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ +#define ETH_P_PROFINET 0x8892 /* PROFINET */ #define ETH_P_REALTEK 0x8899 /* Multiple proprietary protocols */ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ +#define ETH_P_ETHERCAT 0x88A4 /* EtherCAT */ #define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ #define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. */ #define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 6218f93f5c1a..cc284c048e69 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -245,6 +245,21 @@ struct rtnl_link_stats64 { __u64 rx_nohandler; }; +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
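Since the if_ether.h hunk above merely registers the PROFINET and EtherCAT ethertypes, a one-liner shows the intended consumer: binding an AF_PACKET socket to one of them (interface binding omitted; needs CAP_NET_RAW):

#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>

static int open_ethercat_socket(void)
{
	/* receive only frames carrying the EtherCAT ethertype 0x88A4 */
	return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ETHERCAT));
}
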
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; +}; + /* The struct should be in sync with struct ifmap */ struct rtnl_link_ifmap { __u64 mem_start; @@ -537,6 +552,7 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -712,7 +728,55 @@ enum ipvlan_mode { #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 +/* Tunnel RTM header */ +struct tunnel_msg { + __u8 family; + __u8 flags; + __u16 reserved2; + __u32 ifindex; +}; + /* VXLAN section */ + +/* include statistics in the dump */ +#define TUNNEL_MSG_FLAG_STATS 0x01 + +#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS + +/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ +enum { + VNIFILTER_ENTRY_STATS_UNSPEC, + VNIFILTER_ENTRY_STATS_RX_BYTES, + VNIFILTER_ENTRY_STATS_RX_PKTS, + VNIFILTER_ENTRY_STATS_RX_DROPS, + VNIFILTER_ENTRY_STATS_RX_ERRORS, + VNIFILTER_ENTRY_STATS_TX_BYTES, + VNIFILTER_ENTRY_STATS_TX_PKTS, + VNIFILTER_ENTRY_STATS_TX_DROPS, + VNIFILTER_ENTRY_STATS_TX_ERRORS, + VNIFILTER_ENTRY_STATS_PAD, + __VNIFILTER_ENTRY_STATS_MAX +}; +#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) + +enum { + VXLAN_VNIFILTER_ENTRY_UNSPEC, + VXLAN_VNIFILTER_ENTRY_START, + VXLAN_VNIFILTER_ENTRY_END, + VXLAN_VNIFILTER_ENTRY_GROUP, + VXLAN_VNIFILTER_ENTRY_GROUP6, + VXLAN_VNIFILTER_ENTRY_STATS, + __VXLAN_VNIFILTER_ENTRY_MAX +}; +#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) + +enum { + VXLAN_VNIFILTER_UNSPEC, + VXLAN_VNIFILTER_ENTRY, + __VXLAN_VNIFILTER_MAX +}; +#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) + enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, @@ -744,6 +808,7 @@ enum { IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, + IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -777,6 +842,7 @@ enum { IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, + IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -822,6 +888,8 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_CREATE_SOCKETS, + IFLA_GTP_RESTART_COUNT, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -860,6 +928,7 @@ enum { IFLA_BOND_PEER_NOTIF_DELAY, IFLA_BOND_AD_LACP_ACTIVE, IFLA_BOND_MISSED_MAX, + IFLA_BOND_NS_IP6_TARGET, __IFLA_BOND_MAX, }; @@ -1156,6 +1225,17 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +enum { + IFLA_STATS_GETSET_UNSPEC, + IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with + * a filter mask for the corresponding group. + */ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ + __IFLA_STATS_GETSET_MAX, +}; + +#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) + /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] @@ -1173,10 +1253,21 @@ enum { enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. 
A nest */ + IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, +}; +#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ + (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 7d9105533c7b..102119628ff5 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -176,8 +176,10 @@ enum { #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) #define TUNNEL_NOCACHE __cpu_to_be16(0x2000) #define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000) +#define TUNNEL_GTP_OPT __cpu_to_be16(0x8000) #define TUNNEL_OPTIONS_PRESENT \ - (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT) + (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT | \ + TUNNEL_GTP_OPT) #endif /* _UAPI_IF_TUNNEL_H_ */ diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 787f491f0d2a..d2be4eb22008 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -101,6 +101,7 @@ enum { #define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */ #define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */ #define IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */ +#define IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */ enum { IORING_OP_NOP, @@ -143,6 +144,7 @@ enum { IORING_OP_MKDIRAT, IORING_OP_SYMLINKAT, IORING_OP_LINKAT, + IORING_OP_MSG_RING, /* this goes last, obviously */ IORING_OP_LAST, @@ -199,9 +201,11 @@ struct io_uring_cqe { * * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID * IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries + * IORING_CQE_F_MSG If set, CQE was generated with IORING_OP_MSG_RING */ #define IORING_CQE_F_BUFFER (1U << 0) #define IORING_CQE_F_MORE (1U << 1) +#define IORING_CQE_F_MSG (1U << 2) enum { IORING_CQE_BUFFER_SHIFT = 16, @@ -257,10 +261,11 @@ struct io_cqring_offsets { /* * io_uring_enter(2) flags */ -#define IORING_ENTER_GETEVENTS (1U << 0) -#define IORING_ENTER_SQ_WAKEUP (1U << 1) -#define IORING_ENTER_SQ_WAIT (1U << 2) -#define IORING_ENTER_EXT_ARG (1U << 3) +#define IORING_ENTER_GETEVENTS (1U << 0) +#define IORING_ENTER_SQ_WAKEUP (1U << 1) +#define IORING_ENTER_SQ_WAIT (1U << 2) +#define IORING_ENTER_EXT_ARG (1U << 3) +#define IORING_ENTER_REGISTERED_RING (1U << 4) /* * Passed in for io_uring_setup(2). Copied back with updated info on success @@ -325,6 +330,10 @@ enum { /* set/get max number of io-wq workers */ IORING_REGISTER_IOWQ_MAX_WORKERS = 19, + /* register/unregister io_uring fd with the ring */ + IORING_REGISTER_RING_FDS = 20, + IORING_UNREGISTER_RING_FDS = 21, + /* this goes last */ IORING_REGISTER_LAST }; diff --git a/include/uapi/linux/ioam6_iptunnel.h b/include/uapi/linux/ioam6_iptunnel.h index 829ffdfcacca..38f6a8fdfd34 100644 --- a/include/uapi/linux/ioam6_iptunnel.h +++ b/include/uapi/linux/ioam6_iptunnel.h @@ -41,6 +41,15 @@ enum { /* IOAM Trace Header */ IOAM6_IPTUNNEL_TRACE, /* struct ioam6_trace_hdr */ + /* Insertion frequency: + * "k over n" packets (0 < k <= n) + * [0.0001% ... 
100%] + */ +#define IOAM6_IPTUNNEL_FREQ_MIN 1 +#define IOAM6_IPTUNNEL_FREQ_MAX 1000000 + IOAM6_IPTUNNEL_FREQ_K, /* u32 */ + IOAM6_IPTUNNEL_FREQ_N, /* u32 */ + __IOAM6_IPTUNNEL_MAX, }; diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index 59178fc229ca..65d8b0234f69 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -158,185 +158,4 @@ struct iommu_page_response { __u32 code; }; -/* defines the granularity of the invalidation */ -enum iommu_inv_granularity { - IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */ - IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */ - IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */ - IOMMU_INV_GRANU_NR, /* number of invalidation granularities */ -}; - -/** - * struct iommu_inv_addr_info - Address Selective Invalidation Structure - * - * @flags: indicates the granularity of the address-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If ARCHID bit is set, @archid is populated and the invalidation relates - * to cache entries tagged with this architecture specific ID and matching - * the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - If neither PASID or ARCHID is set, global addr invalidation applies. - * - The LEAF flag indicates whether only the leaf PTE caching needs to be - * invalidated and other paging structure caches can be preserved. - * @pasid: process address space ID - * @archid: architecture-specific ID - * @addr: first stage/level input address - * @granule_size: page/block size of the mapping in bytes - * @nb_granules: number of contiguous granules to be invalidated - */ -struct iommu_inv_addr_info { -#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0) -#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1) -#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2) - __u32 flags; - __u32 archid; - __u64 pasid; - __u64 addr; - __u64 granule_size; - __u64 nb_granules; -}; - -/** - * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure - * - * @flags: indicates the granularity of the PASID-selective invalidation - * - If the PASID bit is set, the @pasid field is populated and the invalidation - * relates to cache entries tagged with this PASID and matching the address - * range. - * - If the ARCHID bit is set, the @archid is populated and the invalidation - * relates to cache entries tagged with this architecture specific ID and - * matching the address range. - * - Both PASID and ARCHID can be set as they may tag different caches. - * - At least one of PASID or ARCHID must be set. 
- * @pasid: process address space ID - * @archid: architecture-specific ID - */ -struct iommu_inv_pasid_info { -#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0) -#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1) - __u32 flags; - __u32 archid; - __u64 pasid; -}; - -/** - * struct iommu_cache_invalidate_info - First level/stage invalidation - * information - * @argsz: User filled size of this data - * @version: API version of this structure - * @cache: bitfield that allows to select which caches to invalidate - * @granularity: defines the lowest granularity used for the invalidation: - * domain > PASID > addr - * @padding: reserved for future use (should be zero) - * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID - * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR - * - * Not all the combinations of cache/granularity are valid: - * - * +--------------+---------------+---------------+---------------+ - * | type / | DEV_IOTLB | IOTLB | PASID | - * | granularity | | | cache | - * +==============+===============+===============+===============+ - * | DOMAIN | N/A | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | PASID | Y | Y | Y | - * +--------------+---------------+---------------+---------------+ - * | ADDR | Y | Y | N/A | - * +--------------+---------------+---------------+---------------+ - * - * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than - * @version and @cache. - * - * If multiple cache types are invalidated simultaneously, they all - * must support the used granularity. - */ -struct iommu_cache_invalidate_info { - __u32 argsz; -#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1 - __u32 version; -/* IOMMU paging structure cache */ -#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */ -#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */ -#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */ -#define IOMMU_CACHE_INV_TYPE_NR (3) - __u8 cache; - __u8 granularity; - __u8 padding[6]; - union { - struct iommu_inv_pasid_info pasid_info; - struct iommu_inv_addr_info addr_info; - } granu; -}; - -/** - * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest - * SVA binding. - * - * @flags: VT-d PASID table entry attributes - * @pat: Page attribute table data to compute effective memory type - * @emt: Extended memory type - * - * Only guest vIOMMU selectable and effective options are passed down to - * the host IOMMU. 
- */ -struct iommu_gpasid_bind_data_vtd { -#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */ -#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */ -#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */ -#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */ -#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */ -#define IOMMU_SVA_VTD_GPASID_WPE (1 << 6) /* Write protect enable */ -#define IOMMU_SVA_VTD_GPASID_LAST (1 << 7) - __u64 flags; - __u32 pat; - __u32 emt; -}; - -#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \ - IOMMU_SVA_VTD_GPASID_EMTE | \ - IOMMU_SVA_VTD_GPASID_PCD | \ - IOMMU_SVA_VTD_GPASID_PWT) - -/** - * struct iommu_gpasid_bind_data - Information about device and guest PASID binding - * @argsz: User filled size of this data - * @version: Version of this data structure - * @format: PASID table entry format - * @flags: Additional information on guest bind request - * @gpgd: Guest page directory base of the guest mm to bind - * @hpasid: Process address space ID used for the guest mm in host IOMMU - * @gpasid: Process address space ID used for the guest mm in guest IOMMU - * @addr_width: Guest virtual address width - * @padding: Reserved for future use (should be zero) - * @vtd: Intel VT-d specific data - * - * Guest to host PASID mapping can be an identity or non-identity, where guest - * has its own PASID space. For non-identify mapping, guest to host PASID lookup - * is needed when VM programs guest PASID into an assigned device. VMM may - * trap such PASID programming then request host IOMMU driver to convert guest - * PASID to host PASID based on this bind data. 
- */ -struct iommu_gpasid_bind_data { - __u32 argsz; -#define IOMMU_GPASID_BIND_VERSION_1 1 - __u32 version; -#define IOMMU_PASID_FORMAT_INTEL_VTD 1 -#define IOMMU_PASID_FORMAT_LAST 2 - __u32 format; - __u32 addr_width; -#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */ - __u64 flags; - __u64 gpgd; - __u64 hpasid; - __u64 gpasid; - __u8 padding[8]; - /* Vendor specific data */ - union { - struct iommu_gpasid_bind_data_vtd vtd; - } vendor; -}; - #endif /* _UAPI_IOMMU_H */ diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 778dc191c265..fb7e2ef60825 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -54,9 +54,9 @@ */ struct kexec_segment { const void *buf; - size_t bufsz; + __kernel_size_t bufsz; const void *mem; - size_t memsz; + __kernel_size_t memsz; }; #endif /* __KERNEL__ */ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index af96af174dc4..42975e940758 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -32,9 +32,11 @@ * - 1.4 - Indicate new SRAM EDC bit in device properties * - 1.5 - Add SVM API * - 1.6 - Query clear flags in SVM get_attr API + * - 1.7 - Checkpoint Restore (CRIU) API + * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 6 +#define KFD_IOCTL_MINOR_VERSION 8 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ @@ -194,6 +196,8 @@ struct kfd_ioctl_dbg_wave_control_args { __u32 buf_size_in_bytes; /*including gpu_id and buf_size */ }; +#define KFD_INVALID_FD 0xffffffff + /* Matching HSA_EVENTTYPE */ #define KFD_IOC_EVENT_SIGNAL 0 #define KFD_IOC_EVENT_NODECHANGE 1 @@ -462,12 +466,89 @@ enum kfd_smi_event { }; #define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1)) +#define KFD_SMI_EVENT_MSG_SIZE 96 struct kfd_ioctl_smi_events_args { __u32 gpuid; /* to KFD */ __u32 anon_fd; /* from KFD */ }; +/************************************************************************************************** + * CRIU IOCTLs (Checkpoint Restore In Userspace) + * + * When checkpointing a process, the userspace application will perform: + * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts + * all the queues. + * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges) + * 3. UNPAUSE op to un-evict all the queues + * + * When restoring a process, the CRIU userspace application will perform: + * + * 1. RESTORE op to restore process contents + * 2. RESUME op to start the process + * + * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. User + * application needs to perform an UNPAUSE operation after calling PROCESS_INFO. + */ + +enum kfd_criu_op { + KFD_CRIU_OP_PROCESS_INFO, + KFD_CRIU_OP_CHECKPOINT, + KFD_CRIU_OP_UNPAUSE, + KFD_CRIU_OP_RESTORE, + KFD_CRIU_OP_RESUME, +}; + +/** + * kfd_ioctl_criu_args - Arguments perform CRIU operation + * @devices: [in/out] User pointer to memory location for devices information. + * This is an array of type kfd_criu_device_bucket. + * @bos: [in/out] User pointer to memory location for BOs information + * This is an array of type kfd_criu_bo_bucket. + * @priv_data: [in/out] User pointer to memory location for private data + * @priv_data_size: [in/out] Size of priv_data in bytes + * @num_devices: [in/out] Number of GPUs used by process. Size of @devices array. + * @num_bos [in/out] Number of BOs used by process. Size of @bos array. 
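
The checkpoint flow documented above, as a hedged user-space sketch:
kfd_fd is assumed to be an open /dev/kfd descriptor, and the bucket and
buffer allocation between the steps is elided.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <linux/kfd_ioctl.h>

static int kfd_checkpoint(int kfd_fd, pid_t pid)
{
	struct kfd_ioctl_criu_args args;

	memset(&args, 0, sizeof(args));
	args.pid = pid;

	/* Step 1: pause the process, evict queues, learn object counts. */
	args.op = KFD_CRIU_OP_PROCESS_INFO;
	if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args) < 0)
		return -1;

	/* ...allocate args.num_devices/args.num_bos buckets and the
	 * priv_data buffer here, pointing args.devices, args.bos and
	 * args.priv_data at them... */

	/* Step 2: checkpoint BOs, queues, events and svm-ranges. */
	args.op = KFD_CRIU_OP_CHECKPOINT;
	if (ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args) < 0)
		return -1;

	/* Step 3: queues stay evicted after PROCESS_INFO; un-evict them. */
	args.op = KFD_CRIU_OP_UNPAUSE;
	return ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
}
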
+ * @num_objects: [in/out] Number of objects used by process. Objects are opaque to + * user application. + * @pid: [in/out] PID of the process being checkpointed + * @op [in] Type of operation (kfd_criu_op) + * + * Return: 0 on success, -errno on failure + */ +struct kfd_ioctl_criu_args { + __u64 devices; /* Used during ops: CHECKPOINT, RESTORE */ + __u64 bos; /* Used during ops: CHECKPOINT, RESTORE */ + __u64 priv_data; /* Used during ops: CHECKPOINT, RESTORE */ + __u64 priv_data_size; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 num_devices; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 num_bos; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 num_objects; /* Used during ops: PROCESS_INFO, RESTORE */ + __u32 pid; /* Used during ops: PROCESS_INFO, RESUME */ + __u32 op; +}; + +struct kfd_criu_device_bucket { + __u32 user_gpu_id; + __u32 actual_gpu_id; + __u32 drm_fd; + __u32 pad; +}; + +struct kfd_criu_bo_bucket { + __u64 addr; + __u64 size; + __u64 offset; + __u64 restored_offset; /* During restore, updated offset for BO */ + __u32 gpu_id; /* This is the user_gpu_id */ + __u32 alloc_flags; + __u32 dmabuf_fd; + __u32 pad; +}; + +/* CRIU IOCTLs - END */ +/**************************************************************************************************/ + /* Register offset inside the remapped mmio page */ enum kfd_mmio_remap { @@ -596,7 +677,7 @@ struct kfd_ioctl_svm_args { __u32 op; __u32 nattr; /* Variable length array of attributes */ - struct kfd_ioctl_svm_attribute attrs[0]; + struct kfd_ioctl_svm_attribute attrs[]; }; /** @@ -679,16 +760,16 @@ struct kfd_ioctl_set_xnack_mode_args { #define AMDKFD_IOC_WAIT_EVENTS \ AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args) -#define AMDKFD_IOC_DBG_REGISTER \ +#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED \ AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args) -#define AMDKFD_IOC_DBG_UNREGISTER \ +#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED \ AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args) -#define AMDKFD_IOC_DBG_ADDRESS_WATCH \ +#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED \ AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args) -#define AMDKFD_IOC_DBG_WAVE_CONTROL \ +#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED \ AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args) #define AMDKFD_IOC_SET_SCRATCH_BACKING_VA \ @@ -742,7 +823,10 @@ struct kfd_ioctl_set_xnack_mode_args { #define AMDKFD_IOC_SET_XNACK_MODE \ AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args) +#define AMDKFD_IOC_CRIU_OP \ + AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x22 +#define AMDKFD_COMMAND_END 0x23 #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 507ee1f2aa96..91a6fe4e02c0 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -562,9 +562,12 @@ struct kvm_s390_mem_op { __u32 op; /* type of operation */ __u64 buf; /* buffer in userspace */ union { - __u8 ar; /* the access register number */ + struct { + __u8 ar; /* the access register number */ + __u8 key; /* access key, ignored if flag unset */ + }; __u32 sida_offset; /* offset into the sida */ - __u8 reserved[32]; /* should be set to 0 */ + __u8 reserved[32]; /* ignored */ }; }; /* types for kvm_s390_mem_op->op */ @@ -572,9 +575,12 @@ struct kvm_s390_mem_op { #define KVM_S390_MEMOP_LOGICAL_WRITE 1 #define KVM_S390_MEMOP_SIDA_READ 2 #define KVM_S390_MEMOP_SIDA_WRITE 3 +#define KVM_S390_MEMOP_ABSOLUTE_READ 4 +#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5 /* flags for 
kvm_s390_mem_op->flags */ #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) +#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2) /* for KVM_INTERRUPT */ struct kvm_interrupt { @@ -1135,6 +1141,9 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_XSAVE2 208 #define KVM_CAP_SYS_ATTRIBUTES 209 #define KVM_CAP_PPC_AIL_MODE_3 210 +#define KVM_CAP_S390_MEM_OP_EXTENSION 211 +#define KVM_CAP_PMU_CAPABILITY 212 +#define KVM_CAP_DISABLE_QUIRKS2 213 #ifdef KVM_CAP_IRQ_ROUTING @@ -1971,6 +1980,8 @@ struct kvm_dirty_gfn { #define KVM_BUS_LOCK_DETECTION_OFF (1 << 0) #define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1) +#define KVM_PMU_CAP_DISABLE (1 << 0) + /** * struct kvm_stats_header - Header of per vm/vcpu binary statistics data. * @flags: Some extra information for header, always 0 for now. diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h index 9919f2062b14..23b0f2c8ba81 100644 --- a/include/uapi/linux/lirc.h +++ b/include/uapi/linux/lirc.h @@ -16,14 +16,16 @@ #define LIRC_MODE2_PULSE 0x01000000 #define LIRC_MODE2_FREQUENCY 0x02000000 #define LIRC_MODE2_TIMEOUT 0x03000000 +#define LIRC_MODE2_OVERFLOW 0x04000000 #define LIRC_VALUE_MASK 0x00FFFFFF #define LIRC_MODE2_MASK 0xFF000000 -#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) -#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) -#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) -#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) +#define LIRC_SPACE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) +#define LIRC_PULSE(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) +#define LIRC_FREQUENCY(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) +#define LIRC_TIMEOUT(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) +#define LIRC_OVERFLOW(val) (((val) & LIRC_VALUE_MASK) | LIRC_MODE2_OVERFLOW) #define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) #define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) @@ -32,6 +34,7 @@ #define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) #define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) #define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) +#define LIRC_IS_OVERFLOW(val) (LIRC_MODE2(val) == LIRC_MODE2_OVERFLOW) /* used heavily by lirc userspace */ #define lirc_t int @@ -70,13 +73,10 @@ #define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) #define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) -#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) -#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 #define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 #define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 #define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 -#define LIRC_CAN_SET_REC_FILTER 0x08000000 #define LIRC_CAN_MEASURE_CARRIER 0x02000000 #define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000 @@ -84,8 +84,6 @@ #define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) #define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) -#define LIRC_CAN_NOTIFY_DECODE 0x01000000 - /*** IOCTL commands for lirc driver ***/ #define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 0425cd79af9a..f724129c0425 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -36,6 +36,7 @@ #define EFIVARFS_MAGIC 0xde5e81e4 #define HOSTFS_SUPER_MAGIC 0x00c0ffee #define OVERLAYFS_SUPER_MAGIC 0x794c7630 +#define FUSE_SUPER_MAGIC 0x65735546 #define MINIX_SUPER_MAGIC 0x137F 
/* minix v1 fs, 14 char names */ #define MINIX_SUPER_MAGIC2 0x138F /* minix v1 fs, 30 char names */ diff --git a/include/uapi/linux/mctp.h b/include/uapi/linux/mctp.h index 07b0318716fc..154ab56651f1 100644 --- a/include/uapi/linux/mctp.h +++ b/include/uapi/linux/mctp.h @@ -44,7 +44,25 @@ struct sockaddr_mctp_ext { #define MCTP_TAG_MASK 0x07 #define MCTP_TAG_OWNER 0x08 +#define MCTP_TAG_PREALLOC 0x10 #define MCTP_OPT_ADDR_EXT 1 +#define SIOCMCTPALLOCTAG (SIOCPROTOPRIVATE + 0) +#define SIOCMCTPDROPTAG (SIOCPROTOPRIVATE + 1) + +struct mctp_ioc_tag_ctl { + mctp_eid_t peer_addr; + + /* For SIOCMCTPALLOCTAG: must be passed as zero, kernel will + * populate with the allocated tag value. Returned tag value will + * always have TO and PREALLOC set. + * + * For SIOCMCTPDROPTAG: userspace provides tag value to drop, from + * a prior SIOCMCTPALLOCTAG call (and so must have TO and PREALLOC set). + */ + __u8 tag; + __u16 flags; +}; + #endif /* __UAPI_MCTP_H */ diff --git a/include/uapi/linux/mptcp.h b/include/uapi/linux/mptcp.h index f106a3941cdf..9690efedb5fa 100644 --- a/include/uapi/linux/mptcp.h +++ b/include/uapi/linux/mptcp.h @@ -81,6 +81,7 @@ enum { #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1) #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2) #define MPTCP_PM_ADDR_FLAG_FULLMESH (1 << 3) +#define MPTCP_PM_ADDR_FLAG_IMPLICIT (1 << 4) enum { MPTCP_PM_CMD_UNSPEC, diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h index a1fd6173e2db..1d90c21a6251 100644 --- a/include/uapi/linux/mroute6.h +++ b/include/uapi/linux/mroute6.h @@ -134,6 +134,7 @@ struct mrt6msg { #define MRT6MSG_NOCACHE 1 #define MRT6MSG_WRONGMIF 2 #define MRT6MSG_WHOLEPKT 3 /* used for use level encap */ +#define MRT6MSG_WRMIFWHOLE 4 /* For PIM Register and assert processing */ __u8 im6_mbz; /* must be zero */ __u8 im6_msgtype; /* what type of message */ __u16 im6_mif; /* mif rec'd on */ diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 66048cc5d7b3..1bbea8f0681e 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -93,6 +93,7 @@ enum net_dm_attr { NET_DM_ATTR_SW_DROPS, /* flag */ NET_DM_ATTR_HW_DROPS, /* flag */ NET_DM_ATTR_FLOW_ACTION_COOKIE, /* binary */ + NET_DM_ATTR_REASON, /* string */ __NET_DM_ATTR_MAX, NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h index aed90c4df0c8..ef7c97f21a15 100644 --- a/include/uapi/linux/netfilter/nfnetlink_queue.h +++ b/include/uapi/linux/netfilter/nfnetlink_queue.h @@ -61,6 +61,7 @@ enum nfqnl_attr_type { NFQA_SECCTX, /* security context string */ NFQA_VLAN, /* nested attribute: packet vlan info */ NFQA_L2HDR, /* full L2 header */ + NFQA_PRIORITY, /* skb->priority */ __NFQA_MAX }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 195a238a322e..0568a79097b8 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -11,7 +11,7 @@ * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com> * Copyright 2008 Colin McCabe <colin@cozybit.com> * Copyright 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2021 Intel Corporation + * Copyright (C) 2018-2022 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2659,6 +2659,10 @@ enum nl80211_commands { * enumerated in &enum nl80211_ap_settings_flags. 
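
Returning to the MCTP tag ioctls added above, a hedged sketch of
allocating and then releasing a preallocated tag; sd is assumed to be
an open AF_MCTP socket and error handling is trimmed:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/mctp.h>

static int mctp_alloc_then_drop_tag(int sd, mctp_eid_t peer)
{
	struct mctp_ioc_tag_ctl ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.peer_addr = peer;	/* ctl.tag must be zero for ALLOCTAG */

	if (ioctl(sd, SIOCMCTPALLOCTAG, &ctl) < 0)
		return -1;

	/* ctl.tag now has MCTP_TAG_OWNER and MCTP_TAG_PREALLOC set */
	return ioctl(sd, SIOCMCTPDROPTAG, &ctl);
}
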
This attribute shall be * used with %NL80211_CMD_START_AP request. * + * @NL80211_ATTR_EHT_CAPABILITY: EHT Capability information element (from + * association request when used with NL80211_CMD_NEW_STATION). Can be set + * only if %NL80211_STA_FLAG_WME is set. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3169,6 +3173,8 @@ enum nl80211_attrs { NL80211_ATTR_AP_SETTINGS_FLAGS, + NL80211_ATTR_EHT_CAPABILITY, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3224,6 +3230,8 @@ enum nl80211_attrs { #define NL80211_HE_MAX_CAPABILITY_LEN 54 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 +#define NL80211_EHT_MIN_CAPABILITY_LEN 13 +#define NL80211_EHT_MAX_CAPABILITY_LEN 51 #define NL80211_MIN_REMAIN_ON_CHANNEL_TIME 10 @@ -3251,7 +3259,7 @@ enum nl80211_attrs { * and therefore can't be created in the normal ways, use the * %NL80211_CMD_START_P2P_DEVICE and %NL80211_CMD_STOP_P2P_DEVICE * commands to create and destroy one - * @NL80211_IF_TYPE_OCB: Outside Context of a BSS + * @NL80211_IFTYPE_OCB: Outside Context of a BSS * This mode corresponds to the MIB variable dot11OCBActivated=true * @NL80211_IFTYPE_NAN: NAN device interface type (not a netdev) * @NL80211_IFTYPE_MAX: highest interface type number currently defined @@ -3393,6 +3401,56 @@ enum nl80211_he_ru_alloc { }; /** + * enum nl80211_eht_gi - EHT guard interval + * @NL80211_RATE_INFO_EHT_GI_0_8: 0.8 usec + * @NL80211_RATE_INFO_EHT_GI_1_6: 1.6 usec + * @NL80211_RATE_INFO_EHT_GI_3_2: 3.2 usec + */ +enum nl80211_eht_gi { + NL80211_RATE_INFO_EHT_GI_0_8, + NL80211_RATE_INFO_EHT_GI_1_6, + NL80211_RATE_INFO_EHT_GI_3_2, +}; + +/** + * enum nl80211_eht_ru_alloc - EHT RU allocation values + * @NL80211_RATE_INFO_EHT_RU_ALLOC_26: 26-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_52: 52-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_52P26: 52+26-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_106: 106-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_106P26: 106+26 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_242: 242-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_484: 484-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_484P242: 484+242 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_996: 996-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484: 996+484 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242: 996+484+242 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996: 2x996-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484: 2x996+484 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996: 3x996-tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484: 3x996+484 tone RU allocation + * @NL80211_RATE_INFO_EHT_RU_ALLOC_4x996: 4x996-tone RU allocation + */ +enum nl80211_eht_ru_alloc { + NL80211_RATE_INFO_EHT_RU_ALLOC_26, + NL80211_RATE_INFO_EHT_RU_ALLOC_52, + NL80211_RATE_INFO_EHT_RU_ALLOC_52P26, + NL80211_RATE_INFO_EHT_RU_ALLOC_106, + NL80211_RATE_INFO_EHT_RU_ALLOC_106P26, + NL80211_RATE_INFO_EHT_RU_ALLOC_242, + NL80211_RATE_INFO_EHT_RU_ALLOC_484, + NL80211_RATE_INFO_EHT_RU_ALLOC_484P242, + NL80211_RATE_INFO_EHT_RU_ALLOC_996, + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484, + NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242, + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996, + NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484, + NL80211_RATE_INFO_EHT_RU_ALLOC_3x996, + 
NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484, + NL80211_RATE_INFO_EHT_RU_ALLOC_4x996, +}; + +/** * enum nl80211_rate_info - bitrate information * * These attribute types are used with %NL80211_STA_INFO_TXRATE @@ -3431,6 +3489,13 @@ enum nl80211_he_ru_alloc { * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1) * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc) + * @NL80211_RATE_INFO_320_MHZ_WIDTH: 320 MHz bitrate + * @NL80211_RATE_INFO_EHT_MCS: EHT MCS index (u8, 0-15) + * @NL80211_RATE_INFO_EHT_NSS: EHT NSS value (u8, 1-8) + * @NL80211_RATE_INFO_EHT_GI: EHT guard interval identifier + * (u8, see &enum nl80211_eht_gi) + * @NL80211_RATE_INFO_EHT_RU_ALLOC: EHT RU allocation, if not present then + * non-OFDMA was used (u8, see &enum nl80211_eht_ru_alloc) * @__NL80211_RATE_INFO_AFTER_LAST: internal use */ enum nl80211_rate_info { @@ -3452,6 +3517,11 @@ enum nl80211_rate_info { NL80211_RATE_INFO_HE_GI, NL80211_RATE_INFO_HE_DCM, NL80211_RATE_INFO_HE_RU_ALLOC, + NL80211_RATE_INFO_320_MHZ_WIDTH, + NL80211_RATE_INFO_EHT_MCS, + NL80211_RATE_INFO_EHT_NSS, + NL80211_RATE_INFO_EHT_GI, + NL80211_RATE_INFO_EHT_RU_ALLOC, /* keep last */ __NL80211_RATE_INFO_AFTER_LAST, @@ -3766,6 +3836,14 @@ enum nl80211_mpath_info { * given for all 6 GHz band channels * @NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS: vendor element capabilities that are * advertised on this band/for this iftype (binary) + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC: EHT MAC capabilities as in EHT + * capabilities element + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY: EHT PHY capabilities as in EHT + * capabilities element + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET: EHT supported NSS/MCS as in EHT + * capabilities element + * @NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE: EHT PPE thresholds information as + * defined in EHT capabilities element * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band attribute currently defined */ @@ -3779,6 +3857,10 @@ enum nl80211_band_iftype_attr { NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, NL80211_BAND_IFTYPE_ATTR_VENDOR_ELEMS, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PHY, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MCS_SET, + NL80211_BAND_IFTYPE_ATTR_EHT_CAP_PPE, /* keep last */ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, @@ -3923,6 +4005,10 @@ enum nl80211_wmm_rule { * on this channel in current regulatory domain. * @NL80211_FREQUENCY_ATTR_16MHZ: 16 MHz operation is allowed * on this channel in current regulatory domain. + * @NL80211_FREQUENCY_ATTR_NO_320MHZ: any 320 MHz channel using this channel + * as the primary or any of the secondary channels isn't possible + * @NL80211_FREQUENCY_ATTR_NO_EHT: EHT operation is not allowed on this channel + * in current regulatory domain. 
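
For the regulatory flags above, a sketch of how a user-space tool might
gate 320 MHz EHT channel selection; this assumes libnl, with freq_tb[]
filled by nla_parse_nested() from a wiphy dump elsewhere:

#include <stdbool.h>
#include <linux/nl80211.h>
#include <netlink/attr.h>

static bool channel_allows_eht320(struct nlattr *freq_tb[])
{
	return !freq_tb[NL80211_FREQUENCY_ATTR_DISABLED] &&
	       !freq_tb[NL80211_FREQUENCY_ATTR_NO_320MHZ] &&
	       !freq_tb[NL80211_FREQUENCY_ATTR_NO_EHT];
}
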
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -3959,6 +4045,8 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_4MHZ, NL80211_FREQUENCY_ATTR_8MHZ, NL80211_FREQUENCY_ATTR_16MHZ, + NL80211_FREQUENCY_ATTR_NO_320MHZ, + NL80211_FREQUENCY_ATTR_NO_EHT, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4157,6 +4245,7 @@ enum nl80211_sched_scan_match_attr { * @NL80211_RRF_NO_80MHZ: 80MHz operation not allowed * @NL80211_RRF_NO_160MHZ: 160MHz operation not allowed * @NL80211_RRF_NO_HE: HE operation not allowed + * @NL80211_RRF_NO_320MHZ: 320MHz operation not allowed */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -4175,6 +4264,7 @@ enum nl80211_reg_rule_flags { NL80211_RRF_NO_80MHZ = 1<<15, NL80211_RRF_NO_160MHZ = 1<<16, NL80211_RRF_NO_HE = 1<<17, + NL80211_RRF_NO_320MHZ = 1<<18, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR @@ -4672,6 +4762,8 @@ enum nl80211_key_mode { * @NL80211_CHAN_WIDTH_4: 4 MHz OFDM channel * @NL80211_CHAN_WIDTH_8: 8 MHz OFDM channel * @NL80211_CHAN_WIDTH_16: 16 MHz OFDM channel + * @NL80211_CHAN_WIDTH_320: 320 MHz channel, the %NL80211_ATTR_CENTER_FREQ1 + * attribute must be provided as well */ enum nl80211_chan_width { NL80211_CHAN_WIDTH_20_NOHT, @@ -4687,6 +4779,7 @@ enum nl80211_chan_width { NL80211_CHAN_WIDTH_4, NL80211_CHAN_WIDTH_8, NL80211_CHAN_WIDTH_16, + NL80211_CHAN_WIDTH_320, }; /** diff --git a/include/uapi/linux/omap3isp.h b/include/uapi/linux/omap3isp.h index 87b55755f4ff..d9db7ad43890 100644 --- a/include/uapi/linux/omap3isp.h +++ b/include/uapi/linux/omap3isp.h @@ -162,6 +162,7 @@ struct omap3isp_h3a_aewb_config { * struct omap3isp_stat_data - Statistic data sent to or received from user * @ts: Timestamp of returned framestats. * @buf: Pointer to pass to user. + * @buf_size: Size of buffer. * @frame_number: Frame number of requested stats. * @cur_frame: Current frame number being processed. * @config_counter: Number of the configuration associated with the data. @@ -176,10 +177,12 @@ struct omap3isp_stat_data { struct timeval ts; #endif void __user *buf; - __u32 buf_size; - __u16 frame_number; - __u16 cur_frame; - __u16 config_counter; + __struct_group(/* no tag */, frame, /* no attrs */, + __u32 buf_size; + __u16 frame_number; + __u16 cur_frame; + __u16 config_counter; + ); }; #ifdef __KERNEL__ @@ -189,10 +192,12 @@ struct omap3isp_stat_data_time32 { __s32 tv_usec; } ts; __u32 buf; - __u32 buf_size; - __u16 frame_number; - __u16 cur_frame; - __u16 config_counter; + __struct_group(/* no tag */, frame, /* no attrs */, + __u32 buf_size; + __u16 frame_number; + __u16 cur_frame; + __u16 config_counter; + ); }; #endif diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 150bcff49b1c..ce3e1738d427 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -352,9 +352,20 @@ enum ovs_key_attr { OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6, /* struct ovs_key_ct_tuple_ipv6 */ OVS_KEY_ATTR_NSH, /* Nested set of ovs_nsh_key_* */ -#ifdef __KERNEL__ - OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info */ -#endif + /* User space decided to squat on types 29 and 30. They are defined + * below, but should not be sent to the kernel. + * + * WARNING: No new types should be added unless they are defined + * for both kernel and user space (no 'ifdef's). It's hard + * to keep compatibility otherwise. 
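
Back in the omap3isp hunk above, __struct_group() (from
linux/stddef.h) gives the four wrapped fields a single name, frame,
without changing the layout userspace sees. A sketch of the group-wise
copy this enables, bounds-checked under CONFIG_FORTIFY_SOURCE:

#include <linux/string.h>
#include <linux/omap3isp.h>

static void copy_stat_frame_meta(struct omap3isp_stat_data *dst,
				 const struct omap3isp_stat_data *src)
{
	/* One fortified copy of buf_size..config_counter as a unit. */
	memcpy(&dst->frame, &src->frame, sizeof(dst->frame));
	/* Individual members such as dst->buf_size stay accessible. */
}
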
+ */ + OVS_KEY_ATTR_PACKET_TYPE, /* be32 packet type */ + OVS_KEY_ATTR_ND_EXTENSIONS, /* IPv6 Neighbor Discovery extensions */ + + OVS_KEY_ATTR_TUNNEL_INFO, /* struct ip_tunnel_info. + * For in-kernel use only. + */ + OVS_KEY_ATTR_IPV6_EXTHDRS, /* struct ovs_key_ipv6_exthdr */ __OVS_KEY_ATTR_MAX }; @@ -430,6 +441,11 @@ struct ovs_key_ipv6 { __u8 ipv6_frag; /* One of OVS_FRAG_TYPE_*. */ }; +/* separate structure to support backward compatibility with older user space */ +struct ovs_key_ipv6_exthdrs { + __u16 hdrs; +}; + struct ovs_key_tcp { __be16 tcp_src; __be16 tcp_dst; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 82858b697c05..d37629dbad72 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -251,6 +251,8 @@ enum { PERF_BR_SYSRET = 8, /* syscall return */ PERF_BR_COND_CALL = 9, /* conditional function call */ PERF_BR_COND_RET = 10, /* conditional function return */ + PERF_BR_ERET = 11, /* exception return */ + PERF_BR_IRQ = 12, /* irq */ PERF_BR_MAX, }; diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index ee38b35c3f57..404f97fb239c 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -616,6 +616,10 @@ enum { * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_ * attributes */ + TCA_FLOWER_KEY_ENC_OPTS_GTP, /* Nested + * TCA_FLOWER_KEY_ENC_OPT_GTP_ + * attributes + */ __TCA_FLOWER_KEY_ENC_OPTS_MAX, }; @@ -655,6 +659,17 @@ enum { (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1) enum { + TCA_FLOWER_KEY_ENC_OPT_GTP_UNSPEC, + TCA_FLOWER_KEY_ENC_OPT_GTP_PDU_TYPE, /* u8 */ + TCA_FLOWER_KEY_ENC_OPT_GTP_QFI, /* u8 */ + + __TCA_FLOWER_KEY_ENC_OPT_GTP_MAX, +}; + +#define TCA_FLOWER_KEY_ENC_OPT_GTP_MAX \ + (__TCA_FLOWER_KEY_ENC_OPT_GTP_MAX - 1) + +enum { TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC, TCA_FLOWER_KEY_MPLS_OPTS_LSE, __TCA_FLOWER_KEY_MPLS_OPTS_MAX, diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index 2fcad1dd0b0e..2bf93c0d6354 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h @@ -82,6 +82,10 @@ #define PSCI_0_2_TOS_UP_NO_MIGRATE 1 #define PSCI_0_2_TOS_MP 2 +/* PSCI v1.1 reset type encoding for SYSTEM_RESET2 */ +#define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0 +#define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U + /* PSCI version decoding (independent of PSCI version) */ #define PSCI_VERSION_MAJOR_SHIFT 16 #define PSCI_VERSION_MINOR_MASK \ diff --git a/include/uapi/linux/reiserfs_xattr.h b/include/uapi/linux/reiserfs_xattr.h index 28f10842f047..503ad018ce5b 100644 --- a/include/uapi/linux/reiserfs_xattr.h +++ b/include/uapi/linux/reiserfs_xattr.h @@ -19,7 +19,7 @@ struct reiserfs_xattr_header { struct reiserfs_security_handle { const char *name; void *value; - size_t length; + __kernel_size_t length; }; #endif /* _LINUX_REISERFS_XATTR_H */ diff --git a/include/uapi/linux/rfkill.h b/include/uapi/linux/rfkill.h index 9b77cfc42efa..283c5a7b3f2c 100644 --- a/include/uapi/linux/rfkill.h +++ b/include/uapi/linux/rfkill.h @@ -159,8 +159,16 @@ struct rfkill_event_ext { * old behaviour for all userspace, unless it explicitly opts in to the * rules outlined here by using the new &struct rfkill_event_ext. * - * Userspace using &struct rfkill_event_ext must adhere to the following - * rules + * Additionally, some other userspace (bluez, g-s-d) was reading with a + * large size but as streaming reads rather than message-based, or with + * too strict checks for the returned size. 
So eventually, we completely
+ * reverted this, and extended messages need to be opted in to by using
+ * an ioctl:
+ *
+ *  ioctl(fd, RFKILL_IOCTL_MAX_SIZE, sizeof(struct rfkill_event_ext));
+ *
+ * Userspace using &struct rfkill_event_ext and the ioctl must adhere to
+ * the following rules:
 *
 * 1. accept short writes, optionally using them to detect that it's
 *    running on an older kernel;
@@ -175,6 +183,8 @@ struct rfkill_event_ext {
 #define RFKILL_IOC_MAGIC	'R'
 #define RFKILL_IOC_NOINPUT	1
 #define RFKILL_IOCTL_NOINPUT	_IO(RFKILL_IOC_MAGIC, RFKILL_IOC_NOINPUT)
+#define RFKILL_IOC_MAX_SIZE	2
+#define RFKILL_IOCTL_MAX_SIZE	_IOW(RFKILL_IOC_MAGIC, RFKILL_IOC_MAX_SIZE, __u32)

 /* and that's all userspace gets */

diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index 9a402fdb60e9..77ee207623a9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -105,23 +105,11 @@ struct rseq {
	 * Read and set by the kernel. Set by user-space with single-copy
	 * atomicity semantics. This field should only be updated by the
	 * thread which registered this data structure. Aligned on 64-bit.
+	 *
+	 * 32-bit architectures should update the low order bits of the
+	 * rseq_cs field, leaving the high order bits initialized to 0.
	 */
-	union {
-		__u64 ptr64;
-#ifdef __LP64__
-		__u64 ptr;
-#else
-		struct {
-#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
-			__u32 padding;		/* Initialized to zero. */
-			__u32 ptr32;
-#else /* LITTLE */
-			__u32 ptr32;
-			__u32 padding;		/* Initialized to zero. */
-#endif /* ENDIAN */
-		} ptr;
-#endif
-	} rseq_cs;
+	__u64 rseq_cs;

	/*
	 * Restartable sequences flags field.
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 93d934cc4613..83849a37db5b 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -146,6 +146,8 @@ enum {
 #define RTM_NEWSTATS RTM_NEWSTATS
	RTM_GETSTATS = 94,
 #define RTM_GETSTATS RTM_GETSTATS
+	RTM_SETSTATS,
+#define RTM_SETSTATS RTM_SETSTATS

	RTM_NEWCACHEREPORT = 96,
 #define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
@@ -185,6 +187,13 @@ enum {
	RTM_GETNEXTHOPBUCKET,
 #define RTM_GETNEXTHOPBUCKET RTM_GETNEXTHOPBUCKET

+	RTM_NEWTUNNEL = 120,
+#define RTM_NEWTUNNEL RTM_NEWTUNNEL
+	RTM_DELTUNNEL,
+#define RTM_DELTUNNEL RTM_DELTUNNEL
+	RTM_GETTUNNEL,
+#define RTM_GETTUNNEL RTM_GETTUNNEL
+
	__RTM_MAX,
 #define RTM_MAX	(((__RTM_MAX + 3) & ~3) - 1)
 };
@@ -756,6 +765,10 @@ enum rtnetlink_groups {
 #define RTNLGRP_BRVLAN	RTNLGRP_BRVLAN
	RTNLGRP_MCTP_IFADDR,
 #define RTNLGRP_MCTP_IFADDR	RTNLGRP_MCTP_IFADDR
+	RTNLGRP_TUNNEL,
+#define RTNLGRP_TUNNEL	RTNLGRP_TUNNEL
+	RTNLGRP_STATS,
+#define RTNLGRP_STATS	RTNLGRP_STATS
	__RTNLGRP_MAX
 };
 #define RTNLGRP_MAX	(__RTNLGRP_MAX - 1)
@@ -804,6 +817,7 @@ enum {
 #define RTEXT_FILTER_MRP	(1 << 4)
 #define RTEXT_FILTER_CFM_CONFIG	(1 << 5)
 #define RTEXT_FILTER_CFM_STATUS	(1 << 6)
+#define RTEXT_FILTER_MST	(1 << 7)

 /* End of information exported to user level */

diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
index 6c2874fd2c00..693f549f6966 100644
--- a/include/uapi/linux/smc.h
+++ b/include/uapi/linux/smc.h
@@ -59,6 +59,9 @@ enum {
	SMC_NETLINK_DUMP_SEID,
	SMC_NETLINK_ENABLE_SEID,
	SMC_NETLINK_DISABLE_SEID,
+	SMC_NETLINK_DUMP_HS_LIMITATION,
+	SMC_NETLINK_ENABLE_HS_LIMITATION,
+	SMC_NETLINK_DISABLE_HS_LIMITATION,
 };

 /* SMC_GENL_FAMILY top level attributes */
@@ -284,4 +287,16 @@ enum {
	__SMC_NLA_SEID_TABLE_MAX,
	SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1
 };
+
+/* SMC_NETLINK_HS_LIMITATION attributes */
+enum {
+
SMC_NLA_HS_LIMITATION_UNSPEC, + SMC_NLA_HS_LIMITATION_ENABLED, /* u8 */ + __SMC_NLA_HS_LIMITATION_MAX, + SMC_NLA_HS_LIMITATION_MAX = __SMC_NLA_HS_LIMITATION_MAX - 1 +}; + +/* SMC socket options */ +#define SMC_LIMIT_HS 1 /* constraint on smc handshake */ + #endif /* _UAPI_LINUX_SMC_H */ diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h index eb0a9a5b6e71..51d6bb2f6765 100644 --- a/include/uapi/linux/socket.h +++ b/include/uapi/linux/socket.h @@ -31,4 +31,8 @@ struct __kernel_sockaddr_storage { #define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK) +#define SOCK_TXREHASH_DEFAULT ((u8)-1) +#define SOCK_TXREHASH_DISABLED 0 +#define SOCK_TXREHASH_ENABLED 1 + #endif /* _UAPI_LINUX_SOCKET_H */ diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h index 9aa2fedfa309..fc78bf3aead7 100644 --- a/include/uapi/linux/thermal.h +++ b/include/uapi/linux/thermal.h @@ -44,7 +44,10 @@ enum thermal_genl_attr { THERMAL_GENL_ATTR_CDEV_MAX_STATE, THERMAL_GENL_ATTR_CDEV_NAME, THERMAL_GENL_ATTR_GOV_NAME, - + THERMAL_GENL_ATTR_CPU_CAPABILITY, + THERMAL_GENL_ATTR_CPU_CAPABILITY_ID, + THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE, + THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY, __THERMAL_GENL_ATTR_MAX, }; #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) @@ -71,6 +74,7 @@ enum thermal_genl_event { THERMAL_GENL_EVENT_CDEV_DELETE, /* Cdev unbound */ THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */ THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */ + THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, /* CPU capability changed */ __THERMAL_GENL_EVENT_MAX, }; #define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1) diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index f6d2f83cbe29..c4dc597f3dcf 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -19,12 +19,12 @@ * any application/library that wants linux/types.h. */ +/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */ #ifdef __CHECKER__ -#define __bitwise__ __attribute__((bitwise)) +#define __bitwise __attribute__((bitwise)) #else -#define __bitwise__ +#define __bitwise #endif -#define __bitwise __bitwise__ typedef __u16 __bitwise __le16; typedef __u16 __bitwise __be16; diff --git a/include/uapi/linux/user_events.h b/include/uapi/linux/user_events.h new file mode 100644 index 000000000000..e570840571e1 --- /dev/null +++ b/include/uapi/linux/user_events.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (c) 2021, Microsoft Corporation. 
+ * + * Authors: + * Beau Belgrave <beaub@linux.microsoft.com> + */ +#ifndef _UAPI_LINUX_USER_EVENTS_H +#define _UAPI_LINUX_USER_EVENTS_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +#ifdef __KERNEL__ +#include <linux/uio.h> +#else +#include <sys/uio.h> +#endif + +#define USER_EVENTS_SYSTEM "user_events" +#define USER_EVENTS_PREFIX "u:" + +/* Bits 0-6 are for known probe types, Bit 7 is for unknown probes */ +#define EVENT_BIT_FTRACE 0 +#define EVENT_BIT_PERF 1 +#define EVENT_BIT_OTHER 7 + +#define EVENT_STATUS_FTRACE (1 << EVENT_BIT_FTRACE) +#define EVENT_STATUS_PERF (1 << EVENT_BIT_PERF) +#define EVENT_STATUS_OTHER (1 << EVENT_BIT_OTHER) + +/* Create dynamic location entry within a 32-bit value */ +#define DYN_LOC(offset, size) ((size) << 16 | (offset)) + +/* Use raw iterator for attached BPF program(s), no affect on ftrace/perf */ +#define FLAG_BPF_ITER (1 << 0) + +/* + * Describes an event registration and stores the results of the registration. + * This structure is passed to the DIAG_IOCSREG ioctl, callers at a minimum + * must set the size and name_args before invocation. + */ +struct user_reg { + + /* Input: Size of the user_reg structure being used */ + __u32 size; + + /* Input: Pointer to string with event name, description and flags */ + __u64 name_args; + + /* Output: Byte index of the event within the status page */ + __u32 status_index; + + /* Output: Index of the event to use when writing data */ + __u32 write_index; +}; + +#define DIAG_IOC_MAGIC '*' + +/* Requests to register a user_event */ +#define DIAG_IOCSREG _IOWR(DIAG_IOC_MAGIC, 0, struct user_reg*) + +/* Requests to delete a user_event */ +#define DIAG_IOCSDEL _IOW(DIAG_IOC_MAGIC, 1, char*) + +/* Data type that was passed to the BPF program */ +enum { + /* Data resides in kernel space */ + USER_BPF_DATA_KERNEL, + + /* Data resides in user space */ + USER_BPF_DATA_USER, + + /* Data is a pointer to a user_bpf_iter structure */ + USER_BPF_DATA_ITER, +}; + +/* + * Describes an iovec iterator that BPF programs can use to access data for + * a given user_event write() / writev() call. + */ +struct user_bpf_iter { + + /* Offset of the data within the first iovec */ + __u32 iov_offset; + + /* Number of iovec structures */ + __u32 nr_segs; + + /* Pointer to iovec structures */ + const struct iovec *iov; +}; + +/* Context that BPF programs receive when attached to a user_event */ +struct user_bpf_context { + + /* Data type being passed (see union below) */ + __u32 data_type; + + /* Length of the data */ + __u32 data_len; + + /* Pointer to data, varies by data type */ + union { + /* Kernel data (data_type == USER_BPF_DATA_KERNEL) */ + void *kdata; + + /* User data (data_type == USER_BPF_DATA_USER) */ + void *udata; + + /* Direct iovec (data_type == USER_BPF_DATA_ITER) */ + struct user_bpf_iter *iter; + }; +}; + +#endif /* _UAPI_LINUX_USER_EVENTS_H */ diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h index 05b31d60acf6..ef739054cb1c 100644 --- a/include/uapi/linux/userfaultfd.h +++ b/include/uapi/linux/userfaultfd.h @@ -32,7 +32,8 @@ UFFD_FEATURE_SIGBUS | \ UFFD_FEATURE_THREAD_ID | \ UFFD_FEATURE_MINOR_HUGETLBFS | \ - UFFD_FEATURE_MINOR_SHMEM) + UFFD_FEATURE_MINOR_SHMEM | \ + UFFD_FEATURE_EXACT_ADDRESS) #define UFFD_API_IOCTLS \ ((__u64)1 << _UFFDIO_REGISTER | \ (__u64)1 << _UFFDIO_UNREGISTER | \ @@ -189,6 +190,10 @@ struct uffdio_api { * * UFFD_FEATURE_MINOR_SHMEM indicates the same support as * UFFD_FEATURE_MINOR_HUGETLBFS, but for shmem-backed pages instead. 
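
A hedged sketch of opting in to the UFFD_FEATURE_EXACT_ADDRESS feature
described just below, via the usual UFFDIO_API handshake; error
handling is abbreviated:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/userfaultfd.h>

static int uffd_open_exact_addr(void)
{
	struct uffdio_api api;
	int fd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

	if (fd < 0)
		return -1;

	memset(&api, 0, sizeof(api));
	api.api = UFFD_API;
	api.features = UFFD_FEATURE_EXACT_ADDRESS;
	if (ioctl(fd, UFFDIO_API, &api) < 0 ||
	    !(api.features & UFFD_FEATURE_EXACT_ADDRESS)) {
		close(fd);
		return -1;
	}
	return fd;	/* fault addresses will no longer be page-masked */
}
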
+ * + * UFFD_FEATURE_EXACT_ADDRESS indicates that the exact address of page + * faults would be provided and the offset within the page would not be + * masked. */ #define UFFD_FEATURE_PAGEFAULT_FLAG_WP (1<<0) #define UFFD_FEATURE_EVENT_FORK (1<<1) @@ -201,6 +206,7 @@ struct uffdio_api { #define UFFD_FEATURE_THREAD_ID (1<<8) #define UFFD_FEATURE_MINOR_HUGETLBFS (1<<9) #define UFFD_FEATURE_MINOR_SHMEM (1<<10) +#define UFFD_FEATURE_EXACT_ADDRESS (1<<11) __u64 features; __u64 ioctls; diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index c8e0f84d204d..bb40129446d4 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -219,6 +219,12 @@ enum v4l2_colorfx { */ #define V4L2_CID_USER_ALLEGRO_BASE (V4L2_CID_USER_BASE + 0x1170) +/* + * The base for the isl7998x driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_ISL7998X_BASE (V4L2_CID_USER_BASE + 0x1180) + /* MPEG-class control IDs */ /* The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical */ @@ -1563,6 +1569,8 @@ struct v4l2_h264_dpb_entry { #define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01 #define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02 #define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04 +#define V4L2_H264_DECODE_PARAM_FLAG_PFRAME 0x08 +#define V4L2_H264_DECODE_PARAM_FLAG_BFRAME 0x10 #define V4L2_CID_STATELESS_H264_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 7) /** diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index ef33ea002b0b..fea86061b44e 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -323,7 +323,7 @@ struct vfio_region_info_cap_type { #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) #define VFIO_REGION_TYPE_GFX (1) #define VFIO_REGION_TYPE_CCW (2) -#define VFIO_REGION_TYPE_MIGRATION (3) +#define VFIO_REGION_TYPE_MIGRATION_DEPRECATED (3) /* sub-types for VFIO_REGION_TYPE_PCI_* */ @@ -405,225 +405,29 @@ struct vfio_region_gfx_edid { #define VFIO_REGION_SUBTYPE_CCW_CRW (3) /* sub-types for VFIO_REGION_TYPE_MIGRATION */ -#define VFIO_REGION_SUBTYPE_MIGRATION (1) - -/* - * The structure vfio_device_migration_info is placed at the 0th offset of - * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related - * migration information. Field accesses from this structure are only supported - * at their native width and alignment. Otherwise, the result is undefined and - * vendor drivers should return an error. - * - * device_state: (read/write) - * - The user application writes to this field to inform the vendor driver - * about the device state to be transitioned to. - * - The vendor driver should take the necessary actions to change the - * device state. After successful transition to a given state, the - * vendor driver should return success on write(device_state, state) - * system call. If the device state transition fails, the vendor driver - * should return an appropriate -errno for the fault condition. - * - On the user application side, if the device state transition fails, - * that is, if write(device_state, state) returns an error, read - * device_state again to determine the current state of the device from - * the vendor driver. - * - The vendor driver should return previous state of the device unless - * the vendor driver has encountered an internal error, in which case - * the vendor driver may report the device_state VFIO_DEVICE_STATE_ERROR. 
- * - The user application must use the device reset ioctl to recover the - * device from VFIO_DEVICE_STATE_ERROR state. If the device is - * indicated to be in a valid device state by reading device_state, the - * user application may attempt to transition the device to any valid - * state reachable from the current state or terminate itself. - * - * device_state consists of 3 bits: - * - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear, - * it indicates the _STOP state. When the device state is changed to - * _STOP, driver should stop the device before write() returns. - * - If bit 1 is set, it indicates the _SAVING state, which means that the - * driver should start gathering device state information that will be - * provided to the VFIO user application to save the device's state. - * - If bit 2 is set, it indicates the _RESUMING state, which means that - * the driver should prepare to resume the device. Data provided through - * the migration region should be used to resume the device. - * Bits 3 - 31 are reserved for future use. To preserve them, the user - * application should perform a read-modify-write operation on this - * field when modifying the specified bits. - * - * +------- _RESUMING - * |+------ _SAVING - * ||+----- _RUNNING - * ||| - * 000b => Device Stopped, not saving or resuming - * 001b => Device running, which is the default state - * 010b => Stop the device & save the device state, stop-and-copy state - * 011b => Device running and save the device state, pre-copy state - * 100b => Device stopped and the device state is resuming - * 101b => Invalid state - * 110b => Error state - * 111b => Invalid state - * - * State transitions: - * - * _RESUMING _RUNNING Pre-copy Stop-and-copy _STOP - * (100b) (001b) (011b) (010b) (000b) - * 0. Running or default state - * | - * - * 1. Normal Shutdown (optional) - * |------------------------------------->| - * - * 2. Save the state or suspend - * |------------------------->|---------->| - * - * 3. Save the state during live migration - * |----------->|------------>|---------->| - * - * 4. Resuming - * |<---------| - * - * 5. Resumed - * |--------->| - * - * 0. Default state of VFIO device is _RUNNING when the user application starts. - * 1. During normal shutdown of the user application, the user application may - * optionally change the VFIO device state from _RUNNING to _STOP. This - * transition is optional. The vendor driver must support this transition but - * must not require it. - * 2. When the user application saves state or suspends the application, the - * device state transitions from _RUNNING to stop-and-copy and then to _STOP. - * On state transition from _RUNNING to stop-and-copy, driver must stop the - * device, save the device state and send it to the application through the - * migration region. The sequence to be followed for such transition is given - * below. - * 3. In live migration of user application, the state transitions from _RUNNING - * to pre-copy, to stop-and-copy, and to _STOP. - * On state transition from _RUNNING to pre-copy, the driver should start - * gathering the device state while the application is still running and send - * the device state data to application through the migration region. - * On state transition from pre-copy to stop-and-copy, the driver must stop - * the device, save the device state and send it to the user application - * through the migration region. 
- *    Vendor drivers must support the pre-copy state even for implementations
- *    where no data is provided to the user before the stop-and-copy state. The
- *    user must not be required to consume all migration data before the device
- *    transitions to a new state, including the stop-and-copy state.
- *    The sequence to be followed for above two transitions is given below.
- * 4. To start the resuming phase, the device state should be transitioned from
- *    the _RUNNING to the _RESUMING state.
- *    In the _RESUMING state, the driver should use the device state data
- *    received through the migration region to resume the device.
- * 5. After providing saved device data to the driver, the application should
- *    change the state from _RESUMING to _RUNNING.
- *
- * reserved:
- *      Reads on this field return zero and writes are ignored.
- *
- * pending_bytes: (read only)
- *      The number of pending bytes still to be migrated from the vendor driver.
- *
- * data_offset: (read only)
- *      The user application should read data_offset field from the migration
- *      region. The user application should read the device data from this
- *      offset within the migration region during the _SAVING state or write
- *      the device data during the _RESUMING state. See below for details of
- *      sequence to be followed.
- *
- * data_size: (read/write)
- *      The user application should read data_size to get the size in bytes of
- *      the data copied in the migration region during the _SAVING state and
- *      write the size in bytes of the data copied in the migration region
- *      during the _RESUMING state.
- *
- * The format of the migration region is as follows:
- *  ------------------------------------------------------------------
- * |vfio_device_migration_info|    data section                      |
- * |                          |     ///////////////////////////////  |
- *  ------------------------------------------------------------------
- *   ^                              ^
- *  offset 0-trapped part        data_offset
- *
- * The structure vfio_device_migration_info is always followed by the data
- * section in the region, so data_offset will always be nonzero. The offset
- * from where the data is copied is decided by the kernel driver. The data
- * section can be trapped, mmapped, or partitioned, depending on how the kernel
- * driver defines the data section. The data section partition can be defined
- * as mapped by the sparse mmap capability. If mmapped, data_offset must be
- * page aligned, whereas initial section which contains the
- * vfio_device_migration_info structure, might not end at the offset, which is
- * page aligned. The user is not required to access through mmap regardless
- * of the capabilities of the region mmap.
- * The vendor driver should determine whether and how to partition the data
- * section. The vendor driver should return data_offset accordingly.
- *
- * The sequence to be followed while in pre-copy state and stop-and-copy state
- * is as follows:
- * a. Read pending_bytes, indicating the start of a new iteration to get device
- *    data. Repeated read on pending_bytes at this stage should have no side
- *    effects.
- *    If pending_bytes == 0, the user application should not iterate to get data
- *    for that device.
- *    If pending_bytes > 0, perform the following steps.
- * b. Read data_offset, indicating that the vendor driver should make data
- *    available through the data section. The vendor driver should return this
- *    read operation only after data is available from (region + data_offset)
- *    to (region + data_offset + data_size).
- * c. Read data_size, which is the amount of data in bytes available through
- *    the migration region.
- *    Read on data_offset and data_size should return the offset and size of
- *    the current buffer if the user application reads data_offset and
- *    data_size more than once here.
- * d. Read data_size bytes of data from (region + data_offset) from the
- *    migration region.
- * e. Process the data.
- * f. Read pending_bytes, which indicates that the data from the previous
- *    iteration has been read. If pending_bytes > 0, go to step b.
- *
- * The user application can transition from the _SAVING|_RUNNING
- * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the
- * number of pending bytes. The user application should iterate in _SAVING
- * (stop-and-copy) until pending_bytes is 0.
- *
- * The sequence to be followed while _RESUMING device state is as follows:
- * While data for this device is available, repeat the following steps:
- * a. Read data_offset from where the user application should write data.
- * b. Write migration data starting at the migration region + data_offset for
- *    the length determined by data_size from the migration source.
- * c. Write data_size, which indicates to the vendor driver that data is
- *    written in the migration region. Vendor driver must return this write
- *    operations on consuming data. Vendor driver should apply the
- *    user-provided migration region data to the device resume state.
- *
- * If an error occurs during the above sequences, the vendor driver can return
- * an error code for next read() or write() operation, which will terminate the
- * loop. The user application should then take the next necessary action, for
- * example, failing migration or terminating the user application.
- *
- * For the user application, data is opaque. The user application should write
- * data in the same order as the data is received and the data should be of
- * same transaction size at the source.
- */
+#define VFIO_REGION_SUBTYPE_MIGRATION_DEPRECATED (1)
 
 struct vfio_device_migration_info {
 	__u32 device_state;         /* VFIO device state */
-#define VFIO_DEVICE_STATE_STOP      (0)
-#define VFIO_DEVICE_STATE_RUNNING   (1 << 0)
-#define VFIO_DEVICE_STATE_SAVING    (1 << 1)
-#define VFIO_DEVICE_STATE_RESUMING  (1 << 2)
-#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_RUNNING | \
-				     VFIO_DEVICE_STATE_SAVING |  \
-				     VFIO_DEVICE_STATE_RESUMING)
+#define VFIO_DEVICE_STATE_V1_STOP      (0)
+#define VFIO_DEVICE_STATE_V1_RUNNING   (1 << 0)
+#define VFIO_DEVICE_STATE_V1_SAVING    (1 << 1)
+#define VFIO_DEVICE_STATE_V1_RESUMING  (1 << 2)
+#define VFIO_DEVICE_STATE_MASK      (VFIO_DEVICE_STATE_V1_RUNNING | \
+				     VFIO_DEVICE_STATE_V1_SAVING |  \
+				     VFIO_DEVICE_STATE_V1_RESUMING)
 
 #define VFIO_DEVICE_STATE_VALID(state) \
-	(state & VFIO_DEVICE_STATE_RESUMING ? \
-	(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1)
+	(state & VFIO_DEVICE_STATE_V1_RESUMING ? \
+	(state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_V1_RESUMING : 1)
 
 #define VFIO_DEVICE_STATE_IS_ERROR(state) \
-	((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \
-					      VFIO_DEVICE_STATE_RESUMING))
+	((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_V1_SAVING | \
+					      VFIO_DEVICE_STATE_V1_RESUMING))
 
 #define VFIO_DEVICE_STATE_SET_ERROR(state) \
-	((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_SATE_SAVING | \
-					     VFIO_DEVICE_STATE_RESUMING)
+	((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_V1_SAVING | \
+					     VFIO_DEVICE_STATE_V1_RESUMING)
 
 	__u32 reserved;
 	__u64 pending_bytes;
@@ -1002,6 +806,186 @@ struct vfio_device_feature {
  */
 #define VFIO_DEVICE_FEATURE_PCI_VF_TOKEN	(0)
 
+/*
+ * Indicates the device can support the migration API through
+ * VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE. If this GET succeeds, the RUNNING and
+ * ERROR states are always supported. Support for additional states is
+ * indicated via the flags field; at least VFIO_MIGRATION_STOP_COPY must be
+ * set.
+ *
+ * VFIO_MIGRATION_STOP_COPY means that STOP, STOP_COPY and
+ * RESUMING are supported.
+ *
+ * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P
+ * is supported in addition to the STOP_COPY states.
+ *
+ * Other combinations of flags have behavior to be defined in the future.
+ */
+struct vfio_device_feature_migration {
+	__aligned_u64 flags;
+#define VFIO_MIGRATION_STOP_COPY	(1 << 0)
+#define VFIO_MIGRATION_P2P		(1 << 1)
+};
+#define VFIO_DEVICE_FEATURE_MIGRATION 1
+
+/*
+ * Upon VFIO_DEVICE_FEATURE_SET, execute a migration state change on the VFIO
+ * device. The new state is supplied in device_state; see enum
+ * vfio_device_mig_state for details.
+ *
+ * The kernel migration driver must fully transition the device to the new
+ * state value before the operation returns to the user.
+ *
+ * The kernel migration driver must not generate asynchronous device state
+ * transitions outside of manipulation by the user or the VFIO_DEVICE_RESET
+ * ioctl as described above.
+ *
+ * If this function fails then the current device_state may be the original
+ * operating state or some other state along the combination transition path.
+ * The user can then decide if it should execute a VFIO_DEVICE_RESET, attempt
+ * to return to the original state, or attempt to return to some other state
+ * such as RUNNING or STOP.
+ *
+ * If the new_state starts a new data transfer session then the FD associated
+ * with that session is returned in data_fd. The user is responsible for
+ * closing this FD when it is finished. The user must consider the migration
+ * data stream carried over the FD to be opaque and must preserve the byte
+ * order of the stream. The user is not required to preserve buffer
+ * segmentation when writing the data stream during the RESUMING operation.
+ *
+ * Upon VFIO_DEVICE_FEATURE_GET, get the current migration state of the VFIO
+ * device; data_fd will be -1.
+ */
+struct vfio_device_feature_mig_state {
+	__u32 device_state; /* From enum vfio_device_mig_state */
+	__s32 data_fd;
+};
+#define VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE 2
+
+/*
+ * The device migration Finite State Machine is described by the enum
+ * vfio_device_mig_state. Some of the FSM arcs will create a migration data
+ * transfer session by returning a FD; in this case the migration data will
+ * flow over the FD using read() and write() as discussed below.
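To make the handshake concrete, here is an editor's sketch of probing the new interface from userspace with VFIO_DEVICE_FEATURE_GET. It is an illustration of the uapi above, not code from this patch; it assumes an already-open VFIO device fd and abbreviates error handling.

/*
 * Editor's sketch, not part of the patch: probe the migration v2 interface.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int probe_migration(int device_fd)
{
	size_t sz = sizeof(struct vfio_device_feature) +
		    sizeof(struct vfio_device_feature_migration);
	struct vfio_device_feature *feature = calloc(1, sz);
	struct vfio_device_feature_migration *mig =
		(struct vfio_device_feature_migration *)feature->data;
	int ret = -1;

	feature->argsz = sz;
	feature->flags = VFIO_DEVICE_FEATURE_GET | VFIO_DEVICE_FEATURE_MIGRATION;
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature) == 0) {
		/* STOP_COPY is the mandatory baseline; P2P is optional. */
		printf("STOP_COPY: %s, P2P: %s\n",
		       mig->flags & VFIO_MIGRATION_STOP_COPY ? "yes" : "no",
		       mig->flags & VFIO_MIGRATION_P2P ? "yes" : "no");
		ret = 0;
	}
	free(feature);
	return ret;
}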
+ *
+ * There are 5 states to support VFIO_MIGRATION_STOP_COPY:
+ *  RUNNING - The device is running normally
+ *  STOP - The device does not change the internal or external state
+ *  STOP_COPY - The device internal state can be read out
+ *  RESUMING - The device is stopped and is loading a new internal state
+ *  ERROR - The device has failed and must be reset
+ *
+ * And 1 optional state to support VFIO_MIGRATION_P2P:
+ *  RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA
+ *
+ * The FSM takes actions on the arcs between FSM states. The driver implements
+ * the following behavior for the FSM arcs:
+ *
+ * RUNNING_P2P -> STOP
+ * STOP_COPY -> STOP
+ *   While in STOP the device must stop operation. The device must not
+ *   generate interrupts, DMA, or any other change to external state. It must
+ *   not change its internal state. When stopped the device and kernel
+ *   migration driver must accept and respond to interaction to support
+ *   external subsystems in the STOP state, for example PCI MSI-X and PCI
+ *   config space. Failure by the user to restrict device access while in
+ *   STOP must not result in error conditions outside the user context (ex.
+ *   host system faults).
+ *
+ *   The STOP_COPY arc will terminate a data transfer session.
+ *
+ * RESUMING -> STOP
+ *   Leaving RESUMING terminates a data transfer session and indicates the
+ *   device should complete processing of the data delivered by write(). The
+ *   kernel migration driver should complete the incorporation of data written
+ *   to the data transfer FD into the device internal state and perform
+ *   final validity and consistency checking of the new device state. If the
+ *   user provided data is found to be incomplete, inconsistent, or otherwise
+ *   invalid, the migration driver must fail the SET_STATE ioctl and
+ *   optionally go to the ERROR state as described below.
+ *
+ *   While in STOP the device has the same behavior as other STOP states
+ *   described above.
+ *
+ *   To abort a RESUMING session the device must be reset.
+ *
+ * RUNNING_P2P -> RUNNING
+ *   While in RUNNING the device is fully operational, the device may generate
+ *   interrupts, DMA, respond to MMIO, all vfio device regions are functional,
+ *   and the device may advance its internal state.
+ *
+ * RUNNING -> RUNNING_P2P
+ * STOP -> RUNNING_P2P
+ *   While in RUNNING_P2P the device is partially running in the P2P quiescent
+ *   state defined below.
+ *
+ * STOP -> STOP_COPY
+ *   This arc begins the process of saving the device state and will return a
+ *   new data_fd.
+ *
+ *   While in the STOP_COPY state the device has the same behavior as STOP
+ *   with the addition that the data transfer session continues to stream the
+ *   migration state. End of stream on the FD indicates the entire device
+ *   state has been transferred.
+ *
+ *   The user should take steps to restrict access to vfio device regions
+ *   while the device is in STOP_COPY or risk corruption of the device
+ *   migration data stream.
+ *
+ * STOP -> RESUMING
+ *   Entering the RESUMING state starts a process of restoring the device
+ *   state and will return a new data_fd. The data stream fed into the data_fd
+ *   should be taken from the data transfer output of a single FD during
+ *   saving from a compatible device. The migration driver may alter/reset the
+ *   internal device state for this arc if required to prepare the device to
+ *   receive the migration data.
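A save path built on the STOP -> STOP_COPY arc enters STOP_COPY and drains the returned data_fd until end of stream. Again an editor's sketch against the uapi above, not part of the patch: save_device_state() is a hypothetical helper, the device is assumed to already be in STOP, and save_fd is any writable destination.

/*
 * Editor's sketch, not part of the patch: save device state via STOP_COPY.
 */
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int save_device_state(int device_fd, int save_fd)
{
	size_t sz = sizeof(struct vfio_device_feature) +
		    sizeof(struct vfio_device_feature_mig_state);
	struct vfio_device_feature *feature = calloc(1, sz);
	struct vfio_device_feature_mig_state *mig =
		(struct vfio_device_feature_mig_state *)feature->data;
	char buf[4096];
	ssize_t n;
	int ret = -1;

	feature->argsz = sz;
	feature->flags = VFIO_DEVICE_FEATURE_SET |
			 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE;
	mig->device_state = VFIO_DEVICE_STATE_STOP_COPY;
	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, feature))
		goto out;

	/* The stream is opaque; EOF marks the end of the device state. */
	while ((n = read(mig->data_fd, buf, sizeof(buf))) > 0)
		if (write(save_fd, buf, n) != n)
			goto out_close;
	ret = (n == 0) ? 0 : -1;
out_close:
	close(mig->data_fd);
out:
	free(feature);
	return ret;
}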
+ *
+ * any -> ERROR
+ *   ERROR cannot be specified as a device state, however any transition
+ *   request can be failed with an errno return and may then move the
+ *   device_state into ERROR. In this case the device was unable to execute
+ *   the requested arc and was also unable to restore the device to any valid
+ *   device_state.
+ *   To recover from ERROR, VFIO_DEVICE_RESET must be used to return the
+ *   device_state back to RUNNING.
+ *
+ * The optional peer to peer (P2P) quiescent state is intended to be a
+ * quiescent state for the device for the purposes of managing multiple
+ * devices within a user context where peer-to-peer DMA between devices may
+ * be active. The RUNNING_P2P states must prevent the device from initiating
+ * any new P2P DMA transactions. If the device can identify P2P transactions
+ * then it can stop only P2P DMA, otherwise it must stop all DMA. The
+ * migration driver must complete any such outstanding operations prior to
+ * completing the FSM arc into a P2P state. For the purpose of specification,
+ * the states behave as though the device were fully running if not
+ * supported. As in STOP or STOP_COPY, the user must not touch the device,
+ * otherwise the state can be exited.
+ *
+ * The remaining possible transitions are interpreted as combinations of the
+ * above FSM arcs. As there are multiple paths through the FSM arcs, the path
+ * should be selected based on the following rules:
+ *   - Select the shortest path.
+ * Refer to vfio_mig_get_next_state() for the result of the algorithm.
+ *
+ * The automatic transit through the FSM arcs that make up the combination
+ * transition is invisible to the user. When working with combination arcs the
+ * user may see any step along the path in the device_state if SET_STATE
+ * fails. When handling these types of errors users should anticipate future
+ * revisions of this protocol using new states and those states becoming
+ * visible in this case.
+ *
+ * The optional states cannot be used with SET_STATE if the device does not
+ * support them. The user can discover if these states are supported by using
+ * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user
+ * can avoid knowing about these optional states if the kernel driver supports
+ * them.
+ */
+enum vfio_device_mig_state {
+	VFIO_DEVICE_STATE_ERROR = 0,
+	VFIO_DEVICE_STATE_STOP = 1,
+	VFIO_DEVICE_STATE_RUNNING = 2,
+	VFIO_DEVICE_STATE_STOP_COPY = 3,
+	VFIO_DEVICE_STATE_RESUMING = 4,
+	VFIO_DEVICE_STATE_RUNNING_P2P = 5,
+};
+
 /* -------- API for Type1 VFIO IOMMU -------- */
 
 /**
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index df8b9c486ba1..3768a0a80830 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -632,6 +632,8 @@ struct v4l2_pix_format {
 /* Tiled YUV formats, non contiguous planes */
 #define V4L2_PIX_FMT_NV12MT	v4l2_fourcc('T', 'M', '1', '2') /* 12  Y/CbCr 4:2:0 64x32 tiles */
 #define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12  Y/CbCr 4:2:0 16x16 tiles */
+#define V4L2_PIX_FMT_NV12M_8L128	v4l2_fourcc('N', 'A', '1', '2') /* Y/CbCr 4:2:0 8x128 tiles */
+#define V4L2_PIX_FMT_NV12M_10BE_8L128	v4l2_fourcc_be('N', 'T', '1', '2') /* Y/CbCr 4:2:0 10-bit 8x128 tiles */
 
 /* Bayer formats - see http://www.siliconimaging.com/RGB%20Bayer.htm */
 #define V4L2_PIX_FMT_SBGGR8	v4l2_fourcc('B', 'A', '8', '1') /* 8  BGBG.. GRGR.. */
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index fe2cfae94b45..e052653a6e4c 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -26,8 +26,8 @@
 
 /* SOF ABI version major, minor and patch numbers */
 #define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 18
-#define SOF_ABI_PATCH 0
+#define SOF_ABI_MINOR 19
+#define SOF_ABI_PATCH 1
 
 /* SOF ABI version number. Format within 32bit word is MMmmmppp */
 #define SOF_ABI_MAJOR_SHIFT	24
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index c4a93ce1de48..e6966d187591 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -476,6 +476,10 @@
  *  1111			-none-		-none-	 -none-		-none-
  */
 
+#define WIN_RGB_ORDER(_win)		(0x2020 + ((_win) * 4))
+#define WIN_RGB_ORDER_FORWARD		(0 << 11)
+#define WIN_RGB_ORDER_REVERSE		(1 << 11)
+
 /* FIMD Version 8 register offset definitions */
 #define FIMD_V8_VIDTCON0	0x20010
 #define FIMD_V8_VIDTCON1	0x20014
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index cb854df031ce..c9fea9389ebe 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -104,17 +104,32 @@ int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);
  * access has been ended, free the given page too.  Access will be ended
  * immediately iff the grant entry is not in use, otherwise it will happen
  * some time later.  page may be 0, in which case no freeing will occur.
+ * Note that the granted page might still be accessed (read or write) by the
+ * other side after gnttab_end_foreign_access() returns, so even if page was
+ * specified as 0 the page must not be reused for other purposes immediately.
+ * gnttab_end_foreign_access() will take an additional reference to the
+ * granted page in this case, which is dropped only after the grant is no
+ * longer in use.
+ * This requires that multi-page allocations for areas subject to
+ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
+ * via free_pages_exact()) in order to avoid high order pages.
  */
 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 			       unsigned long page);
 
+/*
+ * End access through the given grant reference, iff the grant entry is
+ * no longer in use.  On success, the grant reference is also deallocated.
+ * Return 1 if the grant entry was freed, 0 if it is still in use.
+ */
+int gnttab_try_end_foreign_access(grant_ref_t ref);
+
 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
 
 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
 
-int gnttab_query_foreign_access(grant_ref_t ref);
-
 /*
  * operations on reserved batches of grant references
  */
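For the SOF ABI bump above, the 32-bit version word packs major, minor and patch as MMmmmppp. A small editor's illustration follows; only SOF_ABI_MAJOR_SHIFT (24) appears in the hunk, so the minor and patch shifts below are assumptions inferred from the stated layout.

/* Editor's sketch: composing the SOF ABI word (shifts partly assumed). */
#define MY_SOF_ABI_MAJOR_SHIFT	24
#define MY_SOF_ABI_MINOR_SHIFT	12	/* assumption, per MMmmmppp */
#define MY_SOF_ABI_PATCH_SHIFT	0	/* assumption, per MMmmmppp */

static inline unsigned int my_sof_abi_ver(unsigned int major,
					  unsigned int minor,
					  unsigned int patch)
{
	return (major << MY_SOF_ABI_MAJOR_SHIFT) |
	       (minor << MY_SOF_ABI_MINOR_SHIFT) |
	       (patch << MY_SOF_ABI_PATCH_SHIFT);
}

/* With the bump above: my_sof_abi_ver(3, 19, 1) == 0x03013001 */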
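For the grant-table change, a frontend can use the new gnttab_try_end_foreign_access() to reclaim a page immediately when the grant is already unused, falling back to the deferred path otherwise. A kernel-side editor's sketch, not part of the patch; my_release_grant() is a hypothetical helper.

/* Editor's sketch: pairing the "try" variant with the deferred path. */
#include <linux/mm.h>
#include <xen/grant_table.h>

static void my_release_grant(grant_ref_t ref, struct page *page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		/* Grant entry was unused and is now freed; the page is ours. */
		__free_page(page);
	} else {
		/*
		 * The other side may still access the page: let
		 * gnttab_end_foreign_access() take its extra page reference
		 * and free the page only once the grant is no longer in use.
		 */
		gnttab_end_foreign_access(ref, 0 /* read/write grant */,
					  (unsigned long)page_address(page));
	}
}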