Diffstat (limited to 'include')
631 files changed, 16886 insertions, 5906 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 48f0fd499274..e7d27373ff71 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -344,8 +344,9 @@ struct acpi_device_physical_node { struct acpi_device_properties { const guid_t *guid; - const union acpi_object *properties; + union acpi_object *properties; struct list_head list; + void **bufs; }; /* ACPI Device Specific Data (_DSD) */ diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 36db8b9eb68a..941be574bbe0 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -45,7 +45,6 @@ mandatory-y += msi.h mandatory-y += pci.h mandatory-y += percpu.h mandatory-y += pgalloc.h -mandatory-y += platform-feature.h mandatory-y += preempt.h mandatory-y += rwonce.h mandatory-y += sections.h diff --git a/include/asm-generic/bitops/generic-non-atomic.h b/include/asm-generic/bitops/generic-non-atomic.h new file mode 100644 index 000000000000..3d5ebd24652b --- /dev/null +++ b/include/asm-generic/bitops/generic-non-atomic.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H +#define __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H + +#include <linux/bits.h> + +#ifndef _LINUX_BITOPS_H +#error only <linux/bitops.h> can be included directly +#endif + +/* + * Generic definitions for bit operations, should not be used in regular code + * directly. + */ + +/** + * generic___set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __always_inline void +generic___set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static __always_inline void +generic___clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p &= ~mask; +} + +/** + * generic___change_bit - Toggle a bit in memory + * @nr: the bit to change + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __always_inline void +generic___change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p ^= mask; +} + +/** + * generic___test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. 
+ */ +static __always_inline bool +generic___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old | mask; + return (old & mask) != 0; +} + +/** + * generic___test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __always_inline bool +generic___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old & ~mask; + return (old & mask) != 0; +} + +/* WARNING: non atomic and it can be reordered! */ +static __always_inline bool +generic___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + unsigned long old = *p; + + *p = old ^ mask; + return (old & mask) != 0; +} + +/** + * generic_test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static __always_inline bool +generic_test_bit(unsigned long nr, const volatile unsigned long *addr) +{ + /* + * Unlike the bitops with the '__' prefix above, this one *is* atomic, + * so `volatile` must always stay here with no cast-aways. See + * `Documentation/atomic_bitops.txt` for the details. + */ + return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); +} + +/* + * const_*() definitions provide good compile-time optimizations when + * the passed arguments can be resolved at compile time. + */ +#define const___set_bit generic___set_bit +#define const___clear_bit generic___clear_bit +#define const___change_bit generic___change_bit +#define const___test_and_set_bit generic___test_and_set_bit +#define const___test_and_clear_bit generic___test_and_clear_bit +#define const___test_and_change_bit generic___test_and_change_bit + +/** + * const_test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + * + * A version of generic_test_bit() which discards the `volatile` qualifier to + * allow a compiler to optimize code harder. Non-atomic and to be called only + * for testing compile-time constants, e.g. by the corresponding macros, not + * directly from "regular" code. + */ +static __always_inline bool +const_test_bit(unsigned long nr, const volatile unsigned long *addr) +{ + const unsigned long *p = (const unsigned long *)addr + BIT_WORD(nr); + unsigned long mask = BIT_MASK(nr); + unsigned long val = *p; + + return !!(val & mask); +} + +#endif /* __ASM_GENERIC_BITOPS_GENERIC_NON_ATOMIC_H */ diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h index 7ab1ecc37782..988a3bbfba34 100644 --- a/include/asm-generic/bitops/instrumented-non-atomic.h +++ b/include/asm-generic/bitops/instrumented-non-atomic.h @@ -14,7 +14,7 @@ #include <linux/instrumented.h> /** - * __set_bit - Set a bit in memory + * ___set_bit - Set a bit in memory * @nr: the bit to set * @addr: the address to start counting from * @@ -22,14 +22,15 @@ * region of memory concurrently, the effect may be that only one operation * succeeds. 
*/ -static __always_inline void __set_bit(long nr, volatile unsigned long *addr) +static __always_inline void +___set_bit(unsigned long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___set_bit(nr, addr); } /** - * __clear_bit - Clears a bit in memory + * ___clear_bit - Clears a bit in memory * @nr: the bit to clear * @addr: the address to start counting from * @@ -37,14 +38,15 @@ static __always_inline void __set_bit(long nr, volatile unsigned long *addr) * region of memory concurrently, the effect may be that only one operation * succeeds. */ -static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) +static __always_inline void +___clear_bit(unsigned long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit(nr, addr); } /** - * __change_bit - Toggle a bit in memory + * ___change_bit - Toggle a bit in memory * @nr: the bit to change * @addr: the address to start counting from * @@ -52,7 +54,8 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) * region of memory concurrently, the effect may be that only one operation * succeeds. */ -static __always_inline void __change_bit(long nr, volatile unsigned long *addr) +static __always_inline void +___change_bit(unsigned long nr, volatile unsigned long *addr) { instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___change_bit(nr, addr); @@ -83,53 +86,57 @@ static __always_inline void __instrument_read_write_bitop(long nr, volatile unsi } /** - * __test_and_set_bit - Set a bit and return its old value + * ___test_and_set_bit - Set a bit and return its old value * @nr: Bit to set * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ -static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) +static __always_inline bool +___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_set_bit(nr, addr); } /** - * __test_and_clear_bit - Clear a bit and return its old value + * ___test_and_clear_bit - Clear a bit and return its old value * @nr: Bit to clear * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. */ -static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) +static __always_inline bool +___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_clear_bit(nr, addr); } /** - * __test_and_change_bit - Change a bit and return its old value + * ___test_and_change_bit - Change a bit and return its old value * @nr: Bit to change * @addr: Address to count from * * This operation is non-atomic. If two instances of this operation race, one * can appear to succeed but actually fail. 
*/ -static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) +static __always_inline bool +___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { __instrument_read_write_bitop(nr, addr); return arch___test_and_change_bit(nr, addr); } /** - * test_bit - Determine whether a bit is set + * _test_bit - Determine whether a bit is set * @nr: bit number to test * @addr: Address to start counting from */ -static __always_inline bool test_bit(long nr, const volatile unsigned long *addr) +static __always_inline bool +_test_bit(unsigned long nr, const volatile unsigned long *addr) { instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); return arch_test_bit(nr, addr); diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h index 078cc68be2f1..5c37ced343ae 100644 --- a/include/asm-generic/bitops/non-atomic.h +++ b/include/asm-generic/bitops/non-atomic.h @@ -2,121 +2,18 @@ #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ -#include <asm/types.h> +#include <asm-generic/bitops/generic-non-atomic.h> -/** - * arch___set_bit - Set a bit in memory - * @nr: the bit to set - * @addr: the address to start counting from - * - * Unlike set_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static __always_inline void -arch___set_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); +#define arch___set_bit generic___set_bit +#define arch___clear_bit generic___clear_bit +#define arch___change_bit generic___change_bit - *p |= mask; -} -#define __set_bit arch___set_bit +#define arch___test_and_set_bit generic___test_and_set_bit +#define arch___test_and_clear_bit generic___test_and_clear_bit +#define arch___test_and_change_bit generic___test_and_change_bit -static __always_inline void -arch___clear_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); +#define arch_test_bit generic_test_bit - *p &= ~mask; -} -#define __clear_bit arch___clear_bit - -/** - * arch___change_bit - Toggle a bit in memory - * @nr: the bit to change - * @addr: the address to start counting from - * - * Unlike change_bit(), this function is non-atomic and may be reordered. - * If it's called on the same region of memory simultaneously, the effect - * may be that only one operation succeeds. - */ -static __always_inline -void arch___change_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - - *p ^= mask; -} -#define __change_bit arch___change_bit - -/** - * arch___test_and_set_bit - Set a bit and return its old value - * @nr: Bit to set - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. 
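In practice, the rule spelled out in these comments is: the double-underscore bitops are cheaper but need external serialization, while the plain set_bit()/test_bit() family is atomic. A minimal illustrative sketch, not taken from the patch (the foo_* names and the bitmap size are invented here):

#include <linux/bitops.h>
#include <linux/spinlock.h>

static DECLARE_BITMAP(foo_busy, 64);
static DEFINE_SPINLOCK(foo_lock);

static void foo_mark_busy(unsigned int slot)
{
	spin_lock(&foo_lock);
	__set_bit(slot, foo_busy);	/* non-atomic: relies on foo_lock */
	spin_unlock(&foo_lock);
}

static bool foo_is_busy(unsigned int slot)
{
	return test_bit(slot, foo_busy);	/* atomic read, no lock needed */
}
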
- */ -static __always_inline int -arch___test_and_set_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old | mask; - return (old & mask) != 0; -} -#define __test_and_set_bit arch___test_and_set_bit - -/** - * arch___test_and_clear_bit - Clear a bit and return its old value - * @nr: Bit to clear - * @addr: Address to count from - * - * This operation is non-atomic and can be reordered. - * If two examples of this operation race, one can appear to succeed - * but actually fail. You must protect multiple accesses with a lock. - */ -static __always_inline int -arch___test_and_clear_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old & ~mask; - return (old & mask) != 0; -} -#define __test_and_clear_bit arch___test_and_clear_bit - -/* WARNING: non atomic and it can be reordered! */ -static __always_inline int -arch___test_and_change_bit(unsigned int nr, volatile unsigned long *addr) -{ - unsigned long mask = BIT_MASK(nr); - unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); - unsigned long old = *p; - - *p = old ^ mask; - return (old & mask) != 0; -} -#define __test_and_change_bit arch___test_and_change_bit - -/** - * arch_test_bit - Determine whether a bit is set - * @nr: bit number to test - * @addr: Address to start counting from - */ -static __always_inline int -arch_test_bit(unsigned int nr, const volatile unsigned long *addr) -{ - return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); -} -#define test_bit arch_test_bit +#include <asm-generic/bitops/non-instrumented-non-atomic.h> #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ diff --git a/include/asm-generic/bitops/non-instrumented-non-atomic.h b/include/asm-generic/bitops/non-instrumented-non-atomic.h new file mode 100644 index 000000000000..bdb9b1ffaee9 --- /dev/null +++ b/include/asm-generic/bitops/non-instrumented-non-atomic.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H +#define __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H + +#define ___set_bit arch___set_bit +#define ___clear_bit arch___clear_bit +#define ___change_bit arch___change_bit + +#define ___test_and_set_bit arch___test_and_set_bit +#define ___test_and_clear_bit arch___test_and_clear_bit +#define ___test_and_change_bit arch___test_and_change_bit + +#define _test_bit arch_test_bit + +#endif /* __ASM_GENERIC_BITOPS_NON_INSTRUMENTED_NON_ATOMIC_H */ diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 72974cb81343..a68f8fbf423b 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -10,6 +10,7 @@ #include <asm/page.h> /* I/O is all done through memory accesses */ #include <linux/string.h> /* for memset() and memcpy() */ #include <linux/types.h> +#include <linux/instruction_pointer.h> #ifdef CONFIG_GENERIC_IOMAP #include <asm-generic/iomap.h> @@ -61,6 +62,44 @@ #define __io_par(v) __io_ar(v) #endif +/* + * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for + * specific kernel drivers in case of excessive/unwanted logging. + * + * Usage: Add a #define flag at the beginning of the driver file. + * Ex: #define __DISABLE_TRACE_MMIO__ + * #include <...> + * ... 
+ */ +#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__)) +#include <linux/tracepoint-defs.h> + +DECLARE_TRACEPOINT(rwmmio_write); +DECLARE_TRACEPOINT(rwmmio_post_write); +DECLARE_TRACEPOINT(rwmmio_read); +DECLARE_TRACEPOINT(rwmmio_post_read); + +void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr); +void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr); +void log_read_mmio(u8 width, const volatile void __iomem *addr, + unsigned long caller_addr); +void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, + unsigned long caller_addr); + +#else + +static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr) {} +static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, + unsigned long caller_addr) {} +static inline void log_read_mmio(u8 width, const volatile void __iomem *addr, + unsigned long caller_addr) {} +static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, + unsigned long caller_addr) {} + +#endif /* CONFIG_TRACE_MMIO_ACCESS */ /* * __raw_{read,write}{b,w,l,q}() access memory in native endianness. @@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr) { u8 val; + log_read_mmio(8, addr, _THIS_IP_); __io_br(); val = __raw_readb(addr); __io_ar(val); + log_post_read_mmio(val, 8, addr, _THIS_IP_); return val; } #endif @@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr) { u16 val; + log_read_mmio(16, addr, _THIS_IP_); __io_br(); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); __io_ar(val); + log_post_read_mmio(val, 16, addr, _THIS_IP_); return val; } #endif @@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr) { u32 val; + log_read_mmio(32, addr, _THIS_IP_); __io_br(); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); __io_ar(val); + log_post_read_mmio(val, 32, addr, _THIS_IP_); return val; } #endif @@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr) { u64 val; + log_read_mmio(64, addr, _THIS_IP_); __io_br(); val = __le64_to_cpu(__raw_readq(addr)); __io_ar(val); + log_post_read_mmio(val, 64, addr, _THIS_IP_); return val; } #endif @@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { + log_write_mmio(value, 8, addr, _THIS_IP_); __io_bw(); __raw_writeb(value, addr); __io_aw(); + log_post_write_mmio(value, 8, addr, _THIS_IP_); } #endif @@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { + log_write_mmio(value, 16, addr, _THIS_IP_); __io_bw(); __raw_writew((u16 __force)cpu_to_le16(value), addr); __io_aw(); + log_post_write_mmio(value, 16, addr, _THIS_IP_); } #endif @@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { + log_write_mmio(value, 32, addr, _THIS_IP_); __io_bw(); __raw_writel((u32 __force)__cpu_to_le32(value), addr); __io_aw(); + log_post_write_mmio(value, 32, addr, _THIS_IP_); } #endif @@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { 
+ log_write_mmio(value, 64, addr, _THIS_IP_); __io_bw(); __raw_writeq(__cpu_to_le64(value), addr); __io_aw(); + log_post_write_mmio(value, 64, addr, _THIS_IP_); } #endif #endif /* CONFIG_64BIT */ @@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr) #define readb_relaxed readb_relaxed static inline u8 readb_relaxed(const volatile void __iomem *addr) { - return __raw_readb(addr); + u8 val; + + log_read_mmio(8, addr, _THIS_IP_); + val = __raw_readb(addr); + log_post_read_mmio(val, 8, addr, _THIS_IP_); + return val; } #endif @@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr) #define readw_relaxed readw_relaxed static inline u16 readw_relaxed(const volatile void __iomem *addr) { - return __le16_to_cpu(__raw_readw(addr)); + u16 val; + + log_read_mmio(16, addr, _THIS_IP_); + val = __le16_to_cpu(__raw_readw(addr)); + log_post_read_mmio(val, 16, addr, _THIS_IP_); + return val; } #endif @@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr) #define readl_relaxed readl_relaxed static inline u32 readl_relaxed(const volatile void __iomem *addr) { - return __le32_to_cpu(__raw_readl(addr)); + u32 val; + + log_read_mmio(32, addr, _THIS_IP_); + val = __le32_to_cpu(__raw_readl(addr)); + log_post_read_mmio(val, 32, addr, _THIS_IP_); + return val; } #endif @@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr) #define readq_relaxed readq_relaxed static inline u64 readq_relaxed(const volatile void __iomem *addr) { - return __le64_to_cpu(__raw_readq(addr)); + u64 val; + + log_read_mmio(64, addr, _THIS_IP_); + val = __le64_to_cpu(__raw_readq(addr)); + log_post_read_mmio(val, 64, addr, _THIS_IP_); + return val; } #endif @@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) #define writeb_relaxed writeb_relaxed static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) { + log_write_mmio(value, 8, addr, _THIS_IP_); __raw_writeb(value, addr); + log_post_write_mmio(value, 8, addr, _THIS_IP_); } #endif @@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) #define writew_relaxed writew_relaxed static inline void writew_relaxed(u16 value, volatile void __iomem *addr) { + log_write_mmio(value, 16, addr, _THIS_IP_); __raw_writew(cpu_to_le16(value), addr); + log_post_write_mmio(value, 16, addr, _THIS_IP_); } #endif @@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr) #define writel_relaxed writel_relaxed static inline void writel_relaxed(u32 value, volatile void __iomem *addr) { + log_write_mmio(value, 32, addr, _THIS_IP_); __raw_writel(__cpu_to_le32(value), addr); + log_post_write_mmio(value, 32, addr, _THIS_IP_); } #endif @@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr) #define writeq_relaxed writeq_relaxed static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) { + log_write_mmio(value, 64, addr, _THIS_IP_); __raw_writeq(__cpu_to_le64(value), addr); + log_post_write_mmio(value, 64, addr, _THIS_IP_); } #endif @@ -1086,20 +1169,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) } #endif -#ifdef CONFIG_VIRT_TO_BUS -#ifndef virt_to_bus -static inline unsigned long virt_to_bus(void *address) -{ - return (unsigned long)address; -} - -static inline void *bus_to_virt(unsigned long address) -{ - return (void *)address; -} -#endif -#endif - #ifndef memset_io #define memset_io memset_io 
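The __DISABLE_TRACE_MMIO__ opt-out described earlier in this io.h hunk is per translation unit: defining it before the includes selects the empty log_*_mmio() stubs, so the readX()/writeX() calls in that one file compile without tracing. A hedged sketch of a noisy driver opting out (the driver file, function, and register offset are invented for illustration):

/* drivers/foo/foo_hw.c: this file is too chatty to trace */
#define __DISABLE_TRACE_MMIO__

#include <linux/io.h>

static u32 foo_read_status(void __iomem *base)
{
	return readl(base + 0x10);	/* 0x10: made-up status register offset */
}
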
/** diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h index 6bb3cd3d695a..6869f1061528 100644 --- a/include/asm-generic/pci.h +++ b/include/asm-generic/pci.h @@ -1,17 +1,30 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * linux/include/asm-generic/pci.h - * - * Copyright (C) 2003 Russell King - */ -#ifndef _ASM_GENERIC_PCI_H -#define _ASM_GENERIC_PCI_H +/* SPDX-License-Identifier: GPL-2.0-only */ -#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ -static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) +#ifndef __ASM_GENERIC_PCI_H +#define __ASM_GENERIC_PCI_H + +#ifndef PCIBIOS_MIN_IO +#define PCIBIOS_MIN_IO 0 +#endif + +#ifndef PCIBIOS_MIN_MEM +#define PCIBIOS_MIN_MEM 0 +#endif + +#ifndef pcibios_assign_all_busses +/* For bootloaders that do not initialize the PCI bus */ +#define pcibios_assign_all_busses() 1 +#endif + +/* Enable generic resource mapping code in drivers/pci/ */ +#define ARCH_GENERIC_PCI_MMAP_RESOURCE + +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_proc_domain(struct pci_bus *bus) { - return channel ? 15 : 14; + /* always show the domain in /proc */ + return 1; } -#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ +#endif /* CONFIG_PCI_DOMAINS */ -#endif /* _ASM_GENERIC_PCI_H */ +#endif /* __ASM_GENERIC_PCI_H */ diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index 5a2f9bf53384..8fbb0a55545d 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -25,6 +25,8 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *); #ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port, unsigned int nr); +#elif !defined(CONFIG_HAS_IOPORT_MAP) +#define __pci_ioport_map(dev, port, nr) NULL #else #define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr)) #endif diff --git a/include/asm-generic/platform-feature.h b/include/asm-generic/platform-feature.h deleted file mode 100644 index 4b0af3d51588..000000000000 --- a/include/asm-generic/platform-feature.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_GENERIC_PLATFORM_FEATURE_H -#define _ASM_GENERIC_PLATFORM_FEATURE_H - -/* Number of arch specific feature flags. 
*/ -#define PLATFORM_ARCH_FEAT_N 0 - -#endif /* _ASM_GENERIC_PLATFORM_FEATURE_H */ diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h index eceeecf6a5bd..d3e2d81656e0 100644 --- a/include/asm-generic/softirq_stack.h +++ b/include/asm-generic/softirq_stack.h @@ -2,7 +2,7 @@ #ifndef __ASM_GENERIC_SOFTIRQ_STACK_H #define __ASM_GENERIC_SOFTIRQ_STACK_H -#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK +#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT) void do_softirq_own_stack(void); #else static inline void do_softirq_own_stack(void) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index f140e4643949..f5841992dc9b 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -718,6 +718,8 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type, u32 mask); +int crypto_has_shash(const char *alg_name, u32 type, u32 mask); + static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm) { return &tfm->base; diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index cccceadc164b..24d01e9877c1 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -104,6 +104,8 @@ struct kpp_alg { */ struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask); +int crypto_has_kpp(const char *alg_name, u32 type, u32 mask); + static inline struct crypto_tfm *crypto_kpp_tfm(struct crypto_kpp *tfm) { return &tfm->base; diff --git a/include/drm/display/drm_dp_aux_bus.h b/include/drm/display/drm_dp_aux_bus.h index 4f19b20b1dd6..8a0a486383c5 100644 --- a/include/drm/display/drm_dp_aux_bus.h +++ b/include/drm/display/drm_dp_aux_bus.h @@ -44,9 +44,37 @@ static inline struct dp_aux_ep_driver *to_dp_aux_ep_drv(struct device_driver *dr return container_of(drv, struct dp_aux_ep_driver, driver); } -int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux); -void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux); -int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux); +int of_dp_aux_populate_bus(struct drm_dp_aux *aux, + int (*done_probing)(struct drm_dp_aux *aux)); +void of_dp_aux_depopulate_bus(struct drm_dp_aux *aux); +int devm_of_dp_aux_populate_bus(struct drm_dp_aux *aux, + int (*done_probing)(struct drm_dp_aux *aux)); + +/* Deprecated versions of the above functions. To be removed when no callers. */ +static inline int of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + int ret; + + ret = of_dp_aux_populate_bus(aux, NULL); + + /* New API returns -ENODEV for no child case; adapt to old assumption */ + return (ret != -ENODEV) ? ret : 0; +} + +static inline int devm_of_dp_aux_populate_ep_devices(struct drm_dp_aux *aux) +{ + int ret; + + ret = devm_of_dp_aux_populate_bus(aux, NULL); + + /* New API returns -ENODEV for no child case; adapt to old assumption */ + return (ret != -ENODEV) ? ret : 0; +} + +static inline void of_dp_aux_depopulate_ep_devices(struct drm_dp_aux *aux) +{ + of_dp_aux_depopulate_bus(aux); +} #define dp_aux_dp_driver_register(aux_ep_drv) \ __dp_aux_dp_driver_register(aux_ep_drv, THIS_MODULE) diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h index dca40a045dd6..db0fe9f8a612 100644 --- a/include/drm/display/drm_dp_helper.h +++ b/include/drm/display/drm_dp_helper.h @@ -370,14 +370,56 @@ struct drm_dp_aux { * helpers assume this is the case. * * Also note that this callback can be called no matter the - * state @dev is in. 
Drivers that need that device to be powered - * to perform this operation will first need to make sure it's - * been properly enabled. + * state @dev is in and also no matter what state the panel is + * in. It's expected: + * + * - If the @dev providing the AUX bus is currently unpowered then + * it will power itself up for the transfer. + * + * - If we're on eDP (using a drm_panel) and the panel is not in a + * state where it can respond (it's not powered or it's in a + * low power state) then this function may return an error, but + * not crash. It's up to the caller of this code to make sure that + * the panel is powered on if getting an error back is not OK. If a + * drm_panel driver is initiating a DP AUX transfer it may power + * itself up however it wants. All other code should ensure that + * the pre_enable() bridge chain (which eventually calls the + * drm_panel prepare function) has powered the panel. */ ssize_t (*transfer)(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg); /** + * @wait_hpd_asserted: wait for HPD to be asserted + * + * This is mainly useful for eDP panels drivers to wait for an eDP + * panel to finish powering on. This is an optional function. + * + * This function will efficiently wait for the HPD signal to be + * asserted. The `wait_us` parameter that is passed in says that we + * know that the HPD signal is expected to be asserted within `wait_us` + * microseconds. This function could wait for longer than `wait_us` if + * the logic in the DP controller has a long debouncing time. The + * important thing is that if this function returns success that the + * DP controller is ready to send AUX transactions. + * + * This function returns 0 if HPD was asserted or -ETIMEDOUT if time + * expired and HPD wasn't asserted. This function should not print + * timeout errors to the log. + * + * The semantics of this function are designed to match the + * readx_poll_timeout() function. That means a `wait_us` of 0 means + * to wait forever. Like readx_poll_timeout(), this function may sleep. + * + * NOTE: this function specifically reports the state of the HPD pin + * that's associated with the DP AUX channel. This is different from + * the HPD concept in much of the rest of DRM which is more about + * physical presence of a display. For eDP, for instance, a display is + * assumed always present even if the HPD pin is deasserted. + */ + int (*wait_hpd_asserted)(struct drm_dp_aux *aux, unsigned long wait_us); + + /** * @i2c_nack_count: Counts I2C NACKs, used for DP validation. 
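One way a DP controller driver might implement the new wait_hpd_asserted hook, sketched under the assumption of a memory-mapped HPD status register; struct foo_dp and the FOO_DP_* register bits are invented here, only the readl_poll_timeout() semantics (timeout of 0 means wait forever, may sleep) are taken from the documentation above:

#include <linux/iopoll.h>

struct foo_dp {
	void __iomem *regs;
	struct drm_dp_aux aux;
};

static int foo_dp_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
{
	struct foo_dp *dp = container_of(aux, struct foo_dp, aux);
	u32 status;

	/* FOO_DP_HPD_STATUS / FOO_DP_HPD_ASSERTED are hypothetical */
	return readl_poll_timeout(dp->regs + FOO_DP_HPD_STATUS, status,
				  status & FOO_DP_HPD_ASSERTED, 500, wait_us);
}

/* at probe time, alongside .transfer: */
/* dp->aux.wait_hpd_asserted = foo_dp_wait_hpd_asserted; */
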
*/ unsigned i2c_nack_count; diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 4045e2507e11..2a0b17842402 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -46,6 +46,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state, int max_scale, bool can_position, bool can_update_disabled); +int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state, + bool can_disable_primary_plane); int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state); int drm_atomic_helper_check(struct drm_device *dev, diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index f27b4060faa2..d434ab416ad4 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -796,6 +796,7 @@ drm_priv_to_bridge(struct drm_private_obj *priv) } void drm_bridge_add(struct drm_bridge *bridge); +int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, struct drm_bridge *previous, @@ -917,16 +918,30 @@ void drm_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status); #ifdef CONFIG_DRM_PANEL_BRIDGE +bool drm_bridge_is_panel(const struct drm_bridge *bridge); struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel); struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, u32 connector_type); void drm_panel_bridge_remove(struct drm_bridge *bridge); +int drm_panel_bridge_set_orientation(struct drm_connector *connector, + struct drm_bridge *bridge); struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, struct drm_panel *panel); struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev, struct drm_panel *panel, u32 connector_type); struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge); +#else +static inline bool drm_bridge_is_panel(const struct drm_bridge *bridge) +{ + return false; +} + +static inline int drm_panel_bridge_set_orientation(struct drm_connector *connector, + struct drm_bridge *bridge) +{ + return -EINVAL; +} #endif #if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL_BRIDGE) diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 3ac4bf87f257..a1705d6b3fba 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -38,6 +38,7 @@ struct drm_modeset_acquire_ctx; struct drm_device; struct drm_crtc; struct drm_encoder; +struct drm_panel; struct drm_property; struct drm_property_blob; struct drm_printer; @@ -1526,7 +1527,11 @@ struct drm_connector { struct drm_cmdline_mode cmdline_mode; /** @force: a DRM_FORCE_<foo> state for forced mode sets */ enum drm_connector_force force; - /** @override_edid: has the EDID been overwritten through debugfs for testing? */ + /** + * @override_edid: has the EDID been overwritten through debugfs for + * testing? Do not modify outside of drm_edid_override_set() and + * drm_edid_override_reset(). 
+ */ bool override_edid; /** @epoch_counter: used to detect any other changes in connector, besides status */ u64 epoch_counter; @@ -1802,6 +1807,9 @@ int drm_connector_set_panel_orientation_with_quirk( struct drm_connector *connector, enum drm_panel_orientation panel_orientation, int width, int height); +int drm_connector_set_orientation_from_panel( + struct drm_connector *connector, + struct drm_panel *panel); int drm_connector_attach_max_bpc_property(struct drm_connector *connector, int min, int max); void drm_connector_create_privacy_screen_properties(struct drm_connector *conn); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index a70baea0636c..ffc1cde331d3 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -25,37 +25,24 @@ #ifndef __DRM_CRTC_H__ #define __DRM_CRTC_H__ -#include <linux/i2c.h> #include <linux/spinlock.h> #include <linux/types.h> -#include <linux/fb.h> -#include <linux/hdmi.h> -#include <linux/media-bus-format.h> -#include <uapi/drm/drm_mode.h> -#include <uapi/drm/drm_fourcc.h> #include <drm/drm_modeset_lock.h> -#include <drm/drm_rect.h> #include <drm/drm_mode_object.h> -#include <drm/drm_framebuffer.h> #include <drm/drm_modes.h> -#include <drm/drm_connector.h> #include <drm/drm_device.h> -#include <drm/drm_property.h> -#include <drm/drm_edid.h> #include <drm/drm_plane.h> -#include <drm/drm_blend.h> -#include <drm/drm_color_mgmt.h> #include <drm/drm_debugfs_crc.h> #include <drm/drm_mode_config.h> +struct drm_connector; struct drm_device; +struct drm_framebuffer; struct drm_mode_set; struct drm_file; -struct drm_clip_rect; struct drm_printer; struct drm_self_refresh_data; struct device_node; -struct dma_fence; struct edid; static inline int64_t U642I64(uint64_t val) diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index 7ffbd9f7bfc7..49649eb8447e 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -25,7 +25,7 @@ #include <linux/types.h> #include <linux/bits.h> -struct edid; +struct drm_edid; #define VESA_IEEE_OUI 0x3a0292 @@ -141,7 +141,7 @@ struct displayid_vesa_vendor_specific_block { /* DisplayID iteration */ struct displayid_iter { - const struct edid *edid; + const struct drm_edid *drm_edid; const u8 *section; int length; @@ -149,7 +149,7 @@ struct displayid_iter { int ext_index; }; -void displayid_iter_edid_begin(const struct edid *edid, +void displayid_iter_edid_begin(const struct drm_edid *drm_edid, struct displayid_iter *iter); const struct displayid_block * __displayid_iter_next(struct displayid_iter *iter); diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index b2756753370b..2181977ae683 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h @@ -28,6 +28,7 @@ #include <drm/drm_mode.h> struct drm_device; +struct drm_edid; struct i2c_adapter; #define EDID_LENGTH 128 @@ -497,6 +498,22 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld) } /** + * drm_edid_decode_mfg_id - Decode the manufacturer ID + * @mfg_id: The manufacturer ID + * @vend: A 4-byte buffer to store the 3-letter vendor string plus a '\0' + * termination + */ +static inline const char *drm_edid_decode_mfg_id(u16 mfg_id, char vend[4]) +{ + vend[0] = '@' + ((mfg_id >> 10) & 0x1f); + vend[1] = '@' + ((mfg_id >> 5) & 0x1f); + vend[2] = '@' + ((mfg_id >> 0) & 0x1f); + vend[3] = '\0'; + + return vend; +} + +/** * drm_edid_encode_panel_id - Encode an ID for matching against drm_edid_get_panel_id() * @vend_chr_0: First character of the vendor string. 
* @vend_chr_1: Second character of the vendor string. @@ -536,10 +553,7 @@ static inline u8 drm_eld_get_conn_type(const uint8_t *eld) static inline void drm_edid_decode_panel_id(u32 panel_id, char vend[4], u16 *product_id) { *product_id = (u16)(panel_id & 0xffff); - vend[0] = '@' + ((panel_id >> 26) & 0x1f); - vend[1] = '@' + ((panel_id >> 21) & 0x1f); - vend[2] = '@' + ((panel_id >> 16) & 0x1f); - vend[3] = '\0'; + drm_edid_decode_mfg_id(panel_id >> 16, vend); } bool drm_probe_ddc(struct i2c_adapter *adapter); @@ -578,8 +592,21 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, struct drm_display_mode * drm_display_mode_from_cea_vic(struct drm_device *dev, u8 video_code); -const u8 *drm_find_edid_extension(const struct edid *edid, - int ext_id, int *ext_index); +/* Interface based on struct drm_edid */ +const struct drm_edid *drm_edid_alloc(const void *edid, size_t size); +const struct drm_edid *drm_edid_dup(const struct drm_edid *drm_edid); +void drm_edid_free(const struct drm_edid *drm_edid); +const struct edid *drm_edid_raw(const struct drm_edid *drm_edid); +const struct drm_edid *drm_edid_read(struct drm_connector *connector); +const struct drm_edid *drm_edid_read_ddc(struct drm_connector *connector, + struct i2c_adapter *adapter); +const struct drm_edid *drm_edid_read_custom(struct drm_connector *connector, + int (*read_block)(void *context, u8 *buf, unsigned int block, size_t len), + void *context); +int drm_edid_connector_update(struct drm_connector *connector, + const struct drm_edid *edid); +const u8 *drm_find_edid_extension(const struct drm_edid *drm_edid, + int ext_id, int *ext_index); #endif /* __DRM_EDID_H__ */ diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index a09864f6d684..7214101fd731 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -27,6 +27,8 @@ #ifndef __DRM_ENCODER_SLAVE_H__ #define __DRM_ENCODER_SLAVE_H__ +#include <linux/i2c.h> + #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 329607ca65c0..fddd0d1af689 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -35,6 +35,7 @@ struct drm_fb_helper; #include <drm/drm_client.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> +#include <linux/fb.h> #include <linux/kgdb.h> enum mode_set_atomic { diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 9d7c61a122dc..87cffc9efa85 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -315,6 +315,23 @@ struct drm_gem_object { }; /** + * DRM_GEM_FOPS - Default drm GEM file operations + * + * This macro provides a shorthand for setting the GEM file ops in the + * &file_operations structure. If all you need are the default ops, use + * DEFINE_DRM_GEM_FOPS instead. 
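As the DRM_GEM_FOPS comment above suggests, the split lets a driver take the default GEM file ops and still add its own entries; drivers that need nothing extra keep using DEFINE_DRM_GEM_FOPS(). A hedged sketch (foo_show_fdinfo is an invented driver hook, assumed to be defined elsewhere in the driver):

static void foo_show_fdinfo(struct seq_file *m, struct file *f);	/* hypothetical */

static const struct file_operations foo_driver_fops = {
	.owner		= THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo	= foo_show_fdinfo,
};
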
+ */ +#define DRM_GEM_FOPS \ + .open = drm_open,\ + .release = drm_release,\ + .unlocked_ioctl = drm_ioctl,\ + .compat_ioctl = drm_compat_ioctl,\ + .poll = drm_poll,\ + .read = drm_read,\ + .llseek = noop_llseek,\ + .mmap = drm_gem_mmap + +/** * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers * @name: name for the generated structure * @@ -330,14 +347,7 @@ struct drm_gem_object { #define DEFINE_DRM_GEM_FOPS(name) \ static const struct file_operations name = {\ .owner = THIS_MODULE,\ - .open = drm_open,\ - .release = drm_release,\ - .unlocked_ioctl = drm_ioctl,\ - .compat_ioctl = drm_compat_ioctl,\ - .poll = drm_poll,\ - .read = drm_read,\ - .llseek = noop_llseek,\ - .mmap = drm_gem_mmap,\ + DRM_GEM_FOPS,\ } void drm_gem_object_release(struct drm_gem_object *obj); diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h index 1091e4fa08cb..d302521f3dd4 100644 --- a/include/drm/drm_gem_framebuffer_helper.h +++ b/include/drm/drm_gem_framebuffer_helper.h @@ -4,8 +4,6 @@ #include <linux/dma-buf.h> #include <linux/iosys-map.h> -#include <drm/drm_fourcc.h> - struct drm_afbc_framebuffer; struct drm_device; struct drm_fb_helper_surface_size; @@ -39,11 +37,9 @@ struct drm_framebuffer * drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); -int drm_gem_fb_vmap(struct drm_framebuffer *fb, - struct iosys_map map[static DRM_FORMAT_MAX_PLANES], - struct iosys_map data[DRM_FORMAT_MAX_PLANES]); -void drm_gem_fb_vunmap(struct drm_framebuffer *fb, - struct iosys_map map[static DRM_FORMAT_MAX_PLANES]); +int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, + struct iosys_map *data); +void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map); int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir); diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index 51e09a1a106a..91a164bdd8f3 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -296,6 +296,23 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi, u16 *brightness); /** + * mipi_dsi_dcs_write_seq - transmit a DCS command with payload + * @dsi: DSI peripheral device + * @cmd: Command + * @seq: buffer containing data to be transmitted + */ +#define mipi_dsi_dcs_write_seq(dsi, cmd, seq...) 
do { \ + static const u8 d[] = { cmd, seq }; \ + struct device *dev = &dsi->dev; \ + int ret; \ + ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \ + if (ret < 0) { \ + dev_err_ratelimited(dev, "sending command %#02x failed: %d\n", cmd, ret); \ + return ret; \ + } \ + } while (0) + +/** * struct mipi_dsi_driver - DSI driver * @driver: device driver model driver * @probe: callback for device binding diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h index 99f79ac8b4cd..10ab58c40746 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -50,6 +50,12 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, const struct device_node *port2); int drm_of_lvds_get_data_mapping(const struct device_node *port); +int drm_of_get_data_lanes_count(const struct device_node *endpoint, + const unsigned int min, const unsigned int max); +int drm_of_get_data_lanes_count_ep(const struct device_node *port, + int port_reg, int reg, + const unsigned int min, + const unsigned int max); #else static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev, struct device_node *port) @@ -105,6 +111,22 @@ drm_of_lvds_get_data_mapping(const struct device_node *port) { return -EINVAL; } + +static inline int +drm_of_get_data_lanes_count(const struct device_node *endpoint, + const unsigned int min, const unsigned int max) +{ + return -EINVAL; +} + +static inline int +drm_of_get_data_lanes_count_ep(const struct device_node *port, + int port_reg, int reg, + const unsigned int min, + const unsigned int max) +{ + return -EINVAL; +} #endif /* diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h index d279ee455f01..3a271128c078 100644 --- a/include/drm/drm_panel.h +++ b/include/drm/drm_panel.h @@ -117,6 +117,15 @@ struct drm_panel_funcs { struct drm_connector *connector); /** + * @get_orientation: + * + * Return the panel orientation set by device tree or EDID. + * + * This function is optional. + */ + enum drm_panel_orientation (*get_orientation)(struct drm_panel *panel); + + /** * @get_timings: * * Copy display timings into the provided array and return diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index 48300aa6ca71..8075e02aa865 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -26,4 +26,7 @@ void drm_kms_helper_poll_disable(struct drm_device *dev); void drm_kms_helper_poll_enable(struct drm_device *dev); bool drm_kms_helper_is_poll_worker(void); +int drm_connector_helper_get_modes_from_ddc(struct drm_connector *connector); +int drm_connector_helper_get_modes(struct drm_connector *connector); + #endif diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h index 6f6e19bd4dac..e8d94fca2703 100644 --- a/include/drm/drm_rect.h +++ b/include/drm/drm_rect.h @@ -48,6 +48,22 @@ struct drm_rect { }; /** + * DRM_RECT_INIT - initialize a rectangle from x/y/w/h + * @x: x coordinate + * @y: y coordinate + * @w: width + * @h: height + * + * RETURNS: + * A new rectangle of the specified size. 
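For reference, the new DRM_RECT_INIT() initializer pairs naturally with the existing DRM_RECT_FMT/DRM_RECT_ARG printing helpers; a small illustrative fragment, assumed to sit inside some plane or CRTC code rather than taken from the patch:

struct drm_rect clip = DRM_RECT_INIT(0, 0, 640, 480);	/* x1=0, y1=0, x2=640, y2=480 */

pr_debug("clip: " DRM_RECT_FMT "\n", DRM_RECT_ARG(&clip));
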
+ */ +#define DRM_RECT_INIT(x, y, w, h) ((struct drm_rect){ \ + .x1 = (x), \ + .y1 = (y), \ + .x2 = (x) + (w), \ + .y2 = (y) + (h) }) + +/** * DRM_RECT_FMT - printf string for &struct drm_rect */ #define DRM_RECT_FMT "%dx%d%+d%+d" diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 283dadfbb4db..278031aa2e84 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -696,22 +696,55 @@ #define INTEL_DG2_G10_IDS(info) \ INTEL_VGA_DEVICE(0x5690, info), \ INTEL_VGA_DEVICE(0x5691, info), \ - INTEL_VGA_DEVICE(0x5692, info) + INTEL_VGA_DEVICE(0x5692, info), \ + INTEL_VGA_DEVICE(0x56A0, info), \ + INTEL_VGA_DEVICE(0x56A1, info), \ + INTEL_VGA_DEVICE(0x56A2, info) #define INTEL_DG2_G11_IDS(info) \ INTEL_VGA_DEVICE(0x5693, info), \ INTEL_VGA_DEVICE(0x5694, info), \ INTEL_VGA_DEVICE(0x5695, info), \ - INTEL_VGA_DEVICE(0x56B0, info) + INTEL_VGA_DEVICE(0x5698, info), \ + INTEL_VGA_DEVICE(0x56A5, info), \ + INTEL_VGA_DEVICE(0x56A6, info), \ + INTEL_VGA_DEVICE(0x56B0, info), \ + INTEL_VGA_DEVICE(0x56B1, info) #define INTEL_DG2_G12_IDS(info) \ INTEL_VGA_DEVICE(0x5696, info), \ INTEL_VGA_DEVICE(0x5697, info), \ - INTEL_VGA_DEVICE(0x56B2, info) + INTEL_VGA_DEVICE(0x56A3, info), \ + INTEL_VGA_DEVICE(0x56A4, info), \ + INTEL_VGA_DEVICE(0x56B2, info), \ + INTEL_VGA_DEVICE(0x56B3, info) #define INTEL_DG2_IDS(info) \ INTEL_DG2_G10_IDS(info), \ INTEL_DG2_G11_IDS(info), \ INTEL_DG2_G12_IDS(info) +#define INTEL_ATS_M150_IDS(info) \ + INTEL_VGA_DEVICE(0x56C0, info) + +#define INTEL_ATS_M75_IDS(info) \ + INTEL_VGA_DEVICE(0x56C1, info) + +#define INTEL_ATS_M_IDS(info) \ + INTEL_ATS_M150_IDS(info), \ + INTEL_ATS_M75_IDS(info) +/* MTL */ +#define INTEL_MTL_M_IDS(info) \ + INTEL_VGA_DEVICE(0x7D40, info), \ + INTEL_VGA_DEVICE(0x7D60, info) + +#define INTEL_MTL_P_IDS(info) \ + INTEL_VGA_DEVICE(0x7D45, info), \ + INTEL_VGA_DEVICE(0x7D55, info), \ + INTEL_VGA_DEVICE(0x7DD5, info) + +#define INTEL_MTL_IDS(info) \ + INTEL_MTL_M_IDS(info), \ + INTEL_MTL_P_IDS(info) + #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 67530bfef129..cb0d5b7200c7 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h @@ -10,24 +10,24 @@ struct agp_bridge_data; struct pci_dev; struct sg_table; -void intel_gtt_get(u64 *gtt_total, - phys_addr_t *mappable_base, - resource_size_t *mappable_end); +void intel_gmch_gtt_get(u64 *gtt_total, + phys_addr_t *mappable_base, + resource_size_t *mappable_end); int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, struct agp_bridge_data *bridge); void intel_gmch_remove(void); -bool intel_enable_gtt(void); +bool intel_gmch_enable_gtt(void); -void intel_gtt_chipset_flush(void); -void intel_gtt_insert_page(dma_addr_t addr, - unsigned int pg, - unsigned int flags); -void intel_gtt_insert_sg_entries(struct sg_table *st, - unsigned int pg_start, - unsigned int flags); -void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); +void intel_gmch_gtt_flush(void); +void intel_gmch_gtt_insert_page(dma_addr_t addr, + unsigned int pg, + unsigned int flags); +void intel_gmch_gtt_insert_sg_entries(struct sg_table *st, + unsigned int pg_start, + unsigned int flags); +void intel_gmch_gtt_clear_range(unsigned int first_entry, unsigned int num_entries); /* Special gtt memory types */ #define AGP_DCACHE_MEMORY 1 diff --git a/include/dt-bindings/clock/bcm21664.h b/include/dt-bindings/clock/bcm21664.h index 5a7f0e4750a8..7c7492742f3d 100644 --- a/include/dt-bindings/clock/bcm21664.h +++ 
b/include/dt-bindings/clock/bcm21664.h @@ -1,15 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Broadcom Corporation * Copyright 2013 Linaro Limited - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _CLOCK_BCM21664_H diff --git a/include/dt-bindings/clock/bcm281xx.h b/include/dt-bindings/clock/bcm281xx.h index a763460cf1af..d74ca42112e7 100644 --- a/include/dt-bindings/clock/bcm281xx.h +++ b/include/dt-bindings/clock/bcm281xx.h @@ -1,15 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2013 Broadcom Corporation * Copyright 2013 Linaro Limited - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _CLOCK_BCM281XX_H diff --git a/include/dt-bindings/clock/efm32-cmu.h b/include/dt-bindings/clock/efm32-cmu.h deleted file mode 100644 index 4b48d15fe194..000000000000 --- a/include/dt-bindings/clock/efm32-cmu.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __DT_BINDINGS_CLOCK_EFM32_CMU_H -#define __DT_BINDINGS_CLOCK_EFM32_CMU_H - -#define clk_HFXO 0 -#define clk_HFRCO 1 -#define clk_LFXO 2 -#define clk_LFRCO 3 -#define clk_ULFRCO 4 -#define clk_AUXHFRCO 5 -#define clk_HFCLKNODIV 6 -#define clk_HFCLK 7 -#define clk_HFPERCLK 8 -#define clk_HFCORECLK 9 -#define clk_LFACLK 10 -#define clk_LFBCLK 11 -#define clk_WDOGCLK 12 -#define clk_HFCORECLKDMA 13 -#define clk_HFCORECLKAES 14 -#define clk_HFCORECLKUSBC 15 -#define clk_HFCORECLKUSB 16 -#define clk_HFCORECLKLE 17 -#define clk_HFCORECLKEBI 18 -#define clk_HFPERCLKUSART0 19 -#define clk_HFPERCLKUSART1 20 -#define clk_HFPERCLKUSART2 21 -#define clk_HFPERCLKUART0 22 -#define clk_HFPERCLKUART1 23 -#define clk_HFPERCLKTIMER0 24 -#define clk_HFPERCLKTIMER1 25 -#define clk_HFPERCLKTIMER2 26 -#define clk_HFPERCLKTIMER3 27 -#define clk_HFPERCLKACMP0 28 -#define clk_HFPERCLKACMP1 29 -#define clk_HFPERCLKI2C0 30 -#define clk_HFPERCLKI2C1 31 -#define clk_HFPERCLKGPIO 32 -#define clk_HFPERCLKVCMP 33 -#define clk_HFPERCLKPRS 34 -#define clk_HFPERCLKADC0 35 -#define clk_HFPERCLKDAC0 36 - -#endif /* __DT_BINDINGS_CLOCK_EFM32_CMU_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-ipq8074.h b/include/dt-bindings/clock/qcom,gcc-ipq8074.h index 55f8322a1e50..e4991d303708 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h @@ -233,6 +233,7 @@ #define GCC_PCIE0_AXI_S_BRIDGE_CLK 224 #define GCC_PCIE0_RCHNG_CLK_SRC 225 #define GCC_PCIE0_RCHNG_CLK 226 +#define GCC_CRYPTO_PPE_CLK 227 #define GCC_BLSP1_BCR 0 #define GCC_BLSP1_QUP1_BCR 1 diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h index 0634467c4ce5..2d545ed0d35a 100644 --- 
a/include/dt-bindings/clock/qcom,gcc-msm8939.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h @@ -192,6 +192,7 @@ #define GCC_VENUS0_CORE0_VCODEC0_CLK 183 #define GCC_VENUS0_CORE1_VCODEC0_CLK 184 #define GCC_OXILI_TIMER_CLK 185 +#define SYSTEM_MM_NOC_BFDCD_CLK_SRC 186 /* Indexes for GDSCs */ #define BIMC_GDSC 0 diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h index 27e232733096..77cde8effdc7 100644 --- a/include/dt-bindings/clock/r9a07g043-cpg.h +++ b/include/dt-bindings/clock/r9a07g043-cpg.h @@ -108,6 +108,15 @@ #define R9A07G043_ADC_ADCLK 76 #define R9A07G043_ADC_PCLK 77 #define R9A07G043_TSU_PCLK 78 +#define R9A07G043_NCEPLDM_DM_CLK 79 /* RZ/Five Only */ +#define R9A07G043_NCEPLDM_ACLK 80 /* RZ/Five Only */ +#define R9A07G043_NCEPLDM_TCK 81 /* RZ/Five Only */ +#define R9A07G043_NCEPLMT_ACLK 82 /* RZ/Five Only */ +#define R9A07G043_NCEPLIC_ACLK 83 /* RZ/Five Only */ +#define R9A07G043_AX45MP_CORE0_CLK 84 /* RZ/Five Only */ +#define R9A07G043_AX45MP_ACLK 85 /* RZ/Five Only */ +#define R9A07G043_IAX45_CLK 86 /* RZ/Five Only */ +#define R9A07G043_IAX45_PCLK 87 /* RZ/Five Only */ /* R9A07G043 Resets */ #define R9A07G043_CA55_RST_1_0 0 /* RZ/G2UL Only */ @@ -180,5 +189,16 @@ #define R9A07G043_ADC_PRESETN 67 #define R9A07G043_ADC_ADRST_N 68 #define R9A07G043_TSU_PRESETN 69 +#define R9A07G043_NCEPLDM_DTM_PWR_RST_N 70 /* RZ/Five Only */ +#define R9A07G043_NCEPLDM_ARESETN 71 /* RZ/Five Only */ +#define R9A07G043_NCEPLMT_POR_RSTN 72 /* RZ/Five Only */ +#define R9A07G043_NCEPLMT_ARESETN 73 /* RZ/Five Only */ +#define R9A07G043_NCEPLIC_ARESETN 74 /* RZ/Five Only */ +#define R9A07G043_AX45MP_ARESETNM 75 /* RZ/Five Only */ +#define R9A07G043_AX45MP_ARESETNS 76 /* RZ/Five Only */ +#define R9A07G043_AX45MP_L2_RESETN 77 /* RZ/Five Only */ +#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */ +#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */ + #endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */ diff --git a/include/dt-bindings/clock/sprd,ums512-clk.h b/include/dt-bindings/clock/sprd,ums512-clk.h new file mode 100644 index 000000000000..4f1d90849944 --- /dev/null +++ b/include/dt-bindings/clock/sprd,ums512-clk.h @@ -0,0 +1,397 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Unisoc UMS512 SoC DTS file + * + * Copyright (C) 2022, Unisoc Inc. 
+ */ + +#ifndef _DT_BINDINGS_CLK_UMS512_H_ +#define _DT_BINDINGS_CLK_UMS512_H_ + +#define CLK_26M_AUD 0 +#define CLK_13M 1 +#define CLK_6M5 2 +#define CLK_4M3 3 +#define CLK_2M 4 +#define CLK_1M 5 +#define CLK_250K 6 +#define CLK_RCO_25M 7 +#define CLK_RCO_4M 8 +#define CLK_RCO_2M 9 +#define CLK_ISPPLL_GATE 10 +#define CLK_DPLL0_GATE 11 +#define CLK_DPLL1_GATE 12 +#define CLK_LPLL_GATE 13 +#define CLK_TWPLL_GATE 14 +#define CLK_GPLL_GATE 15 +#define CLK_RPLL_GATE 16 +#define CLK_CPPLL_GATE 17 +#define CLK_MPLL0_GATE 18 +#define CLK_MPLL1_GATE 19 +#define CLK_MPLL2_GATE 20 +#define CLK_PMU_GATE_NUM (CLK_MPLL2_GATE + 1) + +#define CLK_DPLL0 0 +#define CLK_DPLL0_58M31 1 +#define CLK_ANLG_PHY_G0_NUM (CLK_DPLL0_58M31 + 1) + +#define CLK_MPLL1 0 +#define CLK_MPLL1_63M38 1 +#define CLK_ANLG_PHY_G2_NUM (CLK_MPLL1_63M38 + 1) + +#define CLK_RPLL 0 +#define CLK_AUDIO_GATE 1 +#define CLK_MPLL0 2 +#define CLK_MPLL0_56M88 3 +#define CLK_MPLL2 4 +#define CLK_MPLL2_47M13 5 +#define CLK_ANLG_PHY_G3_NUM (CLK_MPLL2_47M13 + 1) + +#define CLK_TWPLL 0 +#define CLK_TWPLL_768M 1 +#define CLK_TWPLL_384M 2 +#define CLK_TWPLL_192M 3 +#define CLK_TWPLL_96M 4 +#define CLK_TWPLL_48M 5 +#define CLK_TWPLL_24M 6 +#define CLK_TWPLL_12M 7 +#define CLK_TWPLL_512M 8 +#define CLK_TWPLL_256M 9 +#define CLK_TWPLL_128M 10 +#define CLK_TWPLL_64M 11 +#define CLK_TWPLL_307M2 12 +#define CLK_TWPLL_219M4 13 +#define CLK_TWPLL_170M6 14 +#define CLK_TWPLL_153M6 15 +#define CLK_TWPLL_76M8 16 +#define CLK_TWPLL_51M2 17 +#define CLK_TWPLL_38M4 18 +#define CLK_TWPLL_19M2 19 +#define CLK_TWPLL_12M29 20 +#define CLK_LPLL 21 +#define CLK_LPLL_614M4 22 +#define CLK_LPLL_409M6 23 +#define CLK_LPLL_245M76 24 +#define CLK_LPLL_30M72 25 +#define CLK_ISPPLL 26 +#define CLK_ISPPLL_468M 27 +#define CLK_ISPPLL_78M 28 +#define CLK_GPLL 29 +#define CLK_GPLL_40M 30 +#define CLK_CPPLL 31 +#define CLK_CPPLL_39M32 32 +#define CLK_ANLG_PHY_GC_NUM (CLK_CPPLL_39M32 + 1) + +#define CLK_AP_APB 0 +#define CLK_IPI 1 +#define CLK_AP_UART0 2 +#define CLK_AP_UART1 3 +#define CLK_AP_UART2 4 +#define CLK_AP_I2C0 5 +#define CLK_AP_I2C1 6 +#define CLK_AP_I2C2 7 +#define CLK_AP_I2C3 8 +#define CLK_AP_I2C4 9 +#define CLK_AP_SPI0 10 +#define CLK_AP_SPI1 11 +#define CLK_AP_SPI2 12 +#define CLK_AP_SPI3 13 +#define CLK_AP_IIS0 14 +#define CLK_AP_IIS1 15 +#define CLK_AP_IIS2 16 +#define CLK_AP_SIM 17 +#define CLK_AP_CE 18 +#define CLK_SDIO0_2X 19 +#define CLK_SDIO1_2X 20 +#define CLK_EMMC_2X 21 +#define CLK_VSP 22 +#define CLK_DISPC0 23 +#define CLK_DISPC0_DPI 24 +#define CLK_DSI_APB 25 +#define CLK_DSI_RXESC 26 +#define CLK_DSI_LANEBYTE 27 +#define CLK_VDSP 28 +#define CLK_VDSP_M 29 +#define CLK_AP_CLK_NUM (CLK_VDSP_M + 1) + +#define CLK_DSI_EB 0 +#define CLK_DISPC_EB 1 +#define CLK_VSP_EB 2 +#define CLK_VDMA_EB 3 +#define CLK_DMA_PUB_EB 4 +#define CLK_DMA_SEC_EB 5 +#define CLK_IPI_EB 6 +#define CLK_AHB_CKG_EB 7 +#define CLK_BM_CLK_EB 8 +#define CLK_AP_AHB_GATE_NUM (CLK_BM_CLK_EB + 1) + +#define CLK_AON_APB 0 +#define CLK_ADI 1 +#define CLK_AUX0 2 +#define CLK_AUX1 3 +#define CLK_AUX2 4 +#define CLK_PROBE 5 +#define CLK_PWM0 6 +#define CLK_PWM1 7 +#define CLK_PWM2 8 +#define CLK_PWM3 9 +#define CLK_EFUSE 10 +#define CLK_UART0 11 +#define CLK_UART1 12 +#define CLK_THM0 13 +#define CLK_THM1 14 +#define CLK_THM2 15 +#define CLK_THM3 16 +#define CLK_AON_I2C 17 +#define CLK_AON_IIS 18 +#define CLK_SCC 19 +#define CLK_APCPU_DAP 20 +#define CLK_APCPU_DAP_MTCK 21 +#define CLK_APCPU_TS 22 +#define CLK_DEBUG_TS 23 +#define CLK_DSI_TEST_S 24 +#define CLK_DJTAG_TCK 25 +#define 
CLK_DJTAG_TCK_HW 26 +#define CLK_AON_TMR 27 +#define CLK_AON_PMU 28 +#define CLK_DEBOUNCE 29 +#define CLK_APCPU_PMU 30 +#define CLK_TOP_DVFS 31 +#define CLK_OTG_UTMI 32 +#define CLK_OTG_REF 33 +#define CLK_CSSYS 34 +#define CLK_CSSYS_PUB 35 +#define CLK_CSSYS_APB 36 +#define CLK_AP_AXI 37 +#define CLK_AP_MM 38 +#define CLK_SDIO2_2X 39 +#define CLK_ANALOG_IO_APB 40 +#define CLK_DMC_REF_CLK 41 +#define CLK_EMC 42 +#define CLK_USB 43 +#define CLK_26M_PMU 44 +#define CLK_AON_APB_NUM (CLK_26M_PMU + 1) + +#define CLK_MM_AHB 0 +#define CLK_MM_MTX 1 +#define CLK_SENSOR0 2 +#define CLK_SENSOR1 3 +#define CLK_SENSOR2 4 +#define CLK_CPP 5 +#define CLK_JPG 6 +#define CLK_FD 7 +#define CLK_DCAM_IF 8 +#define CLK_DCAM_AXI 9 +#define CLK_ISP 10 +#define CLK_MIPI_CSI0 11 +#define CLK_MIPI_CSI1 12 +#define CLK_MIPI_CSI2 13 +#define CLK_MM_CLK_NUM (CLK_MIPI_CSI2 + 1) + +#define CLK_RC100M_CAL_EB 0 +#define CLK_DJTAG_TCK_EB 1 +#define CLK_DJTAG_EB 2 +#define CLK_AUX0_EB 3 +#define CLK_AUX1_EB 4 +#define CLK_AUX2_EB 5 +#define CLK_PROBE_EB 6 +#define CLK_MM_EB 7 +#define CLK_GPU_EB 8 +#define CLK_MSPI_EB 9 +#define CLK_APCPU_DAP_EB 10 +#define CLK_AON_CSSYS_EB 11 +#define CLK_CSSYS_APB_EB 12 +#define CLK_CSSYS_PUB_EB 13 +#define CLK_SDPHY_CFG_EB 14 +#define CLK_SDPHY_REF_EB 15 +#define CLK_EFUSE_EB 16 +#define CLK_GPIO_EB 17 +#define CLK_MBOX_EB 18 +#define CLK_KPD_EB 19 +#define CLK_AON_SYST_EB 20 +#define CLK_AP_SYST_EB 21 +#define CLK_AON_TMR_EB 22 +#define CLK_OTG_UTMI_EB 23 +#define CLK_OTG_PHY_EB 24 +#define CLK_SPLK_EB 25 +#define CLK_PIN_EB 26 +#define CLK_ANA_EB 27 +#define CLK_APCPU_TS0_EB 28 +#define CLK_APB_BUSMON_EB 29 +#define CLK_AON_IIS_EB 30 +#define CLK_SCC_EB 31 +#define CLK_THM0_EB 32 +#define CLK_THM1_EB 33 +#define CLK_THM2_EB 34 +#define CLK_ASIM_TOP_EB 35 +#define CLK_I2C_EB 36 +#define CLK_PMU_EB 37 +#define CLK_ADI_EB 38 +#define CLK_EIC_EB 39 +#define CLK_AP_INTC0_EB 40 +#define CLK_AP_INTC1_EB 41 +#define CLK_AP_INTC2_EB 42 +#define CLK_AP_INTC3_EB 43 +#define CLK_AP_INTC4_EB 44 +#define CLK_AP_INTC5_EB 45 +#define CLK_AUDCP_INTC_EB 46 +#define CLK_AP_TMR0_EB 47 +#define CLK_AP_TMR1_EB 48 +#define CLK_AP_TMR2_EB 49 +#define CLK_PWM0_EB 50 +#define CLK_PWM1_EB 51 +#define CLK_PWM2_EB 52 +#define CLK_PWM3_EB 53 +#define CLK_AP_WDG_EB 54 +#define CLK_APCPU_WDG_EB 55 +#define CLK_SERDES_EB 56 +#define CLK_ARCH_RTC_EB 57 +#define CLK_KPD_RTC_EB 58 +#define CLK_AON_SYST_RTC_EB 59 +#define CLK_AP_SYST_RTC_EB 60 +#define CLK_AON_TMR_RTC_EB 61 +#define CLK_EIC_RTC_EB 62 +#define CLK_EIC_RTCDV5_EB 63 +#define CLK_AP_WDG_RTC_EB 64 +#define CLK_AC_WDG_RTC_EB 65 +#define CLK_AP_TMR0_RTC_EB 66 +#define CLK_AP_TMR1_RTC_EB 67 +#define CLK_AP_TMR2_RTC_EB 68 +#define CLK_DCXO_LC_RTC_EB 69 +#define CLK_BB_CAL_RTC_EB 70 +#define CLK_AP_EMMC_RTC_EB 71 +#define CLK_AP_SDIO0_RTC_EB 72 +#define CLK_AP_SDIO1_RTC_EB 73 +#define CLK_AP_SDIO2_RTC_EB 74 +#define CLK_DSI_CSI_TEST_EB 75 +#define CLK_DJTAG_TCK_EN 76 +#define CLK_DPHY_REF_EB 77 +#define CLK_DMC_REF_EB 78 +#define CLK_OTG_REF_EB 79 +#define CLK_TSEN_EB 80 +#define CLK_TMR_EB 81 +#define CLK_RC100M_REF_EB 82 +#define CLK_RC100M_FDK_EB 83 +#define CLK_DEBOUNCE_EB 84 +#define CLK_DET_32K_EB 85 +#define CLK_TOP_CSSYS_EB 86 +#define CLK_AP_AXI_EN 87 +#define CLK_SDIO0_2X_EN 88 +#define CLK_SDIO0_1X_EN 89 +#define CLK_SDIO1_2X_EN 90 +#define CLK_SDIO1_1X_EN 91 +#define CLK_SDIO2_2X_EN 92 +#define CLK_SDIO2_1X_EN 93 +#define CLK_EMMC_2X_EN 94 +#define CLK_EMMC_1X_EN 95 +#define CLK_PLL_TEST_EN 96 +#define CLK_CPHY_CFG_EN 97 +#define CLK_DEBUG_TS_EN 98 
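Each block of IDs in this header ends with a *_NUM macro defined as the last index plus one (CLK_AP_CLK_NUM above, CLK_AON_APB_GATE_NUM just below, and so on). A minimal consumer-side sketch of that convention; the check is illustrative and not taken from the Unisoc clock driver:

#include <linux/build_bug.h>
#include <dt-bindings/clock/sprd,ums512-clk.h>

/* CLK_VDSP_M is 29, so the AP clock block exposes exactly 30 IDs. */
static_assert(CLK_AP_CLK_NUM == CLK_VDSP_M + 1);
static_assert(CLK_AP_CLK_NUM == 30);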
+#define CLK_ACCESS_AUD_EN 99 +#define CLK_AON_APB_GATE_NUM (CLK_ACCESS_AUD_EN + 1) + +#define CLK_MM_CPP_EB 0 +#define CLK_MM_JPG_EB 1 +#define CLK_MM_DCAM_EB 2 +#define CLK_MM_ISP_EB 3 +#define CLK_MM_CSI2_EB 4 +#define CLK_MM_CSI1_EB 5 +#define CLK_MM_CSI0_EB 6 +#define CLK_MM_CKG_EB 7 +#define CLK_ISP_AHB_EB 8 +#define CLK_MM_DVFS_EB 9 +#define CLK_MM_FD_EB 10 +#define CLK_MM_SENSOR2_EB 11 +#define CLK_MM_SENSOR1_EB 12 +#define CLK_MM_SENSOR0_EB 13 +#define CLK_MM_MIPI_CSI2_EB 14 +#define CLK_MM_MIPI_CSI1_EB 15 +#define CLK_MM_MIPI_CSI0_EB 16 +#define CLK_DCAM_AXI_EB 17 +#define CLK_ISP_AXI_EB 18 +#define CLK_MM_CPHY_EB 19 +#define CLK_MM_GATE_CLK_NUM (CLK_MM_CPHY_EB + 1) + +#define CLK_SIM0_EB 0 +#define CLK_IIS0_EB 1 +#define CLK_IIS1_EB 2 +#define CLK_IIS2_EB 3 +#define CLK_APB_REG_EB 4 +#define CLK_SPI0_EB 5 +#define CLK_SPI1_EB 6 +#define CLK_SPI2_EB 7 +#define CLK_SPI3_EB 8 +#define CLK_I2C0_EB 9 +#define CLK_I2C1_EB 10 +#define CLK_I2C2_EB 11 +#define CLK_I2C3_EB 12 +#define CLK_I2C4_EB 13 +#define CLK_UART0_EB 14 +#define CLK_UART1_EB 15 +#define CLK_UART2_EB 16 +#define CLK_SIM0_32K_EB 17 +#define CLK_SPI0_LFIN_EB 18 +#define CLK_SPI1_LFIN_EB 19 +#define CLK_SPI2_LFIN_EB 20 +#define CLK_SPI3_LFIN_EB 21 +#define CLK_SDIO0_EB 22 +#define CLK_SDIO1_EB 23 +#define CLK_SDIO2_EB 24 +#define CLK_EMMC_EB 25 +#define CLK_SDIO0_32K_EB 26 +#define CLK_SDIO1_32K_EB 27 +#define CLK_SDIO2_32K_EB 28 +#define CLK_EMMC_32K_EB 29 +#define CLK_AP_APB_GATE_NUM (CLK_EMMC_32K_EB + 1) + +#define CLK_GPU_CORE_EB 0 +#define CLK_GPU_CORE 1 +#define CLK_GPU_MEM_EB 2 +#define CLK_GPU_MEM 3 +#define CLK_GPU_SYS_EB 4 +#define CLK_GPU_SYS 5 +#define CLK_GPU_CLK_NUM (CLK_GPU_SYS + 1) + +#define CLK_AUDCP_IIS0_EB 0 +#define CLK_AUDCP_IIS1_EB 1 +#define CLK_AUDCP_IIS2_EB 2 +#define CLK_AUDCP_UART_EB 3 +#define CLK_AUDCP_DMA_CP_EB 4 +#define CLK_AUDCP_DMA_AP_EB 5 +#define CLK_AUDCP_SRC48K_EB 6 +#define CLK_AUDCP_MCDT_EB 7 +#define CLK_AUDCP_VBCIFD_EB 8 +#define CLK_AUDCP_VBC_EB 9 +#define CLK_AUDCP_SPLK_EB 10 +#define CLK_AUDCP_ICU_EB 11 +#define CLK_AUDCP_DMA_AP_ASHB_EB 12 +#define CLK_AUDCP_DMA_CP_ASHB_EB 13 +#define CLK_AUDCP_AUD_EB 14 +#define CLK_AUDCP_VBC_24M_EB 15 +#define CLK_AUDCP_TMR_26M_EB 16 +#define CLK_AUDCP_DVFS_ASHB_EB 17 +#define CLK_AUDCP_AHB_GATE_NUM (CLK_AUDCP_DVFS_ASHB_EB + 1) + +#define CLK_AUDCP_WDG_EB 0 +#define CLK_AUDCP_RTC_WDG_EB 1 +#define CLK_AUDCP_TMR0_EB 2 +#define CLK_AUDCP_TMR1_EB 3 +#define CLK_AUDCP_APB_GATE_NUM (CLK_AUDCP_TMR1_EB + 1) + +#define CLK_ACORE0 0 +#define CLK_ACORE1 1 +#define CLK_ACORE2 2 +#define CLK_ACORE3 3 +#define CLK_ACORE4 4 +#define CLK_ACORE5 5 +#define CLK_PCORE0 6 +#define CLK_PCORE1 7 +#define CLK_SCU 8 +#define CLK_ACE 9 +#define CLK_PERIPH 10 +#define CLK_GIC 11 +#define CLK_ATB 12 +#define CLK_DEBUG_APB 13 +#define CLK_APCPU_SEC_NUM (CLK_DEBUG_APB + 1) + +#endif /* _DT_BINDINGS_CLK_UMS512_H_ */ diff --git a/include/dt-bindings/clock/ti-dra7-atl.h b/include/dt-bindings/clock/ti-dra7-atl.h index 42dd4164f6f4..b0e71e3cce95 100644 --- a/include/dt-bindings/clock/ti-dra7-atl.h +++ b/include/dt-bindings/clock/ti-dra7-atl.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for DRA7 ATL (Audio Tracking Logic) * @@ -6,15 +7,6 @@ * Copyright (C) 2013 Texas Instruments, Inc. * * Peter Ujfalusi <peter.ujfalusi@ti.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_CLK_DRA7_ATL_H diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h index c029467e828b..5566e58196a2 100644 --- a/include/dt-bindings/gpio/gpio.h +++ b/include/dt-bindings/gpio/gpio.h @@ -39,4 +39,7 @@ /* Bit 5 express pull down */ #define GPIO_PULL_DOWN 32 +/* Bit 6 express pull disable */ +#define GPIO_PULL_DISABLE 64 + #endif diff --git a/include/dt-bindings/interconnect/fsl,imx8mp.h b/include/dt-bindings/interconnect/fsl,imx8mp.h new file mode 100644 index 000000000000..7357d417529a --- /dev/null +++ b/include/dt-bindings/interconnect/fsl,imx8mp.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/* + * Interconnect framework driver for i.MX SoC + * + * Copyright 2022 NXP + * Peng Fan <peng.fan@nxp.com> + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MP_H +#define __DT_BINDINGS_INTERCONNECT_IMX8MP_H + +#define IMX8MP_ICN_NOC 0 +#define IMX8MP_ICN_MAIN 1 +#define IMX8MP_ICS_DRAM 2 +#define IMX8MP_ICS_OCRAM 3 +#define IMX8MP_ICM_A53 4 +#define IMX8MP_ICM_SUPERMIX 5 +#define IMX8MP_ICM_GIC 6 +#define IMX8MP_ICM_MLMIX 7 + +#define IMX8MP_ICN_AUDIO 8 +#define IMX8MP_ICM_DSP 9 +#define IMX8MP_ICM_SDMA2PER 10 +#define IMX8MP_ICM_SDMA2BURST 11 +#define IMX8MP_ICM_SDMA3PER 12 +#define IMX8MP_ICM_SDMA3BURST 13 +#define IMX8MP_ICM_EDMA 14 + +#define IMX8MP_ICN_GPU 15 +#define IMX8MP_ICM_GPU2D 16 +#define IMX8MP_ICM_GPU3D 17 + +#define IMX8MP_ICN_HDMI 18 +#define IMX8MP_ICM_HRV 19 +#define IMX8MP_ICM_LCDIF_HDMI 20 +#define IMX8MP_ICM_HDCP 21 + +#define IMX8MP_ICN_HSIO 22 +#define IMX8MP_ICM_NOC_PCIE 23 +#define IMX8MP_ICM_USB1 24 +#define IMX8MP_ICM_USB2 25 +#define IMX8MP_ICM_PCIE 26 + +#define IMX8MP_ICN_MEDIA 27 +#define IMX8MP_ICM_LCDIF_RD 28 +#define IMX8MP_ICM_LCDIF_WR 29 +#define IMX8MP_ICM_ISI0 30 +#define IMX8MP_ICM_ISI1 31 +#define IMX8MP_ICM_ISI2 32 +#define IMX8MP_ICM_ISP0 33 +#define IMX8MP_ICM_ISP1 34 +#define IMX8MP_ICM_DWE 35 + +#define IMX8MP_ICN_VIDEO 36 +#define IMX8MP_ICM_VPU_G1 37 +#define IMX8MP_ICM_VPU_G2 38 +#define IMX8MP_ICM_VPU_H1 39 + +#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MP_H */ diff --git a/include/dt-bindings/interconnect/qcom,sm6350.h b/include/dt-bindings/interconnect/qcom,sm6350.h new file mode 100644 index 000000000000..e662cede9aaa --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sm6350.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Qualcomm SM6350 interconnect IDs + * + * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com> + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SM6350_H + +#define MASTER_A1NOC_CFG 0 +#define MASTER_QUP_0 1 +#define MASTER_EMMC 2 +#define MASTER_UFS_MEM 3 +#define A1NOC_SNOC_SLV 4 +#define SLAVE_SERVICE_A1NOC 5 + +#define MASTER_A2NOC_CFG 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_QUP_1 2 +#define MASTER_CRYPTO_CORE_0 3 +#define MASTER_IPA 4 +#define MASTER_QDSS_ETR 5 +#define MASTER_SDCC_2 6 +#define MASTER_USB3 7 +#define A2NOC_SNOC_SLV 8 +#define SLAVE_SERVICE_A2NOC 9 + +#define MASTER_CAMNOC_HF0_UNCOMP 0 +#define MASTER_CAMNOC_ICP_UNCOMP 1 +#define MASTER_CAMNOC_SF_UNCOMP 2 +#define MASTER_QUP_CORE_0 3 +#define MASTER_QUP_CORE_1 4 +#define MASTER_LLCC 5 +#define SLAVE_CAMNOC_UNCOMP 6 +#define SLAVE_QUP_CORE_0 
7 +#define SLAVE_QUP_CORE_1 8 +#define SLAVE_EBI_CH0 9 + +#define MASTER_NPU 0 +#define MASTER_NPU_PROC 1 +#define SLAVE_CDSP_GEM_NOC 2 + +#define SNOC_CNOC_MAS 0 +#define MASTER_QDSS_DAP 1 +#define SLAVE_A1NOC_CFG 2 +#define SLAVE_A2NOC_CFG 3 +#define SLAVE_AHB2PHY 4 +#define SLAVE_AHB2PHY_2 5 +#define SLAVE_AOSS 6 +#define SLAVE_BOOT_ROM 7 +#define SLAVE_CAMERA_CFG 8 +#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9 +#define SLAVE_CAMERA_RT_THROTTLE_CFG 10 +#define SLAVE_CLK_CTL 11 +#define SLAVE_RBCPR_CX_CFG 12 +#define SLAVE_RBCPR_MX_CFG 13 +#define SLAVE_CRYPTO_0_CFG 14 +#define SLAVE_DCC_CFG 15 +#define SLAVE_CNOC_DDRSS 16 +#define SLAVE_DISPLAY_CFG 17 +#define SLAVE_DISPLAY_THROTTLE_CFG 18 +#define SLAVE_EMMC_CFG 19 +#define SLAVE_GLM 20 +#define SLAVE_GRAPHICS_3D_CFG 21 +#define SLAVE_IMEM_CFG 22 +#define SLAVE_IPA_CFG 23 +#define SLAVE_CNOC_MNOC_CFG 24 +#define SLAVE_CNOC_MSS 25 +#define SLAVE_NPU_CFG 26 +#define SLAVE_PDM 27 +#define SLAVE_PIMEM_CFG 28 +#define SLAVE_PRNG 29 +#define SLAVE_QDSS_CFG 30 +#define SLAVE_QM_CFG 31 +#define SLAVE_QM_MPU_CFG 32 +#define SLAVE_QUP_0 33 +#define SLAVE_QUP_1 34 +#define SLAVE_SDCC_2 35 +#define SLAVE_SECURITY 36 +#define SLAVE_SNOC_CFG 37 +#define SLAVE_TCSR 38 +#define SLAVE_UFS_MEM_CFG 39 +#define SLAVE_USB3 40 +#define SLAVE_VENUS_CFG 41 +#define SLAVE_VENUS_THROTTLE_CFG 42 +#define SLAVE_VSENSE_CTRL_CFG 43 +#define SLAVE_SERVICE_CNOC 44 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_GEM_NOC_CFG 1 +#define SLAVE_LLCC_CFG 2 + +#define MASTER_AMPSS_M0 0 +#define MASTER_SYS_TCU 1 +#define MASTER_GEM_NOC_CFG 2 +#define MASTER_COMPUTE_NOC 3 +#define MASTER_MNOC_HF_MEM_NOC 4 +#define MASTER_MNOC_SF_MEM_NOC 5 +#define MASTER_SNOC_GC_MEM_NOC 6 +#define MASTER_SNOC_SF_MEM_NOC 7 +#define MASTER_GRAPHICS_3D 8 +#define SLAVE_MCDMA_MS_MPU_CFG 9 +#define SLAVE_MSS_PROC_MS_MPU_CFG 10 +#define SLAVE_GEM_NOC_SNOC 11 +#define SLAVE_LLCC 12 +#define SLAVE_SERVICE_GEM_NOC 13 + +#define MASTER_CNOC_MNOC_CFG 0 +#define MASTER_VIDEO_P0 1 +#define MASTER_VIDEO_PROC 2 +#define MASTER_CAMNOC_HF 3 +#define MASTER_CAMNOC_ICP 4 +#define MASTER_CAMNOC_SF 5 +#define MASTER_MDP_PORT0 6 +#define SLAVE_MNOC_HF_MEM_NOC 7 +#define SLAVE_MNOC_SF_MEM_NOC 8 +#define SLAVE_SERVICE_MNOC 9 + +#define MASTER_NPU_SYS 0 +#define MASTER_NPU_NOC_CFG 1 +#define SLAVE_NPU_CAL_DP0 2 +#define SLAVE_NPU_CP 3 +#define SLAVE_NPU_INT_DMA_BWMON_CFG 4 +#define SLAVE_NPU_DPM 5 +#define SLAVE_ISENSE_CFG 6 +#define SLAVE_NPU_LLM_CFG 7 +#define SLAVE_NPU_TCM 8 +#define SLAVE_NPU_COMPUTE_NOC 9 +#define SLAVE_SERVICE_NPU_NOC 10 + +#define MASTER_SNOC_CFG 0 +#define A1NOC_SNOC_MAS 1 +#define A2NOC_SNOC_MAS 2 +#define MASTER_GEM_NOC_SNOC 3 +#define MASTER_PIMEM 4 +#define MASTER_GIC 5 +#define SLAVE_APPSS 6 +#define SNOC_CNOC_SLV 7 +#define SLAVE_SNOC_GEM_NOC_GC 8 +#define SLAVE_SNOC_GEM_NOC_SF 9 +#define SLAVE_OCIMEM 10 +#define SLAVE_PIMEM 11 +#define SLAVE_SERVICE_SNOC 12 +#define SLAVE_QDSS_STM 13 +#define SLAVE_TCU 14 + +#endif diff --git a/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h new file mode 100644 index 000000000000..f570b23165a2 --- /dev/null +++ b/include/dt-bindings/nvmem/microchip,sama7g5-otpc.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ + +#ifndef _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H +#define _DT_BINDINGS_NVMEM_MICROCHIP_OTPC_H + +/* + * Need to have it as a multiple of 4 as NVMEM memory is registered with + * stride = 4. 
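The stride-4 rule described here is exactly what the OTP_PKT() helper defined just below guarantees: every packet id maps to a byte offset that is a multiple of four. A trivial compile-time illustration (the packet id is an arbitrary example):

#include <linux/build_bug.h>
#include <dt-bindings/nvmem/microchip,sama7g5-otpc.h>

/* Packet 5 starts at byte offset 20; any OTP_PKT() result is 4-byte aligned. */
static_assert(OTP_PKT(5) == 20);
static_assert(OTP_PKT(5) % 4 == 0);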
+ */ +#define OTP_PKT(id) ((id) * 4) + +#endif diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h index 93064c750c8c..2175ec89c82f 100644 --- a/include/dt-bindings/pinctrl/hisi.h +++ b/include/dt-bindings/pinctrl/hisi.h @@ -1,17 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for hisilicon pinctrl bindings. * * Copyright (c) 2015 HiSilicon Limited. * Copyright (c) 2015 Linaro Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_PINCTRL_HISI_H diff --git a/include/dt-bindings/pinctrl/keystone.h b/include/dt-bindings/pinctrl/keystone.h index 7f97d776a8ff..66f8aecada53 100644 --- a/include/dt-bindings/pinctrl/keystone.h +++ b/include/dt-bindings/pinctrl/keystone.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * This header provides constants for Keystone pinctrl bindings. * * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_PINCTRL_KEYSTONE_H diff --git a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h index 2d0c23e5d3a7..8736ce038eca 100644 --- a/include/dt-bindings/pinctrl/r7s9210-pinctrl.h +++ b/include/dt-bindings/pinctrl/r7s9210-pinctrl.h @@ -42,6 +42,6 @@ /* * Convert a port and pin label to its global pin index */ - #define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin)) +#define RZA2_PIN(port, pin) ((port) * RZA2_PINS_PER_PORT + (pin)) #endif /* __DT_BINDINGS_PINCTRL_RENESAS_RZA2_H */ diff --git a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h index b48f8c7a5556..c78ed5e5efb7 100644 --- a/include/dt-bindings/pinctrl/rzg2l-pinctrl.h +++ b/include/dt-bindings/pinctrl/rzg2l-pinctrl.h @@ -18,6 +18,6 @@ #define RZG2L_PORT_PINMUX(b, p, f) ((b) * RZG2L_PINS_PER_PORT + (p) | ((f) << 16)) /* Convert a port and pin label to its global pin index */ - #define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin)) +#define RZG2L_GPIO(port, pin) ((port) * RZG2L_PINS_PER_PORT + (pin)) #endif /* __DT_BINDINGS_RZG2L_PINCTRL_H */ diff --git a/include/dt-bindings/pinctrl/rzv2m-pinctrl.h b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h new file mode 100644 index 000000000000..525532cd15da --- /dev/null +++ b/include/dt-bindings/pinctrl/rzv2m-pinctrl.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * This header provides constants for Renesas RZ/V2M pinctrl bindings. + * + * Copyright (C) 2022 Renesas Electronics Corp. 
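The Renesas pinctrl headers above and the RZ/V2M header starting here all encode a pin as port * PINS_PER_PORT + pin, with the alternate function stored in the upper 16 bits by the *_PORT_PINMUX() variants. A small sketch using the RZ/V2M macros defined just below (the port, pin and function numbers are arbitrary examples):

#include <linux/build_bug.h>
#include <dt-bindings/pinctrl/rzv2m-pinctrl.h>

/* Port 2, pin 5 -> global index 2 * 16 + 5 = 37; function 3 lands in bits 16-31. */
static_assert(RZV2M_GPIO(2, 5) == 37);
static_assert(RZV2M_PORT_PINMUX(2, 5, 3) == (37 | (3 << 16)));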
+ * + */ + +#ifndef __DT_BINDINGS_RZV2M_PINCTRL_H +#define __DT_BINDINGS_RZV2M_PINCTRL_H + +#define RZV2M_PINS_PER_PORT 16 + +/* + * Create the pin index from its bank and position numbers and store in + * the upper 16 bits the alternate function identifier + */ +#define RZV2M_PORT_PINMUX(b, p, f) ((b) * RZV2M_PINS_PER_PORT + (p) | ((f) << 16)) + +/* Convert a port and pin label to its global pin index */ +#define RZV2M_GPIO(port, pin) ((port) * RZV2M_PINS_PER_PORT + (pin)) + +#endif /* __DT_BINDINGS_RZV2M_PINCTRL_H */ diff --git a/include/dt-bindings/power/mt6797-power.h b/include/dt-bindings/power/mt6797-power.h index a60c1d81cf75..bd451d860e6a 100644 --- a/include/dt-bindings/power/mt6797-power.h +++ b/include/dt-bindings/power/mt6797-power.h @@ -1,14 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2017 MediaTek Inc. * Author: Mars.C <mars.cheng@mediatek.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _DT_BINDINGS_POWER_MT6797_POWER_H diff --git a/include/dt-bindings/reset/mt8186-resets.h b/include/dt-bindings/reset/mt8186-resets.h index 5f850370c42c..2e9029c22f38 100644 --- a/include/dt-bindings/reset/mt8186-resets.h +++ b/include/dt-bindings/reset/mt8186-resets.h @@ -7,6 +7,7 @@ #ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8186 #define _DT_BINDINGS_RESET_CONTROLLER_MT8186 +/* TOPRGU resets */ #define MT8186_TOPRGU_INFRA_SW_RST 0 #define MT8186_TOPRGU_MM_SW_RST 1 #define MT8186_TOPRGU_MFG_SW_RST 2 @@ -33,4 +34,8 @@ /* MMSYS resets */ #define MT8186_MMSYS_SW0_RST_B_DISP_DSI0 19 +/* INFRA resets */ +#define MT8186_INFRA_THERMAL_CTRL_RST 0 +#define MT8186_INFRA_PTP_CTRL_RST 1 + #endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8186 */ diff --git a/include/dt-bindings/reset/mt8192-resets.h b/include/dt-bindings/reset/mt8192-resets.h index 764ca9910fa9..12e2087c90a3 100644 --- a/include/dt-bindings/reset/mt8192-resets.h +++ b/include/dt-bindings/reset/mt8192-resets.h @@ -7,6 +7,7 @@ #ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8192 #define _DT_BINDINGS_RESET_CONTROLLER_MT8192 +/* TOPRGU resets */ #define MT8192_TOPRGU_MM_SW_RST 1 #define MT8192_TOPRGU_MFG_SW_RST 2 #define MT8192_TOPRGU_VENC_SW_RST 3 @@ -30,4 +31,11 @@ /* MMSYS resets */ #define MT8192_MMSYS_SW0_RST_B_DISP_DSI0 15 +/* INFRA resets */ +#define MT8192_INFRA_RST0_THERM_CTRL_SWRST 0 +#define MT8192_INFRA_RST2_PEXTP_PHY_SWRST 1 +#define MT8192_INFRA_RST3_THERM_CTRL_PTP_SWRST 2 +#define MT8192_INFRA_RST4_PCIE_TOP_SWRST 3 +#define MT8192_INFRA_RST4_THERM_CTRL_MCU_SWRST 4 + #endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8192 */ diff --git a/include/dt-bindings/reset/mt8195-resets.h b/include/dt-bindings/reset/mt8195-resets.h index a26bccc8b957..0b1937f14b36 100644 --- a/include/dt-bindings/reset/mt8195-resets.h +++ b/include/dt-bindings/reset/mt8195-resets.h @@ -7,6 +7,7 @@ #ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8195 #define _DT_BINDINGS_RESET_CONTROLLER_MT8195 +/* TOPRGU resets */ #define MT8195_TOPRGU_CONN_MCU_SW_RST 0 #define MT8195_TOPRGU_INFRA_GRST_SW_RST 1 #define MT8195_TOPRGU_APU_SW_RST 2 @@ -26,4 +27,9 @@ #define MT8195_TOPRGU_SW_RST_NUM 16 +/* INFRA resets */ +#define MT8195_INFRA_RST0_THERM_CTRL_SWRST 0 +#define MT8195_INFRA_RST3_THERM_CTRL_PTP_SWRST 1 +#define 
MT8195_INFRA_RST4_THERM_CTRL_MCU_SWRST 2 + #endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8195 */ diff --git a/include/dt-bindings/reset/sama7g5-reset.h b/include/dt-bindings/reset/sama7g5-reset.h new file mode 100644 index 000000000000..2116f41d04e0 --- /dev/null +++ b/include/dt-bindings/reset/sama7g5-reset.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef __DT_BINDINGS_RESET_SAMA7G5_H +#define __DT_BINDINGS_RESET_SAMA7G5_H + +#define SAMA7G5_RESET_USB_PHY1 4 +#define SAMA7G5_RESET_USB_PHY2 5 +#define SAMA7G5_RESET_USB_PHY3 6 + +#endif /* __DT_BINDINGS_RESET_SAMA7G5_H */ diff --git a/include/dt-bindings/sound/qcom,wcd9335.h b/include/dt-bindings/sound/qcom,wcd9335.h new file mode 100644 index 000000000000..f5e9f1db091e --- /dev/null +++ b/include/dt-bindings/sound/qcom,wcd9335.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef __DT_SOUND_QCOM_WCD9335_H +#define __DT_SOUND_QCOM_WCD9335_H + +#define AIF1_PB 0 +#define AIF1_CAP 1 +#define AIF2_PB 2 +#define AIF2_CAP 3 +#define AIF3_PB 4 +#define AIF3_CAP 5 +#define AIF4_PB 6 +#define NUM_CODEC_DAIS 7 + +#endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 2d8f2e90edc2..4df9e73a8bb5 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -364,7 +364,7 @@ struct vgic_cpu { extern struct static_key_false vgic_v2_cpuif_trap; extern struct static_key_false vgic_v3_cpuif_trap; -int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write); +int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr); void kvm_vgic_early_init(struct kvm *kvm); int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); int kvm_vgic_create(struct kvm *kvm, u32 type); diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 7e7a33b6c8d7..6f64b2f3dc54 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -764,6 +764,7 @@ static inline u64 acpi_arch_get_root_pointer(void) #endif int acpi_get_local_address(acpi_handle handle, u32 *addr); +const char *acpi_get_subsystem_id(acpi_handle handle); #else /* !CONFIG_ACPI */ @@ -1025,6 +1026,11 @@ static inline int acpi_get_local_address(acpi_handle handle, u32 *addr) return -ENODEV; } +static inline const char *acpi_get_subsystem_id(acpi_handle handle) +{ + return ERR_PTR(-ENODEV); +} + static inline int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), void *context) { @@ -1245,7 +1251,7 @@ static inline bool acpi_dev_has_props(const struct acpi_device *adev) struct acpi_device_properties * acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid, - const union acpi_object *properties); + union acpi_object *properties); int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname, void **valptr); @@ -1431,7 +1437,6 @@ int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); -int find_acpi_cpu_cache_topology(unsigned int cpu, int level); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1453,10 +1458,6 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } -static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) -{ - return -EINVAL; -} #endif #ifdef CONFIG_ACPI_PCC diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index f1f0842a2cb2..b43be0987b19 100644 --- 
a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -33,10 +33,14 @@ struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, enum irq_domain_bus_token bus_token); void acpi_configure_pmsi_domain(struct device *dev); int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id); +void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, + struct list_head *head); +void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, + struct list_head *head); /* IOMMU interface */ int iort_dma_get_ranges(struct device *dev, u64 *size); int iort_iommu_configure_id(struct device *dev, const u32 *id_in); -int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head); +void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head); phys_addr_t acpi_iort_dma_get_max_cpu_address(void); #else static inline void acpi_iort_init(void) { } @@ -46,14 +50,18 @@ static inline struct irq_domain *iort_get_device_domain( struct device *dev, u32 id, enum irq_domain_bus_token bus_token) { return NULL; } static inline void acpi_configure_pmsi_domain(struct device *dev) { } +static inline +void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { } +static inline +void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { } /* IOMMU interface */ static inline int iort_dma_get_ranges(struct device *dev, u64 *size) { return -ENODEV; } static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) { return -ENODEV; } static inline -int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head) -{ return 0; } +void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head) +{ } static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void) { return PHYS_ADDR_MAX; } diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 58e6c3806c09..953e6f12fa1c 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -206,4 +206,8 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value); struct amd_iommu *get_amd_iommu(unsigned int idx); +#ifdef CONFIG_AMD_MEM_ENCRYPT +int amd_iommu_snp_enable(void); +#endif + #endif /* _ASM_X86_AMD_IOMMU_H */ diff --git a/include/linux/aperture.h b/include/linux/aperture.h new file mode 100644 index 000000000000..442f15a57cad --- /dev/null +++ b/include/linux/aperture.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef _LINUX_APERTURE_H_ +#define _LINUX_APERTURE_H_ + +#include <linux/types.h> + +struct pci_dev; +struct platform_device; + +#if defined(CONFIG_APERTURE_HELPERS) +int devm_aperture_acquire_for_platform_device(struct platform_device *pdev, + resource_size_t base, + resource_size_t size); + +int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size, + bool primary, const char *name); + +int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name); +#else +static inline int devm_aperture_acquire_for_platform_device(struct platform_device *pdev, + resource_size_t base, + resource_size_t size) +{ + return 0; +} + +static inline int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t size, + bool primary, const char *name) +{ + return 0; +} + +static inline int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *name) +{ + return 0; +} +#endif + +/** + * aperture_remove_all_conflicting_devices - remove all existing framebuffers + * @primary: also kick vga16fb if present; only 
relevant for VGA devices + * @name: a descriptive name of the requesting driver + * + * This function removes all graphics device drivers. Use this function on systems + * that can have their framebuffer located anywhere in memory. + * + * Returns: + * 0 on success, or a negative errno code otherwise + */ +static inline int aperture_remove_all_conflicting_devices(bool primary, const char *name) +{ + return aperture_remove_conflicting_devices(0, (resource_size_t)-1, primary, name); +} + +#endif diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 58cbe18d825c..a07b510e7dc5 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -68,7 +68,6 @@ struct cpu_topology { int core_id; int cluster_id; int package_id; - int llc_id; cpumask_t thread_sibling; cpumask_t core_sibling; cpumask_t cluster_sibling; diff --git a/include/linux/atm_tcp.h b/include/linux/atm_tcp.h index c8ecf6f68fb5..2558439d849b 100644 --- a/include/linux/atm_tcp.h +++ b/include/linux/atm_tcp.h @@ -9,6 +9,8 @@ #include <uapi/linux/atm_tcp.h> +struct atm_vcc; +struct module; struct atm_tcp_ops { int (*attach)(struct atm_vcc *vcc,int itf); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index e863c88df95f..ae12696ec492 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -28,11 +28,6 @@ enum wb_state { WB_start_all, /* nr_pages == 0 (all) work pending */ }; -enum wb_congested_state { - WB_async_congested, /* The async (write) queue is getting full */ - WB_sync_congested, /* The sync queue is getting full */ -}; - enum wb_stat_item { WB_RECLAIMABLE, WB_WRITEBACK, @@ -122,8 +117,6 @@ struct bdi_writeback { atomic_t writeback_inodes; /* number of inodes under writeback */ struct percpu_counter stat[NR_WB_STAT_ITEMS]; - unsigned long congested; /* WB_[a]sync_congested flags */ - unsigned long bw_time_stamp; /* last time write bw is updated */ unsigned long dirtied_stamp; unsigned long written_stamp; /* pages written at bw_time_stamp */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index d452071db572..439815cc1ab9 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -140,12 +140,6 @@ static inline bool mapping_can_writeback(struct address_space *mapping) return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK; } -static inline int bdi_sched_wait(void *word) -{ - schedule(); - return 0; -} - #ifdef CONFIG_CGROUP_WRITEBACK struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, @@ -236,18 +230,6 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) } /** - * inode_to_wb_is_valid - test whether an inode has a wb associated - * @inode: inode of interest - * - * Returns %true if @inode has a wb associated. May be called without any - * locking. 
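The aperture helpers introduced above in <linux/aperture.h> are meant to be called early in a native graphics driver's probe path, before it claims the framebuffer ranges. A sketch of the PCI variant, assuming a hypothetical driver name and omitting the rest of probe:

#include <linux/aperture.h>
#include <linux/pci.h>

static int mydrv_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	/* Kick out generic framebuffer drivers bound to this device's BARs. */
	ret = aperture_remove_conflicting_pci_devices(pdev, "mydrv");
	if (ret)
		return ret;

	/* ... regular hardware setup continues here ... */
	return 0;
}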
- */ -static inline bool inode_to_wb_is_valid(struct inode *inode) -{ - return inode->i_wb; -} - -/** * inode_to_wb - determine the wb of an inode * @inode: inode of interest * @@ -345,11 +327,6 @@ wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp) return &bdi->wb; } -static inline bool inode_to_wb_is_valid(struct inode *inode) -{ - return true; -} - static inline struct bdi_writeback *inode_to_wb(struct inode *inode) { return &inode_to_bdi(inode)->wb; diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index edb7f6d41faa..5ca2d5699620 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -57,7 +57,6 @@ struct balloon_dev_info { struct list_head pages; /* Pages enqueued & handled to Host */ int (*migratepage)(struct balloon_dev_info *, struct page *newpage, struct page *page, enum migrate_mode mode); - struct inode *inode; }; extern struct page *balloon_page_alloc(void); @@ -75,11 +74,10 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) spin_lock_init(&balloon->pages_lock); INIT_LIST_HEAD(&balloon->pages); balloon->migratepage = NULL; - balloon->inode = NULL; } #ifdef CONFIG_BALLOON_COMPACTION -extern const struct address_space_operations balloon_aops; +extern const struct movable_operations balloon_mops; /* * balloon_page_insert - insert a page into the balloon's page list and make @@ -94,7 +92,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { __SetPageOffline(page); - __SetPageMovable(page, balloon->inode->i_mapping); + __SetPageMovable(page, &balloon_mops); set_page_private(page, (unsigned long)balloon); list_add(&page->lru, &balloon->pages); } diff --git a/include/linux/base64.h b/include/linux/base64.h new file mode 100644 index 000000000000..660d4cb1ef31 --- /dev/null +++ b/include/linux/base64.h @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * base64 encoding, lifted from fs/crypto/fname.c. 
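A usage sketch for the helpers declared just below: BASE64_CHARS() sizes the output buffer and base64_encode() returns the number of characters written. The wrapper function and its NUL-termination are illustrative, not part of the API:

#include <linux/base64.h>
#include <linux/slab.h>

static char *encode_blob(const u8 *raw, int raw_len)
{
	/* Worst-case encoded length, plus room for a terminating NUL. */
	char *buf = kmalloc(BASE64_CHARS(raw_len) + 1, GFP_KERNEL);
	int n;

	if (!buf)
		return NULL;

	n = base64_encode(raw, raw_len, buf);
	buf[n] = '\0';
	return buf;
}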
+ */ + +#ifndef _LINUX_BASE64_H +#define _LINUX_BASE64_H + +#include <linux/types.h> + +#define BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3) + +int base64_encode(const u8 *src, int len, char *dst); +int base64_decode(const char *src, int len, u8 *dst); + +#endif /* _LINUX_BASE64_H */ diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 2e6cd5681040..f65410a49fda 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -71,9 +71,9 @@ struct device; * bitmap_release_region(bitmap, pos, order) Free specified bit region * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst + * bitmap_from_arr64(dst, buf, nbits) Copy nbits from u64[] buf to dst * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst - * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst * bitmap_get_value8(map, start) Get 8bit value from map at start * bitmap_set_value8(map, value, start) Set 8bit value to map at start * @@ -148,13 +148,13 @@ void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, unsigned int shift, unsigned int nbits); void bitmap_cut(unsigned long *dst, const unsigned long *src, unsigned int first, unsigned int cut, unsigned int nbits); -int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, +bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); -int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, +bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); void __bitmap_replace(unsigned long *dst, const unsigned long *old, const unsigned long *new, @@ -163,7 +163,7 @@ bool __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); bool __bitmap_subset(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int nbits); -int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); +unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); void __bitmap_set(unsigned long *map, unsigned int start, int len); void __bitmap_clear(unsigned long *map, unsigned int start, int len); @@ -238,20 +238,32 @@ extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memset(dst, 0, len); + + if (small_const_nbits(nbits)) + *dst = 0; + else + memset(dst, 0, len); } static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) { unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memset(dst, 0xff, len); + + if (small_const_nbits(nbits)) + *dst = ~0UL; + else + memset(dst, 0xff, len); } static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits) { unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memcpy(dst, src, len); + + if (small_const_nbits(nbits)) + *dst = *src; + else + memcpy(dst, src, len); } /* @@ -303,7 +315,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int 
nbits); bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits)) #endif -static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, +static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) @@ -329,7 +341,7 @@ static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, __bitmap_xor(dst, src1, src2, nbits); } -static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, +static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1, const unsigned long *src2, unsigned int nbits) { if (small_const_nbits(nbits)) @@ -419,7 +431,8 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits) return find_first_zero_bit(src, nbits) == nbits; } -static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits) +static __always_inline +unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits) { if (small_const_nbits(nbits)) return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); @@ -431,6 +444,8 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start, { if (__builtin_constant_p(nbits) && nbits == 1) __set_bit(start, map); + else if (small_const_nbits(start + nbits)) + *map |= GENMASK(start + nbits - 1, start); else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && __builtin_constant_p(nbits & BITMAP_MEM_MASK) && @@ -445,6 +460,8 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, { if (__builtin_constant_p(nbits) && nbits == 1) __clear_bit(start, map); + else if (small_const_nbits(start + nbits)) + *map &= ~GENMASK(start + nbits - 1, start); else if (__builtin_constant_p(start & BITMAP_MEM_MASK) && IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) && __builtin_constant_p(nbits & BITMAP_MEM_MASK) && diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 7aaed501f768..cf9bf65039f2 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -27,11 +27,61 @@ extern unsigned int __sw_hweight32(unsigned int w); extern unsigned long __sw_hweight64(__u64 w); /* + * Defined here because those may be needed by architecture-specific static + * inlines. + */ + +#include <asm-generic/bitops/generic-non-atomic.h> + +/* + * Many architecture-specific non-atomic bitops contain inline asm code and due + * to that the compiler can't optimize them to compile-time expressions or + * constants. In contrary, generic_*() helpers are defined in pure C and + * compilers optimize them just well. + * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively + * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when + * the arguments can be resolved at compile time. That expression itself is a + * constant and doesn't bring any functional changes to the rest of cases. + * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when + * passing a bitmap from .bss or .data (-> `!!addr` is always true). + */ +#define bitop(op, nr, addr) \ + ((__builtin_constant_p(nr) && \ + __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) && \ + (uintptr_t)(addr) != (uintptr_t)NULL && \ + __builtin_constant_p(*(const unsigned long *)(addr))) ? 
\ + const##op(nr, addr) : op(nr, addr)) + +#define __set_bit(nr, addr) bitop(___set_bit, nr, addr) +#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr) +#define __change_bit(nr, addr) bitop(___change_bit, nr, addr) +#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr) +#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr) +#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr) +#define test_bit(nr, addr) bitop(_test_bit, nr, addr) + +/* * Include this here because some architectures need generic_ffs/fls in * scope */ #include <asm/bitops.h> +/* Check that the bitops prototypes are sane */ +#define __check_bitop_pr(name) \ + static_assert(__same_type(arch_##name, generic_##name) && \ + __same_type(const_##name, generic_##name) && \ + __same_type(_##name, generic_##name)) + +__check_bitop_pr(__set_bit); +__check_bitop_pr(__clear_bit); +__check_bitop_pr(__change_bit); +__check_bitop_pr(__test_and_set_bit); +__check_bitop_pr(__test_and_clear_bit); +__check_bitop_pr(__test_and_change_bit); +__check_bitop_pr(test_bit); + +#undef __check_bitop_pr + static inline int get_bitmask_order(unsigned int count) { int order; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d04bdf549efa..84b13fdd34a7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -140,6 +140,8 @@ struct gendisk { struct request_queue *queue; void *private_data; + struct bio_set bio_split; + int flags; unsigned long state; #define GD_NEED_PART_SCAN 0 @@ -531,7 +533,6 @@ struct request_queue { struct blk_mq_tag_set *tag_set; struct list_head tag_set_list; - struct bio_set bio_split; struct dentry *debugfs_dir; struct dentry *sched_debugfs_dir; @@ -864,9 +865,9 @@ void blk_request_module(dev_t devt); extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); void submit_bio_noacct(struct bio *bio); +struct bio *bio_split_to_limits(struct bio *bio); extern int blk_lld_busy(struct request_queue *q); -extern void blk_queue_split(struct bio **); extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); extern void blk_queue_exit(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); @@ -1167,6 +1168,11 @@ bdev_max_zone_append_sectors(struct block_device *bdev) return queue_max_zone_append_sectors(bdev_get_queue(bdev)); } +static inline unsigned int bdev_max_segments(struct block_device *bdev) +{ + return queue_max_segments(bdev_get_queue(bdev)); +} + static inline unsigned queue_logical_block_size(const struct request_queue *q) { int retval = 512; diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h index 695d1224a71b..7b121bd780eb 100644 --- a/include/linux/bpf-cgroup-defs.h +++ b/include/linux/bpf-cgroup-defs.h @@ -10,6 +10,13 @@ struct bpf_prog_array; +#ifdef CONFIG_BPF_LSM +/* Maximum number of concurrently attachable per-cgroup LSM hooks. 
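The bitop() wrapper added to <linux/bitops.h> above picks the generic C helper whenever both the bit number and the target word are compile-time constants, so the whole operation can fold to a constant. A minimal illustration of the intent described in that comment (the function and variable names are made up):

#include <linux/bitops.h>

static unsigned long bitop_fold_example(void)
{
	unsigned long foo = 0;

	/* nr and *addr are both known at compile time, so this is
	 * effectively foo = BIT(5) after constant folding. */
	__set_bit(5, &foo);
	return foo;
}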
*/ +#define CGROUP_LSM_NUM 10 +#else +#define CGROUP_LSM_NUM 0 +#endif + enum cgroup_bpf_attach_type { CGROUP_BPF_ATTACH_TYPE_INVALID = -1, CGROUP_INET_INGRESS = 0, @@ -35,6 +42,8 @@ enum cgroup_bpf_attach_type { CGROUP_INET4_GETSOCKNAME, CGROUP_INET6_GETSOCKNAME, CGROUP_INET_SOCK_RELEASE, + CGROUP_LSM_START, + CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1, MAX_CGROUP_BPF_ATTACH_TYPE }; @@ -47,8 +56,8 @@ struct cgroup_bpf { * have either zero or one element * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS */ - struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE]; - u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE]; + struct hlist_head progs[MAX_CGROUP_BPF_ATTACH_TYPE]; + u8 flags[MAX_CGROUP_BPF_ATTACH_TYPE]; /* list of cgroup shared storages */ struct list_head storages; diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 669d96d074ad..2bd1b5f8de9b 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -23,6 +23,13 @@ struct ctl_table; struct ctl_table_header; struct task_struct; +unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx, + const struct bpf_insn *insn); +unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx, + const struct bpf_insn *insn); +unsigned int __cgroup_bpf_run_lsm_current(const void *ctx, + const struct bpf_insn *insn); + #ifdef CONFIG_CGROUP_BPF #define CGROUP_ATYPE(type) \ @@ -95,7 +102,7 @@ struct bpf_cgroup_link { }; struct bpf_prog_list { - struct list_head node; + struct hlist_node node; struct bpf_prog *prog; struct bpf_cgroup_link *link; struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2b914a56a2c5..20c26aed7896 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -5,6 +5,7 @@ #define _LINUX_BPF_H 1 #include <uapi/linux/bpf.h> +#include <uapi/linux/filter.h> #include <linux/workqueue.h> #include <linux/file.h> @@ -22,8 +23,10 @@ #include <linux/sched/mm.h> #include <linux/slab.h> #include <linux/percpu-refcount.h> +#include <linux/stddef.h> #include <linux/bpfptr.h> #include <linux/btf.h> +#include <linux/rcupdate_trace.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -44,6 +47,7 @@ struct kobject; struct mem_cgroup; struct module; struct bpf_func_state; +struct ftrace_ops; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; @@ -53,6 +57,8 @@ typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, struct bpf_iter_aux_info *aux); typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); +typedef unsigned int (*bpf_func_t)(const void *, + const struct bpf_insn *); struct bpf_iter_seq_info { const struct seq_operations *seq_ops; bpf_iter_init_seq_priv_t init_seq_private; @@ -216,7 +222,7 @@ struct bpf_map { u32 btf_vmlinux_value_type_id; struct btf *btf; #ifdef CONFIG_MEMCG_KMEM - struct mem_cgroup *memcg; + struct obj_cgroup *objcg; #endif char name[BPF_OBJ_NAME_LEN]; struct bpf_map_off_arr *off_arr; @@ -398,6 +404,9 @@ enum bpf_type_flag { /* DYNPTR points to a ringbuf record. */ DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS), + /* Size is known at compile time. */ + MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; @@ -461,6 +470,8 @@ enum bpf_arg_type { * all bytes or clear them in error case. */ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM, + /* Pointer to valid memory of size known at compile time. 
*/ + ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM, /* This must be the last entry. Its purpose is to ensure the enum is * wide enough to hold the higher bits reserved for bpf_type_flag. @@ -526,6 +537,14 @@ struct bpf_func_proto { u32 *arg5_btf_id; }; u32 *arg_btf_id[5]; + struct { + size_t arg1_size; + size_t arg2_size; + size_t arg3_size; + size_t arg4_size; + size_t arg5_size; + }; + size_t arg_size[5]; }; int *ret_btf_id; /* return value btf_id */ bool (*allowed)(const struct bpf_prog *prog); @@ -733,6 +752,16 @@ struct btf_func_model { /* Return the return value of fentry prog. Only used by bpf_struct_ops. */ #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) +/* Get original function from stack instead of from provided direct address. + * Makes sense for trampolines with fexit or fmod_ret programs. + */ +#define BPF_TRAMP_F_ORIG_STACK BIT(5) + +/* This trampoline is on a function with another ftrace_ops with IPMODIFY, + * e.g., a live patch. This flag is set and cleared by ftrace call backs, + */ +#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. */ @@ -776,6 +805,10 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx); void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx); +u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog, + struct bpf_tramp_run_ctx *run_ctx); +void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start, + struct bpf_tramp_run_ctx *run_ctx); void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr); void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); @@ -811,9 +844,11 @@ struct bpf_tramp_image { struct bpf_trampoline { /* hlist for trampoline_table */ struct hlist_node hlist; + struct ftrace_ops *fops; /* serializes access to fields of this trampoline */ struct mutex mutex; refcount_t refcnt; + u32 flags; u64 key; struct { struct btf_func_model model; @@ -863,8 +898,7 @@ struct bpf_dispatcher { static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func( const void *ctx, const struct bpf_insn *insnsi, - unsigned int (*bpf_func)(const void *, - const struct bpf_insn *)) + bpf_func_t bpf_func) { return bpf_func(ctx, insnsi); } @@ -893,8 +927,7 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); noinline __nocfi unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ - unsigned int (*bpf_func)(const void *, \ - const struct bpf_insn *)) \ + bpf_func_t bpf_func) \ { \ return bpf_func(ctx, insnsi); \ } \ @@ -905,8 +938,7 @@ int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs); unsigned int bpf_dispatcher_##name##_func( \ const void *ctx, \ const struct bpf_insn *insnsi, \ - unsigned int (*bpf_func)(const void *, \ - const struct bpf_insn *)); \ + bpf_func_t bpf_func); \ extern struct bpf_dispatcher bpf_dispatcher_##name; #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) @@ -1025,7 +1057,6 @@ struct bpf_prog_aux { bool sleepable; bool tail_call_reachable; bool xdp_has_frags; - bool use_bpf_prog_pack; /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -1045,6 +1076,7 @@ struct bpf_prog_aux { struct user_struct *user; u64 
load_time; /* ns since boottime */ u32 verified_insns; + int cgroup_atype; /* enum cgroup_bpf_attach_type */ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; char name[BPF_OBJ_NAME_LEN]; #ifdef CONFIG_SECURITY @@ -1084,6 +1116,40 @@ struct bpf_prog_aux { }; }; +struct bpf_prog { + u16 pages; /* Number of allocated pages */ + u16 jited:1, /* Is our filter JIT'ed? */ + jit_requested:1,/* archs need to JIT the prog */ + gpl_compatible:1, /* Is filter GPL compatible? */ + cb_access:1, /* Is control block accessed? */ + dst_needed:1, /* Do we need dst entry? */ + blinding_requested:1, /* needs constant blinding */ + blinded:1, /* Was blinded */ + is_func:1, /* program is a bpf function */ + kprobe_override:1, /* Do we override a kprobe? */ + has_callchain_buf:1, /* callchain buffer allocated? */ + enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ + call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_func_ip:1, /* Do we call get_func_ip() */ + tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ + enum bpf_prog_type type; /* Type of BPF program */ + enum bpf_attach_type expected_attach_type; /* For some prog types */ + u32 len; /* Number of filter blocks */ + u32 jited_len; /* Size of jited insns in bytes */ + u8 tag[BPF_TAG_SIZE]; + struct bpf_prog_stats __percpu *stats; + int __percpu *active; + unsigned int (*bpf_func)(const void *ctx, + const struct bpf_insn *insn); + struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ + /* Instructions for interpreter */ + union { + DECLARE_FLEX_ARRAY(struct sock_filter, insns); + DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); + }; +}; + struct bpf_array_aux { /* Programs with direct jumps into programs part of this array. */ struct list_head poke_progs; @@ -1118,6 +1184,11 @@ struct bpf_tramp_link { u64 cookie; }; +struct bpf_shim_tramp_link { + struct bpf_tramp_link link; + struct bpf_trampoline *trampoline; +}; + struct bpf_tracing_link { struct bpf_tramp_link link; enum bpf_attach_type attach_type; @@ -1221,6 +1292,21 @@ static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, } #endif +#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) +int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, + int cgroup_atype); +void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog); +#else +static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, + int cgroup_atype) +{ + return -EOPNOTSUPP; +} +static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog) +{ +} +#endif + struct bpf_array { struct bpf_map map; u32 elem_size; @@ -1236,6 +1322,9 @@ struct bpf_array { #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 
1M insns */ #define MAX_TAIL_CALL_CNT 33 +/* Maximum number of loops for bpf_loop */ +#define BPF_MAX_LOOPS BIT(23) + #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ BPF_F_RDONLY_PROG | \ BPF_F_WRONLY | \ @@ -1336,6 +1425,8 @@ extern struct bpf_empty_prog_array bpf_empty_prog_array; struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); void bpf_prog_array_free(struct bpf_prog_array *progs); +/* Use when traversal over the bpf_prog_array uses tasks_trace rcu */ +void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs); int bpf_prog_array_length(struct bpf_prog_array *progs); bool bpf_prog_array_is_empty(struct bpf_prog_array *array); int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, @@ -1427,6 +1518,55 @@ bpf_prog_run_array(const struct bpf_prog_array *array, return ret; } +/* Notes on RCU design for bpf_prog_arrays containing sleepable programs: + * + * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array + * overall. As a result, we must use the bpf_prog_array_free_sleepable + * in order to use the tasks_trace rcu grace period. + * + * When a non-sleepable program is inside the array, we take the rcu read + * section and disable preemption for that program alone, so it can access + * rcu-protected dynamically sized maps. + */ +static __always_inline u32 +bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_trace_run_ctx run_ctx; + u32 ret = 1; + + might_fault(); + + rcu_read_lock_trace(); + migrate_disable(); + + array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held()); + if (unlikely(!array)) + goto out; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + item = &array->items[0]; + while ((prog = READ_ONCE(item->prog))) { + if (!prog->aux->sleepable) + rcu_read_lock(); + + run_ctx.bpf_cookie = item->bpf_cookie; + ret &= run_prog(prog, ctx); + item++; + + if (!prog->aux->sleepable) + rcu_read_unlock(); + } + bpf_reset_run_ctx(old_run_ctx); +out: + migrate_enable(); + rcu_read_unlock_trace(); + return ret; +} + #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); extern struct mutex bpf_stats_enabled_mutex; @@ -1797,7 +1937,8 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *regs); int btf_check_kfunc_arg_match(struct bpf_verifier_env *env, const struct btf *btf, u32 func_id, - struct bpf_reg_state *regs); + struct bpf_reg_state *regs, + u32 kfunc_flags); int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog, struct bpf_reg_state *reg); int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, @@ -2104,6 +2245,7 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); void sock_map_unhash(struct sock *sk); +void sock_map_destroy(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, @@ -2261,12 +2403,13 @@ extern const struct bpf_func_proto bpf_for_each_map_elem_proto; extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; extern const struct bpf_func_proto bpf_sk_setsockopt_proto; extern const struct bpf_func_proto bpf_sk_getsockopt_proto; -extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto; +extern const struct bpf_func_proto 
bpf_unlocked_sk_setsockopt_proto; +extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto; extern const struct bpf_func_proto bpf_find_vma_proto; extern const struct bpf_func_proto bpf_loop_proto; -extern const struct bpf_func_proto bpf_strncmp_proto; extern const struct bpf_func_proto bpf_copy_from_user_task_proto; -extern const struct bpf_func_proto bpf_kptr_xchg_proto; +extern const struct bpf_func_proto bpf_set_retval_proto; +extern const struct bpf_func_proto bpf_get_retval_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); @@ -2420,4 +2563,12 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); int bpf_dynptr_check_size(u32 size); +#ifdef CONFIG_BPF_LSM +void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype); +void bpf_cgroup_atype_put(int cgroup_atype); +#else +static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {} +static inline void bpf_cgroup_atype_put(int cgroup_atype) {} +#endif /* CONFIG_BPF_LSM */ + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h index 479c101546ad..4bcf76a9bb06 100644 --- a/include/linux/bpf_lsm.h +++ b/include/linux/bpf_lsm.h @@ -42,6 +42,8 @@ extern const struct bpf_func_proto bpf_inode_storage_get_proto; extern const struct bpf_func_proto bpf_inode_storage_delete_proto; void bpf_inode_storage_free(struct inode *inode); +void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func); + #else /* !CONFIG_BPF_LSM */ static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id) @@ -65,6 +67,11 @@ static inline void bpf_inode_storage_free(struct inode *inode) { } +static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, + bpf_func_t *bpf_func) +{ +} + #endif /* CONFIG_BPF_LSM */ #endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e8439f6cbe57..2e3bad8640dc 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -299,7 +299,7 @@ struct bpf_verifier_state { * If is_state_visited() sees a state with branches > 0 it means * there is a loop. If such state is exactly equal to the current state * it's an infinite loop. Note states_equal() checks for states - * equvalency, so two states being 'states_equal' does not mean + * equivalency, so two states being 'states_equal' does not mean * infinite loop. The exact comparison is provided by * states_maybe_looping() function. It's a stronger pre-check and * much faster than states_equal(). @@ -344,6 +344,14 @@ struct bpf_verifier_state_list { int miss_cnt, hit_cnt; }; +struct bpf_loop_inline_state { + unsigned int initialized:1; /* set to true upon first entry */ + unsigned int fit_for_inline:1; /* true if callback function is the same + * at each call and flags are always zero + */ + u32 callback_subprogno; /* valid when fit_for_inline is true */ +}; + /* Possible states for alu_state member. 
*/ #define BPF_ALU_SANITIZE_SRC (1U << 0) #define BPF_ALU_SANITIZE_DST (1U << 1) @@ -373,6 +381,10 @@ struct bpf_insn_aux_data { u32 mem_size; /* mem_size for non-struct typed var */ }; } btf_var; + /* if instruction is a call to bpf_loop this field tracks + * the state of the relevant registers to make decision about inlining + */ + struct bpf_loop_inline_state loop_inline_state; }; u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index 46e1757d06a3..79b2f78eec1a 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -49,7 +49,9 @@ static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val) static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src, size_t offset, size_t size) { - return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size); + if (!bpfptr_is_kernel(src)) + return copy_from_user(dst, src.user + offset, size); + return copy_from_kernel_nofault(dst, src.kernel + offset, size); } static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size) @@ -78,7 +80,9 @@ static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) { - return strncpy_from_sockptr(dst, (sockptr_t) src, count); + if (bpfptr_is_kernel(src)) + return strncpy_from_kernel_nofault(dst, src.kernel, count); + return strncpy_from_user(dst, src.user, count); } #endif /* _LINUX_BPFPTR_H */ diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 747fad264033..6ff567ece34a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -16,6 +16,7 @@ #define PHY_ID_BCM5481 0x0143bca0 #define PHY_ID_BCM5395 0x0143bcf0 #define PHY_ID_BCM53125 0x03625f20 +#define PHY_ID_BCM53128 0x03625e10 #define PHY_ID_BCM54810 0x03625d00 #define PHY_ID_BCM54811 0x03625cc0 #define PHY_ID_BCM5482 0x0143bcb0 diff --git a/include/linux/btf.h b/include/linux/btf.h index 2611cea2c2b6..cdb376d53238 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -12,14 +12,43 @@ #define BTF_TYPE_EMIT(type) ((void)(type *)0) #define BTF_TYPE_EMIT_ENUM(enum_val) ((void)enum_val) -enum btf_kfunc_type { - BTF_KFUNC_TYPE_CHECK, - BTF_KFUNC_TYPE_ACQUIRE, - BTF_KFUNC_TYPE_RELEASE, - BTF_KFUNC_TYPE_RET_NULL, - BTF_KFUNC_TYPE_KPTR_ACQUIRE, - BTF_KFUNC_TYPE_MAX, -}; +/* These need to be macros, as the expressions are used in assembler input */ +#define KF_ACQUIRE (1 << 0) /* kfunc is an acquire function */ +#define KF_RELEASE (1 << 1) /* kfunc is a release function */ +#define KF_RET_NULL (1 << 2) /* kfunc returns a pointer that may be NULL */ +#define KF_KPTR_GET (1 << 3) /* kfunc returns reference to a kptr */ +/* Trusted arguments are those which are meant to be referenced arguments with + * unchanged offset. It is used to enforce that pointers obtained from acquire + * kfuncs remain unmodified when being passed to helpers taking trusted args. 
+ * + * Consider + * struct foo { + * int data; + * struct foo *next; + * }; + * + * struct bar { + * int data; + * struct foo f; + * }; + * + * struct foo *f = alloc_foo(); // Acquire kfunc + * struct bar *b = alloc_bar(); // Acquire kfunc + * + * If a kfunc set_foo_data() wants to operate only on the allocated object, it + * will set the KF_TRUSTED_ARGS flag, which will prevent unsafe usage like: + * + * set_foo_data(f, 42); // Allowed + * set_foo_data(f->next, 42); // Rejected, non-referenced pointer + * set_foo_data(&f->next, 42);// Rejected, referenced, but wrong type + * set_foo_data(&b->f, 42); // Rejected, referenced, but bad offset + * + * In the final case, usually for the purposes of type matching, it is deduced + * by looking at the type of the member at the offset, but due to the + * requirement of trusted argument, this deduction will be strict and not done + * for this case. + */ +#define KF_TRUSTED_ARGS (1 << 4) /* kfunc only takes trusted pointer arguments */ struct btf; struct btf_member; @@ -30,16 +59,7 @@ struct btf_id_set; struct btf_kfunc_id_set { struct module *owner; - union { - struct { - struct btf_id_set *check_set; - struct btf_id_set *acquire_set; - struct btf_id_set *release_set; - struct btf_id_set *ret_null_set; - struct btf_id_set *kptr_acquire_set; - }; - struct btf_id_set *sets[BTF_KFUNC_TYPE_MAX]; - }; + struct btf_id_set8 *set; }; struct btf_id_dtor_kfunc { @@ -177,6 +197,19 @@ static inline bool btf_type_is_enum(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM; } +static inline bool btf_is_any_enum(const struct btf_type *t) +{ + return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM || + BTF_INFO_KIND(t->info) == BTF_KIND_ENUM64; +} + +static inline bool btf_kind_core_compat(const struct btf_type *t1, + const struct btf_type *t2) +{ + return BTF_INFO_KIND(t1->info) == BTF_INFO_KIND(t2->info) || + (btf_is_any_enum(t1) && btf_is_any_enum(t2)); +} + static inline bool str_is_empty(const char *s) { return !s || !s[0]; @@ -192,6 +225,16 @@ static inline bool btf_is_enum(const struct btf_type *t) return btf_kind(t) == BTF_KIND_ENUM; } +static inline bool btf_is_enum64(const struct btf_type *t) +{ + return btf_kind(t) == BTF_KIND_ENUM64; +} + +static inline u64 btf_enum64_value(const struct btf_enum64 *e) +{ + return ((u64)e->val_hi32 << 32) | e->val_lo32; +} + static inline bool btf_is_composite(const struct btf_type *t) { u16 kind = btf_kind(t); @@ -332,6 +375,11 @@ static inline struct btf_enum *btf_enum(const struct btf_type *t) return (struct btf_enum *)(t + 1); } +static inline struct btf_enum64 *btf_enum64(const struct btf_type *t) +{ + return (struct btf_enum64 *)(t + 1); +} + static inline const struct btf_var_secinfo *btf_type_var_secinfo( const struct btf_type *t) { @@ -350,9 +398,9 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); const char *btf_name_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); -bool btf_kfunc_id_set_contains(const struct btf *btf, +u32 *btf_kfunc_id_set_contains(const struct btf *btf, enum bpf_prog_type prog_type, - enum btf_kfunc_type type, u32 kfunc_btf_id); + u32 kfunc_btf_id); int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, const struct btf_kfunc_id_set *s); s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id); @@ -369,12 +417,11 @@ static inline const char *btf_name_by_offset(const struct btf *btf, { return NULL; } -static inline bool btf_kfunc_id_set_contains(const 
struct btf *btf, +static inline u32 *btf_kfunc_id_set_contains(const struct btf *btf, enum bpf_prog_type prog_type, - enum btf_kfunc_type type, u32 kfunc_btf_id) { - return false; + return NULL; } static inline int register_btf_kfunc_id_set(enum bpf_prog_type prog_type, const struct btf_kfunc_id_set *s) diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 335a19092368..2aea877d644f 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -8,6 +8,15 @@ struct btf_id_set { u32 ids[]; }; +struct btf_id_set8 { + u32 cnt; + u32 flags; + struct { + u32 id; + u32 flags; + } pairs[]; +}; + #ifdef CONFIG_DEBUG_INFO_BTF #include <linux/compiler.h> /* for __PASTE */ @@ -25,7 +34,7 @@ struct btf_id_set { #define BTF_IDS_SECTION ".BTF_ids" -#define ____BTF_ID(symbol) \ +#define ____BTF_ID(symbol, word) \ asm( \ ".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ ".local " #symbol " ; \n" \ @@ -33,10 +42,11 @@ asm( \ ".size " #symbol ", 4; \n" \ #symbol ": \n" \ ".zero 4 \n" \ +word \ ".popsection; \n"); -#define __BTF_ID(symbol) \ - ____BTF_ID(symbol) +#define __BTF_ID(symbol, word) \ + ____BTF_ID(symbol, word) #define __ID(prefix) \ __PASTE(prefix, __COUNTER__) @@ -46,7 +56,14 @@ asm( \ * to 4 zero bytes. */ #define BTF_ID(prefix, name) \ - __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), "") + +#define ____BTF_ID_FLAGS(prefix, name, flags) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__), ".long " #flags "\n") +#define __BTF_ID_FLAGS(prefix, name, flags, ...) \ + ____BTF_ID_FLAGS(prefix, name, flags) +#define BTF_ID_FLAGS(prefix, name, ...) \ + __BTF_ID_FLAGS(prefix, name, ##__VA_ARGS__, 0) /* * The BTF_ID_LIST macro defines pure (unsorted) list @@ -145,10 +162,51 @@ asm( \ ".popsection; \n"); \ extern struct btf_id_set name; +/* + * The BTF_SET8_START/END macros pair defines sorted list of + * BTF IDs and their flags plus its members count, with the + * following layout: + * + * BTF_SET8_START(list) + * BTF_ID_FLAGS(type1, name1, flags) + * BTF_ID_FLAGS(type2, name2, flags) + * BTF_SET8_END(list) + * + * __BTF_ID__set8__list: + * .zero 8 + * list: + * __BTF_ID__type1__name1__3: + * .zero 4 + * .word (1 << 0) | (1 << 2) + * __BTF_ID__type2__name2__5: + * .zero 4 + * .word (1 << 3) | (1 << 1) | (1 << 2) + * + */ +#define __BTF_SET8_START(name, scope) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +"." #scope " __BTF_ID__set8__" #name "; \n" \ +"__BTF_ID__set8__" #name ":; \n" \ +".zero 8 \n" \ +".popsection; \n"); + +#define BTF_SET8_START(name) \ +__BTF_ID_LIST(name, local) \ +__BTF_SET8_START(name, local) + +#define BTF_SET8_END(name) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".size __BTF_ID__set8__" #name ", .-" #name " \n" \ +".popsection; \n"); \ +extern struct btf_id_set8 name; + #else #define BTF_ID_LIST(name) static u32 __maybe_unused name[5]; #define BTF_ID(prefix, name) +#define BTF_ID_FLAGS(prefix, name, ...) 
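The set8 machinery above folds the old per-category kfunc ID sets into a single flag-carrying set. A minimal registration sketch, assuming two hypothetical kfuncs (example_acquire_obj(), example_release_obj()) and the tracing program type; only the macros, the KF_* flags and register_btf_kfunc_id_set() come from the change:

#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/init.h>
#include <linux/module.h>

BTF_SET8_START(example_kfunc_ids)
BTF_ID_FLAGS(func, example_acquire_obj, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, example_release_obj, KF_RELEASE)
BTF_SET8_END(example_kfunc_ids)

static const struct btf_kfunc_id_set example_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &example_kfunc_ids,
};

static int __init example_kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING,
					 &example_kfunc_set);
}
late_initcall(example_kfunc_init);

The verifier then looks the kfunc up with btf_kfunc_id_set_contains() and gets the flags word back, instead of probing several separate sets.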
#define BTF_ID_UNUSED #define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n]; #define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1]; @@ -156,6 +214,8 @@ extern struct btf_id_set name; #define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 }; #define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 }; #define BTF_SET_END(name) +#define BTF_SET8_START(name) static struct btf_id_set8 __maybe_unused name = { 0 }; +#define BTF_SET8_END(name) #endif /* CONFIG_DEBUG_INFO_BTF */ @@ -179,7 +239,8 @@ extern struct btf_id_set name; BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) + BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket) enum { #define BTF_SOCK_TYPE(name, str) name, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index bb68eb6407da..def8b8d30ccc 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -118,7 +118,6 @@ static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \ * of the form "mark_buffer_foo()". These are higher-level functions which * do something in addition to setting a b_state bit. */ -BUFFER_FNS(Uptodate, uptodate) BUFFER_FNS(Dirty, dirty) TAS_BUFFER_FNS(Dirty, dirty) BUFFER_FNS(Lock, locked) @@ -136,6 +135,30 @@ BUFFER_FNS(Meta, meta) BUFFER_FNS(Prio, prio) BUFFER_FNS(Defer_Completion, defer_completion) +static __always_inline void set_buffer_uptodate(struct buffer_head *bh) +{ + /* + * make it consistent with folio_mark_uptodate + * pairs with smp_load_acquire in buffer_uptodate + */ + smp_mb__before_atomic(); + set_bit(BH_Uptodate, &bh->b_state); +} + +static __always_inline void clear_buffer_uptodate(struct buffer_head *bh) +{ + clear_bit(BH_Uptodate, &bh->b_state); +} + +static __always_inline int buffer_uptodate(const struct buffer_head *bh) +{ + /* + * make it consistent with folio_test_uptodate + * pairs with smp_mb__before_atomic in set_buffer_uptodate + */ + return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0; +} + #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) /* If we *know* page->private refers to buffer_heads */ @@ -259,14 +282,16 @@ static inline vm_fault_t block_page_mkwrite_return(int err) } sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); int block_truncate_page(struct address_space *, loff_t, get_block_t *); -int nobh_write_begin(struct address_space *, loff_t, unsigned len, - struct page **, void **, get_block_t*); -int nobh_write_end(struct file *, struct address_space *, - loff_t, unsigned, unsigned, - struct page *, void *); -int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); -int nobh_writepage(struct page *page, get_block_t *get_block, - struct writeback_control *wbc); + +#ifdef CONFIG_MIGRATION +extern int buffer_migrate_folio(struct address_space *, + struct folio *dst, struct folio *src, enum migrate_mode); +extern int buffer_migrate_folio_norefs(struct address_space *, + struct folio *dst, struct folio *src, enum migrate_mode); +#else +#define buffer_migrate_folio NULL +#define buffer_migrate_folio_norefs NULL +#endif void buffer_init(void); diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 4ff37cb763ae..00b7a6ae8617 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -82,6 +82,9 @@ 
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); int init_cache_level(unsigned int cpu); int populate_cache_leaves(unsigned int cpu); int cache_setup_acpi(unsigned int cpu); +bool last_level_cache_is_valid(unsigned int cpu); +bool last_level_cache_is_shared(unsigned int cpu_x, unsigned int cpu_y); +int detect_cache_attributes(unsigned int cpu); #ifndef CONFIG_ACPI_PPTT /* * acpi_find_last_cache_level is only called on ACPI enabled diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index 7ae21c0f7f23..ef0a77173e3c 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -11,6 +11,8 @@ #define CAN_SYNC_SEG 1 +#define CAN_BITRATE_UNSET 0 +#define CAN_BITRATE_UNKNOWN (-1U) #define CAN_CTRLMODE_TDC_MASK \ (CAN_CTRLMODE_TDC_AUTO | CAN_CTRLMODE_TDC_MANUAL) diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index e22dc03c850e..c3e50e537e39 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -20,6 +20,7 @@ #include <linux/can/length.h> #include <linux/can/netlink.h> #include <linux/can/skb.h> +#include <linux/ethtool.h> #include <linux/netdevice.h> /* @@ -162,6 +163,9 @@ struct can_priv *safe_candev_priv(struct net_device *dev); int open_candev(struct net_device *dev); void close_candev(struct net_device *dev); int can_change_mtu(struct net_device *dev, int new_mtu); +int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd); +int can_ethtool_op_get_ts_info_hwts(struct net_device *dev, + struct ethtool_ts_info *info); int register_candev(struct net_device *dev); void unregister_candev(struct net_device *dev); diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index fdb22b00674a..182749e858b3 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -31,6 +31,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, struct canfd_frame **cfd); struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf); +bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb); /* * The struct can_skb_priv is used to transport additional information along @@ -96,64 +97,6 @@ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) return nskb; } -/* Check for outgoing skbs that have not been created by the CAN subsystem */ -static inline bool can_skb_headroom_valid(struct net_device *dev, - struct sk_buff *skb) -{ - /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */ - if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv))) - return false; - - /* af_packet does not apply CAN skb specific settings */ - if (skb->ip_summed == CHECKSUM_NONE) { - /* init headroom */ - can_skb_prv(skb)->ifindex = dev->ifindex; - can_skb_prv(skb)->skbcnt = 0; - - skb->ip_summed = CHECKSUM_UNNECESSARY; - - /* perform proper loopback on capable devices */ - if (dev->flags & IFF_ECHO) - skb->pkt_type = PACKET_LOOPBACK; - else - skb->pkt_type = PACKET_HOST; - - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); - } - - return true; -} - -/* Drop a given socketbuffer if it does not contain a valid CAN frame. 
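Going back to the buffer_head uptodate helpers above, a small sketch of the ordering they now guarantee; the writer/reader split and the function names are illustrative, not part of the change:

#include <linux/buffer_head.h>
#include <linux/string.h>

/* writer side (e.g. I/O completion): smp_mb__before_atomic() in
 * set_buffer_uptodate() keeps the data stores before the uptodate bit */
static void example_fill_buffer(struct buffer_head *bh, const void *src)
{
	memcpy(bh->b_data, src, bh->b_size);
	set_buffer_uptodate(bh);
}

/* reader side: smp_load_acquire() in buffer_uptodate() guarantees the data
 * read below observes everything written before the bit was set */
static bool example_read_buffer(struct buffer_head *bh, void *dst)
{
	if (!buffer_uptodate(bh))
		return false;

	memcpy(dst, bh->b_data, bh->b_size);
	return true;
}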
*/ -static inline bool can_dropped_invalid_skb(struct net_device *dev, - struct sk_buff *skb) -{ - const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - - if (skb->protocol == htons(ETH_P_CAN)) { - if (unlikely(skb->len != CAN_MTU || - cfd->len > CAN_MAX_DLEN)) - goto inval_skb; - } else if (skb->protocol == htons(ETH_P_CANFD)) { - if (unlikely(skb->len != CANFD_MTU || - cfd->len > CANFD_MAX_DLEN)) - goto inval_skb; - } else - goto inval_skb; - - if (!can_skb_headroom_valid(dev, skb)) - goto inval_skb; - - return false; - -inval_skb: - kfree_skb(skb); - dev->stats.tx_dropped++; - return true; -} - static inline bool can_is_canfd_skb(const struct sk_buff *skb) { /* the CAN specific type of skb is identified by its data length */ diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 86bf82dbd8b8..49586ff26152 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -433,9 +433,9 @@ union ceph_mds_request_args { __le32 stripe_unit; /* layout for newly created file */ __le32 stripe_count; /* ... */ __le32 object_size; - __le32 file_replication; - __le32 mask; /* CEPH_CAP_* */ - __le32 old_size; + __le32 pool; + __le32 mask; /* CEPH_CAP_* */ + __le64 old_size; } __attribute__ ((packed)) open; struct { __le32 flags; @@ -768,7 +768,7 @@ struct ceph_mds_caps { __le32 xattr_len; __le64 xattr_version; - /* filelock */ + /* a union of non-export and export bodies. */ __le64 size, max_size, truncate_size; __le32 truncate_seq; struct ceph_timespec mtime, atime, ctime; diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h index 523fd0452856..4c3e0648dc27 100644 --- a/include/linux/ceph/mdsmap.h +++ b/include/linux/ceph/mdsmap.h @@ -25,6 +25,7 @@ struct ceph_mdsmap { u32 m_session_timeout; /* seconds */ u32 m_session_autoclose; /* seconds */ u64 m_max_file_size; + u64 m_max_xattr_size; /* maximum size for xattrs blob */ u32 m_max_mds; /* expected up:active mds number */ u32 m_num_active_mds; /* actual up:active mds number */ u32 possible_max_rank; /* possible max rank index */ diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index cba8a6ffc329..fb6be72104df 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -507,9 +507,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, extern void ceph_osdc_get_request(struct ceph_osd_request *req); extern void ceph_osdc_put_request(struct ceph_osd_request *req); -extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, - struct ceph_osd_request *req, - bool nofail); +void ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req); extern void ceph_osdc_cancel_request(struct ceph_osd_request *req); extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 187b54a9567e..4bcf56b3491c 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -89,19 +89,32 @@ enum { CGRP_ROOT_NS_DELEGATE = (1 << 3), /* + * Reduce latencies on dynamic cgroup modifications such as task + * migrations and controller on/offs by disabling percpu operation on + * cgroup_threadgroup_rwsem. This makes hot path operations such as + * forks and exits into the slow path and more expensive. 
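With can_dropped_invalid_skb() moved out of line above, driver call sites stay the same; a sketch of the usual transmit-path usage (the driver name and the hardware-queueing step are hypothetical):

#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/netdevice.h>

static netdev_tx_t example_can_start_xmit(struct sk_buff *skb,
					  struct net_device *dev)
{
	/* frees the skb and bumps tx_dropped if it is not a valid CAN frame */
	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* ... hand the frame to the controller's TX mailbox here ... */

	return NETDEV_TX_OK;
}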
+ * + * The static usage pattern of creating a cgroup, enabling controllers, + * and then seeding it with CLONE_INTO_CGROUP doesn't require write + * locking cgroup_threadgroup_rwsem and thus doesn't benefit from + * favordynmod. + */ + CGRP_ROOT_FAVOR_DYNMODS = (1 << 4), + + /* * Enable cpuset controller in v1 cgroup to use v2 behavior. */ - CGRP_ROOT_CPUSET_V2_MODE = (1 << 4), + CGRP_ROOT_CPUSET_V2_MODE = (1 << 16), /* * Enable legacy local memory.events. */ - CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 5), + CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17), /* * Enable recursive subtree protection */ - CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 6), + CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18), }; /* cftype->flags */ @@ -480,7 +493,7 @@ struct cgroup { struct work_struct release_agent_work; /* used to track pressure stalls */ - struct psi_group psi; + struct psi_group *psi; /* used to store eBPF programs */ struct cgroup_bpf bpf; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 0d1ada8968d7..ed53bfe7c46c 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -674,7 +674,7 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) static inline struct psi_group *cgroup_psi(struct cgroup *cgrp) { - return &cgrp->psi; + return cgrp->psi; } bool cgroup_psi_enabled(void); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index c10dc4c659e2..1615010aa0ec 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -832,6 +832,25 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, NULL, (flags), (reg), (shift), (width), \ (clk_divider_flags), NULL, (lock)) /** + * devm_clk_hw_register_divider_parent_hw - register a divider clock with the clock framework + * @dev: device registering this clock + * @name: name of this clock + * @parent_hw: pointer to parent clk + * @flags: framework-specific flags + * @reg: register address to adjust divider + * @shift: number of bits to shift the bitfield + * @width: width of the bitfield + * @clk_divider_flags: divider-specific flags for this clock + * @lock: shared register lock for this clock + */ +#define devm_clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, \ + reg, shift, width, \ + clk_divider_flags, lock) \ + __devm_clk_hw_register_divider((dev), NULL, (name), NULL, \ + (parent_hw), NULL, (flags), (reg), \ + (shift), (width), (clk_divider_flags), \ + NULL, (lock)) +/** * devm_clk_hw_register_divider_table - register a table based divider clock * with the clock framework (devres variant) * @dev: device registering this clock @@ -961,6 +980,13 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name, (parent_names), NULL, NULL, (flags), (reg), \ (shift), BIT((width)) - 1, (clk_mux_flags), \ NULL, (lock)) +#define devm_clk_hw_register_mux_parent_hws(dev, name, parent_hws, \ + num_parents, flags, reg, shift, \ + width, clk_mux_flags, lock) \ + __devm_clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \ + (parent_hws), NULL, (flags), (reg), \ + (shift), BIT((width)) - 1, \ + (clk_mux_flags), NULL, (lock)) int clk_mux_val_to_index(struct clk_hw *hw, const u32 *table, unsigned int flags, unsigned int val); @@ -1006,6 +1032,14 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, struct clk_hw *devm_clk_hw_register_fixed_factor_index(struct device *dev, const char *name, unsigned int index, unsigned long flags, unsigned int mult, unsigned int div); + +struct clk_hw 
*devm_clk_hw_register_fixed_factor_parent_hw(struct device *dev, + const char *name, const struct clk_hw *parent_hw, + unsigned long flags, unsigned int mult, unsigned int div); + +struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev, + const char *name, const struct clk_hw *parent_hw, + unsigned long flags, unsigned int mult, unsigned int div); /** * struct clk_fractional_divider - adjustable fractional divider clock * @@ -1176,10 +1210,8 @@ int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw); int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw); void clk_unregister(struct clk *clk); -void devm_clk_unregister(struct device *dev, struct clk *clk); void clk_hw_unregister(struct clk_hw *hw); -void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw); /* helper functions */ const char *__clk_get_name(const struct clk *clk); diff --git a/include/linux/clk.h b/include/linux/clk.h index 39faa54efe88..c13061cabdfc 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -443,15 +443,16 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, * @dev: device for clock "consumer" * @id: clock consumer ID * - * Returns a struct clk corresponding to the clock producer, or + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or * valid IS_ERR() condition containing errno. The implementation * uses @dev and @id to determine the clock consumer, and thereby * the clock producer. (IOW, @id may be identical strings, but * clk_get may return different clock producers depending on @dev.) * - * Drivers must assume that the clock source is not enabled. - * - * devm_clk_get should not be called from within interrupt context. + * Drivers must assume that the clock source is neither prepared nor + * enabled. * * The clock will automatically be freed when the device is unbound * from the bus. @@ -459,17 +460,114 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk *devm_clk_get(struct device *dev, const char *id); /** + * devm_clk_get_prepared - devm_clk_get() + clk_prepare() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * The returned clk (if valid) is prepared. Drivers must however assume + * that the clock is not enabled. + * + * The clock will automatically be unprepared and freed when the device + * is unbound from the bus. + */ +struct clk *devm_clk_get_prepared(struct device *dev, const char *id); + +/** + * devm_clk_get_enabled - devm_clk_get() + clk_prepare_enable() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. (IOW, @id may be identical strings, but + * clk_get may return different clock producers depending on @dev.) + * + * The returned clk (if valid) is prepared and enabled. + * + * The clock will automatically be disabled, unprepared and freed + * when the device is unbound from the bus. 
+ */ +struct clk *devm_clk_get_enabled(struct device *dev, const char *id); + +/** * devm_clk_get_optional - lookup and obtain a managed reference to an optional * clock producer. * @dev: device for clock "consumer" * @id: clock consumer ID * - * Behaves the same as devm_clk_get() except where there is no clock producer. - * In this case, instead of returning -ENOENT, the function returns NULL. + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get(). + * + * Drivers must assume that the clock source is neither prepared nor + * enabled. + * + * The clock will automatically be freed when the device is unbound + * from the bus. */ struct clk *devm_clk_get_optional(struct device *dev, const char *id); /** + * devm_clk_get_optional_prepared - devm_clk_get_optional() + clk_prepare() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get_prepared(). + * + * The returned clk (if valid) is prepared. Drivers must however + * assume that the clock is not enabled. + * + * The clock will automatically be unprepared and freed when the + * device is unbound from the bus. + */ +struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); + +/** + * devm_clk_get_optional_enabled - devm_clk_get_optional() + + * clk_prepare_enable() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get_enabled(). + * + * The returned clk (if valid) is prepared and enabled. + * + * The clock will automatically be disabled, unprepared and freed + * when the device is unbound from the bus. + */ +struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); + +/** * devm_get_clk_from_child - lookup and obtain a managed reference to a * clock producer from child node. 
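A consumer-side sketch of what devm_clk_get_enabled() buys; the driver, device and clock name are hypothetical. The explicit clk_prepare_enable()/clk_disable_unprepare() calls and their error unwinding simply disappear from probe():

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* looked up, prepared and enabled in one step; automatically
	 * disabled, unprepared and put when the device is unbound */
	clk = devm_clk_get_enabled(&pdev->dev, "bus");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return 0;
}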
* @dev: device for clock "consumer" @@ -813,12 +911,36 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) return NULL; } +static inline struct clk *devm_clk_get_prepared(struct device *dev, + const char *id) +{ + return NULL; +} + +static inline struct clk *devm_clk_get_enabled(struct device *dev, + const char *id) +{ + return NULL; +} + static inline struct clk *devm_clk_get_optional(struct device *dev, const char *id) { return NULL; } +static inline struct clk *devm_clk_get_optional_prepared(struct device *dev, + const char *id) +{ + return NULL; +} + +static inline struct clk *devm_clk_get_optional_enabled(struct device *dev, + const char *id) +{ + return NULL; +} + static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks) { diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 3486f20a3753..cbfcbf186ce3 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI clock drivers support * * Copyright (C) 2013 Texas Instruments, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __LINUX_CLK_TI_H__ #define __LINUX_CLK_TI_H__ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index a0c55eeaeaf1..9b157b71036f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -66,17 +66,6 @@ __builtin_unreachable(); \ } while (0) -/* - * GCC 'asm goto' miscompiles certain code sequences: - * - * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 - * - * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. - * - * (asm goto is automatically volatile - the naming reflects this.) - */ -#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) - #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index d5b9c8d40c18..1518568aaf0f 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -17,7 +17,7 @@ #include <linux/vt.h> #include <linux/workqueue.h> -struct uni_pagedir; +struct uni_pagedict; struct uni_screen; #define NPAR 16 @@ -157,8 +157,8 @@ struct vc_data { unsigned int vc_bell_duration; /* Console bell duration */ unsigned short vc_cur_blink_ms; /* Cursor blink duration */ struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ - struct uni_pagedir *vc_uni_pagedir; - struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ + struct uni_pagedict *uni_pagedict; + struct uni_pagedict **uni_pagedict_loc; /* [!] 
Location of uni_pagedict variable for this console */ struct uni_screen *vc_uni_screen; /* unicode screen content */ /* additional information is in vt_kern.h */ }; diff --git a/include/linux/consolemap.h b/include/linux/consolemap.h index bcfce748c9d8..c35db4896c37 100644 --- a/include/linux/consolemap.h +++ b/include/linux/consolemap.h @@ -7,30 +7,56 @@ #ifndef __LINUX_CONSOLEMAP_H__ #define __LINUX_CONSOLEMAP_H__ -#define LAT1_MAP 0 -#define GRAF_MAP 1 -#define IBMPC_MAP 2 -#define USER_MAP 3 +enum translation_map { + LAT1_MAP, + GRAF_MAP, + IBMPC_MAP, + USER_MAP, + + FIRST_MAP = LAT1_MAP, + LAST_MAP = USER_MAP, +}; #include <linux/types.h> -#ifdef CONFIG_CONSOLE_TRANSLATIONS struct vc_data; -extern u16 inverse_translate(const struct vc_data *conp, int glyph, - int use_unicode); -extern unsigned short *set_translate(int m, struct vc_data *vc); -extern int conv_uni_to_pc(struct vc_data *conp, long ucs); -extern u32 conv_8bit_to_uni(unsigned char c); -extern int conv_uni_to_8bit(u32 uni); +#ifdef CONFIG_CONSOLE_TRANSLATIONS +u16 inverse_translate(const struct vc_data *conp, u16 glyph, bool use_unicode); +unsigned short *set_translate(enum translation_map m, struct vc_data *vc); +int conv_uni_to_pc(struct vc_data *conp, long ucs); +u32 conv_8bit_to_uni(unsigned char c); +int conv_uni_to_8bit(u32 uni); void console_map_init(void); #else -#define inverse_translate(conp, glyph, uni) ((uint16_t)glyph) -#define set_translate(m, vc) ((unsigned short *)NULL) -#define conv_uni_to_pc(conp, ucs) ((int) (ucs > 0xff ? -1: ucs)) -#define conv_8bit_to_uni(c) ((uint32_t)(c)) -#define conv_uni_to_8bit(c) ((int) ((c) & 0xff)) -#define console_map_init(c) do { ; } while (0) +static inline u16 inverse_translate(const struct vc_data *conp, u16 glyph, + bool use_unicode) +{ + return glyph; +} + +static inline unsigned short *set_translate(enum translation_map m, + struct vc_data *vc) +{ + return NULL; +} + +static inline int conv_uni_to_pc(struct vc_data *conp, long ucs) +{ + return ucs > 0xff ? -1 : ucs; +} + +static inline u32 conv_8bit_to_uni(unsigned char c) +{ + return c; +} + +static inline int conv_uni_to_8bit(u32 uni) +{ + return uni & 0xff; +} + +static inline void console_map_init(void) { } #endif /* CONFIG_CONSOLE_TRANSLATIONS */ #endif /* __LINUX_CONSOLEMAP_H__ */ diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h index 4ac5c081af93..6c2fd6cc5a98 100644 --- a/include/linux/coresight-pmu.h +++ b/include/linux/coresight-pmu.h @@ -18,6 +18,7 @@ * ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and * directly use below macros as config bits. */ +#define ETM_OPT_BRANCH_BROADCAST 8 #define ETM_OPT_CYCACC 12 #define ETM_OPT_CTXTID 14 #define ETM_OPT_CTXTID2 15 @@ -25,6 +26,7 @@ #define ETM_OPT_RETSTK 29 /* ETMv4 CONFIGR programming bits for the ETM OPTs */ +#define ETM4_CFG_BIT_BB 3 #define ETM4_CFG_BIT_CYCACC 4 #define ETM4_CFG_BIT_CTXTID 6 #define ETM4_CFG_BIT_VMID 7 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index fe29ac7cc469..0d435d0edbcb 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -12,6 +12,8 @@ #include <linux/bitmap.h> #include <linux/atomic.h> #include <linux/bug.h> +#include <linux/gfp_types.h> +#include <linux/numa.h> /* Don't assign or return these: may not be this big! */ typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; @@ -116,85 +118,6 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu) return cpu; } -#if NR_CPUS == 1 -/* Uniprocessor. Assume all masks are "1". 
*/ -static inline unsigned int cpumask_first(const struct cpumask *srcp) -{ - return 0; -} - -static inline unsigned int cpumask_first_zero(const struct cpumask *srcp) -{ - return 0; -} - -static inline unsigned int cpumask_first_and(const struct cpumask *srcp1, - const struct cpumask *srcp2) -{ - return 0; -} - -static inline unsigned int cpumask_last(const struct cpumask *srcp) -{ - return 0; -} - -/* Valid inputs for n are -1 and 0. */ -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) -{ - return n+1; -} - -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) -{ - return n+1; -} - -static inline unsigned int cpumask_next_and(int n, - const struct cpumask *srcp, - const struct cpumask *andp) -{ - return n+1; -} - -static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, - int start, bool wrap) -{ - /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */ - return (wrap && n == 0); -} - -/* cpu must be a valid cpu, ie 0, so there's no other choice. */ -static inline unsigned int cpumask_any_but(const struct cpumask *mask, - unsigned int cpu) -{ - return 1; -} - -static inline unsigned int cpumask_local_spread(unsigned int i, int node) -{ - return 0; -} - -static inline int cpumask_any_and_distribute(const struct cpumask *src1p, - const struct cpumask *src2p) { - return cpumask_first_and(src1p, src2p); -} - -static inline int cpumask_any_distribute(const struct cpumask *srcp) -{ - return cpumask_first(srcp); -} - -#define for_each_cpu(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#define for_each_cpu_not(cpu, mask) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) -#define for_each_cpu_wrap(cpu, mask, start) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start)) -#define for_each_cpu_and(cpu, mask1, mask2) \ - for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2) -#else /** * cpumask_first - get the first cpu in a cpumask * @srcp: the cpumask pointer @@ -241,7 +164,21 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp) return find_last_bit(cpumask_bits(srcp), nr_cpumask_bits); } -unsigned int __pure cpumask_next(int n, const struct cpumask *srcp); +/** + * cpumask_next - get the next cpu in a cpumask + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @srcp: the cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus set. + */ +static inline +unsigned int cpumask_next(int n, const struct cpumask *srcp) +{ + /* -1 is a legal arg here. 
*/ + if (n != -1) + cpumask_check(n); + return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1); +} /** * cpumask_next_zero - get the next unset cpu in a cpumask @@ -258,12 +195,47 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); } -int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); -int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu); +#if NR_CPUS == 1 +/* Uniprocessor: there is only one valid CPU */ +static inline unsigned int cpumask_local_spread(unsigned int i, int node) +{ + return 0; +} + +static inline int cpumask_any_and_distribute(const struct cpumask *src1p, + const struct cpumask *src2p) { + return cpumask_first_and(src1p, src2p); +} + +static inline int cpumask_any_distribute(const struct cpumask *srcp) +{ + return cpumask_first(srcp); +} +#else unsigned int cpumask_local_spread(unsigned int i, int node); -int cpumask_any_and_distribute(const struct cpumask *src1p, +unsigned int cpumask_any_and_distribute(const struct cpumask *src1p, const struct cpumask *src2p); -int cpumask_any_distribute(const struct cpumask *srcp); +unsigned int cpumask_any_distribute(const struct cpumask *srcp); +#endif /* NR_CPUS */ + +/** + * cpumask_next_and - get the next cpu in *src1p & *src2p + * @n: the cpu prior to the place to search (ie. return will be > @n) + * @src1p: the first cpumask pointer + * @src2p: the second cpumask pointer + * + * Returns >= nr_cpu_ids if no further cpus set in both. + */ +static inline +unsigned int cpumask_next_and(int n, const struct cpumask *src1p, + const struct cpumask *src2p) +{ + /* -1 is a legal arg here. */ + if (n != -1) + cpumask_check(n); + return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p), + nr_cpumask_bits, n + 1); +} /** * for_each_cpu - iterate over every cpu in a mask @@ -289,7 +261,7 @@ int cpumask_any_distribute(const struct cpumask *srcp); (cpu) = cpumask_next_zero((cpu), (mask)), \ (cpu) < nr_cpu_ids;) -extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); +unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap); /** * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location @@ -324,7 +296,26 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool for ((cpu) = -1; \ (cpu) = cpumask_next_and((cpu), (mask1), (mask2)), \ (cpu) < nr_cpu_ids;) -#endif /* SMP */ + +/** + * cpumask_any_but - return a "random" in a cpumask, but not this one. + * @mask: the cpumask to search + * @cpu: the cpu to ignore. + * + * Often used to find any cpu but smp_processor_id() in a mask. + * Returns >= nr_cpu_ids if no cpus set. 
+ */ +static inline +unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) +{ + unsigned int i; + + cpumask_check(cpu); + for_each_cpu(i, mask) + if (i != cpu) + break; + return i; +} #define CPU_BITS_NONE \ { \ @@ -372,9 +363,9 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns 1 if @cpu is set in @cpumask, else returns 0 + * Returns true if @cpu is set in @cpumask, else returns false */ -static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask) +static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask) { return test_bit(cpumask_check(cpu), cpumask_bits((cpumask))); } @@ -384,11 +375,11 @@ static __always_inline int cpumask_test_cpu(int cpu, const struct cpumask *cpuma * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 + * Returns true if @cpu is set in old bitmap of @cpumask, else returns false * * test_and_set_bit wrapper for cpumasks. */ -static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) +static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) { return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } @@ -398,11 +389,11 @@ static __always_inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpu * @cpu: cpu number (< nr_cpu_ids) * @cpumask: the cpumask pointer * - * Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 + * Returns true if @cpu is set in old bitmap of @cpumask, else returns false * * test_and_clear_bit wrapper for cpumasks. */ -static __always_inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) +static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) { return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); } @@ -431,9 +422,9 @@ static inline void cpumask_clear(struct cpumask *dstp) * @src1p: the first input * @src2p: the second input * - * If *@dstp is empty, returns 0, else returns 1 + * If *@dstp is empty, returns false, else returns true */ -static inline int cpumask_and(struct cpumask *dstp, +static inline bool cpumask_and(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { @@ -474,9 +465,9 @@ static inline void cpumask_xor(struct cpumask *dstp, * @src1p: the first input * @src2p: the second input * - * If *@dstp is empty, returns 0, else returns 1 + * If *@dstp is empty, returns false, else returns true */ -static inline int cpumask_andnot(struct cpumask *dstp, +static inline bool cpumask_andnot(struct cpumask *dstp, const struct cpumask *src1p, const struct cpumask *src2p) { @@ -539,9 +530,9 @@ static inline bool cpumask_intersects(const struct cpumask *src1p, * @src1p: the first input * @src2p: the second input * - * Returns 1 if *@src1p is a subset of *@src2p, else returns 0 + * Returns true if *@src1p is a subset of *@src2p, else returns false */ -static inline int cpumask_subset(const struct cpumask *src1p, +static inline bool cpumask_subset(const struct cpumask *src1p, const struct cpumask *src2p) { return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), @@ -743,9 +734,35 @@ typedef struct cpumask *cpumask_var_t; #define __cpumask_var_read_mostly __read_mostly bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); -bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 
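A short sketch for the now-inline cpumask_any_but(), following its "often used to find any cpu but smp_processor_id()" note; the wrapper name is hypothetical:

#include <linux/cpumask.h>
#include <linux/smp.h>

/* pick any online CPU other than the current one; fall back to the current
 * CPU when it is the only one online (cpumask_any_but() then returns a value
 * >= nr_cpu_ids) */
static unsigned int example_pick_other_cpu(void)
{
	unsigned int this_cpu = get_cpu();
	unsigned int cpu;

	cpu = cpumask_any_but(cpu_online_mask, this_cpu);
	if (cpu >= nr_cpu_ids)
		cpu = this_cpu;
	put_cpu();

	return cpu;
}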
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); -bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); + +static inline +bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node) +{ + return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node); +} + +/** + * alloc_cpumask_var - allocate a struct cpumask + * @mask: pointer to cpumask_var_t where the cpumask is returned + * @flags: GFP_ flags + * + * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is + * a nop returning a constant 1 (in <linux/cpumask.h>). + * + * See alloc_cpumask_var_node. + */ +static inline +bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE); +} + +static inline +bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) +{ + return alloc_cpumask_var(mask, flags | __GFP_ZERO); +} + void alloc_bootmem_cpumask_var(cpumask_var_t *mask); void free_cpumask_var(cpumask_var_t mask); void free_bootmem_cpumask_var(cpumask_var_t mask); @@ -811,9 +828,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); /* First bits of cpu_bit_bitmap are in fact unset. */ #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) +#if NR_CPUS == 1 +/* Uniprocessor: the possible/online/present masks are always "1" */ +#define for_each_possible_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) +#define for_each_online_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) +#define for_each_present_cpu(cpu) for ((cpu) = 0; (cpu) < 1; (cpu)++) +#else #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) #define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) #define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) +#endif /* Wrappers for arch boot code to manipulate normally-constant masks */ void init_cpu_present(const struct cpumask *src); @@ -1071,4 +1095,22 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, [0] = 1UL \ } } +/* + * Provide a valid theoretical max size for cpumap and cpulist sysfs files + * to avoid breaking userspace which may allocate a buffer based on the size + * reported by e.g. fstat. + * + * for cpumap NR_CPUS * 9/32 - 1 should be an exact length. + * + * For cpulist 7 is (ceil(log10(NR_CPUS)) + 1) allowing for NR_CPUS to be up + * to 2 orders of magnitude larger than 8192. And then we divide by 2 to + * cover a worst-case of every other cpu being on one of two nodes for a + * very large NR_CPUS. + * + * Use PAGE_SIZE as a minimum for smaller configurations. + */ +#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \ + ? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE) +#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE) + #endif /* __LINUX_CPUMASK_H */ diff --git a/include/linux/damon.h b/include/linux/damon.h index 7c62da31ce4b..7b1f4a488230 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -86,6 +86,8 @@ struct damon_target { * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT. * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE. * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE. + * @DAMOS_LRU_PRIO: Prioritize the region on its LRU lists. + * @DAMOS_LRU_DEPRIO: Deprioritize the region on its LRU lists. * @DAMOS_STAT: Do nothing but count the stat. 
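A consumer sketch for the cpumask_var_t allocators shown above; the calling pattern is unchanged, the wrappers are just inline now and all route through alloc_cpumask_var_node():

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_track_cpu(unsigned int cpu)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;	/* only possible with CONFIG_CPUMASK_OFFSTACK=y */

	cpumask_set_cpu(cpu, mask);
	/* ... use the mask ... */
	free_cpumask_var(mask);

	return 0;
}

As a worked instance of the CPUMAP_FILE_MAX_BYTES formula above: with NR_CPUS = 8192 the cpumap bound is 8192 * 9 / 32 - 1 = 2303 bytes, so on a 4 KiB page the PAGE_SIZE floor is what actually applies.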
* @NR_DAMOS_ACTIONS: Total number of DAMOS actions */ @@ -95,6 +97,8 @@ enum damos_action { DAMOS_PAGEOUT, DAMOS_HUGEPAGE, DAMOS_NOHUGEPAGE, + DAMOS_LRU_PRIO, + DAMOS_LRU_DEPRIO, DAMOS_STAT, /* Do nothing but only record the stat */ NR_DAMOS_ACTIONS, }; @@ -397,7 +401,6 @@ struct damon_callback { * detail. * * @kdamond: Kernel thread who does the monitoring. - * @kdamond_stop: Notifies whether kdamond should stop. * @kdamond_lock: Mutex for the synchronizations with @kdamond. * * For each monitoring context, one kernel thread for the monitoring is @@ -406,14 +409,14 @@ struct damon_callback { * Once started, the monitoring thread runs until explicitly required to be * terminated or every monitoring target is invalid. The validity of the * targets is checked via the &damon_operations.target_valid of @ops. The - * termination can also be explicitly requested by writing non-zero to - * @kdamond_stop. The thread sets @kdamond to NULL when it terminates. - * Therefore, users can know whether the monitoring is ongoing or terminated by - * reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from - * outside of the monitoring thread must be protected by @kdamond_lock. + * termination can also be explicitly requested by calling damon_stop(). + * The thread sets @kdamond to NULL when it terminates. Therefore, users can + * know whether the monitoring is ongoing or terminated by reading @kdamond. + * Reads and writes to @kdamond from outside of the monitoring thread must + * be protected by @kdamond_lock. * - * Note that the monitoring thread protects only @kdamond and @kdamond_stop via - * @kdamond_lock. Accesses to other fields must be protected by themselves. + * Note that the monitoring thread protects only @kdamond via @kdamond_lock. + * Accesses to other fields must be protected by themselves. * * @ops: Set of monitoring operations for given use cases. * @callback: Set of callbacks for monitoring events notifications. 
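Following the updated kdamond lifetime rules described above (termination via damon_stop(), liveness read from ->kdamond under ->kdamond_lock), a minimal sketch; the wrapper name is hypothetical:

#include <linux/damon.h>
#include <linux/mutex.h>

/* true while the monitoring thread of @ctx is still running */
static bool example_ctx_running(struct damon_ctx *ctx)
{
	bool running;

	mutex_lock(&ctx->kdamond_lock);
	running = ctx->kdamond != NULL;
	mutex_unlock(&ctx->kdamond_lock);

	return running;
}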
@@ -526,6 +529,12 @@ bool damon_is_registered_ops(enum damon_ops_id id); int damon_register_ops(struct damon_operations *ops); int damon_select_ops(struct damon_ctx *ctx, enum damon_ops_id id); +static inline bool damon_target_has_pid(const struct damon_ctx *ctx) +{ + return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR; +} + + int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive); int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); diff --git a/include/linux/dax.h b/include/linux/dax.h index e7b81634c52a..ba985333e26b 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -43,8 +43,21 @@ struct dax_operations { void *addr, size_t bytes, struct iov_iter *iter); }; +struct dax_holder_operations { + /* + * notify_failure - notify memory failure into inner holder device + * @dax_dev: the dax device which contains the holder + * @offset: offset on this dax device where memory failure occurs + * @len: length of this memory failure event + * @flags: action flags for memory failure handler + */ + int (*notify_failure)(struct dax_device *dax_dev, u64 offset, + u64 len, int mf_flags); +}; + #if IS_ENABLED(CONFIG_DAX) struct dax_device *alloc_dax(void *private, const struct dax_operations *ops); +void *dax_holder(struct dax_device *dax_dev); void put_dax(struct dax_device *dax_dev); void kill_dax(struct dax_device *dax_dev); void dax_write_cache(struct dax_device *dax_dev, bool wc); @@ -66,6 +79,10 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, return dax_synchronous(dax_dev); } #else +static inline void *dax_holder(struct dax_device *dax_dev) +{ + return NULL; +} static inline struct dax_device *alloc_dax(void *private, const struct dax_operations *ops) { @@ -114,12 +131,9 @@ struct writeback_control; #if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX) int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk); void dax_remove_host(struct gendisk *disk); -struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, - u64 *start_off); -static inline void fs_put_dax(struct dax_device *dax_dev) -{ - put_dax(dax_dev); -} +struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off, + void *holder, const struct dax_holder_operations *ops); +void fs_put_dax(struct dax_device *dax_dev, void *holder); #else static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk) { @@ -129,11 +143,12 @@ static inline void dax_remove_host(struct gendisk *disk) { } static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, - u64 *start_off) + u64 *start_off, void *holder, + const struct dax_holder_operations *ops) { return NULL; } -static inline void fs_put_dax(struct dax_device *dax_dev) +static inline void fs_put_dax(struct dax_device *dax_dev, void *holder) { } #endif /* CONFIG_BLOCK && CONFIG_FS_DAX */ @@ -146,6 +161,10 @@ struct page *dax_layout_busy_page(struct address_space *mapping); struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); dax_entry_t dax_lock_page(struct page *page); void dax_unlock_page(struct page *page, dax_entry_t cookie); +dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, + unsigned long index, struct page **page); +void dax_unlock_mapping_entry(struct address_space *mapping, + unsigned long index, dax_entry_t cookie); #else static inline struct page *dax_layout_busy_page(struct address_space *mapping) { @@ -173,6 +192,17 @@ static inline dax_entry_t dax_lock_page(struct page *page) static 
inline void dax_unlock_page(struct page *page, dax_entry_t cookie) { } + +static inline dax_entry_t dax_lock_mapping_entry(struct address_space *mapping, + unsigned long index, struct page **page) +{ + return 0; +} + +static inline void dax_unlock_mapping_entry(struct address_space *mapping, + unsigned long index, dax_entry_t cookie) +{ +} #endif int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, @@ -203,6 +233,8 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, size_t nr_pages); +int dax_holder_notify_failure(struct dax_device *dax_dev, u64 off, u64 len, + int mf_flags); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, @@ -214,6 +246,14 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); int dax_invalidate_mapping_entry_sync(struct address_space *mapping, pgoff_t index); +int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff, + struct inode *dest, loff_t destoff, + loff_t len, bool *is_same, + const struct iomap_ops *ops); +int dax_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags, + const struct iomap_ops *ops); static inline bool dax_mapping(struct address_space *mapping) { return mapping->host && IS_DAX(mapping->host); diff --git a/include/linux/dcache.h b/include/linux/dcache.h index f5bba51480b2..92c78ed02b54 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -233,6 +233,8 @@ extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, wait_queue_head_t *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); +extern bool d_same_name(const struct dentry *dentry, const struct dentry *parent, + const struct qstr *name); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); extern struct dentry *d_find_any_alias(struct inode *inode); extern struct dentry * d_obtain_alias(struct inode *); @@ -349,7 +351,7 @@ static inline void dont_mount(struct dentry *dentry) spin_unlock(&dentry->d_lock); } -extern void __d_lookup_done(struct dentry *); +extern void __d_lookup_unhash_wake(struct dentry *dentry); static inline int d_in_lookup(const struct dentry *dentry) { @@ -358,11 +360,8 @@ static inline int d_in_lookup(const struct dentry *dentry) static inline void d_lookup_done(struct dentry *dentry) { - if (unlikely(d_in_lookup(dentry))) { - spin_lock(&dentry->d_lock); - __d_lookup_done(dentry); - spin_unlock(&dentry->d_lock); - } + if (unlikely(d_in_lookup(dentry))) + __d_lookup_unhash_wake(dentry); } extern void dput(struct dentry *); diff --git a/include/linux/device.h b/include/linux/device.h index dc941997795c..424b55df0272 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -905,6 +905,8 @@ struct device *device_find_child(struct device *dev, void *data, int (*match)(struct device *dev, void *data)); struct device *device_find_child_by_name(struct device *parent, const char *name); +struct device *device_find_any_child(struct device *parent); + int device_rename(struct device *dev, const char *new_name); int device_move(struct device *dev, struct device *new_parent, enum dpm_order dpm_order); diff --git 
a/include/linux/device/driver.h b/include/linux/device/driver.h index 700453017e1c..7acaabde5396 100644 --- a/include/linux/device/driver.h +++ b/include/linux/device/driver.h @@ -129,6 +129,7 @@ extern struct device_driver *driver_find(const char *name, struct bus_type *bus); extern int driver_probe_done(void); extern void wait_for_device_probe(void); +void __init wait_for_init_devices_probe(void); /* sysfs interface for exporting driver attributes */ @@ -241,7 +242,6 @@ driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev) extern int driver_deferred_probe_timeout; void driver_deferred_probe_add(struct device *dev); -int driver_deferred_probe_check_state(struct device *dev); void driver_init(void); /** diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h index 90bd558a17f5..15d9e15ca830 100644 --- a/include/linux/dm-bufio.h +++ b/include/linux/dm-bufio.h @@ -18,13 +18,19 @@ struct dm_bufio_client; struct dm_buffer; /* + * Flags for dm_bufio_client_create + */ +#define DM_BUFIO_CLIENT_NO_SLEEP 0x1 + +/* * Create a buffered IO cache on a given device */ struct dm_bufio_client * dm_bufio_client_create(struct block_device *bdev, unsigned block_size, unsigned reserved_buffers, unsigned aux_size, void (*alloc_callback)(struct dm_buffer *), - void (*write_callback)(struct dm_buffer *)); + void (*write_callback)(struct dm_buffer *), + unsigned int flags); /* * Release a buffered IO cache. diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h index 77e335a1bcac..66b1e56fbb81 100644 --- a/include/linux/dma-fence-unwrap.h +++ b/include/linux/dma-fence-unwrap.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * fence-chain: chain fences together in a timeline - * * Copyright (C) 2022 Advanced Micro Devices, Inc. * Authors: * Christian König <christian.koenig@amd.com> @@ -10,8 +8,7 @@ #ifndef __LINUX_DMA_FENCE_UNWRAP_H #define __LINUX_DMA_FENCE_UNWRAP_H -#include <linux/dma-fence-chain.h> -#include <linux/dma-fence-array.h> +struct dma_fence; /** * struct dma_fence_unwrap - cursor into the container structure @@ -33,50 +30,9 @@ struct dma_fence_unwrap { unsigned int index; }; -/* Internal helper to start new array iteration, don't use directly */ -static inline struct dma_fence * -__dma_fence_unwrap_array(struct dma_fence_unwrap * cursor) -{ - cursor->array = dma_fence_chain_contained(cursor->chain); - cursor->index = 0; - return dma_fence_array_first(cursor->array); -} - -/** - * dma_fence_unwrap_first - return the first fence from fence containers - * @head: the entrypoint into the containers - * @cursor: current position inside the containers - * - * Unwraps potential dma_fence_chain/dma_fence_array containers and return the - * first fence. - */ -static inline struct dma_fence * -dma_fence_unwrap_first(struct dma_fence *head, struct dma_fence_unwrap *cursor) -{ - cursor->chain = dma_fence_get(head); - return __dma_fence_unwrap_array(cursor); -} - -/** - * dma_fence_unwrap_next - return the next fence from a fence containers - * @cursor: current position inside the containers - * - * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return - * the next fence from them. 
- */ -static inline struct dma_fence * -dma_fence_unwrap_next(struct dma_fence_unwrap *cursor) -{ - struct dma_fence *tmp; - - ++cursor->index; - tmp = dma_fence_array_next(cursor->array, cursor->index); - if (tmp) - return tmp; - - cursor->chain = dma_fence_chain_walk(cursor->chain); - return __dma_fence_unwrap_array(cursor); -} +struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head, + struct dma_fence_unwrap *cursor); +struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor); /** * dma_fence_unwrap_for_each - iterate over all fences in containers @@ -92,4 +48,28 @@ dma_fence_unwrap_next(struct dma_fence_unwrap *cursor) for (fence = dma_fence_unwrap_first(head, cursor); fence; \ fence = dma_fence_unwrap_next(cursor)) +struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, + struct dma_fence **fences, + struct dma_fence_unwrap *cursors); + +/** + * dma_fence_unwrap_merge - unwrap and merge fences + * + * All fences given as parameters are unwrapped and merged back together as flat + * dma_fence_array. Useful if multiple containers need to be merged together. + * + * Implemented as a macro to allocate the necessary arrays on the stack and + * account the stack frame size to the caller. + * + * Returns NULL on memory allocation failure, a dma_fence object representing + * all the given fences otherwise. + */ +#define dma_fence_unwrap_merge(...) \ + ({ \ + struct dma_fence *__f[] = { __VA_ARGS__ }; \ + struct dma_fence_unwrap __c[ARRAY_SIZE(__f)]; \ + \ + __dma_fence_unwrap_merge(ARRAY_SIZE(__f), __f, __c); \ + }) + #endif diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 0d5b06b3a4a6..d678afeb8a13 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -11,7 +11,17 @@ struct cma; +/* + * Values for struct dma_map_ops.flags: + * + * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can + * handle PCI P2PDMA pages in the map_sg/unmap_sg operation. 
+ */ +#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0) + struct dma_map_ops { + unsigned int flags; + void *(*alloc)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); @@ -69,6 +79,7 @@ struct dma_map_ops { int (*dma_supported)(struct device *dev, u64 mask); u64 (*get_required_mask)(struct device *dev); size_t (*max_mapping_size)(struct device *dev); + size_t (*opt_mapping_size)(void); unsigned long (*get_merge_boundary)(struct device *dev); }; @@ -166,6 +177,7 @@ static inline void dma_pernuma_cma_reserve(void) { } #ifdef CONFIG_DMA_DECLARE_COHERENT int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size); +void dma_release_coherent_memory(struct device *dev); int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle, void **ret); int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); @@ -177,9 +189,11 @@ static inline int dma_declare_coherent_memory(struct device *dev, { return -ENOSYS; } + #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) #define dma_release_from_dev_coherent(dev, order, vaddr) (0) #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) +static inline void dma_release_coherent_memory(struct device *dev) { } #endif /* CONFIG_DMA_DECLARE_COHERENT */ #ifdef CONFIG_DMA_GLOBAL_POOL @@ -379,4 +393,57 @@ static inline void debug_dma_dump_mappings(struct device *dev) extern const struct dma_map_ops dma_dummy_ops; +enum pci_p2pdma_map_type { + /* + * PCI_P2PDMA_MAP_UNKNOWN: Used internally for indicating the mapping + * type hasn't been calculated yet. Functions that return this enum + * never return this value. + */ + PCI_P2PDMA_MAP_UNKNOWN = 0, + + /* + * PCI_P2PDMA_MAP_NOT_SUPPORTED: Indicates the transaction will + * traverse the host bridge and the host bridge is not in the + * allowlist. DMA Mapping routines should return an error when + * this is returned. + */ + PCI_P2PDMA_MAP_NOT_SUPPORTED, + + /* + * PCI_P2PDMA_BUS_ADDR: Indicates that two devices can talk to + * each other directly through a PCI switch and the transaction will + * not traverse the host bridge. Such a mapping should program + * the DMA engine with PCI bus addresses. + */ + PCI_P2PDMA_MAP_BUS_ADDR, + + /* + * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: Indicates two devices can talk + * to each other, but the transaction traverses a host bridge on the + * allowlist. In this case, a normal mapping either with CPU physical + * addresses (in the case of dma-direct) or IOVA addresses (in the + * case of IOMMUs) should be used to program the DMA engine. 
+ */ + PCI_P2PDMA_MAP_THRU_HOST_BRIDGE, +}; + +struct pci_p2pdma_map_state { + struct dev_pagemap *pgmap; + int map; + u64 bus_off; +}; + +#ifdef CONFIG_PCI_P2PDMA +enum pci_p2pdma_map_type +pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, + struct scatterlist *sg); +#else /* CONFIG_PCI_P2PDMA */ +static inline enum pci_p2pdma_map_type +pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, + struct scatterlist *sg) +{ + return PCI_P2PDMA_MAP_NOT_SUPPORTED; +} +#endif /* CONFIG_PCI_P2PDMA */ + #endif /* _LINUX_DMA_MAP_OPS_H */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index dca2b1355bb1..25a30906289d 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -140,10 +140,12 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, unsigned long attrs); bool dma_can_mmap(struct device *dev); int dma_supported(struct device *dev, u64 mask); +bool dma_pci_p2pdma_supported(struct device *dev); int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); +size_t dma_opt_mapping_size(struct device *dev); bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size, @@ -250,6 +252,10 @@ static inline int dma_supported(struct device *dev, u64 mask) { return 0; } +static inline bool dma_pci_p2pdma_supported(struct device *dev) +{ + return false; +} static inline int dma_set_mask(struct device *dev, u64 mask) { return -EIO; @@ -266,6 +272,10 @@ static inline size_t dma_max_mapping_size(struct device *dev) { return 0; } +static inline size_t dma_opt_mapping_size(struct device *dev) +{ + return 0; +} static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) { return false; diff --git a/include/linux/dma/edma.h b/include/linux/dma/edma.h index cab6e18773da..7d8062e9c544 100644 --- a/include/linux/dma/edma.h +++ b/include/linux/dma/edma.h @@ -12,19 +12,74 @@ #include <linux/device.h> #include <linux/dmaengine.h> +#define EDMA_MAX_WR_CH 8 +#define EDMA_MAX_RD_CH 8 + struct dw_edma; +struct dw_edma_region { + phys_addr_t paddr; + void __iomem *vaddr; + size_t sz; +}; + +struct dw_edma_core_ops { + int (*irq_vector)(struct device *dev, unsigned int nr); +}; + +enum dw_edma_map_format { + EDMA_MF_EDMA_LEGACY = 0x0, + EDMA_MF_EDMA_UNROLL = 0x1, + EDMA_MF_HDMA_COMPAT = 0x5 +}; + +/** + * enum dw_edma_chip_flags - Flags specific to an eDMA chip + * @DW_EDMA_CHIP_LOCAL: eDMA is used locally by an endpoint + */ +enum dw_edma_chip_flags { + DW_EDMA_CHIP_LOCAL = BIT(0), +}; + /** * struct dw_edma_chip - representation of DesignWare eDMA controller hardware * @dev: struct device of the eDMA controller * @id: instance ID - * @irq: irq line - * @dw: struct dw_edma that is filed by dw_edma_probe() + * @nr_irqs: total number of DMA IRQs + * @ops DMA channel to IRQ number mapping + * @flags dw_edma_chip_flags + * @reg_base DMA register base address + * @ll_wr_cnt DMA write link list count + * @ll_rd_cnt DMA read link list count + * @rg_region DMA register region + * @ll_region_wr DMA descriptor link list memory for write channel + * @ll_region_rd DMA descriptor link list memory for read channel + * @dt_region_wr DMA data memory for write channel + * @dt_region_rd DMA data memory for read channel + * @mf DMA register map format 
+ * @dw: struct dw_edma that is filled by dw_edma_probe() */ struct dw_edma_chip { struct device *dev; int id; - int irq; + int nr_irqs; + const struct dw_edma_core_ops *ops; + u32 flags; + + void __iomem *reg_base; + + u16 ll_wr_cnt; + u16 ll_rd_cnt; + /* link list address */ + struct dw_edma_region ll_region_wr[EDMA_MAX_WR_CH]; + struct dw_edma_region ll_region_rd[EDMA_MAX_RD_CH]; + + /* data region */ + struct dw_edma_region dt_region_wr[EDMA_MAX_WR_CH]; + struct dw_edma_region dt_region_rd[EDMA_MAX_RD_CH]; + + enum dw_edma_map_format mf; + struct dw_edma *dw; }; diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h index 8887762360d4..f487a4fa103a 100644 --- a/include/linux/dma/imx-dma.h +++ b/include/linux/dma/imx-dma.h @@ -70,6 +70,16 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan) * struct sdma_peripheral_config - SDMA config for audio * @n_fifos_src: Number of FIFOs for recording * @n_fifos_dst: Number of FIFOs for playback + * @stride_fifos_src: FIFO address stride for recording, 0 means all FIFOs are + * continuous, 1 means 1 word stride between FIFOs. All stride + * between FIFOs should be same. + * @stride_fifos_dst: FIFO address stride for playback + * @words_per_fifo: numbers of words per FIFO fetch/fill, 1 means + * one channel per FIFO, 2 means 2 channels per FIFO.. + * If 'n_fifos_src = 4' and 'words_per_fifo = 2', it + * means the first two words(channels) fetch from FIFO0 + * and then jump to FIFO1 for next two words, and so on + * after the last FIFO3 fetched, roll back to FIFO0. * @sw_done: Use software done. Needed for PDM (micfil) * * Some i.MX Audio devices (SAI, micfil) have multiple successive FIFO @@ -82,6 +92,9 @@ static inline int imx_dma_is_general_purpose(struct dma_chan *chan) struct sdma_peripheral_config { int n_fifos_src; int n_fifos_dst; + int stride_fifos_src; + int stride_fifos_dst; + int words_per_fifo; bool sw_done; }; diff --git a/include/linux/dma/qcom-gpi-dma.h b/include/linux/dma/qcom-gpi-dma.h index f46dc3372f11..6680dd1a43c6 100644 --- a/include/linux/dma/qcom-gpi-dma.h +++ b/include/linux/dma/qcom-gpi-dma.h @@ -26,7 +26,7 @@ enum spi_transfer_cmd { * @clk_div: source clock divider * @clk_src: serial clock * @cmd: spi cmd - * @fragmentation: keep CS assserted at end of sequence + * @fragmentation: keep CS asserted at end of sequence * @cs: chip select toggle * @set_config: set peripheral config * @rx_len: receive length for buffer diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index b46b88e6aa0d..c923f4e60f24 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -50,7 +50,6 @@ enum dma_status { */ enum dma_transaction_type { DMA_MEMCPY, - DMA_MEMCPY_SG, DMA_XOR, DMA_PQ, DMA_XOR_VAL, @@ -887,11 +886,6 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags); - struct dma_async_tx_descriptor *(*device_prep_dma_memcpy_sg)( - struct dma_chan *chan, - struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_xor)( struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); @@ -1060,20 +1054,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy( len, flags); } -static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy_sg( - struct dma_chan *chan, - 
struct scatterlist *dst_sg, unsigned int dst_nents, - struct scatterlist *src_sg, unsigned int src_nents, - unsigned long flags) -{ - if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy_sg) - return NULL; - - return chan->device->device_prep_dma_memcpy_sg(chan, dst_sg, dst_nents, - src_sg, src_nents, - flags); -} - static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan, enum dma_desc_metadata_mode mode) { diff --git a/include/linux/dmar.h b/include/linux/dmar.h index cbd714a198a0..d81a51978d01 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -18,11 +18,7 @@ struct acpi_dmar_header; -#ifdef CONFIG_X86 -# define DMAR_UNITS_SUPPORTED MAX_IO_APICS -#else -# define DMAR_UNITS_SUPPORTED 64 -#endif +#define DMAR_UNITS_SUPPORTED 1024 /* DMAR Flags */ #define DMAR_INTR_REMAP 0x1 diff --git a/include/linux/dsa/tag_qca.h b/include/linux/dsa/tag_qca.h index 4359fb0221cf..50be7cbd93a5 100644 --- a/include/linux/dsa/tag_qca.h +++ b/include/linux/dsa/tag_qca.h @@ -3,6 +3,11 @@ #ifndef __TAG_QCA_H #define __TAG_QCA_H +#include <linux/types.h> + +struct dsa_switch; +struct sk_buff; + #define QCA_HDR_LEN 2 #define QCA_HDR_VERSION 0x2 diff --git a/include/linux/efi.h b/include/linux/efi.h index 7d9b0bb47eb3..d2b84c2fec39 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -872,6 +872,7 @@ static inline bool efi_rt_services_supported(unsigned int mask) { return (efi.runtime_supported_mask & mask) == mask; } +extern void efi_find_mirror(void); #else static inline bool efi_enabled(int feature) { @@ -889,6 +890,8 @@ static inline bool efi_rt_services_supported(unsigned int mask) { return false; } + +static inline void efi_find_mirror(void) {} #endif extern int efi_status_to_err(efi_status_t status); @@ -1027,29 +1030,6 @@ struct efivars { #define EFI_VAR_NAME_LEN 1024 -struct efi_variable { - efi_char16_t VariableName[EFI_VAR_NAME_LEN/sizeof(efi_char16_t)]; - efi_guid_t VendorGuid; - unsigned long DataSize; - __u8 Data[1024]; - efi_status_t Status; - __u32 Attributes; -} __attribute__((packed)); - -struct efivar_entry { - struct efi_variable var; - struct list_head list; - struct kobject kobj; - bool scanning; - bool deleting; -}; - -static inline void -efivar_unregister(struct efivar_entry *var) -{ - kobject_put(&var->kobj); -} - int efivars_register(struct efivars *efivars, const struct efivar_operations *ops, struct kobject *kobject); @@ -1057,43 +1037,26 @@ int efivars_unregister(struct efivars *efivars); struct kobject *efivars_kobject(void); int efivar_supports_writes(void); -int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), - void *data, bool duplicates, struct list_head *head); - -int efivar_entry_add(struct efivar_entry *entry, struct list_head *head); -int efivar_entry_remove(struct efivar_entry *entry); - -int __efivar_entry_delete(struct efivar_entry *entry); -int efivar_entry_delete(struct efivar_entry *entry); - -int efivar_entry_size(struct efivar_entry *entry, unsigned long *size); -int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes, - unsigned long *size, void *data); -int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, - unsigned long *size, void *data); -int efivar_entry_set(struct efivar_entry *entry, u32 attributes, - unsigned long size, void *data, struct list_head *head); -int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes, - unsigned long *size, void *data, bool *set); -int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 
attributes, - bool block, unsigned long size, void *data); - -int efivar_entry_iter_begin(void); -void efivar_entry_iter_end(void); - -int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data, - struct efivar_entry **prev); -int efivar_entry_iter(int (*func)(struct efivar_entry *, void *), - struct list_head *head, void *data); - -struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid, - struct list_head *head, bool remove); - -bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data, - unsigned long data_size); -bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, - size_t len); + +int efivar_lock(void); +int efivar_trylock(void); +void efivar_unlock(void); + +efi_status_t efivar_get_variable(efi_char16_t *name, efi_guid_t *vendor, + u32 *attr, unsigned long *size, void *data); + +efi_status_t efivar_get_next_variable(unsigned long *name_size, + efi_char16_t *name, efi_guid_t *vendor); + +efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, + void *data, bool nonblocking); + +efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor, + u32 attr, unsigned long data_size, void *data); + +efi_status_t check_var_size(u32 attributes, unsigned long size); +efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size); #if IS_ENABLED(CONFIG_EFI_CAPSULE_LOADER) extern bool efi_capsule_pending(int *reset_type); @@ -1181,6 +1144,8 @@ static inline void efi_check_for_embedded_firmwares(void) { } efi_status_t efi_random_get_seed(void); +#define arch_efi_call_virt(p, f, args...) ((p)->f(args)) + /* * Arch code can implement the following three template macros, avoiding * reptition for the void/non-void return cases of {__,}efi_call_virt(): diff --git a/include/linux/extcon.h b/include/linux/extcon.h index 685401d94d39..3c45c3846fe9 100644 --- a/include/linux/extcon.h +++ b/include/linux/extcon.h @@ -76,6 +76,8 @@ #define EXTCON_DISP_VGA 43 /* Video Graphics Array */ #define EXTCON_DISP_DP 44 /* Display Port */ #define EXTCON_DISP_HMD 45 /* Head-Mounted Display */ +#define EXTCON_DISP_CVBS 46 /* Composite Video Broadcast Signal */ +#define EXTCON_DISP_EDP 47 /* Embedded Display Port */ /* Miscellaneous external connector */ #define EXTCON_DOCK 60 diff --git a/include/linux/fb.h b/include/linux/fb.h index 2892145468c9..07fcd0e56682 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -511,7 +511,6 @@ struct fb_info { } *apertures; bool skip_vt_switch; /* no VT switch on suspend/resume required */ - bool forced_out; /* set when being removed by another driver */ }; static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { diff --git a/include/linux/filter.h b/include/linux/filter.h index ed0c0ff42ad5..a5f21dc3c432 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -559,40 +559,6 @@ struct bpf_prog_stats { struct u64_stats_sync syncp; } __aligned(2 * sizeof(u64)); -struct bpf_prog { - u16 pages; /* Number of allocated pages */ - u16 jited:1, /* Is our filter JIT'ed? */ - jit_requested:1,/* archs need to JIT the prog */ - gpl_compatible:1, /* Is filter GPL compatible? */ - cb_access:1, /* Is control block accessed? */ - dst_needed:1, /* Do we need dst entry? */ - blinding_requested:1, /* needs constant blinding */ - blinded:1, /* Was blinded */ - is_func:1, /* program is a bpf function */ - kprobe_override:1, /* Do we override a kprobe? 
*/ - has_callchain_buf:1, /* callchain buffer allocated? */ - enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ - call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ - call_get_func_ip:1, /* Do we call get_func_ip() */ - tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ - enum bpf_prog_type type; /* Type of BPF program */ - enum bpf_attach_type expected_attach_type; /* For some prog types */ - u32 len; /* Number of filter blocks */ - u32 jited_len; /* Size of jited insns in bytes */ - u8 tag[BPF_TAG_SIZE]; - struct bpf_prog_stats __percpu *stats; - int __percpu *active; - unsigned int (*bpf_func)(const void *ctx, - const struct bpf_insn *insn); - struct bpf_prog_aux *aux; /* Auxiliary fields */ - struct sock_fprog_kern *orig_prog; /* Original BPF program */ - /* Instructions for interpreter */ - union { - DECLARE_FLEX_ARRAY(struct sock_filter, insns); - DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); - }; -}; - struct sk_filter { refcount_t refcnt; struct rcu_head rcu; @@ -948,6 +914,7 @@ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); +bool bpf_jit_supports_subprog_tailcalls(void); bool bpf_jit_supports_kfunc_call(void); bool bpf_helper_changes_pkt_data(void *func); @@ -1060,6 +1027,14 @@ u64 bpf_jit_alloc_exec_limit(void); void *bpf_jit_alloc_exec(unsigned long size); void bpf_jit_free_exec(void *addr); void bpf_jit_free(struct bpf_prog *fp); +struct bpf_binary_header * +bpf_jit_binary_pack_hdr(const struct bpf_prog *fp); + +static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) +{ + return list_empty(&fp->aux->ksym.lnode) || + fp->aux->ksym.lnode.prev == LIST_POISON2; +} struct bpf_binary_header * bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image, diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h index 30055706cce2..cad828e21c72 100644 --- a/include/linux/firmware/cirrus/cs_dsp.h +++ b/include/linux/firmware/cirrus/cs_dsp.h @@ -11,6 +11,7 @@ #ifndef __CS_DSP_H #define __CS_DSP_H +#include <linux/bits.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/list.h> @@ -34,6 +35,7 @@ #define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9) #define CS_DSP_DATA_WORD_SIZE 3 +#define CS_DSP_DATA_WORD_BITS (3 * BITS_PER_BYTE) #define CS_DSP_ACKED_CTL_TIMEOUT_MS 100 #define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10 @@ -189,7 +191,8 @@ struct cs_dsp { * @control_remove: Called under the pwr_lock when a control is destroyed * @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started * @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started - * @post_stop: Called under the pwr_lock by cs_dsp_stop() + * @pre_stop: Called under the pwr_lock by cs_dsp_stop() before the core is stopped + * @post_stop: Called under the pwr_lock by cs_dsp_stop() after the core is stopped * @watchdog_expired: Called when a watchdog expiry is detected * * These callbacks give the cs_dsp client an opportunity to respond to events @@ -200,6 +203,7 @@ struct cs_dsp_client_ops { void (*control_remove)(struct cs_dsp_coeff_ctl *ctl); int (*pre_run)(struct cs_dsp *dsp); int (*post_run)(struct cs_dsp *dsp); + void (*pre_stop)(struct cs_dsp *dsp); void (*post_stop)(struct cs_dsp *dsp); void (*watchdog_expired)(struct cs_dsp *dsp); }; @@ -250,4 +254,75 @@ struct cs_dsp_alg_region 
*cs_dsp_find_alg_region(struct cs_dsp *dsp, const char *cs_dsp_mem_region_name(unsigned int type); +/** + * struct cs_dsp_chunk - Describes a buffer holding data formatted for the DSP + * @data: Pointer to underlying buffer memory + * @max: Pointer to end of the buffer memory + * @bytes: Number of bytes read/written into the memory chunk + * @cache: Temporary holding data as it is formatted + * @cachebits: Number of bits of data currently in cache + */ +struct cs_dsp_chunk { + u8 *data; + u8 *max; + int bytes; + + u32 cache; + int cachebits; +}; + +/** + * cs_dsp_chunk() - Create a DSP memory chunk + * @data: Pointer to the buffer that will be used to store data + * @size: Size of the buffer in bytes + * + * Return: A cs_dsp_chunk structure + */ +static inline struct cs_dsp_chunk cs_dsp_chunk(void *data, int size) +{ + struct cs_dsp_chunk ch = { + .data = data, + .max = data + size, + }; + + return ch; +} + +/** + * cs_dsp_chunk_end() - Check if a DSP memory chunk is full + * @ch: Pointer to the chunk structure + * + * Return: True if the whole buffer has been read/written + */ +static inline bool cs_dsp_chunk_end(struct cs_dsp_chunk *ch) +{ + return ch->data == ch->max; +} + +/** + * cs_dsp_chunk_bytes() - Number of bytes written/read from a DSP memory chunk + * @ch: Pointer to the chunk structure + * + * Return: Number of bytes read/written to the buffer + */ +static inline int cs_dsp_chunk_bytes(struct cs_dsp_chunk *ch) +{ + return ch->bytes; +} + +/** + * cs_dsp_chunk_valid_addr() - Check if an address is in a DSP memory chunk + * @ch: Pointer to the chunk structure + * + * Return: True if the given address is within the buffer + */ +static inline bool cs_dsp_chunk_valid_addr(struct cs_dsp_chunk *ch, void *addr) +{ + return (u8 *)addr >= ch->data && (u8 *)addr < ch->max; +} + +int cs_dsp_chunk_write(struct cs_dsp_chunk *ch, int nbits, u32 val); +int cs_dsp_chunk_flush(struct cs_dsp_chunk *ch); +int cs_dsp_chunk_read(struct cs_dsp_chunk *ch, int nbits); + #endif diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index aad497a9ad8b..a718f853d457 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -404,6 +404,52 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY) /** + * Request INTEL_SIP_SMC_RSU_DCMF_STATUS + * + * Sync call used by service driver at EL1 to query DCMF status from FW + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_DCMF_STATUS + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 dcmf3 | dcmf2 | dcmf1 | dcmf0 + * + * Or + * + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS 20 +#define INTEL_SIP_SMC_RSU_DCMF_STATUS \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_STATUS) + +/** + * Request INTEL_SIP_SMC_SERVICE_COMPLETED + * Sync call to check if the secure world have completed service request + * or not. + * + * Call register usage: + * a0: INTEL_SIP_SMC_SERVICE_COMPLETED + * a1: this register is optional. If used, it is the physical address for + * secure firmware to put output data + * a2: this register is optional. 
If used, it is the size of output data + * a3-a7: not used + * + * Return status: + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR, + * INTEL_SIP_SMC_REJECTED or INTEL_SIP_SMC_STATUS_BUSY + * a1: mailbox error if a0 is INTEL_SIP_SMC_STATUS_ERROR + * a2: physical address containing the process info + * for FCS certificate -- the data contains the certificate status + * for FCS cryption -- the data contains the actual data size FW processes + * a3: output data size + */ +#define INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED 30 +#define INTEL_SIP_SMC_SERVICE_COMPLETED \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_SERVICE_COMPLETED) + +/** * Request INTEL_SIP_SMC_FIRMWARE_VERSION * * Sync call used to query the version of running firmware @@ -420,4 +466,133 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FIRMWARE_VERSION \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION) +/** + * Request INTEL_SIP_SMC_SVC_VERSION + * + * Sync call used to query the SIP SMC API Version + * + * Call register usage: + * a0 INTEL_SIP_SMC_SVC_VERSION + * a1-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 Major + * a2 Minor + */ +#define INTEL_SIP_SMC_SVC_FUNCID_VERSION 512 +#define INTEL_SIP_SMC_SVC_VERSION \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_SVC_FUNCID_VERSION) + +/** + * SMC call protocol for FPGA Crypto Service (FCS) + * FUNCID starts from 90 + */ + +/** + * Request INTEL_SIP_SMC_FCS_RANDOM_NUMBER + * + * Sync call used to query the random number generated by the firmware + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_RANDOM_NUMBER + * a1 the physical address for firmware to write generated random data + * a2-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or + * INTEL_SIP_SMC_FCS_REJECTED + * a1 mailbox error + * a2 the physical address of generated random number + * a3 size + */ +#define INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER 90 +#define INTEL_SIP_SMC_FCS_RANDOM_NUMBER \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_RANDOM_NUMBER) + +/** + * Request INTEL_SIP_SMC_FCS_CRYPTION + * Async call for data encryption and HMAC signature generation, or for + * data decryption and HMAC verification. 
+ * + * Call INTEL_SIP_SMC_SERVICE_COMPLETED to get the output encrypted or + * decrypted data + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_CRYPTION + * a1 cryption mode (1 for encryption and 0 for decryption) + * a2 physical address which stores to be encrypted or decrypted data + * a3 input data size + * a4 physical address which will hold the encrypted or decrypted output data + * a5 output data size + * a6-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_ERROR or + * INTEL_SIP_SMC_STATUS_REJECTED + * a1-3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_CRYPTION 91 +#define INTEL_SIP_SMC_FCS_CRYPTION \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_CRYPTION) + +/** + * Request INTEL_SIP_SMC_FCS_SERVICE_REQUEST + * Async call for authentication service of HPS software + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_SERVICE_REQUEST + * a1 the physical address of data block + * a2 size of data block + * a3-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_ERROR or + * INTEL_SIP_SMC_REJECTED + * a1-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST 92 +#define INTEL_SIP_SMC_FCS_SERVICE_REQUEST \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SERVICE_REQUEST) + +/** + * Request INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE + * Sync call to send a signed certificate + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_SEND_CERTIFICATE + * a1 the physical address of CERTIFICATE block + * a2 size of data block + * a3-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FCS_REJECTED + * a1-a3 not used + */ +#define INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE 93 +#define INTEL_SIP_SMC_FCS_SEND_CERTIFICATE \ + INTEL_SIP_SMC_STD_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_SEND_CERTIFICATE) + +/** + * Request INTEL_SIP_SMC_FCS_GET_PROVISION_DATA + * Sync call to dump all the fuses and key hashes + * + * Call register usage: + * a0 INTEL_SIP_SMC_FCS_GET_PROVISION_DATA + * a1 the physical address for firmware to write structure of fuse and + * key hashes + * a2-a7 not used + * + * Return status: + * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FCS_ERROR or + * INTEL_SIP_SMC_FCS_REJECTED + * a1 mailbox error + * a2 physical address for the structure of fuse and key hashes + * a3 the size of structure + * + */ +#define INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA 94 +#define INTEL_SIP_SMC_FCS_GET_PROVISION_DATA \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FCS_GET_PROVISION_DATA) + #endif diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 18c1841fdb1f..0c16037fd08d 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -14,6 +14,7 @@ */ #define SVC_CLIENT_FPGA "fpga" #define SVC_CLIENT_RSU "rsu" +#define SVC_CLIENT_FCS "fcs" /* * Status of the sent command, in bit number @@ -49,6 +50,7 @@ #define SVC_STATUS_BUSY 4 #define SVC_STATUS_ERROR 5 #define SVC_STATUS_NO_SUPPORT 6 +#define SVC_STATUS_INVALID_PARAM 7 /* * Flag bit for COMMAND_RECONFIG @@ -66,6 +68,8 @@ #define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300 #define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720 #define SVC_RSU_REQUEST_TIMEOUT_MS 300 +#define SVC_FCS_REQUEST_TIMEOUT_MS 2000 +#define SVC_COMPLETED_TIMEOUT_MS 30000 struct stratix10_svc_chan; @@ -105,34 +109,79 @@ struct stratix10_svc_chan; * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status * is 
SVC_STATUS_OK or SVC_STATUS_ERROR * + * @COMMAND_POLL_SERVICE_STATUS: poll if the service request is complete, + * return statis is SVC_STATUS_OK, SVC_STATUS_ERROR or SVC_STATUS_BUSY + * * @COMMAND_FIRMWARE_VERSION: query running firmware version, return status * is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version, + * return status is SVC_STATUS_OK + * + * @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_FCS_REQUEST_SERVICE: request validation of image from firmware, + * return status is SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM + * + * @COMMAND_FCS_SEND_CERTIFICATE: send a certificate, return status is + * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR + * + * @COMMAND_FCS_GET_PROVISION_DATA: read the provisioning data, return status is + * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR + * + * @COMMAND_FCS_DATA_ENCRYPTION: encrypt the data, return status is + * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR + * + * @COMMAND_FCS_DATA_DECRYPTION: decrypt the data, return status is + * SVC_STATUS_OK, SVC_STATUS_INVALID_PARAM, SVC_STATUS_ERROR + * + * @COMMAND_FCS_RANDOM_NUMBER_GEN: generate a random number, return status + * is SVC_STATUS_OK, SVC_STATUS_ERROR */ enum stratix10_svc_command_code { + /* for FPGA */ COMMAND_NOOP = 0, COMMAND_RECONFIG, COMMAND_RECONFIG_DATA_SUBMIT, COMMAND_RECONFIG_DATA_CLAIM, COMMAND_RECONFIG_STATUS, - COMMAND_RSU_STATUS, + /* for RSU */ + COMMAND_RSU_STATUS = 10, COMMAND_RSU_UPDATE, COMMAND_RSU_NOTIFY, COMMAND_RSU_RETRY, COMMAND_RSU_MAX_RETRY, COMMAND_RSU_DCMF_VERSION, + COMMAND_RSU_DCMF_STATUS, COMMAND_FIRMWARE_VERSION, + /* for FCS */ + COMMAND_FCS_REQUEST_SERVICE = 20, + COMMAND_FCS_SEND_CERTIFICATE, + COMMAND_FCS_GET_PROVISION_DATA, + COMMAND_FCS_DATA_ENCRYPTION, + COMMAND_FCS_DATA_DECRYPTION, + COMMAND_FCS_RANDOM_NUMBER_GEN, + /* for general status poll */ + COMMAND_POLL_SERVICE_STATUS = 40, + /* Non-mailbox SMC Call */ + COMMAND_SMC_SVC_VERSION = 200, }; /** * struct stratix10_svc_client_msg - message sent by client to service * @payload: starting address of data need be processed - * @payload_length: data size in bytes + * @payload_length: to be processed data size in bytes + * @payload_output: starting address of processed data + * @payload_length_output: processed data size in bytes * @command: service command * @arg: args to be passed via registers and not physically mapped buffers */ struct stratix10_svc_client_msg { void *payload; size_t payload_length; + void *payload_output; + size_t payload_length_output; enum stratix10_svc_command_code command; u64 arg[3]; }; diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h index be5984bda592..931b6c5c72df 100644 --- a/include/linux/firmware/trusted_foundations.h +++ b/include/linux/firmware/trusted_foundations.h @@ -71,12 +71,16 @@ static inline void register_trusted_foundations( static inline void of_register_trusted_foundations(void) { + struct device_node *np = of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations"); + + if (!np) + return; + of_node_put(np); /* * If we find the target should enable TF but does not support it, * fail as the system won't be able to do much anyway */ - if (of_find_compatible_node(NULL, NULL, "tlm,trusted-foundations")) - register_trusted_foundations(NULL); + register_trusted_foundations(NULL); } static inline bool trusted_foundations_registered(void) 
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index cbde3b1fa414..9f50dacbf7d6 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -369,6 +369,11 @@ enum pm_pinctrl_drive_strength { PM_PINCTRL_DRIVE_STRENGTH_12MA = 3, }; +enum pm_pinctrl_tri_state { + PM_PINCTRL_TRI_STATE_DISABLE = 0, + PM_PINCTRL_TRI_STATE_ENABLE = 1, +}; + enum zynqmp_pm_shutdown_type { ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN = 0, ZYNQMP_PM_SHUTDOWN_TYPE_RESET = 1, diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 0f9468771bb9..54f63459efd6 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -22,6 +22,8 @@ struct sg_table; * @FPGA_MGR_STATE_RESET: FPGA in reset state * @FPGA_MGR_STATE_FIRMWARE_REQ: firmware request in progress * @FPGA_MGR_STATE_FIRMWARE_REQ_ERR: firmware request failed + * @FPGA_MGR_STATE_PARSE_HEADER: parse FPGA image header + * @FPGA_MGR_STATE_PARSE_HEADER_ERR: Error during PARSE_HEADER stage * @FPGA_MGR_STATE_WRITE_INIT: preparing FPGA for programming * @FPGA_MGR_STATE_WRITE_INIT_ERR: Error during WRITE_INIT stage * @FPGA_MGR_STATE_WRITE: writing image to FPGA @@ -41,7 +43,9 @@ enum fpga_mgr_states { FPGA_MGR_STATE_FIRMWARE_REQ, FPGA_MGR_STATE_FIRMWARE_REQ_ERR, - /* write sequence: init, write, complete */ + /* write sequence: parse header, init, write, complete */ + FPGA_MGR_STATE_PARSE_HEADER, + FPGA_MGR_STATE_PARSE_HEADER_ERR, FPGA_MGR_STATE_WRITE_INIT, FPGA_MGR_STATE_WRITE_INIT_ERR, FPGA_MGR_STATE_WRITE, @@ -85,6 +89,9 @@ enum fpga_mgr_states { * @sgt: scatter/gather table containing FPGA image * @buf: contiguous buffer containing FPGA image * @count: size of buf + * @header_size: size of image header. + * @data_size: size of image data to be sent to the device. If not specified, + * whole image will be used. Header may be skipped in either case. * @region_id: id of target region * @dev: device that owns this * @overlay: Device Tree overlay @@ -98,6 +105,8 @@ struct fpga_image_info { struct sg_table *sgt; const char *buf; size_t count; + size_t header_size; + size_t data_size; int region_id; struct device *dev; #ifdef CONFIG_OF @@ -137,9 +146,16 @@ struct fpga_manager_info { /** * struct fpga_manager_ops - ops for low level fpga manager drivers - * @initial_header_size: Maximum number of bytes that should be passed into write_init + * @initial_header_size: minimum number of bytes that should be passed into + * parse_header and write_init. + * @skip_header: bool flag to tell fpga-mgr core whether it should skip + * info->header_size part at the beginning of the image when invoking + * write callback. * @state: returns an enum value of the FPGA's state * @status: returns status of the FPGA, including reconfiguration error code + * @parse_header: parse FPGA image header to set info->header_size and + * info->data_size. In case the input buffer is not large enough, set + * required size to info->header_size and return -EAGAIN. 
* @write_init: prepare the FPGA to receive configuration data * @write: write count bytes of configuration data to the FPGA * @write_sg: write the scatter list of configuration data to the FPGA @@ -153,8 +169,12 @@ struct fpga_manager_info { */ struct fpga_manager_ops { size_t initial_header_size; + bool skip_header; enum fpga_mgr_states (*state)(struct fpga_manager *mgr); u64 (*status)(struct fpga_manager *mgr); + int (*parse_header)(struct fpga_manager *mgr, + struct fpga_image_info *info, + const char *buf, size_t count); int (*write_init)(struct fpga_manager *mgr, struct fpga_image_info *info, const char *buf, size_t count); diff --git a/include/linux/fs.h b/include/linux/fs.h index daf69a6504b6..9eced4cc286e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -74,6 +74,7 @@ struct fsverity_operations; struct fs_context; struct fs_parameter_spec; struct fileattr; +struct iomap_ops; extern void __init inode_init(void); extern void __init inode_init_early(void); @@ -378,13 +379,11 @@ struct address_space_operations { void (*free_folio)(struct folio *folio); ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter); /* - * migrate the contents of a page to the specified target. If + * migrate the contents of a folio to the specified target. If * migrate_mode is MIGRATE_ASYNC, it must not block. */ - int (*migratepage) (struct address_space *, - struct page *, struct page *, enum migrate_mode); - bool (*isolate_page)(struct page *, isolate_mode_t); - void (*putback_page)(struct page *); + int (*migrate_folio)(struct address_space *, struct folio *dst, + struct folio *src, enum migrate_mode); int (*launder_folio)(struct folio *); bool (*is_partially_uptodate) (struct folio *, size_t from, size_t count); @@ -940,9 +939,10 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) struct file { union { - struct llist_node fu_llist; - struct rcu_head fu_rcuhead; - } f_u; + struct llist_node f_llist; + struct rcu_head f_rcuhead; + unsigned int f_iocb_flags; + }; struct path f_path; struct inode *f_inode; /* cached value */ const struct file_operations *f_op; @@ -1428,6 +1428,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ #define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ #define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */ +#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */ /* Possible states of 'frozen' field */ enum { @@ -2029,6 +2030,8 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, void inode_init_owner(struct user_namespace *mnt_userns, struct inode *inode, const struct inode *dir, umode_t mode); extern bool may_open_dev(const struct path *path); +umode_t mode_strip_sgid(struct user_namespace *mnt_userns, + const struct inode *dir, umode_t mode); /* * This is the "filldir" function type, used by readdir() to let @@ -2196,10 +2199,13 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, extern ssize_t generic_copy_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t len, unsigned int flags); -extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, - struct file *file_out, loff_t pos_out, - loff_t *count, - unsigned int remap_flags); +int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *len, unsigned int remap_flags, + const struct iomap_ops *dax_read_ops); 
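/*
 * Editorial sketch, not part of this patch: with the prototypes added here,
 * together with dax_remap_file_range_prep() from linux/dax.h above, a
 * filesystem serving both page-cache and DAX inodes could pick the right
 * prep helper in its ->remap_file_range() path roughly as below.
 * 'my_dax_read_iomap_ops' is a hypothetical per-filesystem iomap_ops.
 */
extern const struct iomap_ops my_dax_read_iomap_ops;

static int my_remap_prep(struct file *file_in, loff_t pos_in,
			 struct file *file_out, loff_t pos_out,
			 loff_t *len, unsigned int remap_flags)
{
	if (IS_DAX(file_inode(file_in)))
		return dax_remap_file_range_prep(file_in, pos_in, file_out,
						 pos_out, len, remap_flags,
						 &my_dax_read_iomap_ops);

	return generic_remap_file_range_prep(file_in, pos_in, file_out,
					     pos_out, len, remap_flags);
}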
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, + struct file *file_out, loff_t pos_out, + loff_t *count, unsigned int remap_flags); extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags); @@ -2325,13 +2331,11 @@ static inline bool HAS_UNMAPPED_ID(struct user_namespace *mnt_userns, !vfsgid_valid(i_gid_into_vfsgid(mnt_userns, inode)); } -static inline int iocb_flags(struct file *file); - static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) { *kiocb = (struct kiocb) { .ki_filp = filp, - .ki_flags = iocb_flags(filp), + .ki_flags = filp->f_iocb_flags, .ki_ioprio = get_current_ioprio(), }; } @@ -2559,6 +2563,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type, int flags, void *data, int (*fill_super)(struct super_block *, void *, int)); extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); +void retire_super(struct super_block *sb); void generic_shutdown_super(struct super_block *sb); void kill_block_super(struct super_block *sb); void kill_anon_super(struct super_block *sb); @@ -2847,6 +2852,12 @@ extern int vfs_fsync(struct file *file, int datasync); extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, unsigned int flags); +static inline bool iocb_is_dsync(const struct kiocb *iocb) +{ + return (iocb->ki_flags & IOCB_DSYNC) || + IS_SYNC(iocb->ki_filp->f_mapping->host); +} + /* * Sync the bytes written if this was a synchronous write. Expect ki_pos * to already be updated for the write, and will return either the amount @@ -2854,7 +2865,7 @@ extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes, */ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) { - if (iocb->ki_flags & IOCB_DSYNC) { + if (iocb_is_dsync(iocb)) { int ret = vfs_fsync_range(iocb->ki_filp, iocb->ki_pos - count, iocb->ki_pos - 1, (iocb->ki_flags & IOCB_SYNC) ? 
0 : 1); @@ -3149,7 +3160,7 @@ extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, extern void file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); extern loff_t noop_llseek(struct file *file, loff_t offset, int whence); -extern loff_t no_llseek(struct file *file, loff_t offset, int whence); +#define no_llseek NULL extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence); extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, @@ -3342,18 +3353,6 @@ extern int generic_check_addressable(unsigned, u64); extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry); -#ifdef CONFIG_MIGRATION -extern int buffer_migrate_page(struct address_space *, - struct page *, struct page *, - enum migrate_mode); -extern int buffer_migrate_page_norefs(struct address_space *, - struct page *, struct page *, - enum migrate_mode); -#else -#define buffer_migrate_page NULL -#define buffer_migrate_page_norefs NULL -#endif - int may_setattr(struct user_namespace *mnt_userns, struct inode *inode, unsigned int ia_valid); int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); @@ -3389,7 +3388,7 @@ static inline int iocb_flags(struct file *file) res |= IOCB_APPEND; if (file->f_flags & O_DIRECT) res |= IOCB_DIRECT; - if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) + if (file->f_flags & O_DSYNC) res |= IOCB_DSYNC; if (file->f_flags & __O_SYNC) res |= IOCB_SYNC; diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index e60d57c99cb6..7d2f1e0f23b1 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -284,6 +284,7 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg); int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg); int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); int fscrypt_has_permitted_context(struct inode *parent, struct inode *child); +int fscrypt_context_for_new_inode(void *ctx, struct inode *inode); int fscrypt_set_context(struct inode *inode, void *fs_data); struct fscrypt_dummy_policy { @@ -327,6 +328,10 @@ void fscrypt_free_inode(struct inode *inode); int fscrypt_drop_inode(struct inode *inode); /* fname.c */ +int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, + u8 *out, unsigned int olen); +bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, + u32 max_len, u32 *encrypted_len_ret); int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname, int lookup, struct fscrypt_name *fname); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 979f6bfa2c25..0b61371e287b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -208,6 +208,43 @@ enum { FTRACE_OPS_FL_DIRECT = BIT(17), }; +/* + * FTRACE_OPS_CMD_* commands allow the ftrace core logic to request changes + * to a ftrace_ops. Note, the requests may fail. + * + * ENABLE_SHARE_IPMODIFY_SELF - enable a DIRECT ops to work on the same + * function as an ops with IPMODIFY. Called + * when the DIRECT ops is being registered. + * This is called with both direct_mutex and + * ftrace_lock are locked. + * + * ENABLE_SHARE_IPMODIFY_PEER - enable a DIRECT ops to work on the same + * function as an ops with IPMODIFY. Called + * when the other ops (the one with IPMODIFY) + * is being registered. + * This is called with direct_mutex locked. 
+ * + * DISABLE_SHARE_IPMODIFY_PEER - disable a DIRECT ops to work on the same + * function as an ops with IPMODIFY. Called + * when the other ops (the one with IPMODIFY) + * is being unregistered. + * This is called with direct_mutex locked. + */ +enum ftrace_ops_cmd { + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_SELF, + FTRACE_OPS_CMD_ENABLE_SHARE_IPMODIFY_PEER, + FTRACE_OPS_CMD_DISABLE_SHARE_IPMODIFY_PEER, +}; + +/* + * For most ftrace_ops_cmd, + * Returns: + * 0 - Success. + * Negative on failure. The return value is dependent on the + * callback. + */ +typedef int (*ftrace_ops_func_t)(struct ftrace_ops *op, enum ftrace_ops_cmd cmd); + #ifdef CONFIG_DYNAMIC_FTRACE /* The hash used to know what functions callbacks trace */ struct ftrace_ops_hash { @@ -250,6 +287,7 @@ struct ftrace_ops { unsigned long trampoline; unsigned long trampoline_size; struct list_head list; + ftrace_ops_func_t ops_func; #endif }; @@ -340,6 +378,7 @@ unsigned long ftrace_find_rec_direct(unsigned long ip); int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr); int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr); int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr); +int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr); #else struct ftrace_ops; @@ -384,6 +423,10 @@ static inline int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned lo { return -ENODEV; } +static inline int modify_ftrace_direct_multi_nolock(struct ftrace_ops *ops, unsigned long addr) +{ + return -ENODEV; +} #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ #ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 9a81c4410b9f..89b9bdfca925 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -27,11 +27,15 @@ struct device; * driver needs its child devices to be bound with * their respective drivers as soon as they are * added. + * BEST_EFFORT: The fwnode/device needs to probe early and might be missing some + * suppliers. Only enforce ordering with suppliers that have + * drivers. */ #define FWNODE_FLAG_LINKS_ADDED BIT(0) #define FWNODE_FLAG_NOT_DEVICE BIT(1) #define FWNODE_FLAG_INITIALIZED BIT(2) #define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3) +#define FWNODE_FLAG_BEST_EFFORT BIT(4) struct fwnode_handle { struct fwnode_handle *secondary; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0ace7759acd2..f314be58fa77 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -2,357 +2,13 @@ #ifndef __LINUX_GFP_H #define __LINUX_GFP_H -#include <linux/mmdebug.h> +#include <linux/gfp_types.h> + #include <linux/mmzone.h> -#include <linux/stddef.h> -#include <linux/linkage.h> #include <linux/topology.h> -/* The typedef is in types.h but we want the documentation here */ -#if 0 -/** - * typedef gfp_t - Memory allocation flags. - * - * GFP flags are commonly used throughout Linux to indicate how memory - * should be allocated. The GFP acronym stands for get_free_pages(), - * the underlying memory allocation function. Not every GFP flag is - * supported by every function which may allocate memory. Most users - * will want to use a plain ``GFP_KERNEL``. - */ -typedef unsigned int __bitwise gfp_t; -#endif - struct vm_area_struct; -/* - * In case of changes, please don't forget to update - * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c - */ - -/* Plain integer GFP bitmasks. Do not use this directly. 
*/ -#define ___GFP_DMA 0x01u -#define ___GFP_HIGHMEM 0x02u -#define ___GFP_DMA32 0x04u -#define ___GFP_MOVABLE 0x08u -#define ___GFP_RECLAIMABLE 0x10u -#define ___GFP_HIGH 0x20u -#define ___GFP_IO 0x40u -#define ___GFP_FS 0x80u -#define ___GFP_ZERO 0x100u -#define ___GFP_ATOMIC 0x200u -#define ___GFP_DIRECT_RECLAIM 0x400u -#define ___GFP_KSWAPD_RECLAIM 0x800u -#define ___GFP_WRITE 0x1000u -#define ___GFP_NOWARN 0x2000u -#define ___GFP_RETRY_MAYFAIL 0x4000u -#define ___GFP_NOFAIL 0x8000u -#define ___GFP_NORETRY 0x10000u -#define ___GFP_MEMALLOC 0x20000u -#define ___GFP_COMP 0x40000u -#define ___GFP_NOMEMALLOC 0x80000u -#define ___GFP_HARDWALL 0x100000u -#define ___GFP_THISNODE 0x200000u -#define ___GFP_ACCOUNT 0x400000u -#define ___GFP_ZEROTAGS 0x800000u -#ifdef CONFIG_KASAN_HW_TAGS -#define ___GFP_SKIP_ZERO 0x1000000u -#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u -#define ___GFP_SKIP_KASAN_POISON 0x4000000u -#else -#define ___GFP_SKIP_ZERO 0 -#define ___GFP_SKIP_KASAN_UNPOISON 0 -#define ___GFP_SKIP_KASAN_POISON 0 -#endif -#ifdef CONFIG_LOCKDEP -#define ___GFP_NOLOCKDEP 0x8000000u -#else -#define ___GFP_NOLOCKDEP 0 -#endif -/* If the above are modified, __GFP_BITS_SHIFT may need updating */ - -/* - * Physical address zone modifiers (see linux/mmzone.h - low four bits) - * - * Do not put any conditional on these. If necessary modify the definitions - * without the underscores and use them consistently. The definitions here may - * be used in bit comparisons. - */ -#define __GFP_DMA ((__force gfp_t)___GFP_DMA) -#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) -#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) -#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ -#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) - -/** - * DOC: Page mobility and placement hints - * - * Page mobility and placement hints - * --------------------------------- - * - * These flags provide hints about how mobile the page is. Pages with similar - * mobility are placed within the same pageblocks to minimise problems due - * to external fragmentation. - * - * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be - * moved by page migration during memory compaction or can be reclaimed. - * - * %__GFP_RECLAIMABLE is used for slab allocations that specify - * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. - * - * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible, - * these pages will be spread between local zones to avoid all the dirty - * pages being in one zone (fair zone allocation policy). - * - * %__GFP_HARDWALL enforces the cpuset memory allocation policy. - * - * %__GFP_THISNODE forces the allocation to be satisfied from the requested - * node with no fallbacks or placement policy enforcements. - * - * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. - */ -#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) -#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) -#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) -#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) -#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) - -/** - * DOC: Watermark modifiers - * - * Watermark modifiers -- controls access to emergency reserves - * ------------------------------------------------------------ - * - * %__GFP_HIGH indicates that the caller is high-priority and that granting - * the request is necessary before the system can make forward progress. 
- * For example, creating an IO context to clean pages. - * - * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is - * high priority. Users are typically interrupt handlers. This may be - * used in conjunction with %__GFP_HIGH - * - * %__GFP_MEMALLOC allows access to all memory. This should only be used when - * the caller guarantees the allocation will allow more memory to be freed - * very shortly e.g. process exiting or swapping. Users either should - * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). - * Users of this flag have to be extremely careful to not deplete the reserve - * completely and implement a throttling mechanism which controls the - * consumption of the reserve based on the amount of freed memory. - * Usage of a pre-allocated pool (e.g. mempool) should be always considered - * before using this flag. - * - * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. - * This takes precedence over the %__GFP_MEMALLOC flag if both are set. - */ -#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) -#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) -#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) -#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) - -/** - * DOC: Reclaim modifiers - * - * Reclaim modifiers - * ----------------- - * Please note that all the following flags are only applicable to sleepable - * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them). - * - * %__GFP_IO can start physical IO. - * - * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the - * allocator recursing into the filesystem which might already be holding - * locks. - * - * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. - * This flag can be cleared to avoid unnecessary delays when a fallback - * option is available. - * - * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when - * the low watermark is reached and have it reclaim pages until the high - * watermark is reached. A caller may wish to clear this flag when fallback - * options are available and the reclaim is likely to disrupt the system. The - * canonical example is THP allocation where a fallback is cheap but - * reclaim/compaction may cause indirect stalls. - * - * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. - * - * The default allocator behavior depends on the request size. We have a concept - * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER). - * !costly allocations are too essential to fail so they are implicitly - * non-failing by default (with some exceptions like OOM victims might fail so - * the caller still has to check for failures) while costly requests try to be - * not disruptive and back off even without invoking the OOM killer. - * The following three modifiers might be used to override some of these - * implicit rules - * - * %__GFP_NORETRY: The VM implementation will try only very lightweight - * memory direct reclaim to get some memory under memory pressure (thus - * it can sleep). It will avoid disruptive actions like OOM killer. The - * caller must handle the failure which is quite likely to happen under - * heavy memory pressure. 
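A hedged sketch of the typical consumer of these watermark modifiers, an interrupt handler that cannot sleep; the handler and sizes are invented, and a mempool remains the preferred answer when guaranteed forward progress is actually required:

#include <linux/interrupt.h>
#include <linux/skbuff.h>

static irqreturn_t my_rx_irq(int irq, void *data)
{
	/* GFP_ATOMIC = __GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM */
	struct sk_buff *skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return IRQ_HANDLED;	/* reserves are finite: failure must be tolerated */

	/* ... fill the skb and hand it off to the stack ... */
	return IRQ_HANDLED;
}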
The flag is suitable when failure can easily be - * handled at small cost, such as reduced throughput - * - * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim - * procedures that have previously failed if there is some indication - * that progress has been made else where. It can wait for other - * tasks to attempt high level approaches to freeing memory such as - * compaction (which removes fragmentation) and page-out. - * There is still a definite limit to the number of retries, but it is - * a larger limit than with %__GFP_NORETRY. - * Allocations with this flag may fail, but only when there is - * genuinely little unused memory. While these allocations do not - * directly trigger the OOM killer, their failure indicates that - * the system is likely to need to use the OOM killer soon. The - * caller must handle failure, but can reasonably do so by failing - * a higher-level request, or completing it only in a much less - * efficient manner. - * If the allocation does fail, and the caller is in a position to - * free some non-essential memory, doing so could benefit the system - * as a whole. - * - * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. The allocation could block - * indefinitely but will never return with failure. Testing for - * failure is pointless. - * New users should be evaluated carefully (and the flag should be - * used only when there is no reasonable failure policy) but it is - * definitely preferable to use the flag rather than opencode endless - * loop around allocator. - * Using this flag for costly allocations is _highly_ discouraged. - */ -#define __GFP_IO ((__force gfp_t)___GFP_IO) -#define __GFP_FS ((__force gfp_t)___GFP_FS) -#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ -#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ -#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) -#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) -#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) -#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) - -/** - * DOC: Action modifiers - * - * Action modifiers - * ---------------- - * - * %__GFP_NOWARN suppresses allocation failure reports. - * - * %__GFP_COMP address compound page metadata. - * - * %__GFP_ZERO returns a zeroed page on success. - * - * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself - * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that - * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting - * memory tags at the same time as zeroing memory has minimal additional - * performace impact. - * - * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation. - * Only effective in HW_TAGS mode. - * - * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation. - * Typically, used for userspace pages. Only effective in HW_TAGS mode. 
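The usual %__GFP_NORETRY pattern is an opportunistic attempt with a cheap fallback, roughly what kvmalloc() does internally; a minimal sketch with an invented helper name:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *my_alloc_big(size_t size)
{
	/* Fail fast and quietly under memory pressure ... */
	void *p = kmalloc(size, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);

	/* ... then fall back to vmalloc(), which needs no physical contiguity. */
	return p ? p : vmalloc(size);
}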
- */ -#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) -#define __GFP_COMP ((__force gfp_t)___GFP_COMP) -#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) -#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) -#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO) -#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON) -#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) - -/* Disable lockdep for GFP context tracking */ -#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) - -/* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP)) -#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) - -/** - * DOC: Useful GFP flag combinations - * - * Useful GFP flag combinations - * ---------------------------- - * - * Useful GFP flag combinations that are commonly used. It is recommended - * that subsystems start with one of these combinations and then set/clear - * %__GFP_FOO flags as necessary. - * - * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower - * watermark is applied to allow access to "atomic reserves". - * The current implementation doesn't support NMI and few other strict - * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT. - * - * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires - * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. - * - * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is - * accounted to kmemcg. - * - * %GFP_NOWAIT is for kernel allocations that should not stall for direct - * reclaim, start physical IO or use any filesystem callback. - * - * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages - * that do not require the starting of any physical IO. - * Please try to avoid using this flag directly and instead use - * memalloc_noio_{save,restore} to mark the whole scope which cannot - * perform any IO with a short explanation why. All allocation requests - * will inherit GFP_NOIO implicitly. - * - * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. - * Please try to avoid using this flag directly and instead use - * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't - * recurse into the FS layer with a short explanation why. All allocation - * requests will inherit GFP_NOFS implicitly. - * - * %GFP_USER is for userspace allocations that also need to be directly - * accessibly by the kernel or hardware. It is typically used by hardware - * for buffers that are mapped to userspace (e.g. graphics) that hardware - * still must DMA to. cpuset limits are enforced for these allocations. - * - * %GFP_DMA exists for historical reasons and should be avoided where possible. - * The flags indicates that the caller requires that the lowest zone be - * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but - * it would require careful auditing as some users really require it and - * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the - * lowest zone as a type of emergency reserve. - * - * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit - * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory - * because the DMA32 kmalloc cache array is not implemented. - * (Reason: there is no such user in kernel). 
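As the GFP_NOFS text above recommends, new code should mark the whole scope rather than pass GFP_NOFS around; a sketch of the scoped form, with the filesystem helper being hypothetical:

#include <linux/fs.h>
#include <linux/sched/mm.h>

int my_fs_helper_that_allocates(struct inode *inode);	/* hypothetical */

static int my_fs_op(struct inode *inode)
{
	unsigned int nofs_flags;
	int err;

	/* Holding fs locks from here on: allocations in this scope become NOFS. */
	nofs_flags = memalloc_nofs_save();
	err = my_fs_helper_that_allocates(inode);	/* its GFP_KERNEL uses are demoted */
	memalloc_nofs_restore(nofs_flags);

	return err;
}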
- * - * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, - * do not need to be directly accessible by the kernel but that cannot - * move once in use. An example may be a hardware allocation that maps - * data directly into userspace but has no addressing limitations. - * - * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not - * need direct access to but can use kmap() when access is required. They - * are expected to be movable via page reclaim or page migration. Typically, - * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE. - * - * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They - * are compound allocations that will generally fail quickly if memory is not - * available and will not wake kswapd/kcompactd on failure. The _LIGHT - * version does not attempt reclaim/compaction at all and is by default used - * in page fault path, while the non-light is used by khugepaged. - */ -#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) -#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) -#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) -#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) -#define GFP_NOIO (__GFP_RECLAIM) -#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) -#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) -#define GFP_DMA __GFP_DMA -#define GFP_DMA32 __GFP_DMA32 -#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) -#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ - __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON) -#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ - __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) -#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) - /* Convert GFP flags to their corresponding migrate type */ #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) #define GFP_MOVABLE_SHIFT 3 diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h new file mode 100644 index 000000000000..d88c46ca82e1 --- /dev/null +++ b/include/linux/gfp_types.h @@ -0,0 +1,348 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_GFP_TYPES_H +#define __LINUX_GFP_TYPES_H + +/* The typedef is in types.h but we want the documentation here */ +#if 0 +/** + * typedef gfp_t - Memory allocation flags. + * + * GFP flags are commonly used throughout Linux to indicate how memory + * should be allocated. The GFP acronym stands for get_free_pages(), + * the underlying memory allocation function. Not every GFP flag is + * supported by every function which may allocate memory. Most users + * will want to use a plain ``GFP_KERNEL``. + */ +typedef unsigned int __bitwise gfp_t; +#endif + +/* + * In case of changes, please don't forget to update + * include/trace/events/mmflags.h and tools/perf/builtin-kmem.c + */ + +/* Plain integer GFP bitmasks. Do not use this directly. 
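Two rough illustrations of the combinations documented above; they are only meant to show which combination fits which situation, since real page-cache and THP allocations go through dedicated helpers:

#include <linux/gfp.h>
#include <linux/huge_mm.h>

static struct page *my_user_page(void)
{
	/* Userspace-only page: highmem is fine, and it must remain migratable. */
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

static struct page *my_thp_attempt(void)
{
	/* Fault-path style THP attempt: give up quickly rather than stall. */
	return alloc_pages(GFP_TRANSHUGE_LIGHT, HPAGE_PMD_ORDER);
}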
*/ +#define ___GFP_DMA 0x01u +#define ___GFP_HIGHMEM 0x02u +#define ___GFP_DMA32 0x04u +#define ___GFP_MOVABLE 0x08u +#define ___GFP_RECLAIMABLE 0x10u +#define ___GFP_HIGH 0x20u +#define ___GFP_IO 0x40u +#define ___GFP_FS 0x80u +#define ___GFP_ZERO 0x100u +#define ___GFP_ATOMIC 0x200u +#define ___GFP_DIRECT_RECLAIM 0x400u +#define ___GFP_KSWAPD_RECLAIM 0x800u +#define ___GFP_WRITE 0x1000u +#define ___GFP_NOWARN 0x2000u +#define ___GFP_RETRY_MAYFAIL 0x4000u +#define ___GFP_NOFAIL 0x8000u +#define ___GFP_NORETRY 0x10000u +#define ___GFP_MEMALLOC 0x20000u +#define ___GFP_COMP 0x40000u +#define ___GFP_NOMEMALLOC 0x80000u +#define ___GFP_HARDWALL 0x100000u +#define ___GFP_THISNODE 0x200000u +#define ___GFP_ACCOUNT 0x400000u +#define ___GFP_ZEROTAGS 0x800000u +#ifdef CONFIG_KASAN_HW_TAGS +#define ___GFP_SKIP_ZERO 0x1000000u +#define ___GFP_SKIP_KASAN_UNPOISON 0x2000000u +#define ___GFP_SKIP_KASAN_POISON 0x4000000u +#else +#define ___GFP_SKIP_ZERO 0 +#define ___GFP_SKIP_KASAN_UNPOISON 0 +#define ___GFP_SKIP_KASAN_POISON 0 +#endif +#ifdef CONFIG_LOCKDEP +#define ___GFP_NOLOCKDEP 0x8000000u +#else +#define ___GFP_NOLOCKDEP 0 +#endif +/* If the above are modified, __GFP_BITS_SHIFT may need updating */ + +/* + * Physical address zone modifiers (see linux/mmzone.h - low four bits) + * + * Do not put any conditional on these. If necessary modify the definitions + * without the underscores and use them consistently. The definitions here may + * be used in bit comparisons. + */ +#define __GFP_DMA ((__force gfp_t)___GFP_DMA) +#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) +#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) +#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */ +#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) + +/** + * DOC: Page mobility and placement hints + * + * Page mobility and placement hints + * --------------------------------- + * + * These flags provide hints about how mobile the page is. Pages with similar + * mobility are placed within the same pageblocks to minimise problems due + * to external fragmentation. + * + * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be + * moved by page migration during memory compaction or can be reclaimed. + * + * %__GFP_RECLAIMABLE is used for slab allocations that specify + * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers. + * + * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible, + * these pages will be spread between local zones to avoid all the dirty + * pages being in one zone (fair zone allocation policy). + * + * %__GFP_HARDWALL enforces the cpuset memory allocation policy. + * + * %__GFP_THISNODE forces the allocation to be satisfied from the requested + * node with no fallbacks or placement policy enforcements. + * + * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + */ +#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) +#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) +#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) +#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) +#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) + +/** + * DOC: Watermark modifiers + * + * Watermark modifiers -- controls access to emergency reserves + * ------------------------------------------------------------ + * + * %__GFP_HIGH indicates that the caller is high-priority and that granting + * the request is necessary before the system can make forward progress. 
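A short sketch of the placement hints documented in the gfp_types.h hunk above, combining node pinning with kmemcg accounting (structure and node id are invented):

#include <linux/slab.h>

struct my_node_data { int node; /* ... */ };	/* invented */

static struct my_node_data *my_alloc_on_node(int nid)
{
	/* Pin the allocation to @nid and charge it to the caller's memory cgroup. */
	return kmalloc_node(sizeof(struct my_node_data),
			    GFP_KERNEL | __GFP_THISNODE | __GFP_ACCOUNT, nid);
}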
+ * For example, creating an IO context to clean pages. + * + * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is + * high priority. Users are typically interrupt handlers. This may be + * used in conjunction with %__GFP_HIGH + * + * %__GFP_MEMALLOC allows access to all memory. This should only be used when + * the caller guarantees the allocation will allow more memory to be freed + * very shortly e.g. process exiting or swapping. Users either should + * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). + * Users of this flag have to be extremely careful to not deplete the reserve + * completely and implement a throttling mechanism which controls the + * consumption of the reserve based on the amount of freed memory. + * Usage of a pre-allocated pool (e.g. mempool) should be always considered + * before using this flag. + * + * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. + * This takes precedence over the %__GFP_MEMALLOC flag if both are set. + */ +#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC) +#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) +#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) +#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) + +/** + * DOC: Reclaim modifiers + * + * Reclaim modifiers + * ----------------- + * Please note that all the following flags are only applicable to sleepable + * allocations (e.g. %GFP_NOWAIT and %GFP_ATOMIC will ignore them). + * + * %__GFP_IO can start physical IO. + * + * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the + * allocator recursing into the filesystem which might already be holding + * locks. + * + * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim. + * This flag can be cleared to avoid unnecessary delays when a fallback + * option is available. + * + * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when + * the low watermark is reached and have it reclaim pages until the high + * watermark is reached. A caller may wish to clear this flag when fallback + * options are available and the reclaim is likely to disrupt the system. The + * canonical example is THP allocation where a fallback is cheap but + * reclaim/compaction may cause indirect stalls. + * + * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim. + * + * The default allocator behavior depends on the request size. We have a concept + * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER). + * !costly allocations are too essential to fail so they are implicitly + * non-failing by default (with some exceptions like OOM victims might fail so + * the caller still has to check for failures) while costly requests try to be + * not disruptive and back off even without invoking the OOM killer. + * The following three modifiers might be used to override some of these + * implicit rules + * + * %__GFP_NORETRY: The VM implementation will try only very lightweight + * memory direct reclaim to get some memory under memory pressure (thus + * it can sleep). It will avoid disruptive actions like OOM killer. The + * caller must handle the failure which is quite likely to happen under + * heavy memory pressure. 
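Because %__GFP_DIRECT_RECLAIM is what makes an allocation sleepable, helpers that accept a caller-supplied mask often key their own blocking behaviour off it; a sketch with an invented device structure:

#include <linux/gfp.h>
#include <linux/mutex.h>

struct my_dev { struct mutex lock; /* ... */ };	/* invented */

static int my_get_slot(struct my_dev *dev, gfp_t gfp)
{
	if (gfpflags_allow_blocking(gfp))	/* tests __GFP_DIRECT_RECLAIM */
		mutex_lock(&dev->lock);
	else if (!mutex_trylock(&dev->lock))
		return -EAGAIN;

	/* ... hand out a slot ... */
	mutex_unlock(&dev->lock);
	return 0;
}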
The flag is suitable when failure can easily be + * handled at small cost, such as reduced throughput + * + * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim + * procedures that have previously failed if there is some indication + * that progress has been made else where. It can wait for other + * tasks to attempt high level approaches to freeing memory such as + * compaction (which removes fragmentation) and page-out. + * There is still a definite limit to the number of retries, but it is + * a larger limit than with %__GFP_NORETRY. + * Allocations with this flag may fail, but only when there is + * genuinely little unused memory. While these allocations do not + * directly trigger the OOM killer, their failure indicates that + * the system is likely to need to use the OOM killer soon. The + * caller must handle failure, but can reasonably do so by failing + * a higher-level request, or completing it only in a much less + * efficient manner. + * If the allocation does fail, and the caller is in a position to + * free some non-essential memory, doing so could benefit the system + * as a whole. + * + * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller + * cannot handle allocation failures. The allocation could block + * indefinitely but will never return with failure. Testing for + * failure is pointless. + * New users should be evaluated carefully (and the flag should be + * used only when there is no reasonable failure policy) but it is + * definitely preferable to use the flag rather than opencode endless + * loop around allocator. + * Using this flag for costly allocations is _highly_ discouraged. + */ +#define __GFP_IO ((__force gfp_t)___GFP_IO) +#define __GFP_FS ((__force gfp_t)___GFP_FS) +#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */ +#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */ +#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM)) +#define __GFP_RETRY_MAYFAIL ((__force gfp_t)___GFP_RETRY_MAYFAIL) +#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) +#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) + +/** + * DOC: Action modifiers + * + * Action modifiers + * ---------------- + * + * %__GFP_NOWARN suppresses allocation failure reports. + * + * %__GFP_COMP address compound page metadata. + * + * %__GFP_ZERO returns a zeroed page on success. + * + * %__GFP_ZEROTAGS zeroes memory tags at allocation time if the memory itself + * is being zeroed (either via __GFP_ZERO or via init_on_alloc, provided that + * __GFP_SKIP_ZERO is not set). This flag is intended for optimization: setting + * memory tags at the same time as zeroing memory has minimal additional + * performace impact. + * + * %__GFP_SKIP_KASAN_UNPOISON makes KASAN skip unpoisoning on page allocation. + * Only effective in HW_TAGS mode. + * + * %__GFP_SKIP_KASAN_POISON makes KASAN skip poisoning on page deallocation. + * Typically, used for userspace pages. Only effective in HW_TAGS mode. 
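A sketch of the intended %__GFP_RETRY_MAYFAIL use, a large but non-critical allocation whose failure is handled one level up (entry type and helper are invented):

#include <linux/slab.h>

struct my_entry { u64 key, val; };	/* invented */

static struct my_entry *my_alloc_table(unsigned int nr_entries)
{
	/* Try hard, but fail rather than drag the system towards the OOM killer;
	 * the caller degrades gracefully on NULL. */
	return kvcalloc(nr_entries, sizeof(struct my_entry),
			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}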
+ */ +#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) +#define __GFP_COMP ((__force gfp_t)___GFP_COMP) +#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) +#define __GFP_ZEROTAGS ((__force gfp_t)___GFP_ZEROTAGS) +#define __GFP_SKIP_ZERO ((__force gfp_t)___GFP_SKIP_ZERO) +#define __GFP_SKIP_KASAN_UNPOISON ((__force gfp_t)___GFP_SKIP_KASAN_UNPOISON) +#define __GFP_SKIP_KASAN_POISON ((__force gfp_t)___GFP_SKIP_KASAN_POISON) + +/* Disable lockdep for GFP context tracking */ +#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) + +/* Room for N __GFP_FOO bits */ +#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) + +/** + * DOC: Useful GFP flag combinations + * + * Useful GFP flag combinations + * ---------------------------- + * + * Useful GFP flag combinations that are commonly used. It is recommended + * that subsystems start with one of these combinations and then set/clear + * %__GFP_FOO flags as necessary. + * + * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower + * watermark is applied to allow access to "atomic reserves". + * The current implementation doesn't support NMI and few other strict + * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT. + * + * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires + * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim. + * + * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is + * accounted to kmemcg. + * + * %GFP_NOWAIT is for kernel allocations that should not stall for direct + * reclaim, start physical IO or use any filesystem callback. + * + * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages + * that do not require the starting of any physical IO. + * Please try to avoid using this flag directly and instead use + * memalloc_noio_{save,restore} to mark the whole scope which cannot + * perform any IO with a short explanation why. All allocation requests + * will inherit GFP_NOIO implicitly. + * + * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces. + * Please try to avoid using this flag directly and instead use + * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't + * recurse into the FS layer with a short explanation why. All allocation + * requests will inherit GFP_NOFS implicitly. + * + * %GFP_USER is for userspace allocations that also need to be directly + * accessibly by the kernel or hardware. It is typically used by hardware + * for buffers that are mapped to userspace (e.g. graphics) that hardware + * still must DMA to. cpuset limits are enforced for these allocations. + * + * %GFP_DMA exists for historical reasons and should be avoided where possible. + * The flags indicates that the caller requires that the lowest zone be + * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but + * it would require careful auditing as some users really require it and + * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the + * lowest zone as a type of emergency reserve. + * + * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit + * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory + * because the DMA32 kmalloc cache array is not implemented. + * (Reason: there is no such user in kernel). 
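One common way these combinations are mixed with the action modifiers: a quiet, non-sleeping fast path followed by a sleeping slow path (helper name invented):

#include <linux/slab.h>

static void *my_get_buf(size_t len)
{
	/* kzalloc() is kmalloc() plus __GFP_ZERO. */
	void *buf = kzalloc(len, GFP_NOWAIT | __GFP_NOWARN);	/* quiet fast path */

	if (!buf)
		buf = kzalloc(len, GFP_KERNEL);			/* slow path, may sleep */
	return buf;
}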
+ * + * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace, + * do not need to be directly accessible by the kernel but that cannot + * move once in use. An example may be a hardware allocation that maps + * data directly into userspace but has no addressing limitations. + * + * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not + * need direct access to but can use kmap() when access is required. They + * are expected to be movable via page reclaim or page migration. Typically, + * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE. + * + * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They + * are compound allocations that will generally fail quickly if memory is not + * available and will not wake kswapd/kcompactd on failure. The _LIGHT + * version does not attempt reclaim/compaction at all and is by default used + * in page fault path, while the non-light is used by khugepaged. + */ +#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) +#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) +#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) +#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) +#define GFP_NOIO (__GFP_RECLAIM) +#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) +#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL) +#define GFP_DMA __GFP_DMA +#define GFP_DMA32 __GFP_DMA32 +#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) +#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE | \ + __GFP_SKIP_KASAN_POISON | __GFP_SKIP_KASAN_UNPOISON) +#define GFP_TRANSHUGE_LIGHT ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ + __GFP_NOMEMALLOC | __GFP_NOWARN) & ~__GFP_RECLAIM) +#define GFP_TRANSHUGE (GFP_TRANSHUGE_LIGHT | __GFP_DIRECT_RECLAIM) + +#endif /* __LINUX_GFP_TYPES_H */ diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 008ad3ee56b7..a370387fa406 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -95,7 +95,6 @@ struct device; int devm_gpio_request(struct device *dev, unsigned gpio, const char *label); int devm_gpio_request_one(struct device *dev, unsigned gpio, unsigned long flags, const char *label); -void devm_gpio_free(struct device *dev, unsigned int gpio); #else /* ! CONFIG_GPIOLIB */ @@ -240,11 +239,6 @@ static inline int devm_gpio_request_one(struct device *dev, unsigned gpio, return -EINVAL; } -static inline void devm_gpio_free(struct device *dev, unsigned int gpio) -{ - WARN_ON(1); -} - #endif /* ! 
CONFIG_GPIOLIB */ #endif /* __LINUX_GPIO_H */ diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 4d55da28e664..0b619eb7ae83 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -14,6 +14,7 @@ enum gpio_lookup_flags { GPIO_TRANSITORY = (1 << 3), GPIO_PULL_UP = (1 << 4), GPIO_PULL_DOWN = (1 << 5), + GPIO_PULL_DISABLE = (1 << 6), GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT, }; diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index cddb42ff0473..034b1106d022 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -8,7 +8,7 @@ #ifdef CONFIG_KMAP_LOCAL void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot); void *__kmap_local_page_prot(struct page *page, pgprot_t prot); -void kunmap_local_indexed(void *vaddr); +void kunmap_local_indexed(const void *vaddr); void kmap_local_fork(struct task_struct *tsk); void __kmap_local_sched_out(void); void __kmap_local_sched_in(void); @@ -89,7 +89,7 @@ static inline void *kmap_local_pfn(unsigned long pfn) return __kmap_local_pfn_prot(pfn, kmap_prot); } -static inline void __kunmap_local(void *vaddr) +static inline void __kunmap_local(const void *vaddr) { kunmap_local_indexed(vaddr); } @@ -121,7 +121,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn) return __kmap_local_pfn_prot(pfn, kmap_prot); } -static inline void __kunmap_atomic(void *addr) +static inline void __kunmap_atomic(const void *addr) { kunmap_local_indexed(addr); pagefault_enable(); @@ -197,7 +197,7 @@ static inline void *kmap_local_pfn(unsigned long pfn) return kmap_local_page(pfn_to_page(pfn)); } -static inline void __kunmap_local(void *addr) +static inline void __kunmap_local(const void *addr) { #ifdef ARCH_HAS_FLUSH_ON_KUNMAP kunmap_flush_on_unmap(addr); @@ -224,7 +224,7 @@ static inline void *kmap_atomic_pfn(unsigned long pfn) return kmap_atomic(pfn_to_page(pfn)); } -static inline void __kunmap_atomic(void *addr) +static inline void __kunmap_atomic(const void *addr) { #ifdef ARCH_HAS_FLUSH_ON_KUNMAP kunmap_flush_on_unmap(addr); diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 56d6a0196534..25679035ca28 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -60,11 +60,11 @@ static inline void kmap_flush_unused(void); /** * kmap_local_page - Map a page for temporary usage - * @page: Pointer to the page to be mapped + * @page: Pointer to the page to be mapped * * Returns: The virtual address of the mapping * - * Can be invoked from any context. + * Can be invoked from any context, including interrupts. * * Requires careful handling when nesting multiple mappings because the map * management is stack based. The unmap has to be in the reverse order of @@ -86,8 +86,7 @@ static inline void kmap_flush_unused(void); * temporarily mapped. * * While it is significantly faster than kmap() for the higmem case it - * comes with restrictions about the pointer validity. Only use when really - * necessary. + * comes with restrictions about the pointer validity. 
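A minimal sketch of the kmap_local_page() pattern described above, essentially what the existing memcpy_from_page() helper does (function name invented):

#include <linux/highmem.h>
#include <linux/string.h>

static void my_copy_from_page(void *dst, struct page *page,
			      size_t offset, size_t len)
{
	/* Map the (possibly highmem) page; unmap strictly in reverse order. */
	char *vaddr = kmap_local_page(page);

	memcpy(dst, vaddr + offset, len);
	kunmap_local(vaddr);
}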
* * On HIGHMEM enabled systems mapping a highmem page has the side effect of * disabling migration in order to keep the virtual address stable across @@ -243,6 +242,16 @@ static inline void clear_highpage(struct page *page) kunmap_local(kaddr); } +static inline void clear_highpage_kasan_tagged(struct page *page) +{ + u8 tag; + + tag = page_kasan_tag(page); + page_kasan_tag_reset(page); + clear_highpage(page); + page_kasan_tag_set(page, tag); +} + #ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE static inline void tag_clear_highpage(struct page *page) @@ -336,19 +345,6 @@ static inline void memcpy_page(struct page *dst_page, size_t dst_off, kunmap_local(dst); } -static inline void memmove_page(struct page *dst_page, size_t dst_off, - struct page *src_page, size_t src_off, - size_t len) -{ - char *dst = kmap_local_page(dst_page); - char *src = kmap_local_page(src_page); - - VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE); - memmove(dst + dst_off, src + src_off, len); - kunmap_local(src); - kunmap_local(dst); -} - static inline void memset_page(struct page *page, size_t offset, int val, size_t len) { diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h index 9dc01f7ab5b4..07414c241e65 100644 --- a/include/linux/hippidevice.h +++ b/include/linux/hippidevice.h @@ -23,6 +23,10 @@ #ifdef __KERNEL__ +struct neigh_parms; +struct net_device; +struct sk_buff; + struct hippi_cb { __u32 ifield; }; diff --git a/include/linux/hmm.h b/include/linux/hmm.h index d5a6f101f843..126a36571667 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -4,7 +4,7 @@ * * Authors: Jérôme Glisse <jglisse@redhat.com> * - * See Documentation/vm/hmm.rst for reasons and overview of what HMM is. + * See Documentation/mm/hmm.rst for reasons and overview of what HMM is. */ #ifndef LINUX_HMM_H #define LINUX_HMM_H @@ -100,7 +100,7 @@ struct hmm_range { }; /* - * Please see Documentation/vm/hmm.rst for how to use the range API. + * Please see Documentation/mm/hmm.rst for how to use the range API. 
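The hmm_range_fault() retry loop is spelled out in Documentation/mm/hmm.rst; a condensed sketch of that documented pattern, with the notifier, lock and pfn array supplied by the (hypothetical) driver:

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

static int my_populate_range(struct mm_struct *mm,
			     struct mmu_interval_notifier *interval_sub,
			     struct mutex *driver_lock,
			     unsigned long start, unsigned long *pfns,
			     unsigned long npages)
{
	struct hmm_range range = {
		.notifier	= interval_sub,
		.start		= start,
		.end		= start + npages * PAGE_SIZE,
		.hmm_pfns	= pfns,
		.default_flags	= HMM_PFN_REQ_FAULT,
	};
	int ret;

again:
	range.notifier_seq = mmu_interval_read_begin(interval_sub);

	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(mm);
	if (ret) {
		if (ret == -EBUSY)
			goto again;
		return ret;
	}

	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(interval_sub, range.notifier_seq)) {
		mutex_unlock(driver_lock);
		goto again;
	}
	/* pfns[] is now stable; program the device page tables here. */
	mutex_unlock(driver_lock);
	return 0;
}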
*/ int hmm_range_fault(struct hmm_range *range); diff --git a/include/linux/host1x.h b/include/linux/host1x.h index c0bf4e581fe9..cb2100d9b0ff 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -327,6 +327,14 @@ struct host1x_job { /* Whether host1x-side firewall should be ran for this job or not */ bool enable_firewall; + + /* Options for configuring engine data stream ID */ + /* Context device to use for job */ + struct host1x_memory_context *memory_context; + /* Stream ID to use if context isolation is disabled (!memory_context) */ + u32 engine_fallback_streamid; + /* Engine offset to program stream ID to */ + u32 engine_streamid_offset; }; struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, @@ -446,4 +454,38 @@ int tegra_mipi_disable(struct tegra_mipi_device *device); int tegra_mipi_start_calibration(struct tegra_mipi_device *device); int tegra_mipi_finish_calibration(struct tegra_mipi_device *device); +/* host1x memory contexts */ + +struct host1x_memory_context { + struct host1x *host; + + refcount_t ref; + struct pid *owner; + + struct device dev; + u64 dma_mask; + u32 stream_id; +}; + +#ifdef CONFIG_IOMMU_API +struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x, + struct pid *pid); +void host1x_memory_context_get(struct host1x_memory_context *cd); +void host1x_memory_context_put(struct host1x_memory_context *cd); +#else +static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x, + struct pid *pid) +{ + return NULL; +} + +static inline void host1x_memory_context_get(struct host1x_memory_context *cd) +{ +} + +static inline void host1x_memory_context_put(struct host1x_memory_context *cd) +{ +} +#endif + #endif diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 4ddaf6ad73ef..768e5261fdae 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -116,9 +116,30 @@ extern struct kobj_attribute shmem_enabled_attr; extern unsigned long transparent_hugepage_flags; +#define hugepage_flags_enabled() \ + (transparent_hugepage_flags & \ + ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \ + (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))) +#define hugepage_flags_always() \ + (transparent_hugepage_flags & \ + (1<<TRANSPARENT_HUGEPAGE_FLAG)) + +/* + * Do the below checks: + * - For file vma, check if the linear page offset of vma is + * HPAGE_PMD_NR aligned within the file. The hugepage is + * guaranteed to be hugepage-aligned within the file, but we must + * check that the PMD-aligned addresses in the VMA map to + * PMD-aligned offsets within the file, else the hugepage will + * not be PMD-mappable. + * - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE + * area. + */ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long haddr) + unsigned long addr) { + unsigned long haddr; + /* Don't have to check pgoff for anonymous vma */ if (!vma_is_anonymous(vma)) { if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, @@ -126,53 +147,13 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, return false; } - if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) - return false; - return true; -} + haddr = addr & HPAGE_PMD_MASK; -static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - /* Explicitly disabled through madvise. 
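A sketch of how a host1x client might use the new memory-context API from the hunk above; the ERR_PTR-on-failure convention is an assumption here, and a NULL context is taken to mean that context isolation is unavailable:

#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/sched.h>

static int my_submit(struct host1x *host, struct host1x_job *job)
{
	struct host1x_memory_context *ctx;

	ctx = host1x_memory_context_alloc(host, task_pid(current));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);	/* assumed error convention */

	job->memory_context = ctx;	/* may be NULL without CONFIG_IOMMU_API */

	/* ... map buffers against &ctx->dev and submit the job ... */

	if (job->memory_context)
		host1x_memory_context_put(job->memory_context);
	return 0;
}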
*/ - if ((vm_flags & VM_NOHUGEPAGE) || - test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) + if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) return false; return true; } -/* - * to be used on vmas which are known to support THP. - * Use transparent_hugepage_active otherwise - */ -static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) -{ - - /* - * If the hardware/firmware marked hugepage support disabled. - */ - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) - return false; - - if (!transhuge_vma_enabled(vma, vma->vm_flags)) - return false; - - if (vma_is_temporary_stack(vma)) - return false; - - if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) - return true; - - if (vma_is_dax(vma)) - return true; - - if (transparent_hugepage_flags & - (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) - return !!(vma->vm_flags & VM_HUGEPAGE); - - return false; -} - static inline bool file_thp_enabled(struct vm_area_struct *vma) { struct inode *inode; @@ -187,7 +168,9 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma) !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); } -bool transparent_hugepage_active(struct vm_area_struct *vma); +bool hugepage_vma_check(struct vm_area_struct *vma, + unsigned long vm_flags, + bool smaps, bool in_pf); #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ @@ -290,7 +273,7 @@ static inline bool is_huge_zero_page(struct page *page) static inline bool is_huge_zero_pmd(pmd_t pmd) { - return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd); + return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); } static inline bool is_huge_zero_pud(pud_t pud) @@ -311,8 +294,8 @@ static inline bool thp_migration_supported(void) static inline struct list_head *page_deferred_list(struct page *page) { /* - * Global or memcg deferred list in the second tail pages is - * occupied by compound_head. + * See organization of tail pages of compound page in + * "struct page" definition. 
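With the per-caller THP checks above folded into hugepage_vma_check(), a fault-path caller would ask roughly the following; vma and vmf come from the surrounding (implied) fault handler, and the false/true arguments are the smaps/in_pf flags of the new prototype:

	/* In a page-fault path: not smaps, in_pf = true. */
	if (transhuge_vma_suitable(vma, vmf->address) &&
	    hugepage_vma_check(vma, vma->vm_flags, false, true)) {
		/* worth trying to install a PMD-mapped THP for this fault */
	}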
*/ return &page[2].deferred_list; } @@ -331,24 +314,15 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) return false; } -static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) -{ - return false; -} - -static inline bool transparent_hugepage_active(struct vm_area_struct *vma) -{ - return false; -} - static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, - unsigned long haddr) + unsigned long addr) { return false; } -static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, - unsigned long vm_flags) +static inline bool hugepage_vma_check(struct vm_area_struct *vma, + unsigned long vm_flags, + bool smaps, bool in_pf) { return false; } diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e4cff27d1198..3ec981a0d8b3 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -43,6 +43,9 @@ enum { SUBPAGE_INDEX_CGROUP_RSVD, /* reuse page->private */ __MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD, #endif +#ifdef CONFIG_MEMORY_FAILURE + SUBPAGE_INDEX_HWPOISON, +#endif __NR_USED_SUBPAGE, }; @@ -152,7 +155,7 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb, struct page *ref_page, zap_flags_t zap_flags); void hugetlb_report_meminfo(struct seq_file *); int hugetlb_report_node_meminfo(char *buf, int len, int nid); -void hugetlb_show_meminfo(void); +void hugetlb_show_meminfo_node(int nid); unsigned long hugetlb_total_pages(void); vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags); @@ -170,7 +173,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); -bool isolate_huge_page(struct page *page, struct list_head *list); +int isolate_hugetlb(struct page *page, struct list_head *list); int get_hwpoison_huge_page(struct page *page, bool *hugetlb); int get_huge_page_for_hwpoison(unsigned long pfn, int flags); void putback_active_hugepage(struct page *page); @@ -194,8 +197,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); +unsigned long hugetlb_mask_last_page(struct hstate *h); int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep); + unsigned long addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, @@ -242,7 +246,7 @@ static inline struct address_space *hugetlb_page_mapping_lock_write( static inline int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long *addr, pte_t *ptep) + unsigned long addr, pte_t *ptep) { return 0; } @@ -297,7 +301,7 @@ static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid) return 0; } -static inline void hugetlb_show_meminfo(void) +static inline void hugetlb_show_meminfo_node(int nid) { } @@ -376,9 +380,9 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, return NULL; } -static inline bool isolate_huge_page(struct page *page, struct list_head *list) +static inline int isolate_hugetlb(struct page *page, struct list_head *list) { - return false; + return -EBUSY; } static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb) @@ -550,7 +554,7 @@ 
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * Synchronization: Initially set after new page allocation with no * locking. When examined and modified during migration processing * (isolate, migrate, putback) the hugetlb_lock is held. - * HPG_temporary - - Set on a page that is temporarily allocated from the buddy + * HPG_temporary - Set on a page that is temporarily allocated from the buddy * allocator. Typically used for migration target pages when no pages * are available in the pool. The hugetlb free page path will * immediately free pages with this flag set to the buddy allocator. @@ -560,6 +564,8 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, * HPG_freed - Set when page is on the free lists. * Synchronization: hugetlb_lock held for examination and modification. * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed. + * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page + * that is not tracked by raw_hwp_page list. */ enum hugetlb_page_flags { HPG_restore_reserve = 0, @@ -567,6 +573,7 @@ enum hugetlb_page_flags { HPG_temporary, HPG_freed, HPG_vmemmap_optimized, + HPG_raw_hwp_unreliable, __NR_HPAGEFLAGS, }; @@ -613,6 +620,7 @@ HPAGEFLAG(Migratable, migratable) HPAGEFLAG(Temporary, temporary) HPAGEFLAG(Freed, freed) HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) +HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) #ifdef CONFIG_HUGETLB_PAGE @@ -637,9 +645,6 @@ struct hstate { unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; -#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP - unsigned int optimize_vmemmap_pages; -#endif #ifdef CONFIG_CGROUP_HUGETLB /* cgroup control files */ struct cftype cgroup_files_dfl[8]; @@ -715,7 +720,7 @@ static inline struct hstate *hstate_vma(struct vm_area_struct *vma) return hstate_file(vma->vm_file); } -static inline unsigned long huge_page_size(struct hstate *h) +static inline unsigned long huge_page_size(const struct hstate *h) { return (unsigned long)PAGE_SIZE << h->order; } @@ -744,7 +749,7 @@ static inline bool hstate_is_gigantic(struct hstate *h) return huge_page_order(h) >= MAX_ORDER; } -static inline unsigned int pages_per_huge_page(struct hstate *h) +static inline unsigned int pages_per_huge_page(const struct hstate *h) { return 1 << h->order; } @@ -798,6 +803,14 @@ extern int dissolve_free_huge_page(struct page *page); extern int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn); +#ifdef CONFIG_MEMORY_FAILURE +extern void hugetlb_clear_page_hwpoison(struct page *hpage); +#else +static inline void hugetlb_clear_page_hwpoison(struct page *hpage) +{ +} +#endif + #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION #ifndef arch_hugetlb_migration_supported static inline bool arch_hugetlb_migration_supported(struct hstate *h) @@ -903,14 +916,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) atomic_long_sub(l, &mm->hugetlb_usage); } -#ifndef set_huge_swap_pte_at -static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned long sz) -{ - set_huge_pte_at(mm, addr, ptep, pte); -} -#endif - #ifndef huge_ptep_modify_prot_start #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, @@ -1094,11 +1099,6 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) { } -static inline void 
set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, - pte_t *ptep, pte_t pte, unsigned long sz) -{ -} - static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index fc08b433c856..9efbc54e35e5 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -32,4 +32,12 @@ static inline bool jailhouse_paravirt(void) #endif /* !CONFIG_X86 */ +static inline bool hypervisor_isolated_pci_functions(void) +{ + if (IS_ENABLED(CONFIG_S390)) + return true; + + return jailhouse_paravirt(); +} + #endif /* __LINUX_HYPEVISOR_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index fbda5ada2afc..8eab5017bff3 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -537,7 +537,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info, * * The return codes from the ``master_xfer{_atomic}`` fields should indicate the * type of error code that occurred during the transfer, as documented in the - * Kernel Documentation file Documentation/i2c/fault-codes.rst. + * Kernel Documentation file Documentation/i2c/fault-codes.rst. Otherwise, the + * number of messages executed should be returned. */ struct i2c_algorithm { /* diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 75d40acb60c1..55e6f4ad0ca6 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -76,6 +76,7 @@ #define IEEE80211_STYPE_ACTION 0x00D0 /* control */ +#define IEEE80211_STYPE_TRIGGER 0x0020 #define IEEE80211_STYPE_CTL_EXT 0x0060 #define IEEE80211_STYPE_BACK_REQ 0x0080 #define IEEE80211_STYPE_BACK 0x0090 @@ -295,6 +296,17 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_HT_CTL_LEN 4 +/* trigger type within common_info of trigger frame */ +#define IEEE80211_TRIGGER_TYPE_MASK 0xf +#define IEEE80211_TRIGGER_TYPE_BASIC 0x0 +#define IEEE80211_TRIGGER_TYPE_BFRP 0x1 +#define IEEE80211_TRIGGER_TYPE_MU_BAR 0x2 +#define IEEE80211_TRIGGER_TYPE_MU_RTS 0x3 +#define IEEE80211_TRIGGER_TYPE_BSRP 0x4 +#define IEEE80211_TRIGGER_TYPE_GCR_MU_BAR 0x5 +#define IEEE80211_TRIGGER_TYPE_BQRP 0x6 +#define IEEE80211_TRIGGER_TYPE_NFRP 0x7 + struct ieee80211_hdr { __le16 frame_control; __le16 duration_id; @@ -324,6 +336,15 @@ struct ieee80211_qos_hdr { __le16 qos_ctrl; } __packed __aligned(2); +struct ieee80211_trigger { + __le16 frame_control; + __le16 duration; + u8 ra[ETH_ALEN]; + u8 ta[ETH_ALEN]; + __le64 common_info; + u8 variable[]; +} __packed __aligned(2); + /** * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set * @fc: frame control bytes in little-endian byteorder @@ -730,6 +751,16 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) } /** + * ieee80211_is_trigger - check if frame is trigger frame + * @fc: frame control field in little-endian byteorder + */ +static inline bool ieee80211_is_trigger(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_TRIGGER); +} + +/** * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder */ @@ -1301,6 +1332,15 @@ struct ieee80211_mgmt { u8 action_code; u8 variable[]; } __packed s1g; + struct { + u8 action_code; + u8 dialog_token; + u8 follow_up; + u32 tod; + u32 toa; + u8 max_tod_error; + u8 max_toa_error; + } __packed wnm_timing_msr; } u; } __packed action; } u; @@ -1989,7 +2029,7 @@ struct ieee80211_eht_mcs_nss_supp_bw { * 
struct ieee80211_eht_cap_elem_fixed - EHT capabilities fixed data * * This structure is the "EHT Capabilities element" fixed fields as - * described in P802.11be_D1.4 section 9.4.2.313. + * described in P802.11be_D2.0 section 9.4.2.313. * * @mac_cap_info: MAC capabilities, see IEEE80211_EHT_MAC_CAP* * @phy_cap_info: PHY capabilities, see IEEE80211_EHT_PHY_CAP* @@ -2015,25 +2055,45 @@ struct ieee80211_eht_cap_elem { u8 optional[]; } __packed; +#define IEEE80211_EHT_OPER_INFO_PRESENT 0x01 +#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x02 +#define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION 0x04 +#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT 0x08 +#define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK 0x30 + /** * struct ieee80211_eht_operation - eht operation element * * This structure is the "EHT Operation Element" fields as - * described in P802.11be_D1.4 section 9.4.2.311 + * described in P802.11be_D2.0 section 9.4.2.311 * - * FIXME: The spec is unclear how big the fields are, and doesn't - * indicate the "Disabled Subchannel Bitmap Present" in the - * structure (Figure 9-1002a) at all ... + * @params: EHT operation element parameters. See &IEEE80211_EHT_OPER_* + * @basic_mcs_nss: indicates the EHT-MCSs for each number of spatial streams in + * EHT PPDUs that are supported by all EHT STAs in the BSS in transmit and + * receive. + * @optional: optional parts */ struct ieee80211_eht_operation { - u8 chan_width; - u8 ccfs; - u8 present_bm; - - u8 disable_subchannel_bitmap[]; + u8 params; + __le32 basic_mcs_nss; + u8 optional[]; } __packed; -#define IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT 0x1 +/** + * struct ieee80211_eht_operation_info - eht operation information + * + * @control: EHT operation information control. + * @ccfs0: defines a channel center frequency for a 20, 40, 80, 160, or 320 MHz + * EHT BSS. + * @ccfs1: defines a channel center frequency for a 160 or 320 MHz EHT BSS. 
+ * @optional: optional parts + */ +struct ieee80211_eht_operation_info { + u8 control; + u8 ccfs0; + u8 ccfs1; + u8 optional[]; +} __packed; /* 802.11ac VHT Capabilities */ #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 @@ -2735,19 +2795,21 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) #define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0) #define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1) -/* EHT MAC capabilities as defined in P802.11be_D1.4 section 9.4.2.313.2 */ -#define IEEE80211_EHT_MAC_CAP0_NSEP_PRIO_ACCESS 0x01 +/* EHT MAC capabilities as defined in P802.11be_D2.0 section 9.4.2.313.2 */ +#define IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS 0x01 #define IEEE80211_EHT_MAC_CAP0_OM_CONTROL 0x02 #define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1 0x04 #define IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE2 0x08 #define IEEE80211_EHT_MAC_CAP0_RESTRICTED_TWT 0x10 #define IEEE80211_EHT_MAC_CAP0_SCS_TRAFFIC_DESC 0x20 -#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_MASK 0xc0 -#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_3895 0 -#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_7991 1 -#define IEEE80211_EHT_MAC_CAP0_MAX_AMPDU_LEN_11454 2 +#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_MASK 0xc0 +#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_3895 0 +#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_7991 1 +#define IEEE80211_EHT_MAC_CAP0_MAX_MPDU_LEN_11454 2 -/* EHT PHY capabilities as defined in P802.11be_D1.4 section 9.4.2.313.3 */ +#define IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK 0x01 + +/* EHT PHY capabilities as defined in P802.11be_D2.0 section 9.4.2.313.3 */ #define IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ 0x02 #define IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ 0x04 #define IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI 0x08 @@ -2812,7 +2874,7 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) #define IEEE80211_EHT_PHY_CAP8_RX_4096QAM_WIDER_BW_DL_OFDMA 0x02 /* - * EHT operation channel width as defined in P802.11be_D1.4 section 9.4.2.311 + * EHT operation channel width as defined in P802.11be_D2.0 section 9.4.2.311 */ #define IEEE80211_EHT_OPER_CHAN_WIDTH 0x7 #define IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ 0 @@ -2918,8 +2980,13 @@ ieee80211_eht_oper_size_ok(const u8 *data, u8 len) if (len < needed) return false; - if (elem->present_bm & IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT) - needed += 2; + if (elem->params & IEEE80211_EHT_OPER_INFO_PRESENT) { + needed += 3; + + if (elem->params & + IEEE80211_EHT_OPER_DISABLED_SUBCHANNEL_BITMAP_PRESENT) + needed += 2; + } return len >= needed; } @@ -3464,6 +3531,17 @@ enum ieee80211_mesh_actioncode { WLAN_MESH_ACTION_TBTT_ADJUSTMENT_RESPONSE, }; +/* Unprotected WNM action codes */ +enum ieee80211_unprotected_wnm_actioncode { + WLAN_UNPROTECTED_WNM_ACTION_TIM = 0, + WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE = 1, +}; + +/* Public action codes */ +enum ieee80211_public_actioncode { + WLAN_PUBLIC_ACTION_FTM_RESPONSE = 33, +}; + /* Security key length */ enum ieee80211_key_len { WLAN_KEY_LEN_WEP40 = 5, @@ -4063,6 +4141,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) *category != WLAN_CATEGORY_SELF_PROTECTED && *category != WLAN_CATEGORY_UNPROT_DMG && *category != WLAN_CATEGORY_VHT && + *category != WLAN_CATEGORY_S1G && *category != WLAN_CATEGORY_VENDOR_SPECIFIC; } @@ -4252,6 +4331,40 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) return true; } +static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (skb->len < 
IEEE80211_MIN_ACTION_SIZE) + return false; + + if (!ieee80211_is_action(mgmt->frame_control)) + return false; + + if (mgmt->u.action.category == WLAN_CATEGORY_WNM_UNPROTECTED && + mgmt->u.action.u.wnm_timing_msr.action_code == + WLAN_UNPROTECTED_WNM_ACTION_TIMING_MEASUREMENT_RESPONSE && + skb->len >= offsetofend(typeof(*mgmt), u.action.u.wnm_timing_msr)) + return true; + + return false; +} + +static inline bool ieee80211_is_ftm(struct sk_buff *skb) +{ + struct ieee80211_mgmt *mgmt = (void *)skb->data; + + if (!ieee80211_is_public_action((void *)mgmt, skb->len)) + return false; + + if (mgmt->u.action.u.ftm.action_code == + WLAN_PUBLIC_ACTION_FTM_RESPONSE && + skb->len >= offsetofend(typeof(*mgmt), u.action.u.ftm)) + return true; + + return false; +} + struct element { u8 id; u8 datalen; @@ -4345,4 +4458,229 @@ enum ieee80211_range_params_max_total_ltf { IEEE80211_RANGE_PARAMS_MAX_TOTAL_LTF_UNSPECIFIED, }; +/* multi-link device */ +#define IEEE80211_MLD_MAX_NUM_LINKS 15 + +#define IEEE80211_ML_CONTROL_TYPE 0x0007 +#define IEEE80211_ML_CONTROL_TYPE_BASIC 0 +#define IEEE80211_ML_CONTROL_TYPE_PREQ 1 +#define IEEE80211_ML_CONTROL_TYPE_RECONF 2 +#define IEEE80211_ML_CONTROL_TYPE_TDLS 3 +#define IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS 4 +#define IEEE80211_ML_CONTROL_PRESENCE_MASK 0xfff0 + +struct ieee80211_multi_link_elem { + __le16 control; + u8 variable[]; +} __packed; + +#define IEEE80211_MLC_BASIC_PRES_LINK_ID 0x0010 +#define IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT 0x0020 +#define IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY 0x0040 +#define IEEE80211_MLC_BASIC_PRES_EML_CAPA 0x0080 +#define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP 0x0100 +#define IEEE80211_MLC_BASIC_PRES_MLD_ID 0x0200 + +#define IEEE80211_MED_SYNC_DELAY_DURATION 0x00ff +#define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH 0x0f00 +#define IEEE80211_MED_SYNC_DELAY_SYNC_MAX_NUM_TXOPS 0xf000 + +#define IEEE80211_EML_CAP_EMLSR_SUPP 0x0001 +#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x000e +#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_0US 0 +#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 1 +#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_64US 2 +#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_128US 3 +#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 4 +#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x0070 +#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_0US 0 +#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_16US 1 +#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_32US 2 +#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 3 +#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_128US 4 +#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 5 +#define IEEE80211_EML_CAP_EMLMR_SUPPORT 0x0080 +#define IEEE80211_EML_CAP_EMLMR_DELAY 0x0700 +#define IEEE80211_EML_CAP_EMLMR_DELAY_0US 0 +#define IEEE80211_EML_CAP_EMLMR_DELAY_32US 1 +#define IEEE80211_EML_CAP_EMLMR_DELAY_64US 2 +#define IEEE80211_EML_CAP_EMLMR_DELAY_128US 3 +#define IEEE80211_EML_CAP_EMLMR_DELAY_256US 4 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT 0x7800 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_0 0 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128US 1 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_256US 2 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_512US 3 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_1TU 4 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_2TU 5 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_4TU 6 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_8TU 7 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_16TU 8 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_32TU 9 
+#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_64TU 10 +#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 11 + +#define IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS 0x000f +#define IEEE80211_MLD_CAP_OP_SRS_SUPPORT 0x0010 +#define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP 0x0060 +#define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND 0x0f80 +#define IEEE80211_MLD_CAP_OP_AAR_SUPPORT 0x1000 + +struct ieee80211_mle_basic_common_info { + u8 len; + u8 mld_mac_addr[ETH_ALEN]; + u8 variable[]; +} __packed; + +#define IEEE80211_MLC_PREQ_PRES_MLD_ID 0x0010 + +struct ieee80211_mle_preq_common_info { + u8 len; + u8 variable[]; +} __packed; + +#define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR 0x0010 + +/* no fixed fields in RECONF */ + +struct ieee80211_mle_tdls_common_info { + u8 len; + u8 ap_mld_mac_addr[ETH_ALEN]; +} __packed; + +#define IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR 0x0010 + +/* no fixed fields in PRIO_ACCESS */ + +/** + * ieee80211_mle_common_size - check multi-link element common size + * @data: multi-link element, must already be checked for size using + * ieee80211_mle_size_ok() + */ +static inline u8 ieee80211_mle_common_size(const u8 *data) +{ + const struct ieee80211_multi_link_elem *mle = (const void *)data; + u16 control = le16_to_cpu(mle->control); + u8 common = 0; + + switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) { + case IEEE80211_ML_CONTROL_TYPE_BASIC: + common += sizeof(struct ieee80211_mle_basic_common_info); + break; + case IEEE80211_ML_CONTROL_TYPE_PREQ: + common += sizeof(struct ieee80211_mle_preq_common_info); + break; + case IEEE80211_ML_CONTROL_TYPE_RECONF: + if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR) + common += ETH_ALEN; + return common; + case IEEE80211_ML_CONTROL_TYPE_TDLS: + common += sizeof(struct ieee80211_mle_tdls_common_info); + break; + case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: + if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR) + common += ETH_ALEN; + return common; + default: + WARN_ON(1); + return 0; + } + + return common + mle->variable[0]; +} + +/** + * ieee80211_mle_size_ok - validate multi-link element size + * @data: pointer to the element data + * @len: length of the containing element + */ +static inline bool ieee80211_mle_size_ok(const u8 *data, u8 len) +{ + const struct ieee80211_multi_link_elem *mle = (const void *)data; + u8 fixed = sizeof(*mle); + u8 common = 0; + bool check_common_len = false; + u16 control; + + if (len < fixed) + return false; + + control = le16_to_cpu(mle->control); + + switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) { + case IEEE80211_ML_CONTROL_TYPE_BASIC: + common += sizeof(struct ieee80211_mle_basic_common_info); + check_common_len = true; + if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID) + common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT) + common += 1; + if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY) + common += 2; + if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA) + common += 2; + if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP) + common += 2; + if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID) + common += 1; + break; + case IEEE80211_ML_CONTROL_TYPE_PREQ: + common += sizeof(struct ieee80211_mle_preq_common_info); + if (control & IEEE80211_MLC_PREQ_PRES_MLD_ID) + common += 1; + check_common_len = true; + break; + case IEEE80211_ML_CONTROL_TYPE_RECONF: + if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR) + common += ETH_ALEN; + break; + case IEEE80211_ML_CONTROL_TYPE_TDLS: + common += sizeof(struct ieee80211_mle_tdls_common_info); + 
check_common_len = true; + break; + case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: + if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR) + common += ETH_ALEN; + break; + default: + /* we don't know this type */ + return true; + } + + if (len < fixed + common) + return false; + + if (!check_common_len) + return true; + + /* if present, common length is the first octet there */ + return mle->variable[0] >= common; +} + +enum ieee80211_mle_subelems { + IEEE80211_MLE_SUBELEM_PER_STA_PROFILE = 0, +}; + +#define IEEE80211_MLE_STA_CONTROL_LINK_ID 0x000f +#define IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE 0x0010 +#define IEEE80211_MLE_STA_CONTROL_STA_MAC_ADDR_PRESENT 0x0020 +#define IEEE80211_MLE_STA_CONTROL_BEACON_INT_PRESENT 0x0040 +#define IEEE80211_MLE_STA_CONTROL_TSF_OFFS_PRESENT 0x0080 +#define IEEE80211_MLE_STA_CONTROL_DTIM_INFO_PRESENT 0x0100 +#define IEEE80211_MLE_STA_CONTROL_NSTR_LINK_PAIR_PRESENT 0x0200 +#define IEEE80211_MLE_STA_CONTROL_NSTR_BITMAP_SIZE 0x0400 +#define IEEE80211_MLE_STA_CONTROL_BSS_PARAM_CHANGE_CNT_PRESENT 0x0800 + +struct ieee80211_mle_per_sta_profile { + __le16 control; + u8 sta_info_len; + u8 variable[]; +} __packed; + +#define for_each_mle_subelement(_elem, _data, _len) \ + if (ieee80211_mle_size_ok(_data, _len)) \ + for_each_element(_elem, \ + _data + ieee80211_mle_common_size(_data),\ + _len - ieee80211_mle_common_size(_data)) + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if_eql.h b/include/linux/if_eql.h index d75601d613cc..07f9b660b741 100644 --- a/include/linux/if_eql.h +++ b/include/linux/if_eql.h @@ -21,6 +21,7 @@ #include <linux/timer.h> #include <linux/spinlock.h> +#include <net/net_trackers.h> #include <uapi/linux/if_eql.h> typedef struct slave { diff --git a/include/linux/if_hsr.h b/include/linux/if_hsr.h index 408539d5ea5f..0404f5bf4f30 100644 --- a/include/linux/if_hsr.h +++ b/include/linux/if_hsr.h @@ -2,6 +2,10 @@ #ifndef _LINUX_IF_HSR_H_ #define _LINUX_IF_HSR_H_ +#include <linux/types.h> + +struct net_device; + /* used to differentiate various protocols */ enum hsr_version { HSR_V0 = 0, diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index b42294739063..523025106a64 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -46,10 +46,10 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, pcpu_stats = get_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); - pcpu_stats->rx_packets++; - pcpu_stats->rx_bytes += len; + u64_stats_inc(&pcpu_stats->rx_packets); + u64_stats_add(&pcpu_stats->rx_bytes, len); if (multicast) - pcpu_stats->rx_multicast++; + u64_stats_inc(&pcpu_stats->rx_multicast); u64_stats_update_end(&pcpu_stats->syncp); put_cpu_ptr(vlan->pcpu_stats); } else { diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h index 10e7521ecb6c..839d1e48b85e 100644 --- a/include/linux/if_rmnet.h +++ b/include/linux/if_rmnet.h @@ -5,6 +5,8 @@ #ifndef _LINUX_IF_RMNET_H_ #define _LINUX_IF_RMNET_H_ +#include <linux/types.h> + struct rmnet_map_header { u8 flags; /* MAP_CMD_FLAG, MAP_PAD_LEN_MASK */ u8 mux_id; diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 915a187cfabd..553552fa635c 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h @@ -2,14 +2,18 @@ #ifndef _LINUX_IF_TAP_H_ #define _LINUX_IF_TAP_H_ +#include <net/sock.h> +#include <linux/skb_array.h> + +struct file; +struct socket; + #if IS_ENABLED(CONFIG_TAP) struct socket *tap_get_socket(struct file *); struct ptr_ring *tap_get_ptr_ring(struct file *file); #else 
#include <linux/err.h> #include <linux/errno.h> -struct file; -struct socket; static inline struct socket *tap_get_socket(struct file *f) { return ERR_PTR(-EINVAL); @@ -20,9 +24,6 @@ static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) } #endif /* CONFIG_TAP */ -#include <net/sock.h> -#include <linux/skb_array.h> - /* * Maximum times a tap device can be opened. This can be used to * configure the number of receive queue, e.g. for multiqueue virtio. diff --git a/include/linux/if_team.h b/include/linux/if_team.h index add607943c95..fc985e5c739d 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -12,11 +12,11 @@ #include <uapi/linux/if_team.h> struct team_pcpu_stats { - u64 rx_packets; - u64 rx_bytes; - u64 rx_multicast; - u64 tx_packets; - u64 tx_bytes; + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_multicast; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; struct u64_stats_sync syncp; u32 rx_dropped; u32 tx_dropped; diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 2be4dd7e90a9..e00c4ee81ff7 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -118,11 +118,11 @@ static inline void vlan_drop_rx_stag_filter_info(struct net_device *dev) * @tx_dropped: number of tx drops */ struct vlan_pcpu_stats { - u64 rx_packets; - u64 rx_bytes; - u64 rx_multicast; - u64 tx_packets; - u64 tx_bytes; + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_multicast; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; struct u64_stats_sync syncp; u32 rx_errors; u32 tx_dropped; diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index c582e1a14232..e72167b96d27 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -41,7 +41,6 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * @param: motion sensor parameters structure * @resp: motion sensor response structure * @type: type of motion sensor - * @loc: location where the motion sensor is placed * @range_updated: True if the range of the sensor has been * updated. * @curr_range: If updated, the current range value. @@ -67,7 +66,6 @@ struct cros_ec_sensors_core_state { struct ec_response_motion_sense *resp; enum motionsensor_type type; - enum motionsensor_location loc; bool range_updated; int curr_range; @@ -95,8 +93,11 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask, struct platform_device; int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, bool physical_device, - cros_ec_sensors_capture_t trigger_capture, - cros_ec_sensorhub_push_data_cb_t push_data); + cros_ec_sensors_capture_t trigger_capture); + +int cros_ec_sensors_core_register(struct device *dev, + struct iio_dev *indio_dev, + cros_ec_sensorhub_push_data_cb_t push_data); irqreturn_t cros_ec_sensors_capture(int irq, void *p); int cros_ec_sensors_push_data(struct iio_dev *indio_dev, diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 233d2e6b7721..5dfbfc991c69 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -9,14 +9,16 @@ #include <linux/device.h> #include <linux/cdev.h> +#include <linux/slab.h> #include <linux/iio/types.h> -#include <linux/of.h> /* IIO TODO LIST */ /* * Provide means of adjusting timer accuracy. * Currently assumes nano seconds. 
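The if_macvlan, if_team and if_vlan hunks above move the per-CPU counters to u64_stats_t, bumped with u64_stats_inc()/u64_stats_add() inside a u64_stats_update_begin()/u64_stats_update_end() section. A minimal sketch of the matching write and read sides of that pattern; struct demo_pcpu_stats and the demo_* helpers are hypothetical names, not part of this series:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64_stats_t		rx_packets;
	u64_stats_t		rx_bytes;
	struct u64_stats_sync	syncp;
};

/* writer: runs in the datapath, updates this CPU's counters */
static void demo_count_rx(struct demo_pcpu_stats __percpu *stats, unsigned int len)
{
	struct demo_pcpu_stats *s = get_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_packets);
	u64_stats_add(&s->rx_bytes, len);
	u64_stats_update_end(&s->syncp);
	put_cpu_ptr(stats);
}

/* reader: retries until it sees a consistent snapshot of one CPU's counters */
static void demo_read_rx(const struct demo_pcpu_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->rx_packets);
		*bytes = u64_stats_read(&s->rx_bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}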
*/ +struct of_phandle_args; + enum iio_shared_by { IIO_SEPARATE, IIO_SHARED_BY_TYPE, @@ -313,7 +315,6 @@ static inline bool iio_channel_has_available(const struct iio_chan_spec *chan, } s64 iio_get_time_ns(const struct iio_dev *indio_dev); -unsigned int iio_get_time_res(const struct iio_dev *indio_dev); /* * Device operating modes @@ -709,8 +710,13 @@ static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) return dev_get_drvdata(&indio_dev->dev); } -/* Can we make this smaller? */ -#define IIO_ALIGN L1_CACHE_BYTES +/* + * Used to ensure the iio_priv() structure is aligned to allow that structure + * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which + * must not share cachelines with the rest of the structure, thus making + * them safe for use with non-coherent DMA. + */ +#define IIO_DMA_MINALIGN ARCH_KMALLOC_MINALIGN struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); /* The information at the returned address is guaranteed to be cacheline aligned */ @@ -721,10 +727,13 @@ static inline void *iio_priv(const struct iio_dev *indio_dev) void iio_device_free(struct iio_dev *indio_dev); struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); -__printf(2, 3) -struct iio_trigger *devm_iio_trigger_alloc(struct device *parent, - const char *fmt, ...); +#define devm_iio_trigger_alloc(parent, fmt, ...) \ + __devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__) +__printf(3, 4) +struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent, + struct module *this_mod, + const char *fmt, ...); /** * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry * @indio_dev: IIO device structure for device diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 4c69b144677b..f6360d9a492d 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -93,6 +93,11 @@ static inline void iio_trigger_put(struct iio_trigger *trig) static inline struct iio_trigger *iio_trigger_get(struct iio_trigger *trig) { get_device(&trig->dev); + + WARN_ONCE(list_empty(&trig->list), + "Getting non-registered iio trigger %s is prohibited\n", + trig->name); + __module_get(trig->owner); return trig; @@ -126,16 +131,10 @@ static inline void *iio_trigger_get_drvdata(struct iio_trigger *trig) * iio_trigger_register() - register a trigger with the IIO core * @trig_info: trigger to be registered **/ -#define iio_trigger_register(trig_info) \ - __iio_trigger_register((trig_info), THIS_MODULE) -int __iio_trigger_register(struct iio_trigger *trig_info, - struct module *this_mod); +int iio_trigger_register(struct iio_trigger *trig_info); -#define devm_iio_trigger_register(dev, trig_info) \ - __devm_iio_trigger_register((dev), (trig_info), THIS_MODULE) -int __devm_iio_trigger_register(struct device *dev, - struct iio_trigger *trig_info, - struct module *this_mod); +int devm_iio_trigger_register(struct device *dev, + struct iio_trigger *trig_info); /** * iio_trigger_unregister() - unregister a trigger from the core @@ -163,8 +162,13 @@ void iio_trigger_poll_chained(struct iio_trigger *trig); irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private); -__printf(2, 3) -struct iio_trigger *iio_trigger_alloc(struct device *parent, const char *fmt, ...); +#define iio_trigger_alloc(parent, fmt, ...) 
\ + __iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__) + +__printf(3, 4) +struct iio_trigger *__iio_trigger_alloc(struct device *parent, + struct module *this_mod, + const char *fmt, ...); void iio_trigger_free(struct iio_trigger *trig); /** diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index ead323243e7b..ddb27fc0ee8c 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -131,7 +131,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) IN_DEV_ORCONF((in_dev), IGNORE_ROUTES_WITH_LINKDOWN) #define IN_DEV_ARPFILTER(in_dev) IN_DEV_ORCONF((in_dev), ARPFILTER) -#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_ORCONF((in_dev), ARP_ACCEPT) +#define IN_DEV_ARP_ACCEPT(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ACCEPT) #define IN_DEV_ARP_ANNOUNCE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE) #define IN_DEV_ARP_IGNORE(in_dev) IN_DEV_MAXCONF((in_dev), ARP_IGNORE) #define IN_DEV_ARP_NOTIFY(in_dev) IN_DEV_MAXCONF((in_dev), ARP_NOTIFY) diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h index 520858d12680..51cca17ee94c 100644 --- a/include/linux/input/elan-i2c-ids.h +++ b/include/linux/input/elan-i2c-ids.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Elan I2C/SMBus Touchpad device whitelist * @@ -11,10 +12,6 @@ * copyright (c) 2011-2012 Cypress Semiconductor, Inc. * copyright (c) 2011-2012 Google, Inc. * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published - * by the Free Software Foundation. - * * Trademarks are the property of their respective owners. */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h deleted file mode 100644 index 5fcf89faa31a..000000000000 --- a/include/linux/intel-iommu.h +++ /dev/null @@ -1,832 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright © 2006-2015, Intel Corporation. - * - * Authors: Ashok Raj <ashok.raj@intel.com> - * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> - * David Woodhouse <David.Woodhouse@intel.com> - */ - -#ifndef _INTEL_IOMMU_H_ -#define _INTEL_IOMMU_H_ - -#include <linux/types.h> -#include <linux/iova.h> -#include <linux/io.h> -#include <linux/idr.h> -#include <linux/mmu_notifier.h> -#include <linux/list.h> -#include <linux/iommu.h> -#include <linux/io-64-nonatomic-lo-hi.h> -#include <linux/dmar.h> -#include <linux/ioasid.h> -#include <linux/bitfield.h> - -#include <asm/cacheflush.h> -#include <asm/iommu.h> - -/* - * VT-d hardware uses 4KiB page size regardless of host page size. - */ -#define VTD_PAGE_SHIFT (12) -#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT) -#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT) -#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK) - -#define VTD_STRIDE_SHIFT (9) -#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT) - -#define DMA_PTE_READ BIT_ULL(0) -#define DMA_PTE_WRITE BIT_ULL(1) -#define DMA_PTE_LARGE_PAGE BIT_ULL(7) -#define DMA_PTE_SNP BIT_ULL(11) - -#define DMA_FL_PTE_PRESENT BIT_ULL(0) -#define DMA_FL_PTE_US BIT_ULL(2) -#define DMA_FL_PTE_ACCESS BIT_ULL(5) -#define DMA_FL_PTE_DIRTY BIT_ULL(6) -#define DMA_FL_PTE_XD BIT_ULL(63) - -#define ADDR_WIDTH_5LEVEL (57) -#define ADDR_WIDTH_4LEVEL (48) - -#define CONTEXT_TT_MULTI_LEVEL 0 -#define CONTEXT_TT_DEV_IOTLB 1 -#define CONTEXT_TT_PASS_THROUGH 2 -#define CONTEXT_PASIDE BIT_ULL(3) - -/* - * Intel IOMMU register specification per version 1.0 public spec. 
- */ -#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ -#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ -#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ -#define DMAR_GCMD_REG 0x18 /* Global command register */ -#define DMAR_GSTS_REG 0x1c /* Global status register */ -#define DMAR_RTADDR_REG 0x20 /* Root entry table */ -#define DMAR_CCMD_REG 0x28 /* Context command reg */ -#define DMAR_FSTS_REG 0x34 /* Fault Status register */ -#define DMAR_FECTL_REG 0x38 /* Fault control register */ -#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ -#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ -#define DMAR_FEUADDR_REG 0x44 /* Upper address register */ -#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ -#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ -#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ -#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ -#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ -#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ -#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */ -#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */ -#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */ -#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */ -#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */ -#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */ -#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */ -#define DMAR_PQH_REG 0xc0 /* Page request queue head register */ -#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */ -#define DMAR_PQA_REG 0xd0 /* Page request queue address register */ -#define DMAR_PRS_REG 0xdc /* Page request status register */ -#define DMAR_PECTL_REG 0xe0 /* Page request event control register */ -#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */ -#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */ -#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */ -#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */ -#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */ -#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */ -#define DMAR_MTRR_FIX16K_80000_REG 0x128 -#define DMAR_MTRR_FIX16K_A0000_REG 0x130 -#define DMAR_MTRR_FIX4K_C0000_REG 0x138 -#define DMAR_MTRR_FIX4K_C8000_REG 0x140 -#define DMAR_MTRR_FIX4K_D0000_REG 0x148 -#define DMAR_MTRR_FIX4K_D8000_REG 0x150 -#define DMAR_MTRR_FIX4K_E0000_REG 0x158 -#define DMAR_MTRR_FIX4K_E8000_REG 0x160 -#define DMAR_MTRR_FIX4K_F0000_REG 0x168 -#define DMAR_MTRR_FIX4K_F8000_REG 0x170 -#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */ -#define DMAR_MTRR_PHYSMASK0_REG 0x188 -#define DMAR_MTRR_PHYSBASE1_REG 0x190 -#define DMAR_MTRR_PHYSMASK1_REG 0x198 -#define DMAR_MTRR_PHYSBASE2_REG 0x1a0 -#define DMAR_MTRR_PHYSMASK2_REG 0x1a8 -#define DMAR_MTRR_PHYSBASE3_REG 0x1b0 -#define DMAR_MTRR_PHYSMASK3_REG 0x1b8 -#define DMAR_MTRR_PHYSBASE4_REG 0x1c0 -#define DMAR_MTRR_PHYSMASK4_REG 0x1c8 -#define DMAR_MTRR_PHYSBASE5_REG 0x1d0 -#define DMAR_MTRR_PHYSMASK5_REG 0x1d8 -#define DMAR_MTRR_PHYSBASE6_REG 0x1e0 -#define DMAR_MTRR_PHYSMASK6_REG 0x1e8 -#define DMAR_MTRR_PHYSBASE7_REG 0x1f0 -#define DMAR_MTRR_PHYSMASK7_REG 0x1f8 -#define DMAR_MTRR_PHYSBASE8_REG 0x200 -#define DMAR_MTRR_PHYSMASK8_REG 0x208 -#define DMAR_MTRR_PHYSBASE9_REG 0x210 -#define 
DMAR_MTRR_PHYSMASK9_REG 0x218 -#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */ -#define DMAR_VCMD_REG 0xe00 /* Virtual command register */ -#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */ - -#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) -#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) -#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg) - -#define OFFSET_STRIDE (9) - -#define dmar_readq(a) readq(a) -#define dmar_writeq(a,v) writeq(v,a) -#define dmar_readl(a) readl(a) -#define dmar_writel(a, v) writel(v, a) - -#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) -#define DMAR_VER_MINOR(v) ((v) & 0x0f) - -/* - * Decoding Capability Register - */ -#define cap_5lp_support(c) (((c) >> 60) & 1) -#define cap_pi_support(c) (((c) >> 59) & 1) -#define cap_fl1gp_support(c) (((c) >> 56) & 1) -#define cap_read_drain(c) (((c) >> 55) & 1) -#define cap_write_drain(c) (((c) >> 54) & 1) -#define cap_max_amask_val(c) (((c) >> 48) & 0x3f) -#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) -#define cap_pgsel_inv(c) (((c) >> 39) & 1) - -#define cap_super_page_val(c) (((c) >> 34) & 0xf) -#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ - * OFFSET_STRIDE) + 21) - -#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) -#define cap_max_fault_reg_offset(c) \ - (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) - -#define cap_zlr(c) (((c) >> 22) & 1) -#define cap_isoch(c) (((c) >> 23) & 1) -#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) -#define cap_sagaw(c) (((c) >> 8) & 0x1f) -#define cap_caching_mode(c) (((c) >> 7) & 1) -#define cap_phmr(c) (((c) >> 6) & 1) -#define cap_plmr(c) (((c) >> 5) & 1) -#define cap_rwbf(c) (((c) >> 4) & 1) -#define cap_afl(c) (((c) >> 3) & 1) -#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7))) -/* - * Extended Capability Register - */ - -#define ecap_rps(e) (((e) >> 49) & 0x1) -#define ecap_smpwc(e) (((e) >> 48) & 0x1) -#define ecap_flts(e) (((e) >> 47) & 0x1) -#define ecap_slts(e) (((e) >> 46) & 0x1) -#define ecap_slads(e) (((e) >> 45) & 0x1) -#define ecap_vcs(e) (((e) >> 44) & 0x1) -#define ecap_smts(e) (((e) >> 43) & 0x1) -#define ecap_dit(e) (((e) >> 41) & 0x1) -#define ecap_pds(e) (((e) >> 42) & 0x1) -#define ecap_pasid(e) (((e) >> 40) & 0x1) -#define ecap_pss(e) (((e) >> 35) & 0x1f) -#define ecap_eafs(e) (((e) >> 34) & 0x1) -#define ecap_nwfs(e) (((e) >> 33) & 0x1) -#define ecap_srs(e) (((e) >> 31) & 0x1) -#define ecap_ers(e) (((e) >> 30) & 0x1) -#define ecap_prs(e) (((e) >> 29) & 0x1) -#define ecap_broken_pasid(e) (((e) >> 28) & 0x1) -#define ecap_dis(e) (((e) >> 27) & 0x1) -#define ecap_nest(e) (((e) >> 26) & 0x1) -#define ecap_mts(e) (((e) >> 25) & 0x1) -#define ecap_ecs(e) (((e) >> 24) & 0x1) -#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) -#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16) -#define ecap_coherent(e) ((e) & 0x1) -#define ecap_qis(e) ((e) & 0x2) -#define ecap_pass_through(e) (((e) >> 6) & 0x1) -#define ecap_eim_support(e) (((e) >> 4) & 0x1) -#define ecap_ir_support(e) (((e) >> 3) & 0x1) -#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1) -#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf) -#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */ - -/* Virtual command interface capability */ -#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */ - -/* IOTLB_REG */ -#define DMA_TLB_FLUSH_GRANU_OFFSET 60 -#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) -#define 
DMA_TLB_DSI_FLUSH (((u64)2) << 60) -#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) -#define DMA_TLB_IIRG(type) ((type >> 60) & 3) -#define DMA_TLB_IAIG(val) (((val) >> 57) & 3) -#define DMA_TLB_READ_DRAIN (((u64)1) << 49) -#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) -#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) -#define DMA_TLB_IVT (((u64)1) << 63) -#define DMA_TLB_IH_NONLEAF (((u64)1) << 6) -#define DMA_TLB_MAX_SIZE (0x3f) - -/* INVALID_DESC */ -#define DMA_CCMD_INVL_GRANU_OFFSET 61 -#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4) -#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4) -#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4) -#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7) -#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6) -#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16))) -#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6) -#define DMA_ID_TLB_ADDR(addr) (addr) -#define DMA_ID_TLB_ADDR_MASK(mask) (mask) - -/* PMEN_REG */ -#define DMA_PMEN_EPM (((u32)1)<<31) -#define DMA_PMEN_PRS (((u32)1)<<0) - -/* GCMD_REG */ -#define DMA_GCMD_TE (((u32)1) << 31) -#define DMA_GCMD_SRTP (((u32)1) << 30) -#define DMA_GCMD_SFL (((u32)1) << 29) -#define DMA_GCMD_EAFL (((u32)1) << 28) -#define DMA_GCMD_WBF (((u32)1) << 27) -#define DMA_GCMD_QIE (((u32)1) << 26) -#define DMA_GCMD_SIRTP (((u32)1) << 24) -#define DMA_GCMD_IRE (((u32) 1) << 25) -#define DMA_GCMD_CFI (((u32) 1) << 23) - -/* GSTS_REG */ -#define DMA_GSTS_TES (((u32)1) << 31) -#define DMA_GSTS_RTPS (((u32)1) << 30) -#define DMA_GSTS_FLS (((u32)1) << 29) -#define DMA_GSTS_AFLS (((u32)1) << 28) -#define DMA_GSTS_WBFS (((u32)1) << 27) -#define DMA_GSTS_QIES (((u32)1) << 26) -#define DMA_GSTS_IRTPS (((u32)1) << 24) -#define DMA_GSTS_IRES (((u32)1) << 25) -#define DMA_GSTS_CFIS (((u32)1) << 23) - -/* DMA_RTADDR_REG */ -#define DMA_RTADDR_RTT (((u64)1) << 11) -#define DMA_RTADDR_SMT (((u64)1) << 10) - -/* CCMD_REG */ -#define DMA_CCMD_ICC (((u64)1) << 63) -#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) -#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) -#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) -#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) -#define DMA_CCMD_MASK_NOBIT 0 -#define DMA_CCMD_MASK_1BIT 1 -#define DMA_CCMD_MASK_2BIT 2 -#define DMA_CCMD_MASK_3BIT 3 -#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) -#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) - -/* FECTL_REG */ -#define DMA_FECTL_IM (((u32)1) << 31) - -/* FSTS_REG */ -#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */ -#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */ -#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */ -#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */ -#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */ -#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */ -#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) - -/* FRCD_REG, 32 bits access */ -#define DMA_FRCD_F (((u32)1) << 31) -#define dma_frcd_type(d) ((d >> 30) & 1) -#define dma_frcd_fault_reason(c) (c & 0xff) -#define dma_frcd_source_id(c) (c & 0xffff) -#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff) -#define dma_frcd_pasid_present(c) (((c) >> 31) & 1) -/* low 64 bit */ -#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT)) - -/* PRS_REG */ -#define DMA_PRS_PPR ((u32)1) -#define DMA_PRS_PRO ((u32)2) - -#define DMA_VCS_PAS ((u64)1) - -#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ -do { \ - cycles_t start_time = get_cycles(); \ - while (1) { \ - sts = op(iommu->reg + offset); \ - if (cond) \ - break; \ - if 
(DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ - panic("DMAR hardware is malfunctioning\n"); \ - cpu_relax(); \ - } \ -} while (0) - -#define QI_LENGTH 256 /* queue length */ - -enum { - QI_FREE, - QI_IN_USE, - QI_DONE, - QI_ABORT -}; - -#define QI_CC_TYPE 0x1 -#define QI_IOTLB_TYPE 0x2 -#define QI_DIOTLB_TYPE 0x3 -#define QI_IEC_TYPE 0x4 -#define QI_IWD_TYPE 0x5 -#define QI_EIOTLB_TYPE 0x6 -#define QI_PC_TYPE 0x7 -#define QI_DEIOTLB_TYPE 0x8 -#define QI_PGRP_RESP_TYPE 0x9 -#define QI_PSTRM_RESP_TYPE 0xa - -#define QI_IEC_SELECTIVE (((u64)1) << 4) -#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32)) -#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27)) - -#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) -#define QI_IWD_STATUS_WRITE (((u64)1) << 5) -#define QI_IWD_FENCE (((u64)1) << 6) -#define QI_IWD_PRQ_DRAIN (((u64)1) << 7) - -#define QI_IOTLB_DID(did) (((u64)did) << 16) -#define QI_IOTLB_DR(dr) (((u64)dr) << 7) -#define QI_IOTLB_DW(dw) (((u64)dw) << 6) -#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) -#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) -#define QI_IOTLB_IH(ih) (((u64)ih) << 6) -#define QI_IOTLB_AM(am) (((u8)am) & 0x3f) - -#define QI_CC_FM(fm) (((u64)fm) << 48) -#define QI_CC_SID(sid) (((u64)sid) << 32) -#define QI_CC_DID(did) (((u64)did) << 16) -#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4)) - -#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) -#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) -#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) -#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ - ((u64)((pfsid >> 4) & 0xfff) << 52)) -#define QI_DEV_IOTLB_SIZE 1 -#define QI_DEV_IOTLB_MAX_INVS 32 - -#define QI_PC_PASID(pasid) (((u64)pasid) << 32) -#define QI_PC_DID(did) (((u64)did) << 16) -#define QI_PC_GRAN(gran) (((u64)gran) << 4) - -/* PASID cache invalidation granu */ -#define QI_PC_ALL_PASIDS 0 -#define QI_PC_PASID_SEL 1 -#define QI_PC_GLOBAL 3 - -#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) -#define QI_EIOTLB_IH(ih) (((u64)ih) << 6) -#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f) -#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) -#define QI_EIOTLB_DID(did) (((u64)did) << 16) -#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) - -/* QI Dev-IOTLB inv granu */ -#define QI_DEV_IOTLB_GRAN_ALL 1 -#define QI_DEV_IOTLB_GRAN_PASID_SEL 0 - -#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) -#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) -#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) -#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) -#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ - ((u64)((pfsid >> 4) & 0xfff) << 52)) -#define QI_DEV_EIOTLB_MAX_INVS 32 - -/* Page group response descriptor QW0 */ -#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) -#define QI_PGRP_PDP(p) (((u64)(p)) << 5) -#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12) -#define QI_PGRP_DID(rid) (((u64)(rid)) << 16) -#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) - -/* Page group response descriptor QW1 */ -#define QI_PGRP_LPIG(x) (((u64)(x)) << 2) -#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3) - - -#define QI_RESP_SUCCESS 0x0 -#define QI_RESP_INVALID 0x1 -#define QI_RESP_FAILURE 0xf - -#define QI_GRAN_NONG_PASID 2 -#define QI_GRAN_PSI_PASID 3 - -#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap)) - -struct qi_desc { - u64 qw0; - u64 qw1; - u64 qw2; - u64 qw3; -}; - 
-struct q_inval { - raw_spinlock_t q_lock; - void *desc; /* invalidation queue */ - int *desc_status; /* desc status */ - int free_head; /* first free entry */ - int free_tail; /* last free entry */ - int free_cnt; -}; - -struct dmar_pci_notify_info; - -#ifdef CONFIG_IRQ_REMAP -/* 1MB - maximum possible interrupt remapping table size */ -#define INTR_REMAP_PAGE_ORDER 8 -#define INTR_REMAP_TABLE_REG_SIZE 0xf -#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf - -#define INTR_REMAP_TABLE_ENTRIES 65536 - -struct irq_domain; - -struct ir_table { - struct irte *base; - unsigned long *bitmap; -}; - -void intel_irq_remap_add_device(struct dmar_pci_notify_info *info); -#else -static inline void -intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { } -#endif - -struct iommu_flush { - void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, - u8 fm, u64 type); - void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr, - unsigned int size_order, u64 type); -}; - -enum { - SR_DMAR_FECTL_REG, - SR_DMAR_FEDATA_REG, - SR_DMAR_FEADDR_REG, - SR_DMAR_FEUADDR_REG, - MAX_SR_DMAR_REGS -}; - -#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0) -#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1) -#define VTD_FLAG_SVM_CAPABLE (1 << 2) - -extern int intel_iommu_sm; -extern spinlock_t device_domain_lock; - -#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap)) -#define pasid_supported(iommu) (sm_supported(iommu) && \ - ecap_pasid((iommu)->ecap)) - -struct pasid_entry; -struct pasid_state_entry; -struct page_req_dsc; - -/* - * 0: Present - * 1-11: Reserved - * 12-63: Context Ptr (12 - (haw-1)) - * 64-127: Reserved - */ -struct root_entry { - u64 lo; - u64 hi; -}; - -/* - * low 64 bits: - * 0: present - * 1: fault processing disable - * 2-3: translation type - * 12-63: address space root - * high 64 bits: - * 0-2: address width - * 3-6: aval - * 8-23: domain id - */ -struct context_entry { - u64 lo; - u64 hi; -}; - -/* - * When VT-d works in the scalable mode, it allows DMA translation to - * happen through either first level or second level page table. This - * bit marks that the DMA translation for the domain goes through the - * first level page table, otherwise, it goes through the second level. - */ -#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1) - -struct dmar_domain { - int nid; /* node id */ - - unsigned int iommu_refcnt[DMAR_UNITS_SUPPORTED]; - /* Refcount of devices per iommu */ - - - u16 iommu_did[DMAR_UNITS_SUPPORTED]; - /* Domain ids per IOMMU. 
Use u16 since - * domain ids are 16 bit wide according - * to VT-d spec, section 9.3 */ - - u8 has_iotlb_device: 1; - u8 iommu_coherency: 1; /* indicate coherency of iommu access */ - u8 force_snooping : 1; /* Create IOPTEs with snoop control */ - u8 set_pte_snp:1; - - struct list_head devices; /* all devices' list */ - struct iova_domain iovad; /* iova's that belong to this domain */ - - struct dma_pte *pgd; /* virtual address */ - int gaw; /* max guest address width */ - - /* adjusted guest address width, 0 is level 2 30-bit */ - int agaw; - - int flags; /* flags to find out type of domain */ - int iommu_superpage;/* Level of superpages supported: - 0 == 4KiB (no superpages), 1 == 2MiB, - 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ - u64 max_addr; /* maximum mapped address */ - - struct iommu_domain domain; /* generic domain data structure for - iommu core */ -}; - -struct intel_iommu { - void __iomem *reg; /* Pointer to hardware regs, virtual addr */ - u64 reg_phys; /* physical address of hw register set */ - u64 reg_size; /* size of hw register set */ - u64 cap; - u64 ecap; - u64 vccap; - u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ - raw_spinlock_t register_lock; /* protect register handling */ - int seq_id; /* sequence id of the iommu */ - int agaw; /* agaw of this iommu */ - int msagaw; /* max sagaw of this iommu */ - unsigned int irq, pr_irq; - u16 segment; /* PCI segment# */ - unsigned char name[13]; /* Device Name */ - -#ifdef CONFIG_INTEL_IOMMU - unsigned long *domain_ids; /* bitmap of domains */ - spinlock_t lock; /* protect context, domain ids */ - struct root_entry *root_entry; /* virtual address */ - - struct iommu_flush flush; -#endif -#ifdef CONFIG_INTEL_IOMMU_SVM - struct page_req_dsc *prq; - unsigned char prq_name[16]; /* Name for PRQ interrupt */ - struct completion prq_complete; - struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */ -#endif - struct iopf_queue *iopf_queue; - unsigned char iopfq_name[16]; - struct q_inval *qi; /* Queued invalidation info */ - u32 *iommu_state; /* Store iommu states between suspend and resume.*/ - -#ifdef CONFIG_IRQ_REMAP - struct ir_table *ir_table; /* Interrupt remapping info */ - struct irq_domain *ir_domain; - struct irq_domain *ir_msi_domain; -#endif - struct iommu_device iommu; /* IOMMU core code handle */ - int node; - u32 flags; /* Software defined flags */ - - struct dmar_drhd_unit *drhd; - void *perf_statistic; -}; - -/* PCI domain-device relationship */ -struct device_domain_info { - struct list_head link; /* link to domain siblings */ - struct list_head global; /* link to global list */ - u32 segment; /* PCI segment number */ - u8 bus; /* PCI bus number */ - u8 devfn; /* PCI devfn number */ - u16 pfsid; /* SRIOV physical function source ID */ - u8 pasid_supported:3; - u8 pasid_enabled:1; - u8 pri_supported:1; - u8 pri_enabled:1; - u8 ats_supported:1; - u8 ats_enabled:1; - u8 ats_qdep; - struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ - struct intel_iommu *iommu; /* IOMMU used by this device */ - struct dmar_domain *domain; /* pointer to domain */ - struct pasid_table *pasid_table; /* pasid table */ -}; - -static inline void __iommu_flush_cache( - struct intel_iommu *iommu, void *addr, int size) -{ - if (!ecap_coherent(iommu->ecap)) - clflush_cache_range(addr, size); -} - -/* Convert generic struct iommu_domain to private struct dmar_domain */ -static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) -{ - return container_of(dom, struct dmar_domain, domain); -} - -/* 
- * 0: readable - * 1: writable - * 2-6: reserved - * 7: super page - * 8-10: available - * 11: snoop behavior - * 12-63: Host physical address - */ -struct dma_pte { - u64 val; -}; - -static inline void dma_clear_pte(struct dma_pte *pte) -{ - pte->val = 0; -} - -static inline u64 dma_pte_addr(struct dma_pte *pte) -{ -#ifdef CONFIG_64BIT - return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD); -#else - /* Must have a full atomic 64-bit read */ - return __cmpxchg64(&pte->val, 0ULL, 0ULL) & - VTD_PAGE_MASK & (~DMA_FL_PTE_XD); -#endif -} - -static inline bool dma_pte_present(struct dma_pte *pte) -{ - return (pte->val & 3) != 0; -} - -static inline bool dma_pte_superpage(struct dma_pte *pte) -{ - return (pte->val & DMA_PTE_LARGE_PAGE); -} - -static inline bool first_pte_in_page(struct dma_pte *pte) -{ - return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE); -} - -static inline int nr_pte_to_next_page(struct dma_pte *pte) -{ - return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) : - (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte; -} - -extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); - -extern int dmar_enable_qi(struct intel_iommu *iommu); -extern void dmar_disable_qi(struct intel_iommu *iommu); -extern int dmar_reenable_qi(struct intel_iommu *iommu); -extern void qi_global_iec(struct intel_iommu *iommu); - -extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, - u8 fm, u64 type); -extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, - unsigned int size_order, u64 type); -extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, - u16 qdep, u64 addr, unsigned mask); - -void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, - unsigned long npages, bool ih); - -void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, - u32 pasid, u16 qdep, u64 addr, - unsigned int size_order); -void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, - u32 pasid); - -int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, - unsigned int count, unsigned long options); -/* - * Options used in qi_submit_sync: - * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8. 
- */ -#define QI_OPT_WAIT_DRAIN BIT(0) - -extern int dmar_ir_support(void); - -void *alloc_pgtable_page(int node); -void free_pgtable_page(void *vaddr); -struct intel_iommu *domain_get_iommu(struct dmar_domain *domain); -void iommu_flush_write_buffer(struct intel_iommu *iommu); -int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); -struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); - -#ifdef CONFIG_INTEL_IOMMU_SVM -extern void intel_svm_check(struct intel_iommu *iommu); -extern int intel_svm_enable_prq(struct intel_iommu *iommu); -extern int intel_svm_finish_prq(struct intel_iommu *iommu); -struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, - void *drvdata); -void intel_svm_unbind(struct iommu_sva *handle); -u32 intel_svm_get_pasid(struct iommu_sva *handle); -int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, - struct iommu_page_response *msg); - -struct intel_svm_dev { - struct list_head list; - struct rcu_head rcu; - struct device *dev; - struct intel_iommu *iommu; - struct iommu_sva sva; - unsigned long prq_seq_number; - u32 pasid; - int users; - u16 did; - u16 dev_iotlb:1; - u16 sid, qdep; -}; - -struct intel_svm { - struct mmu_notifier notifier; - struct mm_struct *mm; - - unsigned int flags; - u32 pasid; - struct list_head devs; -}; -#else -static inline void intel_svm_check(struct intel_iommu *iommu) {} -#endif - -#ifdef CONFIG_INTEL_IOMMU_DEBUGFS -void intel_iommu_debugfs_init(void); -#else -static inline void intel_iommu_debugfs_init(void) {} -#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */ - -extern const struct attribute_group *intel_iommu_groups[]; -bool context_present(struct context_entry *context); -struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, - u8 devfn, int alloc); - -extern const struct iommu_ops intel_iommu_ops; - -#ifdef CONFIG_INTEL_IOMMU -extern int iommu_calculate_agaw(struct intel_iommu *iommu); -extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); -extern int dmar_disabled; -extern int intel_iommu_enabled; -extern int intel_iommu_gfx_mapped; -#else -static inline int iommu_calculate_agaw(struct intel_iommu *iommu) -{ - return 0; -} -static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) -{ - return 0; -} -#define dmar_disabled (1) -#define intel_iommu_enabled (0) -#endif - -static inline const char *decode_prq_descriptor(char *str, size_t size, - u64 dw0, u64 dw1, u64 dw2, u64 dw3) -{ - char *buf = str; - int bytes; - - bytes = snprintf(buf, size, - "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx", - FIELD_GET(GENMASK_ULL(31, 16), dw0), - FIELD_GET(GENMASK_ULL(63, 12), dw1), - dw1 & BIT_ULL(0) ? 'r' : '-', - dw1 & BIT_ULL(1) ? 'w' : '-', - dw0 & BIT_ULL(52) ? 'x' : '-', - dw0 & BIT_ULL(53) ? 'p' : '-', - dw1 & BIT_ULL(2) ? 
'l' : '-', - FIELD_GET(GENMASK_ULL(51, 32), dw0), - FIELD_GET(GENMASK_ULL(11, 3), dw1)); - - /* Private Data */ - if (dw0 & BIT_ULL(9)) { - size -= bytes; - buf += bytes; - snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3); - } - - return str; -} - -#endif diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index f685777b875e..2b0e784ba771 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -44,6 +44,7 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id); struct icc_path *of_icc_get(struct device *dev, const char *name); struct icc_path *devm_of_icc_get(struct device *dev, const char *name); +int devm_of_icc_bulk_get(struct device *dev, int num_paths, struct icc_bulk_data *paths); struct icc_path *of_icc_get_by_index(struct device *dev, int idx); void icc_put(struct icc_path *path); int icc_enable(struct icc_path *path); @@ -116,6 +117,12 @@ static inline int of_icc_bulk_get(struct device *dev, int num_paths, struct icc_ return 0; } +static inline int devm_of_icc_bulk_get(struct device *dev, int num_paths, + struct icc_bulk_data *paths) +{ + return 0; +} + static inline void icc_bulk_put(int num_paths, struct icc_bulk_data *paths) { } diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 86af6f0a00a2..ca98aeadcc80 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -74,17 +74,22 @@ struct io_pgtable_cfg { * to support up to 35 bits PA where the bit32, bit33 and bit34 are * encoded in the bit9, bit4 and bit5 of the PTE respectively. * + * IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT: (ARM v7s format) MediaTek IOMMUs + * extend the translation table base support up to 35 bits PA, the + * encoding format is same with IO_PGTABLE_QUIRK_ARM_MTK_EXT. + * * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table * for use in the upper half of a split address space. * * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability * attributes set in the TCR for a non-coherent page-table walker. 
*/ - #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) - #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) - #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) - #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) - #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) + #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) + #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) + #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) + #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4) + #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) + #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index e552097c67e0..238a03087e17 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -231,12 +231,6 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); -#ifdef CONFIG_MIGRATION -int iomap_migrate_page(struct address_space *mapping, struct page *newpage, - struct page *page, enum migrate_mode mode); -#else -#define iomap_migrate_page NULL -#endif int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, @@ -303,9 +297,6 @@ void iomap_finish_ioends(struct iomap_ioend *ioend, int error); void iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends); void iomap_sort_ioends(struct list_head *ioend_list); -int iomap_writepage(struct page *page, struct writeback_control *wbc, - struct iomap_writepage_ctx *wpc, - const struct iomap_writeback_ops *ops); int iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, const struct iomap_writeback_ops *ops); @@ -353,6 +344,12 @@ struct iomap_dio_ops { */ #define IOMAP_DIO_PARTIAL (1 << 2) +/* + * The caller will sync the write if needed; do not sync it within + * iomap_dio_rw. Overrides IOMAP_DIO_FORCE_WAIT. + */ +#define IOMAP_DIO_NOSYNC (1 << 3) + ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags, void *private, size_t done_before); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 5e1afe169549..ea30f00dc145 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -135,6 +135,7 @@ enum iommu_resv_type { * @length: Length of the region in bytes * @prot: IOMMU Protection flags (READ/WRITE/...) * @type: Type of the reserved region + * @free: Callback to free associated memory allocations */ struct iommu_resv_region { struct list_head list; @@ -142,6 +143,15 @@ struct iommu_resv_region { size_t length; int prot; enum iommu_resv_type type; + void (*free)(struct device *dev, struct iommu_resv_region *region); +}; + +struct iommu_iort_rmr_data { + struct iommu_resv_region rr; + + /* Stream IDs associated with IORT RMR entry */ + const u32 *sids; + u32 num_sids; }; /** @@ -154,8 +164,7 @@ struct iommu_resv_region { * supported, this feature must be enabled before and * disabled after %IOMMU_DEV_FEAT_SVA. * - * Device drivers query whether a feature is supported using - * iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature(). + * Device drivers enable a feature using iommu_dev_enable_feature(). 
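The iommu.h hunk above drops the dev_has_feat/dev_feat_enabled hooks, so drivers simply call iommu_dev_enable_feature() and handle failure. A hedged sketch of that flow for SVA, where demo_enable_sva() is a hypothetical caller and IOMMU_DEV_FEAT_IOPF is assumed to be the companion feature the comment's enable-before/disable-after rule refers to:

#include <linux/iommu.h>

static int demo_enable_sva(struct device *dev)
{
	int ret;

	/* Per the comment above, enable IOPF before SVA ... */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		/* ... and disable it after SVA on the unwind path. */
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}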
*/ enum iommu_dev_features { IOMMU_DEV_FEAT_SVA, @@ -200,13 +209,11 @@ struct iommu_iotlb_gather { * group and attached to the groups domain * @device_group: find iommu group for a particular device * @get_resv_regions: Request list of reserved regions for a device - * @put_resv_regions: Free list of reserved regions for a device * @of_xlate: add OF master IDs to iommu grouping * @is_attach_deferred: Check if domain attach should be deferred from iommu * driver init to device driver init (default no) * @dev_has/enable/disable_feat: per device entries to check/enable/disable * iommu specific features. - * @dev_feat_enabled: check enabled feature * @sva_bind: Bind process address space to device * @sva_unbind: Unbind process address space from device * @sva_get_pasid: Get PASID associated to a SVA handle @@ -232,14 +239,11 @@ struct iommu_ops { /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); - void (*put_resv_regions)(struct device *dev, struct list_head *list); int (*of_xlate)(struct device *dev, struct of_phandle_args *args); bool (*is_attach_deferred)(struct device *dev); /* Per device IOMMU features */ - bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f); - bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f); int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); @@ -448,8 +452,6 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); -extern void generic_iommu_put_resv_regions(struct device *dev, - struct list_head *list); extern void iommu_set_default_passthrough(bool cmd_line); extern void iommu_set_default_translated(bool cmd_line); extern bool iommu_default_passthrough(void); @@ -662,7 +664,6 @@ void iommu_release_device(struct device *dev); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); -bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, @@ -989,12 +990,6 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) return NULL; } -static inline bool -iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) -{ - return false; -} - static inline int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) { diff --git a/include/linux/ioport.h b/include/linux/ioport.h index ec5f71f7135b..616b683563a9 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -141,6 +141,7 @@ enum { IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, IORES_DESC_RESERVED = 7, IORES_DESC_SOFT_RESERVED = 8, + IORES_DESC_CXL = 9, }; /* @@ -329,6 +330,8 @@ struct resource *devm_request_free_mem_region(struct device *dev, struct resource *base, unsigned long size); struct resource *request_free_mem_region(struct resource *base, unsigned long size, const char *name); +struct resource *alloc_free_mem_region(struct resource *base, + unsigned long size, unsigned long align, const char *name); static inline void irqresource_disabled(struct resource *res, u32 irq) { diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h index e69a002d5aa4..a533cae189d7 100644 --- 
a/include/linux/iosys-map.h +++ b/include/linux/iosys-map.h @@ -6,6 +6,7 @@ #ifndef __IOSYS_MAP_H__ #define __IOSYS_MAP_H__ +#include <linux/compiler_types.h> #include <linux/io.h> #include <linux/string.h> @@ -23,7 +24,7 @@ * memcpy(vaddr, src, len); * * void *vaddr_iomem = ...; // pointer to I/O memory - * memcpy_toio(vaddr, _iomem, src, len); + * memcpy_toio(vaddr_iomem, src, len); * * The user of such pointer may not have information about the mapping of that * region or may want to have a single code path to handle operations on that @@ -333,6 +334,36 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, memset(dst->vaddr + offset, value, len); } +#ifdef CONFIG_64BIT +#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \ + u64: val_ = readq(vaddr_iomem_) +#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \ + u64: writeq(val_, vaddr_iomem_) +#else +#define __iosys_map_rd_io_u64_case(val_, vaddr_iomem_) \ + u64: memcpy_fromio(&(val_), vaddr_iomem_, sizeof(u64)) +#define __iosys_map_wr_io_u64_case(val_, vaddr_iomem_) \ + u64: memcpy_toio(vaddr_iomem_, &(val_), sizeof(u64)) +#endif + +#define __iosys_map_rd_io(val__, vaddr_iomem__, type__) _Generic(val__, \ + u8: val__ = readb(vaddr_iomem__), \ + u16: val__ = readw(vaddr_iomem__), \ + u32: val__ = readl(vaddr_iomem__), \ + __iosys_map_rd_io_u64_case(val__, vaddr_iomem__)) + +#define __iosys_map_rd_sys(val__, vaddr__, type__) \ + val__ = READ_ONCE(*(type__ *)(vaddr__)) + +#define __iosys_map_wr_io(val__, vaddr_iomem__, type__) _Generic(val__, \ + u8: writeb(val__, vaddr_iomem__), \ + u16: writew(val__, vaddr_iomem__), \ + u32: writel(val__, vaddr_iomem__), \ + __iosys_map_wr_io_u64_case(val__, vaddr_iomem__)) + +#define __iosys_map_wr_sys(val__, vaddr__, type__) \ + WRITE_ONCE(*(type__ *)(vaddr__), val__) + /** * iosys_map_rd - Read a C-type value from the iosys_map * @@ -340,16 +371,21 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * @offset__: The offset from which to read * @type__: Type of the value being read * - * Read a C type value from iosys_map, handling possible un-aligned accesses to - * the mapping. + * Read a C type value (u8, u16, u32 and u64) from iosys_map. For other types or + * if pointer may be unaligned (and problematic for the architecture supported), + * use iosys_map_memcpy_from(). * * Returns: * The value read from the mapping. */ -#define iosys_map_rd(map__, offset__, type__) ({ \ - type__ val; \ - iosys_map_memcpy_from(&val, map__, offset__, sizeof(val)); \ - val; \ +#define iosys_map_rd(map__, offset__, type__) ({ \ + type__ val; \ + if ((map__)->is_iomem) { \ + __iosys_map_rd_io(val, (map__)->vaddr_iomem + (offset__), type__);\ + } else { \ + __iosys_map_rd_sys(val, (map__)->vaddr + (offset__), type__); \ + } \ + val; \ }) /** @@ -360,12 +396,17 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * @type__: Type of the value being written * @val__: Value to write * - * Write a C-type value to the iosys_map, handling possible un-aligned accesses - * to the mapping. + * Write a C type value (u8, u16, u32 and u64) to the iosys_map. 
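The reworked iosys_map_rd()/iosys_map_wr() macros above dispatch to readb/readw/readl/readq() or READ_ONCE()/WRITE_ONCE() depending on is_iomem. A small usage sketch; demo_update_status() and its offset are hypothetical, not taken from this patch:

#include <linux/iosys-map.h>

/* Read-modify-write a 32-bit word, whether the mapping is I/O or system memory. */
static u32 demo_update_status(struct iosys_map *map, size_t offset, u32 set_bits)
{
	u32 val;

	val = iosys_map_rd(map, offset, u32);	/* readl() when is_iomem, READ_ONCE() otherwise */
	val |= set_bits;
	iosys_map_wr(map, offset, u32, val);	/* writel() or WRITE_ONCE() accordingly */

	return val;
}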
For other types + * or if pointer may be unaligned (and problematic for the architecture + * supported), use iosys_map_memcpy_to() */ -#define iosys_map_wr(map__, offset__, type__, val__) ({ \ - type__ val = (val__); \ - iosys_map_memcpy_to(map__, offset__, &val, sizeof(val)); \ +#define iosys_map_wr(map__, offset__, type__, val__) ({ \ + type__ val = (val__); \ + if ((map__)->is_iomem) { \ + __iosys_map_wr_io(val, (map__)->vaddr_iomem + (offset__), type__);\ + } else { \ + __iosys_map_wr_sys(val, (map__)->vaddr + (offset__), type__); \ + } \ }) /** @@ -379,9 +420,10 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * * Read a value from iosys_map considering its layout is described by a C struct * starting at @struct_offset__. The field offset and size is calculated and its - * value read handling possible un-aligned memory accesses. For example: suppose - * there is a @struct foo defined as below and the value ``foo.field2.inner2`` - * needs to be read from the iosys_map: + * value read. If the field access would incur in un-aligned access, then either + * iosys_map_memcpy_from() needs to be used or the architecture must support it. + * For example: suppose there is a @struct foo defined as below and the value + * ``foo.field2.inner2`` needs to be read from the iosys_map: * * .. code-block:: c * @@ -445,10 +487,12 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset, * @field__: Member of the struct to read * @val__: Value to write * - * Write a value to the iosys_map considering its layout is described by a C struct - * starting at @struct_offset__. The field offset and size is calculated and the - * @val__ is written handling possible un-aligned memory accesses. Refer to - * iosys_map_rd_field() for expected usage and memory layout. + * Write a value to the iosys_map considering its layout is described by a C + * struct starting at @struct_offset__. The field offset and size is calculated + * and the @val__ is written. If the field access would incur in un-aligned + * access, then either iosys_map_memcpy_to() needs to be used or the + * architecture must support it. Refer to iosys_map_rd_field() for expected + * usage and memory layout. */ #define iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__) ({ \ struct_type__ *s; \ diff --git a/include/linux/iova.h b/include/linux/iova.h index 320a70e40233..c6ba6d95d79c 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -79,6 +79,8 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) int iova_cache_get(void); void iova_cache_put(void); +unsigned long iova_rcache_range(void); + void free_iova(struct iova_domain *iovad, unsigned long pfn); void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, diff --git a/include/linux/isa-dma.h b/include/linux/isa-dma.h new file mode 100644 index 000000000000..61504a8c1b9e --- /dev/null +++ b/include/linux/isa-dma.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_ISA_DMA_H +#define __LINUX_ISA_DMA_H + +#include <asm/dma.h> + +#if defined(CONFIG_PCI) && defined(CONFIG_X86_32) +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + +#endif /* __LINUX_ISA_DMA_H */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index dc1724131300..0b7242370b56 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -54,14 +54,13 @@ * CONFIG_JBD2_DEBUG is on. 
*/ #define JBD2_EXPENSIVE_CHECKING -extern ushort jbd2_journal_enable_debug; void __jbd2_debug(int level, const char *file, const char *func, unsigned int line, const char *fmt, ...); -#define jbd_debug(n, fmt, a...) \ +#define jbd2_debug(n, fmt, a...) \ __jbd2_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a) #else -#define jbd_debug(n, fmt, a...) no_printk(fmt, ##a) +#define jbd2_debug(n, fmt, a...) no_printk(fmt, ##a) #endif extern void *jbd2_alloc(size_t size, gfp_t flags); @@ -1647,7 +1646,6 @@ extern void jbd2_clear_buffer_revoked_flags(journal_t *journal); */ int jbd2_log_start_commit(journal_t *journal, tid_t tid); -int __jbd2_log_start_commit(journal_t *journal, tid_t tid); int jbd2_journal_start_commit(journal_t *journal, tid_t *tid); int jbd2_log_wait_commit(journal_t *journal, tid_t tid); int jbd2_transaction_committed(journal_t *journal, tid_t tid); diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h index 575ffa1031d3..90451e2e12bd 100644 --- a/include/linux/kernel_read_file.h +++ b/include/linux/kernel_read_file.h @@ -35,21 +35,21 @@ static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id) return kernel_read_file_str[id]; } -int kernel_read_file(struct file *file, loff_t offset, - void **buf, size_t buf_size, - size_t *file_size, - enum kernel_read_file_id id); -int kernel_read_file_from_path(const char *path, loff_t offset, - void **buf, size_t buf_size, - size_t *file_size, - enum kernel_read_file_id id); -int kernel_read_file_from_path_initns(const char *path, loff_t offset, - void **buf, size_t buf_size, - size_t *file_size, - enum kernel_read_file_id id); -int kernel_read_file_from_fd(int fd, loff_t offset, - void **buf, size_t buf_size, - size_t *file_size, - enum kernel_read_file_id id); +ssize_t kernel_read_file(struct file *file, loff_t offset, + void **buf, size_t buf_size, + size_t *file_size, + enum kernel_read_file_id id); +ssize_t kernel_read_file_from_path(const char *path, loff_t offset, + void **buf, size_t buf_size, + size_t *file_size, + enum kernel_read_file_id id); +ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset, + void **buf, size_t buf_size, + size_t *file_size, + enum kernel_read_file_id id); +ssize_t kernel_read_file_from_fd(int fd, loff_t offset, + void **buf, size_t buf_size, + size_t *file_size, + enum kernel_read_file_id id); #endif /* _LINUX_KERNEL_READ_FILE_H */ diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index e2ae15a6225e..367044d7708c 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -18,6 +18,7 @@ #include <linux/uidgid.h> #include <linux/wait.h> #include <linux/rwsem.h> +#include <linux/cache.h> struct file; struct dentry; @@ -34,6 +35,62 @@ struct kernfs_fs_context; struct kernfs_open_node; struct kernfs_iattrs; +/* + * NR_KERNFS_LOCK_BITS determines size (NR_KERNFS_LOCKS) of hash + * table of locks. + * Having a small hash table would impact scalability, since + * more and more kernfs_node objects will end up using same lock + * and having a very large hash table would waste memory. 
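(An illustrative aside, not part of the header: a minimal sketch of the intended lookup, assuming <linux/hash.h> for hash_ptr(); the helper name is hypothetical and the real lookup lives in fs/kernfs. The mutex array itself, struct kernfs_global_locks, and the CPU-based sizing are introduced just below.)

	static struct mutex *example_open_file_mutex(struct kernfs_global_locks *locks,
						     struct kernfs_node *kn)
	{
		// Hash the kernfs_node address down to NR_KERNFS_LOCK_BITS
		// bits and use it as an index into the mutex array.
		int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);

		return &locks->open_file_mutex[idx];
	}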
+ * + * At the moment size of hash table of locks is being set based on + * the number of CPUs as follows: + * + * NR_CPU NR_KERNFS_LOCK_BITS NR_KERNFS_LOCKS + * 1 1 2 + * 2-3 2 4 + * 4-7 4 16 + * 8-15 6 64 + * 16-31 8 256 + * 32 and more 10 1024 + * + * The above relation between NR_CPU and number of locks is based + * on some internal experimentation which involved booting qemu + * with different values of smp, performing some sysfs operations + * on all CPUs and observing how increase in number of locks impacts + * completion time of these sysfs operations on each CPU. + */ +#ifdef CONFIG_SMP +#define NR_KERNFS_LOCK_BITS (2 * (ilog2(NR_CPUS < 32 ? NR_CPUS : 32))) +#else +#define NR_KERNFS_LOCK_BITS 1 +#endif + +#define NR_KERNFS_LOCKS (1 << NR_KERNFS_LOCK_BITS) + +/* + * There's one kernfs_open_file for each open file and one kernfs_open_node + * for each kernfs_node with one or more open files. + * + * filp->private_data points to seq_file whose ->private points to + * kernfs_open_file. + * + * kernfs_open_files are chained at kernfs_open_node->files, which is + * protected by kernfs_global_locks.open_file_mutex[i]. + * + * To reduce possible contention in sysfs access, arising due to single + * locks, use an array of locks (e.g. open_file_mutex) and use kernfs_node + * object address as hash keys to get the index of these locks. + * + * Hashed mutexes are safe to use here because operations using these don't + * rely on global exclusion. + * + * In future we intend to replace other global locks with hashed ones as well. + * kernfs_global_locks acts as a holder for all such hash tables. + */ +struct kernfs_global_locks { + struct mutex open_file_mutex[NR_KERNFS_LOCKS]; +}; + enum kernfs_node_type { KERNFS_DIR = 0x0001, KERNFS_FILE = 0x0002, @@ -114,7 +171,7 @@ struct kernfs_elem_symlink { struct kernfs_elem_attr { const struct kernfs_ops *ops; - struct kernfs_open_node *open; + struct kernfs_open_node __rcu *open; loff_t size; struct kernfs_node *notify_next; /* for kernfs_notify() */ }; diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 86249476b57f..0b35a41440ff 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -688,7 +688,7 @@ __kfifo_uint_must_check_helper( \ * writer, you don't need extra locking to use these macro. 
*/ #define kfifo_to_user(fifo, to, len, copied) \ -__kfifo_uint_must_check_helper( \ +__kfifo_int_must_check_helper( \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ void __user *__to = (to); \ diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 392d34c3c59a..384f034ae947 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -10,8 +10,6 @@ extern struct attribute_group khugepaged_attr_group; extern int khugepaged_init(void); extern void khugepaged_destroy(void); extern int start_stop_khugepaged(void); -extern bool hugepage_vma_check(struct vm_area_struct *vma, - unsigned long vm_flags); extern void __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern void khugepaged_enter_vma(struct vm_area_struct *vma, @@ -26,20 +24,6 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm, } #endif -#define khugepaged_enabled() \ - (transparent_hugepage_flags & \ - ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \ - (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))) -#define khugepaged_always() \ - (transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_FLAG)) -#define khugepaged_req_madv() \ - (transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) -#define khugepaged_defrag() \ - (transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)) - static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) { if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags)) @@ -51,16 +35,6 @@ static inline void khugepaged_exit(struct mm_struct *mm) if (test_bit(MMF_VM_HUGEPAGE, &mm->flags)) __khugepaged_exit(mm); } - -static inline void khugepaged_enter(struct vm_area_struct *vma, - unsigned long vm_flags) -{ - if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) && - khugepaged_enabled()) { - if (hugepage_vma_check(vma, vm_flags)) - __khugepaged_enter(vma->vm_mm); - } -} #else /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm) { @@ -68,10 +42,6 @@ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm static inline void khugepaged_exit(struct mm_struct *mm) { } -static inline void khugepaged_enter(struct vm_area_struct *vma, - unsigned long vm_flags) -{ -} static inline void khugepaged_enter_vma(struct vm_area_struct *vma, unsigned long vm_flags) { diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 34684b2026ab..6a3cd1bf4680 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -29,10 +29,9 @@ extern void kmemleak_not_leak(const void *ptr) __ref; extern void kmemleak_ignore(const void *ptr) __ref; extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; extern void kmemleak_no_scan(const void *ptr) __ref; -extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count, +extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp) __ref; extern void kmemleak_free_part_phys(phys_addr_t phys, size_t size) __ref; -extern void kmemleak_not_leak_phys(phys_addr_t phys) __ref; extern void kmemleak_ignore_phys(phys_addr_t phys) __ref; static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, @@ -107,15 +106,12 @@ static inline void kmemleak_no_scan(const void *ptr) { } static inline void kmemleak_alloc_phys(phys_addr_t phys, size_t size, - int min_count, gfp_t gfp) + gfp_t gfp) { } static inline void kmemleak_free_part_phys(phys_addr_t phys, size_t size) { } -static inline void 
kmemleak_not_leak_phys(phys_addr_t phys) -{ -} static inline void kmemleak_ignore_phys(phys_addr_t phys) { } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 90a45ef7203b..1c480b1821e1 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -907,11 +907,6 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) return NULL; } -static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu) -{ - return vcpu->vcpu_idx; -} - void kvm_destroy_vcpus(struct kvm *kvm); void vcpu_load(struct kvm_vcpu *vcpu); @@ -1139,7 +1134,6 @@ unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable); void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); -void kvm_set_page_accessed(struct page *page); kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, @@ -1231,7 +1225,6 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn); kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); -struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn); void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); @@ -1358,6 +1351,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm); #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); +int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min); int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); @@ -1436,6 +1430,8 @@ int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state); #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); +#else +static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {} #endif int kvm_arch_hardware_enable(void); @@ -1572,8 +1568,8 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); -bool kvm_is_reserved_pfn(kvm_pfn_t pfn); -bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); +struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn); +bool kvm_is_zone_device_page(struct page *page); struct kvm_irq_ack_notifier { struct hlist_node link; @@ -1719,12 +1715,6 @@ static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn) return (hpa_t)pfn << PAGE_SHIFT; } -static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu, - gpa_t gpa) -{ - return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa)); -} - static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) { unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index ac1ebb37a0ff..3ca3db020e0e 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -19,6 +19,7 @@ struct kvm_memslots; enum kvm_mr_change; #include <linux/bits.h> +#include <linux/mutex.h> #include <linux/types.h> #include <linux/spinlock_types.h> @@ -69,6 +70,7 @@ struct gfn_to_pfn_cache { struct kvm_vcpu *vcpu; struct list_head list; 
rwlock_t lock; + struct mutex refresh_lock; void *khva; kvm_pfn_t pfn; enum pfn_cache_usage usage; @@ -83,12 +85,17 @@ struct gfn_to_pfn_cache { * MMU flows is problematic, as is triggering reclaim, I/O, etc... while * holding MMU locks. Note, these caches act more like prefetch buffers than * classical caches, i.e. objects are not returned to the cache on being freed. + * + * The @capacity field and @objects array are lazily initialized when the cache + * is topped up (__kvm_mmu_topup_memory_cache()). */ struct kvm_mmu_memory_cache { int nobjs; gfp_t gfp_zero; + gfp_t gfp_custom; struct kmem_cache *kmem_cache; - void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE]; + int capacity; + void **objects; }; #endif diff --git a/include/linux/lapb.h b/include/linux/lapb.h index eb56472f23b2..b5333f9413dc 100644 --- a/include/linux/lapb.h +++ b/include/linux/lapb.h @@ -6,6 +6,11 @@ #ifndef LAPB_KERNEL_H #define LAPB_KERNEL_H +#include <linux/skbuff.h> +#include <linux/timer.h> + +struct net_device; + #define LAPB_OK 0 #define LAPB_BADTOKEN 1 #define LAPB_INVALUE 2 diff --git a/include/linux/libata.h b/include/linux/libata.h index 0f2a59c9c735..0269ff114f5a 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -275,7 +275,7 @@ enum { PORT_DISABLED = 2, /* encoding various smaller bitmaps into a single - * unsigned long bitmap + * unsigned int bitmap */ ATA_NR_PIO_MODES = 7, ATA_NR_MWDMA_MODES = 5, @@ -426,12 +426,9 @@ enum { }; enum ata_xfer_mask { - ATA_MASK_PIO = ((1LU << ATA_NR_PIO_MODES) - 1) - << ATA_SHIFT_PIO, - ATA_MASK_MWDMA = ((1LU << ATA_NR_MWDMA_MODES) - 1) - << ATA_SHIFT_MWDMA, - ATA_MASK_UDMA = ((1LU << ATA_NR_UDMA_MODES) - 1) - << ATA_SHIFT_UDMA, + ATA_MASK_PIO = ((1U << ATA_NR_PIO_MODES) - 1) << ATA_SHIFT_PIO, + ATA_MASK_MWDMA = ((1U << ATA_NR_MWDMA_MODES) - 1) << ATA_SHIFT_MWDMA, + ATA_MASK_UDMA = ((1U << ATA_NR_UDMA_MODES) - 1) << ATA_SHIFT_UDMA, }; enum hsm_task_states { @@ -680,9 +677,9 @@ struct ata_device { unsigned int cdb_len; /* per-dev xfer mask */ - unsigned long pio_mask; - unsigned long mwdma_mask; - unsigned long udma_mask; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; /* for CHS addressing */ u16 cylinders; /* Number of cylinders */ @@ -850,7 +847,7 @@ struct ata_port { enum ata_lpm_policy target_lpm_policy; struct timer_list fastdrain_timer; - unsigned long fastdrain_cnt; + unsigned int fastdrain_cnt; async_cookie_t cookie; @@ -885,7 +882,7 @@ struct ata_port_operations { * Configuration and exception handling */ int (*cable_detect)(struct ata_port *ap); - unsigned long (*mode_filter)(struct ata_device *dev, unsigned long xfer_mask); + unsigned int (*mode_filter)(struct ata_device *dev, unsigned int xfer_mask); void (*set_piomode)(struct ata_port *ap, struct ata_device *dev); void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev); int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev); @@ -981,9 +978,9 @@ struct ata_port_operations { struct ata_port_info { unsigned long flags; unsigned long link_flags; - unsigned long pio_mask; - unsigned long mwdma_mask; - unsigned long udma_mask; + unsigned int pio_mask; + unsigned int mwdma_mask; + unsigned int udma_mask; struct ata_port_operations *port_ops; void *private_data; }; @@ -1102,16 +1099,18 @@ extern void ata_msleep(struct ata_port *ap, unsigned int msecs); extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, unsigned long interval, unsigned long timeout); extern int atapi_cmd_type(u8 opcode); -extern unsigned long 
ata_pack_xfermask(unsigned long pio_mask, - unsigned long mwdma_mask, unsigned long udma_mask); -extern void ata_unpack_xfermask(unsigned long xfer_mask, - unsigned long *pio_mask, unsigned long *mwdma_mask, - unsigned long *udma_mask); -extern u8 ata_xfer_mask2mode(unsigned long xfer_mask); -extern unsigned long ata_xfer_mode2mask(u8 xfer_mode); +extern unsigned int ata_pack_xfermask(unsigned int pio_mask, + unsigned int mwdma_mask, + unsigned int udma_mask); +extern void ata_unpack_xfermask(unsigned int xfer_mask, + unsigned int *pio_mask, + unsigned int *mwdma_mask, + unsigned int *udma_mask); +extern u8 ata_xfer_mask2mode(unsigned int xfer_mask); +extern unsigned int ata_xfer_mode2mask(u8 xfer_mode); extern int ata_xfer_mode2shift(u8 xfer_mode); -extern const char *ata_mode_string(unsigned long xfer_mask); -extern unsigned long ata_id_xfermask(const u16 *id); +extern const char *ata_mode_string(unsigned int xfer_mask); +extern unsigned int ata_id_xfermask(const u16 *id); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, @@ -1283,8 +1282,8 @@ static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) } int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm); int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); -unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev, - const struct ata_acpi_gtm *gtm); +unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, + const struct ata_acpi_gtm *gtm); int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); #else static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 0d61e07b6827..c74acfa1a3fe 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -59,6 +59,9 @@ enum { /* Platform provides asynchronous flush mechanism */ ND_REGION_ASYNC = 3, + /* Region was created by CXL subsystem */ + ND_REGION_CXL = 4, + /* mark newly adjusted resources as requiring a label update */ DPA_RESOURCE_ADJUSTED = 1 << 0, }; @@ -122,6 +125,7 @@ struct nd_region_desc { int numa_node; int target_node; unsigned long flags; + int memregion; struct device_node *of_node; int (*flush)(struct nd_region *nd_region, struct bio *bio); }; @@ -259,6 +263,7 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL); } void nvdimm_delete(struct nvdimm *nvdimm); +void nvdimm_region_delete(struct nd_region *nd_region); const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); diff --git a/include/linux/limits.h b/include/linux/limits.h index b568b9c30bbf..f6bcc9369010 100644 --- a/include/linux/limits.h +++ b/include/linux/limits.h @@ -7,6 +7,7 @@ #include <vdso/limits.h> #define SIZE_MAX (~(size_t)0) +#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1)) #define PHYS_ADDR_MAX (~(phys_addr_t)0) #define U8_MAX ((u8)~0U) diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index fcef192e5e45..70ce419e2709 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -292,6 +292,7 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, struct nlm_lock *); void nlm_release_file(struct nlm_file *); +void 
nlmsvc_put_lockowner(struct nlm_lockowner *); void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); void nlmsvc_free_host_resources(struct nlm_host *); diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 398f70093cd3..67e4a2c5500b 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -41,6 +41,8 @@ struct nlm_lock { struct nfs_fh fh; struct xdr_netobj oh; u32 svid; + u64 lock_start; + u64 lock_len; struct file_lock fl; }; diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index 44365aab043c..a8f0070c7aa9 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -67,24 +67,14 @@ enum cmdq_code { struct cmdq_cb_data { int sta; - void *data; struct cmdq_pkt *pkt; }; -typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data); - -struct cmdq_task_cb { - cmdq_async_flush_cb cb; - void *data; -}; - struct cmdq_pkt { void *va_base; dma_addr_t pa_base; size_t cmd_buf_size; /* command occupied size */ size_t buf_size; /* real buffer size */ - struct cmdq_task_cb cb; - struct cmdq_task_cb async_cb; void *cl; }; diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index 20f1e3ff6013..2da63fd7b98f 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h @@ -13,8 +13,16 @@ struct mb_cache; struct mb_cache_entry { /* List of entries in cache - protected by cache->c_list_lock */ struct list_head e_list; - /* Hash table list - protected by hash chain bitlock */ + /* + * Hash table list - protected by hash chain bitlock. The entry is + * guaranteed to be hashed while e_refcnt > 0. + */ struct hlist_bl_node e_hash_list; + /* + * Entry refcount. Once it reaches zero, entry is unhashed and freed. + * While refcount > 0, the entry is guaranteed to stay in the hash and + * e.g. mb_cache_entry_try_delete() will fail. 
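(An illustrative aside, not part of the header: a rough sketch of the resulting lifetime protocol, built on mb_cache_entry_delete_or_get() and mb_cache_entry_wait_unused() declared further down; the function name is hypothetical.)

	static void example_forget_block(struct mb_cache *cache, u32 key, u64 block)
	{
		struct mb_cache_entry *entry;

		// Either the entry is unhashed right away (NULL is returned),
		// or it is still referenced and comes back with an extra ref.
		entry = mb_cache_entry_delete_or_get(cache, key, block);
		if (entry) {
			// Wait for the remaining users, then drop our reference.
			mb_cache_entry_wait_unused(entry);
			mb_cache_entry_put(cache, entry);
		}
	}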
+ */ atomic_t e_refcnt; /* Key in hash - stable during lifetime of the entry */ u32 e_key; @@ -29,17 +37,24 @@ void mb_cache_destroy(struct mb_cache *cache); int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key, u64 value, bool reusable); -void __mb_cache_entry_free(struct mb_cache_entry *entry); -static inline int mb_cache_entry_put(struct mb_cache *cache, - struct mb_cache_entry *entry) +void __mb_cache_entry_free(struct mb_cache *cache, + struct mb_cache_entry *entry); +void mb_cache_entry_wait_unused(struct mb_cache_entry *entry); +static inline void mb_cache_entry_put(struct mb_cache *cache, + struct mb_cache_entry *entry) { - if (!atomic_dec_and_test(&entry->e_refcnt)) - return 0; - __mb_cache_entry_free(entry); - return 1; + unsigned int cnt = atomic_dec_return(&entry->e_refcnt); + + if (cnt > 0) { + if (cnt <= 2) + wake_up_var(&entry->e_refcnt); + return; + } + __mb_cache_entry_free(cache, entry); } -void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value); +struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache, + u32 key, u64 value); struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key, u64 value); struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, diff --git a/include/linux/mdev.h b/include/linux/mdev.h index bb539794f54a..47ad3b104d9e 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -65,11 +65,6 @@ struct mdev_driver { struct device_driver driver; }; -static inline const guid_t *mdev_uuid(struct mdev_device *mdev) -{ - return &mdev->uuid; -} - extern struct bus_type mdev_bus_type; int mdev_register_device(struct device *dev, struct mdev_driver *mdev_driver); diff --git a/include/linux/mdio/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h index 8af93ada8b64..9e588965dc83 100644 --- a/include/linux/mdio/mdio-xgene.h +++ b/include/linux/mdio/mdio-xgene.h @@ -8,6 +8,10 @@ #ifndef __MDIO_XGENE_H__ #define __MDIO_XGENE_H__ +#include <linux/bits.h> +#include <linux/spinlock.h> +#include <linux/types.h> + #define BLOCK_XG_MDIO_CSR_OFFSET 0x5000 #define BLOCK_DIAG_CSR_OFFSET 0xd000 #define XGENET_CONFIG_REG_ADDR 0x20 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 9ecead1042b9..4d31ce55b1c0 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -837,6 +837,15 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) } struct mem_cgroup *mem_cgroup_from_id(unsigned short id); +#ifdef CONFIG_SHRINKER_DEBUG +static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg) +{ + return memcg ? 
cgroup_ino(memcg->css.cgroup) : 0; +} + +struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino); +#endif + static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) { return mem_cgroup_from_css(seq_css(m)); @@ -1343,6 +1352,18 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) return NULL; } +#ifdef CONFIG_SHRINKER_DEBUG +static inline unsigned long mem_cgroup_ino(struct mem_cgroup *memcg) +{ + return 0; +} + +static inline struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino) +{ + return NULL; +} +#endif + static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) { return NULL; @@ -1740,6 +1761,7 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg) } struct mem_cgroup *mem_cgroup_from_obj(void *p); +struct mem_cgroup *mem_cgroup_from_slab_obj(void *p); static inline void count_objcg_event(struct obj_cgroup *objcg, enum vm_event_item idx) @@ -1755,6 +1777,42 @@ static inline void count_objcg_event(struct obj_cgroup *objcg, rcu_read_unlock(); } +/** + * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object. + * @p: pointer to object from which memcg should be extracted. It can be NULL. + * + * Retrieves the memory group into which the memory of the pointed kernel + * object is accounted. If memcg is found, its reference is taken. + * If a passed kernel object is uncharged, or if proper memcg cannot be found, + * as well as if mem_cgroup is disabled, NULL is returned. + * + * Return: valid memcg pointer with taken reference or NULL. + */ +static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p) +{ + struct mem_cgroup *memcg; + + rcu_read_lock(); + do { + memcg = mem_cgroup_from_obj(p); + } while (memcg && !css_tryget(&memcg->css)); + rcu_read_unlock(); + return memcg; +} + +/** + * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup. + * @memcg: pointer to a valid memory cgroup or NULL. + * + * If passed argument is not NULL, returns it without any additional checks + * and changes. Otherwise, root_mem_cgroup is returned. + * + * NOTE: root_mem_cgroup can be NULL during early boot. + */ +static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg) +{ + return memcg ? 
memcg : root_mem_cgroup; +} #else static inline bool mem_cgroup_kmem_disabled(void) { @@ -1798,7 +1856,12 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg) static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) { - return NULL; + return NULL; +} + +static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) +{ + return NULL; } static inline void count_objcg_event(struct obj_cgroup *objcg, @@ -1806,6 +1869,15 @@ static inline void count_objcg_event(struct obj_cgroup *objcg, { } +static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p) +{ + return NULL; +} + +static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg) +{ + return NULL; +} #endif /* CONFIG_MEMCG_KMEM */ #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP) diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 20d7edf62a6a..e0b2209ab71c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -351,13 +351,4 @@ void arch_remove_linear_mapping(u64 start, u64 size); extern bool mhp_supports_memmap_on_memory(unsigned long size); #endif /* CONFIG_MEMORY_HOTPLUG */ -#ifdef CONFIG_MHP_MEMMAP_ON_MEMORY -bool mhp_memmap_on_memory(void); -#else -static inline bool mhp_memmap_on_memory(void) -{ - return false; -} -#endif - #endif /* __LINUX_MEMORY_HOTPLUG_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 8af304f6b504..19010491a603 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -2,7 +2,7 @@ #ifndef _LINUX_MEMREMAP_H_ #define _LINUX_MEMREMAP_H_ -#include <linux/mm.h> +#include <linux/mmzone.h> #include <linux/range.h> #include <linux/ioport.h> #include <linux/percpu-refcount.h> @@ -39,7 +39,14 @@ struct vmem_altmap { * must be treated as an opaque object, rather than a "normal" struct page. * * A more complete discussion of unaddressable memory may be found in - * include/linux/hmm.h and Documentation/vm/hmm.rst. + * include/linux/hmm.h and Documentation/mm/hmm.rst. + * + * MEMORY_DEVICE_COHERENT: + * Device memory that is cache coherent from device and CPU point of view. This + * is used on platforms that have an advanced system bus (like CAPI or CXL). A + * driver can hotplug the device memory using ZONE_DEVICE and with that memory + * type. Any page of a process can be migrated to such memory. However no one + * should be allowed to pin such memory so that it can always be evicted. * * MEMORY_DEVICE_FS_DAX: * Host memory that has similar access semantics as System RAM i.e. DMA @@ -61,6 +68,7 @@ struct vmem_altmap { enum memory_type { /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, + MEMORY_DEVICE_COHERENT, MEMORY_DEVICE_FS_DAX, MEMORY_DEVICE_GENERIC, MEMORY_DEVICE_PCI_P2PDMA, @@ -79,6 +87,18 @@ struct dev_pagemap_ops { * the page back to a CPU accessible page. */ vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); + + /* + * Handle the memory failure happens on a range of pfns. Notify the + * processes who are using these pfns, and try to recover the data on + * them if necessary. The mf_flags is finally passed to the recover + * function through the whole notify routine. + * + * When this is not implemented, or it returns -EOPNOTSUPP, the caller + * will fall back to a common handler called mf_generic_kill_procs(). 
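(An illustrative aside, not part of the header: a minimal sketch of wiring the hook declared just below; the names are hypothetical. A provider with no device-specific recovery can simply return -EOPNOTSUPP so the core falls back to mf_generic_kill_procs().)

	static int example_pgmap_memory_failure(struct dev_pagemap *pgmap,
						unsigned long pfn,
						unsigned long nr_pages,
						int mf_flags)
	{
		// No device-specific recovery: let the core notify the
		// processes mapping these pfns via the generic handler.
		return -EOPNOTSUPP;
	}

	static const struct dev_pagemap_ops example_pgmap_ops = {
		// Only the new hook is shown; real providers also set
		// .page_free and, for device-private memory, .migrate_to_ram.
		.memory_failure = example_pgmap_memory_failure,
	};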
+ */ + int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn, + unsigned long nr_pages, int mf_flags); }; #define PGMAP_ALTMAP_VALID (1 << 0) @@ -150,6 +170,17 @@ static inline bool is_pci_p2pdma_page(const struct page *page) page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; } +static inline bool is_device_coherent_page(const struct page *page) +{ + return is_zone_device_page(page) && + page->pgmap->type == MEMORY_DEVICE_COHERENT; +} + +static inline bool folio_is_device_coherent(const struct folio *folio) +{ + return is_device_coherent_page(&folio->page); +} + #ifdef CONFIG_ZONE_DEVICE void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); diff --git a/include/linux/mfd/ipaq-micro.h b/include/linux/mfd/ipaq-micro.h index ee48a4321c57..d5caa4c86ecc 100644 --- a/include/linux/mfd/ipaq-micro.h +++ b/include/linux/mfd/ipaq-micro.h @@ -75,8 +75,8 @@ struct ipaq_micro_rxdev { * @id: 4-bit ID of the message * @tx_len: length of TX data * @tx_data: TX data to send - * @rx_len: length of receieved RX data - * @rx_data: RX data to recieve + * @rx_len: length of received RX data + * @rx_data: RX data to receive * @ack: a completion that will be completed when RX is complete * @node: list node if message gets queued */ diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h index 5546688c7da7..fe8174cc8637 100644 --- a/include/linux/mfd/lp873x.h +++ b/include/linux/mfd/lp873x.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Functions to access LP873X power management chip. * * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __LINUX_MFD_LP873X_H diff --git a/include/linux/mfd/max77714.h b/include/linux/mfd/max77714.h index a970dc455426..7947e0d697a5 100644 --- a/include/linux/mfd/max77714.h +++ b/include/linux/mfd/max77714.h @@ -3,7 +3,7 @@ * Maxim MAX77714 Register and data structures definition. 
* * Copyright (C) 2022 Luca Ceresoli - * Author: Luca Ceresoli <luca@lucaceresoli.net> + * Author: Luca Ceresoli <luca.ceresoli@bootlin.com> */ #ifndef __LINUX_MFD_MAX77714_H_ diff --git a/include/linux/mfd/mt6331/core.h b/include/linux/mfd/mt6331/core.h new file mode 100644 index 000000000000..df8e6b1e4bc1 --- /dev/null +++ b/include/linux/mfd/mt6331/core.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef __MFD_MT6331_CORE_H__ +#define __MFD_MT6331_CORE_H__ + +enum mt6331_irq_status_numbers { + MT6331_IRQ_STATUS_PWRKEY = 0, + MT6331_IRQ_STATUS_HOMEKEY, + MT6331_IRQ_STATUS_CHRDET, + MT6331_IRQ_STATUS_THR_H, + MT6331_IRQ_STATUS_THR_L, + MT6331_IRQ_STATUS_BAT_H, + MT6331_IRQ_STATUS_BAT_L, + MT6331_IRQ_STATUS_RTC, + MT6331_IRQ_STATUS_AUDIO, + MT6331_IRQ_STATUS_MAD, + MT6331_IRQ_STATUS_ACCDET, + MT6331_IRQ_STATUS_ACCDET_EINT, + MT6331_IRQ_STATUS_ACCDET_NEGV = 12, + MT6331_IRQ_STATUS_VDVFS11_OC = 16, + MT6331_IRQ_STATUS_VDVFS12_OC, + MT6331_IRQ_STATUS_VDVFS13_OC, + MT6331_IRQ_STATUS_VDVFS14_OC, + MT6331_IRQ_STATUS_GPU_OC, + MT6331_IRQ_STATUS_VCORE1_OC, + MT6331_IRQ_STATUS_VCORE2_OC, + MT6331_IRQ_STATUS_VIO18_OC, + MT6331_IRQ_STATUS_LDO_OC, + MT6331_IRQ_STATUS_NR, +}; + +#define MT6331_IRQ_CON0_BASE MT6331_IRQ_STATUS_PWRKEY +#define MT6331_IRQ_CON0_BITS (MT6331_IRQ_STATUS_ACCDET_NEGV + 1) +#define MT6331_IRQ_CON1_BASE MT6331_IRQ_STATUS_VDVFS11_OC +#define MT6331_IRQ_CON1_BITS (MT6331_IRQ_STATUS_LDO_OC - MT6331_IRQ_STATUS_VDFS11_OC + 1) + +#endif /* __MFD_MT6331_CORE_H__ */ diff --git a/include/linux/mfd/mt6331/registers.h b/include/linux/mfd/mt6331/registers.h new file mode 100644 index 000000000000..e2be6bccd1a7 --- /dev/null +++ b/include/linux/mfd/mt6331/registers.h @@ -0,0 +1,584 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef __MFD_MT6331_REGISTERS_H__ +#define __MFD_MT6331_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6331_STRUP_CON0 0x0 +#define MT6331_STRUP_CON2 0x2 +#define MT6331_STRUP_CON3 0x4 +#define MT6331_STRUP_CON4 0x6 +#define MT6331_STRUP_CON5 0x8 +#define MT6331_STRUP_CON6 0xA +#define MT6331_STRUP_CON7 0xC +#define MT6331_STRUP_CON8 0xE +#define MT6331_STRUP_CON9 0x10 +#define MT6331_STRUP_CON10 0x12 +#define MT6331_STRUP_CON11 0x14 +#define MT6331_STRUP_CON12 0x16 +#define MT6331_STRUP_CON13 0x18 +#define MT6331_STRUP_CON14 0x1A +#define MT6331_STRUP_CON15 0x1C +#define MT6331_STRUP_CON16 0x1E +#define MT6331_STRUP_CON17 0x20 +#define MT6331_STRUP_CON18 0x22 +#define MT6331_HWCID 0x100 +#define MT6331_SWCID 0x102 +#define MT6331_EXT_PMIC_STATUS 0x104 +#define MT6331_TOP_CON 0x106 +#define MT6331_TEST_OUT 0x108 +#define MT6331_TEST_CON0 0x10A +#define MT6331_TEST_CON1 0x10C +#define MT6331_TESTMODE_SW 0x10E +#define MT6331_EN_STATUS0 0x110 +#define MT6331_EN_STATUS1 0x112 +#define MT6331_EN_STATUS2 0x114 +#define MT6331_OCSTATUS0 0x116 +#define MT6331_OCSTATUS1 0x118 +#define MT6331_OCSTATUS2 0x11A +#define MT6331_PGSTATUS 0x11C +#define MT6331_TOPSTATUS 0x11E +#define MT6331_TDSEL_CON 0x120 +#define MT6331_RDSEL_CON 0x122 +#define MT6331_SMT_CON0 0x124 +#define MT6331_SMT_CON1 0x126 +#define MT6331_SMT_CON2 0x128 +#define MT6331_DRV_CON0 0x12A +#define MT6331_DRV_CON1 0x12C +#define MT6331_DRV_CON2 0x12E +#define MT6331_DRV_CON3 0x130 +#define MT6331_TOP_STATUS 0x132 +#define MT6331_TOP_STATUS_SET 0x134 +#define MT6331_TOP_STATUS_CLR 0x136 
+#define MT6331_TOP_CKPDN_CON0 0x138 +#define MT6331_TOP_CKPDN_CON0_SET 0x13A +#define MT6331_TOP_CKPDN_CON0_CLR 0x13C +#define MT6331_TOP_CKPDN_CON1 0x13E +#define MT6331_TOP_CKPDN_CON1_SET 0x140 +#define MT6331_TOP_CKPDN_CON1_CLR 0x142 +#define MT6331_TOP_CKPDN_CON2 0x144 +#define MT6331_TOP_CKPDN_CON2_SET 0x146 +#define MT6331_TOP_CKPDN_CON2_CLR 0x148 +#define MT6331_TOP_CKSEL_CON 0x14A +#define MT6331_TOP_CKSEL_CON_SET 0x14C +#define MT6331_TOP_CKSEL_CON_CLR 0x14E +#define MT6331_TOP_CKHWEN_CON 0x150 +#define MT6331_TOP_CKHWEN_CON_SET 0x152 +#define MT6331_TOP_CKHWEN_CON_CLR 0x154 +#define MT6331_TOP_CKTST_CON0 0x156 +#define MT6331_TOP_CKTST_CON1 0x158 +#define MT6331_TOP_CLKSQ 0x15A +#define MT6331_TOP_CLKSQ_SET 0x15C +#define MT6331_TOP_CLKSQ_CLR 0x15E +#define MT6331_TOP_RST_CON 0x160 +#define MT6331_TOP_RST_CON_SET 0x162 +#define MT6331_TOP_RST_CON_CLR 0x164 +#define MT6331_TOP_RST_MISC 0x166 +#define MT6331_TOP_RST_MISC_SET 0x168 +#define MT6331_TOP_RST_MISC_CLR 0x16A +#define MT6331_INT_CON0 0x16C +#define MT6331_INT_CON0_SET 0x16E +#define MT6331_INT_CON0_CLR 0x170 +#define MT6331_INT_CON1 0x172 +#define MT6331_INT_CON1_SET 0x174 +#define MT6331_INT_CON1_CLR 0x176 +#define MT6331_INT_MISC_CON 0x178 +#define MT6331_INT_MISC_CON_SET 0x17A +#define MT6331_INT_MISC_CON_CLR 0x17C +#define MT6331_INT_STATUS_CON0 0x17E +#define MT6331_INT_STATUS_CON1 0x180 +#define MT6331_OC_GEAR_0 0x182 +#define MT6331_FQMTR_CON0 0x184 +#define MT6331_FQMTR_CON1 0x186 +#define MT6331_FQMTR_CON2 0x188 +#define MT6331_RG_SPI_CON 0x18A +#define MT6331_DEW_DIO_EN 0x18C +#define MT6331_DEW_READ_TEST 0x18E +#define MT6331_DEW_WRITE_TEST 0x190 +#define MT6331_DEW_CRC_SWRST 0x192 +#define MT6331_DEW_CRC_EN 0x194 +#define MT6331_DEW_CRC_VAL 0x196 +#define MT6331_DEW_DBG_MON_SEL 0x198 +#define MT6331_DEW_CIPHER_KEY_SEL 0x19A +#define MT6331_DEW_CIPHER_IV_SEL 0x19C +#define MT6331_DEW_CIPHER_EN 0x19E +#define MT6331_DEW_CIPHER_RDY 0x1A0 +#define MT6331_DEW_CIPHER_MODE 0x1A2 +#define MT6331_DEW_CIPHER_SWRST 0x1A4 +#define MT6331_DEW_RDDMY_NO 0x1A6 +#define MT6331_INT_TYPE_CON0 0x1A8 +#define MT6331_INT_TYPE_CON0_SET 0x1AA +#define MT6331_INT_TYPE_CON0_CLR 0x1AC +#define MT6331_INT_TYPE_CON1 0x1AE +#define MT6331_INT_TYPE_CON1_SET 0x1B0 +#define MT6331_INT_TYPE_CON1_CLR 0x1B2 +#define MT6331_INT_STA 0x1B4 +#define MT6331_BUCK_ALL_CON0 0x200 +#define MT6331_BUCK_ALL_CON1 0x202 +#define MT6331_BUCK_ALL_CON2 0x204 +#define MT6331_BUCK_ALL_CON3 0x206 +#define MT6331_BUCK_ALL_CON4 0x208 +#define MT6331_BUCK_ALL_CON5 0x20A +#define MT6331_BUCK_ALL_CON6 0x20C +#define MT6331_BUCK_ALL_CON7 0x20E +#define MT6331_BUCK_ALL_CON8 0x210 +#define MT6331_BUCK_ALL_CON9 0x212 +#define MT6331_BUCK_ALL_CON10 0x214 +#define MT6331_BUCK_ALL_CON11 0x216 +#define MT6331_BUCK_ALL_CON12 0x218 +#define MT6331_BUCK_ALL_CON13 0x21A +#define MT6331_BUCK_ALL_CON14 0x21C +#define MT6331_BUCK_ALL_CON15 0x21E +#define MT6331_BUCK_ALL_CON16 0x220 +#define MT6331_BUCK_ALL_CON17 0x222 +#define MT6331_BUCK_ALL_CON18 0x224 +#define MT6331_BUCK_ALL_CON19 0x226 +#define MT6331_BUCK_ALL_CON20 0x228 +#define MT6331_BUCK_ALL_CON21 0x22A +#define MT6331_BUCK_ALL_CON22 0x22C +#define MT6331_BUCK_ALL_CON23 0x22E +#define MT6331_BUCK_ALL_CON24 0x230 +#define MT6331_BUCK_ALL_CON25 0x232 +#define MT6331_BUCK_ALL_CON26 0x234 +#define MT6331_VDVFS11_CON0 0x236 +#define MT6331_VDVFS11_CON1 0x238 +#define MT6331_VDVFS11_CON2 0x23A +#define MT6331_VDVFS11_CON3 0x23C +#define MT6331_VDVFS11_CON4 0x23E +#define MT6331_VDVFS11_CON5 0x240 +#define MT6331_VDVFS11_CON6 
0x242 +#define MT6331_VDVFS11_CON7 0x244 +#define MT6331_VDVFS11_CON8 0x246 +#define MT6331_VDVFS11_CON9 0x248 +#define MT6331_VDVFS11_CON10 0x24A +#define MT6331_VDVFS11_CON11 0x24C +#define MT6331_VDVFS11_CON12 0x24E +#define MT6331_VDVFS11_CON13 0x250 +#define MT6331_VDVFS11_CON14 0x252 +#define MT6331_VDVFS11_CON18 0x25A +#define MT6331_VDVFS11_CON19 0x25C +#define MT6331_VDVFS11_CON20 0x25E +#define MT6331_VDVFS11_CON21 0x260 +#define MT6331_VDVFS11_CON22 0x262 +#define MT6331_VDVFS11_CON23 0x264 +#define MT6331_VDVFS11_CON24 0x266 +#define MT6331_VDVFS11_CON25 0x268 +#define MT6331_VDVFS11_CON26 0x26A +#define MT6331_VDVFS11_CON27 0x26C +#define MT6331_VDVFS12_CON0 0x26E +#define MT6331_VDVFS12_CON1 0x270 +#define MT6331_VDVFS12_CON2 0x272 +#define MT6331_VDVFS12_CON3 0x274 +#define MT6331_VDVFS12_CON4 0x276 +#define MT6331_VDVFS12_CON5 0x278 +#define MT6331_VDVFS12_CON6 0x27A +#define MT6331_VDVFS12_CON7 0x27C +#define MT6331_VDVFS12_CON8 0x27E +#define MT6331_VDVFS12_CON9 0x280 +#define MT6331_VDVFS12_CON10 0x282 +#define MT6331_VDVFS12_CON11 0x284 +#define MT6331_VDVFS12_CON12 0x286 +#define MT6331_VDVFS12_CON13 0x288 +#define MT6331_VDVFS12_CON14 0x28A +#define MT6331_VDVFS12_CON18 0x292 +#define MT6331_VDVFS12_CON19 0x294 +#define MT6331_VDVFS12_CON20 0x296 +#define MT6331_VDVFS13_CON0 0x298 +#define MT6331_VDVFS13_CON1 0x29A +#define MT6331_VDVFS13_CON2 0x29C +#define MT6331_VDVFS13_CON3 0x29E +#define MT6331_VDVFS13_CON4 0x2A0 +#define MT6331_VDVFS13_CON5 0x2A2 +#define MT6331_VDVFS13_CON6 0x2A4 +#define MT6331_VDVFS13_CON7 0x2A6 +#define MT6331_VDVFS13_CON8 0x2A8 +#define MT6331_VDVFS13_CON9 0x2AA +#define MT6331_VDVFS13_CON10 0x2AC +#define MT6331_VDVFS13_CON11 0x2AE +#define MT6331_VDVFS13_CON12 0x2B0 +#define MT6331_VDVFS13_CON13 0x2B2 +#define MT6331_VDVFS13_CON14 0x2B4 +#define MT6331_VDVFS13_CON18 0x2BC +#define MT6331_VDVFS13_CON19 0x2BE +#define MT6331_VDVFS13_CON20 0x2C0 +#define MT6331_VDVFS14_CON0 0x2C2 +#define MT6331_VDVFS14_CON1 0x2C4 +#define MT6331_VDVFS14_CON2 0x2C6 +#define MT6331_VDVFS14_CON3 0x2C8 +#define MT6331_VDVFS14_CON4 0x2CA +#define MT6331_VDVFS14_CON5 0x2CC +#define MT6331_VDVFS14_CON6 0x2CE +#define MT6331_VDVFS14_CON7 0x2D0 +#define MT6331_VDVFS14_CON8 0x2D2 +#define MT6331_VDVFS14_CON9 0x2D4 +#define MT6331_VDVFS14_CON10 0x2D6 +#define MT6331_VDVFS14_CON11 0x2D8 +#define MT6331_VDVFS14_CON12 0x2DA +#define MT6331_VDVFS14_CON13 0x2DC +#define MT6331_VDVFS14_CON14 0x2DE +#define MT6331_VDVFS14_CON18 0x2E6 +#define MT6331_VDVFS14_CON19 0x2E8 +#define MT6331_VDVFS14_CON20 0x2EA +#define MT6331_VGPU_CON0 0x300 +#define MT6331_VGPU_CON1 0x302 +#define MT6331_VGPU_CON2 0x304 +#define MT6331_VGPU_CON3 0x306 +#define MT6331_VGPU_CON4 0x308 +#define MT6331_VGPU_CON5 0x30A +#define MT6331_VGPU_CON6 0x30C +#define MT6331_VGPU_CON7 0x30E +#define MT6331_VGPU_CON8 0x310 +#define MT6331_VGPU_CON9 0x312 +#define MT6331_VGPU_CON10 0x314 +#define MT6331_VGPU_CON11 0x316 +#define MT6331_VGPU_CON12 0x318 +#define MT6331_VGPU_CON13 0x31A +#define MT6331_VGPU_CON14 0x31C +#define MT6331_VGPU_CON15 0x31E +#define MT6331_VGPU_CON16 0x320 +#define MT6331_VGPU_CON17 0x322 +#define MT6331_VGPU_CON18 0x324 +#define MT6331_VGPU_CON19 0x326 +#define MT6331_VGPU_CON20 0x328 +#define MT6331_VCORE1_CON0 0x32A +#define MT6331_VCORE1_CON1 0x32C +#define MT6331_VCORE1_CON2 0x32E +#define MT6331_VCORE1_CON3 0x330 +#define MT6331_VCORE1_CON4 0x332 +#define MT6331_VCORE1_CON5 0x334 +#define MT6331_VCORE1_CON6 0x336 +#define MT6331_VCORE1_CON7 0x338 +#define MT6331_VCORE1_CON8 0x33A 
+#define MT6331_VCORE1_CON9 0x33C +#define MT6331_VCORE1_CON10 0x33E +#define MT6331_VCORE1_CON11 0x340 +#define MT6331_VCORE1_CON12 0x342 +#define MT6331_VCORE1_CON13 0x344 +#define MT6331_VCORE1_CON14 0x346 +#define MT6331_VCORE1_CON15 0x348 +#define MT6331_VCORE1_CON16 0x34A +#define MT6331_VCORE1_CON17 0x34C +#define MT6331_VCORE1_CON18 0x34E +#define MT6331_VCORE1_CON19 0x350 +#define MT6331_VCORE1_CON20 0x352 +#define MT6331_VCORE2_CON0 0x354 +#define MT6331_VCORE2_CON1 0x356 +#define MT6331_VCORE2_CON2 0x358 +#define MT6331_VCORE2_CON3 0x35A +#define MT6331_VCORE2_CON4 0x35C +#define MT6331_VCORE2_CON5 0x35E +#define MT6331_VCORE2_CON6 0x360 +#define MT6331_VCORE2_CON7 0x362 +#define MT6331_VCORE2_CON8 0x364 +#define MT6331_VCORE2_CON9 0x366 +#define MT6331_VCORE2_CON10 0x368 +#define MT6331_VCORE2_CON11 0x36A +#define MT6331_VCORE2_CON12 0x36C +#define MT6331_VCORE2_CON13 0x36E +#define MT6331_VCORE2_CON14 0x370 +#define MT6331_VCORE2_CON15 0x372 +#define MT6331_VCORE2_CON16 0x374 +#define MT6331_VCORE2_CON17 0x376 +#define MT6331_VCORE2_CON18 0x378 +#define MT6331_VCORE2_CON19 0x37A +#define MT6331_VCORE2_CON20 0x37C +#define MT6331_VCORE2_CON21 0x37E +#define MT6331_VIO18_CON0 0x380 +#define MT6331_VIO18_CON1 0x382 +#define MT6331_VIO18_CON2 0x384 +#define MT6331_VIO18_CON3 0x386 +#define MT6331_VIO18_CON4 0x388 +#define MT6331_VIO18_CON5 0x38A +#define MT6331_VIO18_CON6 0x38C +#define MT6331_VIO18_CON7 0x38E +#define MT6331_VIO18_CON8 0x390 +#define MT6331_VIO18_CON9 0x392 +#define MT6331_VIO18_CON10 0x394 +#define MT6331_VIO18_CON11 0x396 +#define MT6331_VIO18_CON12 0x398 +#define MT6331_VIO18_CON13 0x39A +#define MT6331_VIO18_CON14 0x39C +#define MT6331_VIO18_CON15 0x39E +#define MT6331_VIO18_CON16 0x3A0 +#define MT6331_VIO18_CON17 0x3A2 +#define MT6331_VIO18_CON18 0x3A4 +#define MT6331_VIO18_CON19 0x3A6 +#define MT6331_VIO18_CON20 0x3A8 +#define MT6331_BUCK_K_CON0 0x3AA +#define MT6331_BUCK_K_CON1 0x3AC +#define MT6331_BUCK_K_CON2 0x3AE +#define MT6331_BUCK_K_CON3 0x3B0 +#define MT6331_ZCD_CON0 0x400 +#define MT6331_ZCD_CON1 0x402 +#define MT6331_ZCD_CON2 0x404 +#define MT6331_ZCD_CON3 0x406 +#define MT6331_ZCD_CON4 0x408 +#define MT6331_ZCD_CON5 0x40A +#define MT6331_ISINK0_CON0 0x40C +#define MT6331_ISINK0_CON1 0x40E +#define MT6331_ISINK0_CON2 0x410 +#define MT6331_ISINK0_CON3 0x412 +#define MT6331_ISINK0_CON4 0x414 +#define MT6331_ISINK1_CON0 0x416 +#define MT6331_ISINK1_CON1 0x418 +#define MT6331_ISINK1_CON2 0x41A +#define MT6331_ISINK1_CON3 0x41C +#define MT6331_ISINK1_CON4 0x41E +#define MT6331_ISINK2_CON0 0x420 +#define MT6331_ISINK2_CON1 0x422 +#define MT6331_ISINK2_CON2 0x424 +#define MT6331_ISINK2_CON3 0x426 +#define MT6331_ISINK2_CON4 0x428 +#define MT6331_ISINK3_CON0 0x42A +#define MT6331_ISINK3_CON1 0x42C +#define MT6331_ISINK3_CON2 0x42E +#define MT6331_ISINK3_CON3 0x430 +#define MT6331_ISINK3_CON4 0x432 +#define MT6331_ISINK_ANA0 0x434 +#define MT6331_ISINK_ANA1 0x436 +#define MT6331_ISINK_PHASE_DLY 0x438 +#define MT6331_ISINK_EN_CTRL 0x43A +#define MT6331_ANALDO_CON0 0x500 +#define MT6331_ANALDO_CON1 0x502 +#define MT6331_ANALDO_CON2 0x504 +#define MT6331_ANALDO_CON3 0x506 +#define MT6331_ANALDO_CON4 0x508 +#define MT6331_ANALDO_CON5 0x50A +#define MT6331_ANALDO_CON6 0x50C +#define MT6331_ANALDO_CON7 0x50E +#define MT6331_ANALDO_CON8 0x510 +#define MT6331_ANALDO_CON9 0x512 +#define MT6331_ANALDO_CON10 0x514 +#define MT6331_ANALDO_CON11 0x516 +#define MT6331_ANALDO_CON12 0x518 +#define MT6331_ANALDO_CON13 0x51A +#define MT6331_SYSLDO_CON0 0x51C +#define 
MT6331_SYSLDO_CON1 0x51E +#define MT6331_SYSLDO_CON2 0x520 +#define MT6331_SYSLDO_CON3 0x522 +#define MT6331_SYSLDO_CON4 0x524 +#define MT6331_SYSLDO_CON5 0x526 +#define MT6331_SYSLDO_CON6 0x528 +#define MT6331_SYSLDO_CON7 0x52A +#define MT6331_SYSLDO_CON8 0x52C +#define MT6331_SYSLDO_CON9 0x52E +#define MT6331_SYSLDO_CON10 0x530 +#define MT6331_SYSLDO_CON11 0x532 +#define MT6331_SYSLDO_CON12 0x534 +#define MT6331_SYSLDO_CON13 0x536 +#define MT6331_SYSLDO_CON14 0x538 +#define MT6331_SYSLDO_CON15 0x53A +#define MT6331_SYSLDO_CON16 0x53C +#define MT6331_SYSLDO_CON17 0x53E +#define MT6331_SYSLDO_CON18 0x540 +#define MT6331_SYSLDO_CON19 0x542 +#define MT6331_SYSLDO_CON20 0x544 +#define MT6331_SYSLDO_CON21 0x546 +#define MT6331_DIGLDO_CON0 0x548 +#define MT6331_DIGLDO_CON1 0x54A +#define MT6331_DIGLDO_CON2 0x54C +#define MT6331_DIGLDO_CON3 0x54E +#define MT6331_DIGLDO_CON4 0x550 +#define MT6331_DIGLDO_CON5 0x552 +#define MT6331_DIGLDO_CON6 0x554 +#define MT6331_DIGLDO_CON7 0x556 +#define MT6331_DIGLDO_CON8 0x558 +#define MT6331_DIGLDO_CON9 0x55A +#define MT6331_DIGLDO_CON10 0x55C +#define MT6331_DIGLDO_CON11 0x55E +#define MT6331_DIGLDO_CON12 0x560 +#define MT6331_DIGLDO_CON13 0x562 +#define MT6331_DIGLDO_CON14 0x564 +#define MT6331_DIGLDO_CON15 0x566 +#define MT6331_DIGLDO_CON16 0x568 +#define MT6331_DIGLDO_CON17 0x56A +#define MT6331_DIGLDO_CON18 0x56C +#define MT6331_DIGLDO_CON19 0x56E +#define MT6331_DIGLDO_CON20 0x570 +#define MT6331_DIGLDO_CON21 0x572 +#define MT6331_DIGLDO_CON22 0x574 +#define MT6331_DIGLDO_CON23 0x576 +#define MT6331_DIGLDO_CON24 0x578 +#define MT6331_DIGLDO_CON25 0x57A +#define MT6331_DIGLDO_CON26 0x57C +#define MT6331_DIGLDO_CON27 0x57E +#define MT6331_DIGLDO_CON28 0x580 +#define MT6331_OTP_CON0 0x600 +#define MT6331_OTP_CON1 0x602 +#define MT6331_OTP_CON2 0x604 +#define MT6331_OTP_CON3 0x606 +#define MT6331_OTP_CON4 0x608 +#define MT6331_OTP_CON5 0x60A +#define MT6331_OTP_CON6 0x60C +#define MT6331_OTP_CON7 0x60E +#define MT6331_OTP_CON8 0x610 +#define MT6331_OTP_CON9 0x612 +#define MT6331_OTP_CON10 0x614 +#define MT6331_OTP_CON11 0x616 +#define MT6331_OTP_CON12 0x618 +#define MT6331_OTP_CON13 0x61A +#define MT6331_OTP_CON14 0x61C +#define MT6331_OTP_DOUT_0_15 0x61E +#define MT6331_OTP_DOUT_16_31 0x620 +#define MT6331_OTP_DOUT_32_47 0x622 +#define MT6331_OTP_DOUT_48_63 0x624 +#define MT6331_OTP_DOUT_64_79 0x626 +#define MT6331_OTP_DOUT_80_95 0x628 +#define MT6331_OTP_DOUT_96_111 0x62A +#define MT6331_OTP_DOUT_112_127 0x62C +#define MT6331_OTP_DOUT_128_143 0x62E +#define MT6331_OTP_DOUT_144_159 0x630 +#define MT6331_OTP_DOUT_160_175 0x632 +#define MT6331_OTP_DOUT_176_191 0x634 +#define MT6331_OTP_DOUT_192_207 0x636 +#define MT6331_OTP_DOUT_208_223 0x638 +#define MT6331_OTP_DOUT_224_239 0x63A +#define MT6331_OTP_DOUT_240_255 0x63C +#define MT6331_OTP_VAL_0_15 0x63E +#define MT6331_OTP_VAL_16_31 0x640 +#define MT6331_OTP_VAL_32_47 0x642 +#define MT6331_OTP_VAL_48_63 0x644 +#define MT6331_OTP_VAL_64_79 0x646 +#define MT6331_OTP_VAL_80_95 0x648 +#define MT6331_OTP_VAL_96_111 0x64A +#define MT6331_OTP_VAL_112_127 0x64C +#define MT6331_OTP_VAL_128_143 0x64E +#define MT6331_OTP_VAL_144_159 0x650 +#define MT6331_OTP_VAL_160_175 0x652 +#define MT6331_OTP_VAL_176_191 0x654 +#define MT6331_OTP_VAL_192_207 0x656 +#define MT6331_OTP_VAL_208_223 0x658 +#define MT6331_OTP_VAL_224_239 0x65A +#define MT6331_OTP_VAL_240_255 0x65C +#define MT6331_RTC_MIX_CON0 0x65E +#define MT6331_RTC_MIX_CON1 0x660 +#define MT6331_AUDDAC_CFG0 0x662 +#define MT6331_AUDBUF_CFG0 0x664 +#define 
MT6331_AUDBUF_CFG1 0x666 +#define MT6331_AUDBUF_CFG2 0x668 +#define MT6331_AUDBUF_CFG3 0x66A +#define MT6331_AUDBUF_CFG4 0x66C +#define MT6331_AUDBUF_CFG5 0x66E +#define MT6331_AUDBUF_CFG6 0x670 +#define MT6331_AUDBUF_CFG7 0x672 +#define MT6331_AUDBUF_CFG8 0x674 +#define MT6331_IBIASDIST_CFG0 0x676 +#define MT6331_AUDCLKGEN_CFG0 0x678 +#define MT6331_AUDLDO_CFG0 0x67A +#define MT6331_AUDDCDC_CFG0 0x67C +#define MT6331_AUDDCDC_CFG1 0x67E +#define MT6331_AUDNVREGGLB_CFG0 0x680 +#define MT6331_AUD_NCP0 0x682 +#define MT6331_AUD_ZCD_CFG0 0x684 +#define MT6331_AUDPREAMP_CFG0 0x686 +#define MT6331_AUDPREAMP_CFG1 0x688 +#define MT6331_AUDPREAMP_CFG2 0x68A +#define MT6331_AUDADC_CFG0 0x68C +#define MT6331_AUDADC_CFG1 0x68E +#define MT6331_AUDADC_CFG2 0x690 +#define MT6331_AUDADC_CFG3 0x692 +#define MT6331_AUDADC_CFG4 0x694 +#define MT6331_AUDADC_CFG5 0x696 +#define MT6331_AUDDIGMI_CFG0 0x698 +#define MT6331_AUDDIGMI_CFG1 0x69A +#define MT6331_AUDMICBIAS_CFG0 0x69C +#define MT6331_AUDMICBIAS_CFG1 0x69E +#define MT6331_AUDENCSPARE_CFG0 0x6A0 +#define MT6331_AUDPREAMPGAIN_CFG0 0x6A2 +#define MT6331_AUDMADPLL_CFG0 0x6A4 +#define MT6331_AUDMADPLL_CFG1 0x6A6 +#define MT6331_AUDMADPLL_CFG2 0x6A8 +#define MT6331_AUDLDO_NVREG_CFG0 0x6AA +#define MT6331_AUDLDO_NVREG_CFG1 0x6AC +#define MT6331_AUDLDO_NVREG_CFG2 0x6AE +#define MT6331_AUXADC_ADC0 0x700 +#define MT6331_AUXADC_ADC1 0x702 +#define MT6331_AUXADC_ADC2 0x704 +#define MT6331_AUXADC_ADC3 0x706 +#define MT6331_AUXADC_ADC4 0x708 +#define MT6331_AUXADC_ADC5 0x70A +#define MT6331_AUXADC_ADC6 0x70C +#define MT6331_AUXADC_ADC7 0x70E +#define MT6331_AUXADC_ADC8 0x710 +#define MT6331_AUXADC_ADC9 0x712 +#define MT6331_AUXADC_ADC10 0x714 +#define MT6331_AUXADC_ADC11 0x716 +#define MT6331_AUXADC_ADC12 0x718 +#define MT6331_AUXADC_ADC13 0x71A +#define MT6331_AUXADC_ADC14 0x71C +#define MT6331_AUXADC_ADC15 0x71E +#define MT6331_AUXADC_ADC16 0x720 +#define MT6331_AUXADC_ADC17 0x722 +#define MT6331_AUXADC_ADC18 0x724 +#define MT6331_AUXADC_ADC19 0x726 +#define MT6331_AUXADC_STA0 0x728 +#define MT6331_AUXADC_STA1 0x72A +#define MT6331_AUXADC_RQST0 0x72C +#define MT6331_AUXADC_RQST0_SET 0x72E +#define MT6331_AUXADC_RQST0_CLR 0x730 +#define MT6331_AUXADC_RQST1 0x732 +#define MT6331_AUXADC_RQST1_SET 0x734 +#define MT6331_AUXADC_RQST1_CLR 0x736 +#define MT6331_AUXADC_CON0 0x738 +#define MT6331_AUXADC_CON1 0x73A +#define MT6331_AUXADC_CON2 0x73C +#define MT6331_AUXADC_CON3 0x73E +#define MT6331_AUXADC_CON4 0x740 +#define MT6331_AUXADC_CON5 0x742 +#define MT6331_AUXADC_CON6 0x744 +#define MT6331_AUXADC_CON7 0x746 +#define MT6331_AUXADC_CON8 0x748 +#define MT6331_AUXADC_CON9 0x74A +#define MT6331_AUXADC_CON10 0x74C +#define MT6331_AUXADC_CON11 0x74E +#define MT6331_AUXADC_CON12 0x750 +#define MT6331_AUXADC_CON13 0x752 +#define MT6331_AUXADC_CON14 0x754 +#define MT6331_AUXADC_CON15 0x756 +#define MT6331_AUXADC_CON16 0x758 +#define MT6331_AUXADC_CON17 0x75A +#define MT6331_AUXADC_CON18 0x75C +#define MT6331_AUXADC_CON19 0x75E +#define MT6331_AUXADC_CON20 0x760 +#define MT6331_AUXADC_CON21 0x762 +#define MT6331_AUXADC_CON22 0x764 +#define MT6331_AUXADC_CON23 0x766 +#define MT6331_AUXADC_CON24 0x768 +#define MT6331_AUXADC_CON25 0x76A +#define MT6331_AUXADC_CON26 0x76C +#define MT6331_AUXADC_CON27 0x76E +#define MT6331_AUXADC_CON28 0x770 +#define MT6331_AUXADC_CON29 0x772 +#define MT6331_AUXADC_CON30 0x774 +#define MT6331_AUXADC_CON31 0x776 +#define MT6331_AUXADC_CON32 0x778 +#define MT6331_ACCDET_CON0 0x77A +#define MT6331_ACCDET_CON1 0x77C +#define MT6331_ACCDET_CON2 0x77E 
+#define MT6331_ACCDET_CON3 0x780 +#define MT6331_ACCDET_CON4 0x782 +#define MT6331_ACCDET_CON5 0x784 +#define MT6331_ACCDET_CON6 0x786 +#define MT6331_ACCDET_CON7 0x788 +#define MT6331_ACCDET_CON8 0x78A +#define MT6331_ACCDET_CON9 0x78C +#define MT6331_ACCDET_CON10 0x78E +#define MT6331_ACCDET_CON11 0x790 +#define MT6331_ACCDET_CON12 0x792 +#define MT6331_ACCDET_CON13 0x794 +#define MT6331_ACCDET_CON14 0x796 +#define MT6331_ACCDET_CON15 0x798 +#define MT6331_ACCDET_CON16 0x79A +#define MT6331_ACCDET_CON17 0x79C +#define MT6331_ACCDET_CON18 0x79E +#define MT6331_ACCDET_CON19 0x7A0 +#define MT6331_ACCDET_CON20 0x7A2 +#define MT6331_ACCDET_CON21 0x7A4 +#define MT6331_ACCDET_CON22 0x7A6 +#define MT6331_ACCDET_CON23 0x7A8 +#define MT6331_ACCDET_CON24 0x7AA + +#endif /* __MFD_MT6331_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6332/core.h b/include/linux/mfd/mt6332/core.h new file mode 100644 index 000000000000..cd6013eb82d9 --- /dev/null +++ b/include/linux/mfd/mt6332/core.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef __MFD_MT6332_CORE_H__ +#define __MFD_MT6332_CORE_H__ + +enum mt6332_irq_status_numbers { + MT6332_IRQ_STATUS_CHR_COMPLETE = 0, + MT6332_IRQ_STATUS_THERMAL_SD, + MT6332_IRQ_STATUS_THERMAL_REG_IN, + MT6332_IRQ_STATUS_THERMAL_REG_OUT, + MT6332_IRQ_STATUS_OTG_OC, + MT6332_IRQ_STATUS_CHR_OC, + MT6332_IRQ_STATUS_OTG_THERMAL, + MT6332_IRQ_STATUS_CHRIN_SHORT, + MT6332_IRQ_STATUS_DRVCDT_SHORT, + MT6332_IRQ_STATUS_PLUG_IN_FLASH, + MT6332_IRQ_STATUS_CHRWDT_FLAG, + MT6332_IRQ_STATUS_FLASH_EN_TIMEOUT, + MT6332_IRQ_STATUS_FLASH_VLED1_SHORT, + MT6332_IRQ_STATUS_FLASH_VLED1_OPEN = 13, + MT6332_IRQ_STATUS_OV = 16, + MT6332_IRQ_STATUS_BVALID_DET, + MT6332_IRQ_STATUS_VBATON_UNDET, + MT6332_IRQ_STATUS_CHR_PLUG_IN, + MT6332_IRQ_STATUS_CHR_PLUG_OUT, + MT6332_IRQ_STATUS_BC11_TIMEOUT, + MT6332_IRQ_STATUS_FLASH_VLED2_SHORT, + MT6332_IRQ_STATUS_FLASH_VLED2_OPEN = 23, + MT6332_IRQ_STATUS_THR_H = 32, + MT6332_IRQ_STATUS_THR_L, + MT6332_IRQ_STATUS_BAT_H, + MT6332_IRQ_STATUS_BAT_L, + MT6332_IRQ_STATUS_M3_H, + MT6332_IRQ_STATUS_M3_L, + MT6332_IRQ_STATUS_FG_BAT_H, + MT6332_IRQ_STATUS_FG_BAT_L, + MT6332_IRQ_STATUS_FG_CUR_H, + MT6332_IRQ_STATUS_FG_CUR_L, + MT6332_IRQ_STATUS_SPKL_D, + MT6332_IRQ_STATUS_SPKL_AB, + MT6332_IRQ_STATUS_BIF, + MT6332_IRQ_STATUS_VWLED_OC = 45, + MT6332_IRQ_STATUS_VDRAM_OC = 48, + MT6332_IRQ_STATUS_VDVFS2_OC, + MT6332_IRQ_STATUS_VRF1_OC, + MT6332_IRQ_STATUS_VRF2_OC, + MT6332_IRQ_STATUS_VPA_OC, + MT6332_IRQ_STATUS_VSBST_OC, + MT6332_IRQ_STATUS_LDO_OC, + MT6332_IRQ_STATUS_NR, +}; + +#define MT6332_IRQ_CON0_BASE MT6332_IRQ_STATUS_CHR_COMPLETE +#define MT6332_IRQ_CON0_BITS (MT6332_IRQ_STATUS_FLASH_VLED1_OPEN + 1) +#define MT6332_IRQ_CON1_BASE MT6332_IRQ_STATUS_OV +#define MT6332_IRQ_CON1_BITS (MT6332_IRQ_STATUS_FLASH_VLED2_OPEN - MT6332_IRQ_STATUS_OV + 1) +#define MT6332_IRQ_CON2_BASE MT6332_IRQ_STATUS_THR_H +#define MT6332_IRQ_CON2_BITS (MT6332_IRQ_STATUS_VWLED_OC - MT6332_IRQ_STATUS_THR_H + 1) +#define MT6332_IRQ_CON3_BASE MT6332_IRQ_STATUS_VDRAM_OC +#define MT6332_IRQ_CON3_BITS (MT6332_IRQ_STATUS_LDO_OC - MT6332_IRQ_STATUS_VDRAM_OC + 1) + +#endif /* __MFD_MT6332_CORE_H__ */ diff --git a/include/linux/mfd/mt6332/registers.h b/include/linux/mfd/mt6332/registers.h new file mode 100644 index 000000000000..65e0b86fceac --- /dev/null +++ b/include/linux/mfd/mt6332/registers.h @@ -0,0 +1,642 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 
2022 AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef __MFD_MT6332_REGISTERS_H__ +#define __MFD_MT6332_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6332_HWCID 0x8000 +#define MT6332_SWCID 0x8002 +#define MT6332_TOP_CON 0x8004 +#define MT6332_DDR_VREF_AP_CON 0x8006 +#define MT6332_DDR_VREF_DQ_CON 0x8008 +#define MT6332_DDR_VREF_CA_CON 0x800A +#define MT6332_TEST_OUT 0x800C +#define MT6332_TEST_CON0 0x800E +#define MT6332_TEST_CON1 0x8010 +#define MT6332_TESTMODE_SW 0x8012 +#define MT6332_TESTMODE_ANA 0x8014 +#define MT6332_TDSEL_CON 0x8016 +#define MT6332_RDSEL_CON 0x8018 +#define MT6332_SMT_CON0 0x801A +#define MT6332_SMT_CON1 0x801C +#define MT6332_DRV_CON0 0x801E +#define MT6332_DRV_CON1 0x8020 +#define MT6332_DRV_CON2 0x8022 +#define MT6332_EN_STATUS0 0x8024 +#define MT6332_OCSTATUS0 0x8026 +#define MT6332_TOP_STATUS 0x8028 +#define MT6332_TOP_STATUS_SET 0x802A +#define MT6332_TOP_STATUS_CLR 0x802C +#define MT6332_FLASH_CON0 0x802E +#define MT6332_FLASH_CON1 0x8030 +#define MT6332_FLASH_CON2 0x8032 +#define MT6332_CORE_CON0 0x8034 +#define MT6332_CORE_CON1 0x8036 +#define MT6332_CORE_CON2 0x8038 +#define MT6332_CORE_CON3 0x803A +#define MT6332_CORE_CON4 0x803C +#define MT6332_CORE_CON5 0x803E +#define MT6332_CORE_CON6 0x8040 +#define MT6332_CORE_CON7 0x8042 +#define MT6332_CORE_CON8 0x8044 +#define MT6332_CORE_CON9 0x8046 +#define MT6332_CORE_CON10 0x8048 +#define MT6332_CORE_CON11 0x804A +#define MT6332_CORE_CON12 0x804C +#define MT6332_CORE_CON13 0x804E +#define MT6332_CORE_CON14 0x8050 +#define MT6332_CORE_CON15 0x8052 +#define MT6332_STA_CON0 0x8054 +#define MT6332_STA_CON1 0x8056 +#define MT6332_STA_CON2 0x8058 +#define MT6332_STA_CON3 0x805A +#define MT6332_STA_CON4 0x805C +#define MT6332_STA_CON5 0x805E +#define MT6332_STA_CON6 0x8060 +#define MT6332_STA_CON7 0x8062 +#define MT6332_CHR_CON0 0x8064 +#define MT6332_CHR_CON1 0x8066 +#define MT6332_CHR_CON2 0x8068 +#define MT6332_CHR_CON3 0x806A +#define MT6332_CHR_CON4 0x806C +#define MT6332_CHR_CON5 0x806E +#define MT6332_CHR_CON6 0x8070 +#define MT6332_CHR_CON7 0x8072 +#define MT6332_CHR_CON8 0x8074 +#define MT6332_CHR_CON9 0x8076 +#define MT6332_CHR_CON10 0x8078 +#define MT6332_CHR_CON11 0x807A +#define MT6332_CHR_CON12 0x807C +#define MT6332_CHR_CON13 0x807E +#define MT6332_CHR_CON14 0x8080 +#define MT6332_CHR_CON15 0x8082 +#define MT6332_BOOST_CON0 0x8084 +#define MT6332_BOOST_CON1 0x8086 +#define MT6332_BOOST_CON2 0x8088 +#define MT6332_BOOST_CON3 0x808A +#define MT6332_BOOST_CON4 0x808C +#define MT6332_BOOST_CON5 0x808E +#define MT6332_BOOST_CON6 0x8090 +#define MT6332_BOOST_CON7 0x8092 +#define MT6332_TOP_CKPDN_CON0 0x8094 +#define MT6332_TOP_CKPDN_CON0_SET 0x8096 +#define MT6332_TOP_CKPDN_CON0_CLR 0x8098 +#define MT6332_TOP_CKPDN_CON1 0x809A +#define MT6332_TOP_CKPDN_CON1_SET 0x809C +#define MT6332_TOP_CKPDN_CON1_CLR 0x809E +#define MT6332_TOP_CKPDN_CON2 0x80A0 +#define MT6332_TOP_CKPDN_CON2_SET 0x80A2 +#define MT6332_TOP_CKPDN_CON2_CLR 0x80A4 +#define MT6332_TOP_CKSEL_CON0 0x80A6 +#define MT6332_TOP_CKSEL_CON0_SET 0x80A8 +#define MT6332_TOP_CKSEL_CON0_CLR 0x80AA +#define MT6332_TOP_CKSEL_CON1 0x80AC +#define MT6332_TOP_CKSEL_CON1_SET 0x80AE +#define MT6332_TOP_CKSEL_CON1_CLR 0x80B0 +#define MT6332_TOP_CKHWEN_CON 0x80B2 +#define MT6332_TOP_CKHWEN_CON_SET 0x80B4 +#define MT6332_TOP_CKHWEN_CON_CLR 0x80B6 +#define MT6332_TOP_CKTST_CON0 0x80B8 +#define MT6332_TOP_CKTST_CON1 0x80BA +#define MT6332_TOP_RST_CON 0x80BC +#define MT6332_TOP_RST_CON_SET 0x80BE +#define MT6332_TOP_RST_CON_CLR 
0x80C0 +#define MT6332_TOP_RST_MISC 0x80C2 +#define MT6332_TOP_RST_MISC_SET 0x80C4 +#define MT6332_TOP_RST_MISC_CLR 0x80C6 +#define MT6332_INT_CON0 0x80C8 +#define MT6332_INT_CON0_SET 0x80CA +#define MT6332_INT_CON0_CLR 0x80CC +#define MT6332_INT_CON1 0x80CE +#define MT6332_INT_CON1_SET 0x80D0 +#define MT6332_INT_CON1_CLR 0x80D2 +#define MT6332_INT_CON2 0x80D4 +#define MT6332_INT_CON2_SET 0x80D6 +#define MT6332_INT_CON2_CLR 0x80D8 +#define MT6332_INT_CON3 0x80DA +#define MT6332_INT_CON3_SET 0x80DC +#define MT6332_INT_CON3_CLR 0x80DE +#define MT6332_CHRWDT_CON0 0x80E0 +#define MT6332_CHRWDT_STATUS0 0x80E2 +#define MT6332_INT_STATUS0 0x80E4 +#define MT6332_INT_STATUS1 0x80E6 +#define MT6332_INT_STATUS2 0x80E8 +#define MT6332_INT_STATUS3 0x80EA +#define MT6332_OC_GEAR_0 0x80EC +#define MT6332_OC_GEAR_1 0x80EE +#define MT6332_OC_GEAR_2 0x80F0 +#define MT6332_INT_MISC_CON 0x80F2 +#define MT6332_RG_SPI_CON 0x80F4 +#define MT6332_DEW_DIO_EN 0x80F6 +#define MT6332_DEW_READ_TEST 0x80F8 +#define MT6332_DEW_WRITE_TEST 0x80FA +#define MT6332_DEW_CRC_SWRST 0x80FC +#define MT6332_DEW_CRC_EN 0x80FE +#define MT6332_DEW_CRC_VAL 0x8100 +#define MT6332_DEW_DBG_MON_SEL 0x8102 +#define MT6332_DEW_CIPHER_KEY_SEL 0x8104 +#define MT6332_DEW_CIPHER_IV_SEL 0x8106 +#define MT6332_DEW_CIPHER_EN 0x8108 +#define MT6332_DEW_CIPHER_RDY 0x810A +#define MT6332_DEW_CIPHER_MODE 0x810C +#define MT6332_DEW_CIPHER_SWRST 0x810E +#define MT6332_DEW_RDDMY_NO 0x8110 +#define MT6332_INT_STA 0x8112 +#define MT6332_BIF_CON0 0x8114 +#define MT6332_BIF_CON1 0x8116 +#define MT6332_BIF_CON2 0x8118 +#define MT6332_BIF_CON3 0x811A +#define MT6332_BIF_CON4 0x811C +#define MT6332_BIF_CON5 0x811E +#define MT6332_BIF_CON6 0x8120 +#define MT6332_BIF_CON7 0x8122 +#define MT6332_BIF_CON8 0x8124 +#define MT6332_BIF_CON9 0x8126 +#define MT6332_BIF_CON10 0x8128 +#define MT6332_BIF_CON11 0x812A +#define MT6332_BIF_CON12 0x812C +#define MT6332_BIF_CON13 0x812E +#define MT6332_BIF_CON14 0x8130 +#define MT6332_BIF_CON15 0x8132 +#define MT6332_BIF_CON16 0x8134 +#define MT6332_BIF_CON17 0x8136 +#define MT6332_BIF_CON18 0x8138 +#define MT6332_BIF_CON19 0x813A +#define MT6332_BIF_CON20 0x813C +#define MT6332_BIF_CON21 0x813E +#define MT6332_BIF_CON22 0x8140 +#define MT6332_BIF_CON23 0x8142 +#define MT6332_BIF_CON24 0x8144 +#define MT6332_BIF_CON25 0x8146 +#define MT6332_BIF_CON26 0x8148 +#define MT6332_BIF_CON27 0x814A +#define MT6332_BIF_CON28 0x814C +#define MT6332_BIF_CON29 0x814E +#define MT6332_BIF_CON30 0x8150 +#define MT6332_BIF_CON31 0x8152 +#define MT6332_BIF_CON32 0x8154 +#define MT6332_BIF_CON33 0x8156 +#define MT6332_BIF_CON34 0x8158 +#define MT6332_BIF_CON35 0x815A +#define MT6332_BIF_CON36 0x815C +#define MT6332_BATON_CON0 0x815E +#define MT6332_BIF_CON37 0x8160 +#define MT6332_BIF_CON38 0x8162 +#define MT6332_CHR_CON16 0x8164 +#define MT6332_CHR_CON17 0x8166 +#define MT6332_CHR_CON18 0x8168 +#define MT6332_CHR_CON19 0x816A +#define MT6332_CHR_CON20 0x816C +#define MT6332_CHR_CON21 0x816E +#define MT6332_CHR_CON22 0x8170 +#define MT6332_CHR_CON23 0x8172 +#define MT6332_CHR_CON24 0x8174 +#define MT6332_CHR_CON25 0x8176 +#define MT6332_STA_CON8 0x8178 +#define MT6332_BUCK_ALL_CON0 0x8400 +#define MT6332_BUCK_ALL_CON1 0x8402 +#define MT6332_BUCK_ALL_CON2 0x8404 +#define MT6332_BUCK_ALL_CON3 0x8406 +#define MT6332_BUCK_ALL_CON4 0x8408 +#define MT6332_BUCK_ALL_CON5 0x840A +#define MT6332_BUCK_ALL_CON6 0x840C +#define MT6332_BUCK_ALL_CON7 0x840E +#define MT6332_BUCK_ALL_CON8 0x8410 +#define MT6332_BUCK_ALL_CON9 0x8412 +#define MT6332_BUCK_ALL_CON10 
0x8414 +#define MT6332_BUCK_ALL_CON11 0x8416 +#define MT6332_BUCK_ALL_CON12 0x8418 +#define MT6332_BUCK_ALL_CON13 0x841A +#define MT6332_BUCK_ALL_CON14 0x841C +#define MT6332_BUCK_ALL_CON15 0x841E +#define MT6332_BUCK_ALL_CON16 0x8420 +#define MT6332_BUCK_ALL_CON17 0x8422 +#define MT6332_BUCK_ALL_CON18 0x8424 +#define MT6332_BUCK_ALL_CON19 0x8426 +#define MT6332_BUCK_ALL_CON20 0x8428 +#define MT6332_BUCK_ALL_CON21 0x842A +#define MT6332_BUCK_ALL_CON22 0x842C +#define MT6332_BUCK_ALL_CON23 0x842E +#define MT6332_BUCK_ALL_CON24 0x8430 +#define MT6332_BUCK_ALL_CON25 0x8432 +#define MT6332_BUCK_ALL_CON26 0x8434 +#define MT6332_BUCK_ALL_CON27 0x8436 +#define MT6332_VDRAM_CON0 0x8438 +#define MT6332_VDRAM_CON1 0x843A +#define MT6332_VDRAM_CON2 0x843C +#define MT6332_VDRAM_CON3 0x843E +#define MT6332_VDRAM_CON4 0x8440 +#define MT6332_VDRAM_CON5 0x8442 +#define MT6332_VDRAM_CON6 0x8444 +#define MT6332_VDRAM_CON7 0x8446 +#define MT6332_VDRAM_CON8 0x8448 +#define MT6332_VDRAM_CON9 0x844A +#define MT6332_VDRAM_CON10 0x844C +#define MT6332_VDRAM_CON11 0x844E +#define MT6332_VDRAM_CON12 0x8450 +#define MT6332_VDRAM_CON13 0x8452 +#define MT6332_VDRAM_CON14 0x8454 +#define MT6332_VDRAM_CON15 0x8456 +#define MT6332_VDRAM_CON16 0x8458 +#define MT6332_VDRAM_CON17 0x845A +#define MT6332_VDRAM_CON18 0x845C +#define MT6332_VDRAM_CON19 0x845E +#define MT6332_VDRAM_CON20 0x8460 +#define MT6332_VDRAM_CON21 0x8462 +#define MT6332_VDVFS2_CON0 0x8464 +#define MT6332_VDVFS2_CON1 0x8466 +#define MT6332_VDVFS2_CON2 0x8468 +#define MT6332_VDVFS2_CON3 0x846A +#define MT6332_VDVFS2_CON4 0x846C +#define MT6332_VDVFS2_CON5 0x846E +#define MT6332_VDVFS2_CON6 0x8470 +#define MT6332_VDVFS2_CON7 0x8472 +#define MT6332_VDVFS2_CON8 0x8474 +#define MT6332_VDVFS2_CON9 0x8476 +#define MT6332_VDVFS2_CON10 0x8478 +#define MT6332_VDVFS2_CON11 0x847A +#define MT6332_VDVFS2_CON12 0x847C +#define MT6332_VDVFS2_CON13 0x847E +#define MT6332_VDVFS2_CON14 0x8480 +#define MT6332_VDVFS2_CON15 0x8482 +#define MT6332_VDVFS2_CON16 0x8484 +#define MT6332_VDVFS2_CON17 0x8486 +#define MT6332_VDVFS2_CON18 0x8488 +#define MT6332_VDVFS2_CON19 0x848A +#define MT6332_VDVFS2_CON20 0x848C +#define MT6332_VDVFS2_CON21 0x848E +#define MT6332_VDVFS2_CON22 0x8490 +#define MT6332_VDVFS2_CON23 0x8492 +#define MT6332_VDVFS2_CON24 0x8494 +#define MT6332_VDVFS2_CON25 0x8496 +#define MT6332_VDVFS2_CON26 0x8498 +#define MT6332_VDVFS2_CON27 0x849A +#define MT6332_VRF1_CON0 0x849C +#define MT6332_VRF1_CON1 0x849E +#define MT6332_VRF1_CON2 0x84A0 +#define MT6332_VRF1_CON3 0x84A2 +#define MT6332_VRF1_CON4 0x84A4 +#define MT6332_VRF1_CON5 0x84A6 +#define MT6332_VRF1_CON6 0x84A8 +#define MT6332_VRF1_CON7 0x84AA +#define MT6332_VRF1_CON8 0x84AC +#define MT6332_VRF1_CON9 0x84AE +#define MT6332_VRF1_CON10 0x84B0 +#define MT6332_VRF1_CON11 0x84B2 +#define MT6332_VRF1_CON12 0x84B4 +#define MT6332_VRF1_CON13 0x84B6 +#define MT6332_VRF1_CON14 0x84B8 +#define MT6332_VRF1_CON15 0x84BA +#define MT6332_VRF1_CON16 0x84BC +#define MT6332_VRF1_CON17 0x84BE +#define MT6332_VRF1_CON18 0x84C0 +#define MT6332_VRF1_CON19 0x84C2 +#define MT6332_VRF1_CON20 0x84C4 +#define MT6332_VRF1_CON21 0x84C6 +#define MT6332_VRF2_CON0 0x84C8 +#define MT6332_VRF2_CON1 0x84CA +#define MT6332_VRF2_CON2 0x84CC +#define MT6332_VRF2_CON3 0x84CE +#define MT6332_VRF2_CON4 0x84D0 +#define MT6332_VRF2_CON5 0x84D2 +#define MT6332_VRF2_CON6 0x84D4 +#define MT6332_VRF2_CON7 0x84D6 +#define MT6332_VRF2_CON8 0x84D8 +#define MT6332_VRF2_CON9 0x84DA +#define MT6332_VRF2_CON10 0x84DC +#define MT6332_VRF2_CON11 0x84DE 
+#define MT6332_VRF2_CON12 0x84E0 +#define MT6332_VRF2_CON13 0x84E2 +#define MT6332_VRF2_CON14 0x84E4 +#define MT6332_VRF2_CON15 0x84E6 +#define MT6332_VRF2_CON16 0x84E8 +#define MT6332_VRF2_CON17 0x84EA +#define MT6332_VRF2_CON18 0x84EC +#define MT6332_VRF2_CON19 0x84EE +#define MT6332_VRF2_CON20 0x84F0 +#define MT6332_VRF2_CON21 0x84F2 +#define MT6332_VPA_CON0 0x84F4 +#define MT6332_VPA_CON1 0x84F6 +#define MT6332_VPA_CON2 0x84F8 +#define MT6332_VPA_CON3 0x84FC +#define MT6332_VPA_CON4 0x84FE +#define MT6332_VPA_CON5 0x8500 +#define MT6332_VPA_CON6 0x8502 +#define MT6332_VPA_CON7 0x8504 +#define MT6332_VPA_CON8 0x8506 +#define MT6332_VPA_CON9 0x8508 +#define MT6332_VPA_CON10 0x850A +#define MT6332_VPA_CON11 0x850C +#define MT6332_VPA_CON12 0x850E +#define MT6332_VPA_CON13 0x8510 +#define MT6332_VPA_CON14 0x8512 +#define MT6332_VPA_CON15 0x8514 +#define MT6332_VPA_CON16 0x8516 +#define MT6332_VPA_CON17 0x8518 +#define MT6332_VPA_CON18 0x851A +#define MT6332_VPA_CON19 0x851C +#define MT6332_VPA_CON20 0x851E +#define MT6332_VPA_CON21 0x8520 +#define MT6332_VPA_CON22 0x8522 +#define MT6332_VPA_CON23 0x8524 +#define MT6332_VPA_CON24 0x8526 +#define MT6332_VPA_CON25 0x8528 +#define MT6332_VSBST_CON0 0x852A +#define MT6332_VSBST_CON1 0x852C +#define MT6332_VSBST_CON2 0x852E +#define MT6332_VSBST_CON3 0x8530 +#define MT6332_VSBST_CON4 0x8532 +#define MT6332_VSBST_CON5 0x8534 +#define MT6332_VSBST_CON6 0x8536 +#define MT6332_VSBST_CON7 0x8538 +#define MT6332_VSBST_CON8 0x853A +#define MT6332_VSBST_CON9 0x853C +#define MT6332_VSBST_CON10 0x853E +#define MT6332_VSBST_CON11 0x8540 +#define MT6332_VSBST_CON12 0x8542 +#define MT6332_VSBST_CON13 0x8544 +#define MT6332_VSBST_CON14 0x8546 +#define MT6332_VSBST_CON15 0x8548 +#define MT6332_VSBST_CON16 0x854A +#define MT6332_VSBST_CON17 0x854C +#define MT6332_VSBST_CON18 0x854E +#define MT6332_VSBST_CON19 0x8550 +#define MT6332_VSBST_CON20 0x8552 +#define MT6332_VSBST_CON21 0x8554 +#define MT6332_BUCK_K_CON0 0x8556 +#define MT6332_BUCK_K_CON1 0x8558 +#define MT6332_BUCK_K_CON2 0x855A +#define MT6332_BUCK_K_CON3 0x855C +#define MT6332_BUCK_K_CON4 0x855E +#define MT6332_BUCK_K_CON5 0x8560 +#define MT6332_AUXADC_ADC0 0x8800 +#define MT6332_AUXADC_ADC1 0x8802 +#define MT6332_AUXADC_ADC2 0x8804 +#define MT6332_AUXADC_ADC3 0x8806 +#define MT6332_AUXADC_ADC4 0x8808 +#define MT6332_AUXADC_ADC5 0x880A +#define MT6332_AUXADC_ADC6 0x880C +#define MT6332_AUXADC_ADC7 0x880E +#define MT6332_AUXADC_ADC8 0x8810 +#define MT6332_AUXADC_ADC9 0x8812 +#define MT6332_AUXADC_ADC10 0x8814 +#define MT6332_AUXADC_ADC11 0x8816 +#define MT6332_AUXADC_ADC12 0x8818 +#define MT6332_AUXADC_ADC13 0x881A +#define MT6332_AUXADC_ADC14 0x881C +#define MT6332_AUXADC_ADC15 0x881E +#define MT6332_AUXADC_ADC16 0x8820 +#define MT6332_AUXADC_ADC17 0x8822 +#define MT6332_AUXADC_ADC18 0x8824 +#define MT6332_AUXADC_ADC19 0x8826 +#define MT6332_AUXADC_ADC20 0x8828 +#define MT6332_AUXADC_ADC21 0x882A +#define MT6332_AUXADC_ADC22 0x882C +#define MT6332_AUXADC_ADC23 0x882E +#define MT6332_AUXADC_ADC24 0x8830 +#define MT6332_AUXADC_ADC25 0x8832 +#define MT6332_AUXADC_ADC26 0x8834 +#define MT6332_AUXADC_ADC27 0x8836 +#define MT6332_AUXADC_ADC28 0x8838 +#define MT6332_AUXADC_ADC29 0x883A +#define MT6332_AUXADC_ADC30 0x883C +#define MT6332_AUXADC_ADC31 0x883E +#define MT6332_AUXADC_ADC32 0x8840 +#define MT6332_AUXADC_ADC33 0x8842 +#define MT6332_AUXADC_ADC34 0x8844 +#define MT6332_AUXADC_ADC35 0x8846 +#define MT6332_AUXADC_ADC36 0x8848 +#define MT6332_AUXADC_ADC37 0x884A +#define MT6332_AUXADC_ADC38 0x884C 
+#define MT6332_AUXADC_ADC39 0x884E +#define MT6332_AUXADC_ADC40 0x8850 +#define MT6332_AUXADC_ADC41 0x8852 +#define MT6332_AUXADC_ADC42 0x8854 +#define MT6332_AUXADC_ADC43 0x8856 +#define MT6332_AUXADC_STA0 0x8858 +#define MT6332_AUXADC_STA1 0x885A +#define MT6332_AUXADC_RQST0 0x885C +#define MT6332_AUXADC_RQST0_SET 0x885E +#define MT6332_AUXADC_RQST0_CLR 0x8860 +#define MT6332_AUXADC_RQST1 0x8862 +#define MT6332_AUXADC_RQST1_SET 0x8864 +#define MT6332_AUXADC_RQST1_CLR 0x8866 +#define MT6332_AUXADC_CON0 0x8868 +#define MT6332_AUXADC_CON1 0x886A +#define MT6332_AUXADC_CON2 0x886C +#define MT6332_AUXADC_CON3 0x886E +#define MT6332_AUXADC_CON4 0x8870 +#define MT6332_AUXADC_CON5 0x8872 +#define MT6332_AUXADC_CON6 0x8874 +#define MT6332_AUXADC_CON7 0x8876 +#define MT6332_AUXADC_CON8 0x8878 +#define MT6332_AUXADC_CON9 0x887A +#define MT6332_AUXADC_CON10 0x887C +#define MT6332_AUXADC_CON11 0x887E +#define MT6332_AUXADC_CON12 0x8880 +#define MT6332_AUXADC_CON13 0x8882 +#define MT6332_AUXADC_CON14 0x8884 +#define MT6332_AUXADC_CON15 0x8886 +#define MT6332_AUXADC_CON16 0x8888 +#define MT6332_AUXADC_CON17 0x888A +#define MT6332_AUXADC_CON18 0x888C +#define MT6332_AUXADC_CON19 0x888E +#define MT6332_AUXADC_CON20 0x8890 +#define MT6332_AUXADC_CON21 0x8892 +#define MT6332_AUXADC_CON22 0x8894 +#define MT6332_AUXADC_CON23 0x8896 +#define MT6332_AUXADC_CON24 0x8898 +#define MT6332_AUXADC_CON25 0x889A +#define MT6332_AUXADC_CON26 0x889C +#define MT6332_AUXADC_CON27 0x889E +#define MT6332_AUXADC_CON28 0x88A0 +#define MT6332_AUXADC_CON29 0x88A2 +#define MT6332_AUXADC_CON30 0x88A4 +#define MT6332_AUXADC_CON31 0x88A6 +#define MT6332_AUXADC_CON32 0x88A8 +#define MT6332_AUXADC_CON33 0x88AA +#define MT6332_AUXADC_CON34 0x88AC +#define MT6332_AUXADC_CON35 0x88AE +#define MT6332_AUXADC_CON36 0x88B0 +#define MT6332_AUXADC_CON37 0x88B2 +#define MT6332_AUXADC_CON38 0x88B4 +#define MT6332_AUXADC_CON39 0x88B6 +#define MT6332_AUXADC_CON40 0x88B8 +#define MT6332_AUXADC_CON41 0x88BA +#define MT6332_AUXADC_CON42 0x88BC +#define MT6332_AUXADC_CON43 0x88BE +#define MT6332_AUXADC_CON44 0x88C0 +#define MT6332_AUXADC_CON45 0x88C2 +#define MT6332_AUXADC_CON46 0x88C4 +#define MT6332_AUXADC_CON47 0x88C6 +#define MT6332_STRUP_CONA0 0x8C00 +#define MT6332_STRUP_CONA1 0x8C02 +#define MT6332_STRUP_CONA2 0x8C04 +#define MT6332_STRUP_CON0 0x8C06 +#define MT6332_STRUP_CON2 0x8C08 +#define MT6332_STRUP_CON3 0x8C0A +#define MT6332_STRUP_CON4 0x8C0C +#define MT6332_STRUP_CON5 0x8C0E +#define MT6332_STRUP_CON6 0x8C10 +#define MT6332_STRUP_CON7 0x8C12 +#define MT6332_STRUP_CON8 0x8C14 +#define MT6332_STRUP_CON9 0x8C16 +#define MT6332_STRUP_CON10 0x8C18 +#define MT6332_STRUP_CON11 0x8C1A +#define MT6332_STRUP_CON12 0x8C1C +#define MT6332_STRUP_CON13 0x8C1E +#define MT6332_STRUP_CON14 0x8C20 +#define MT6332_STRUP_CON15 0x8C22 +#define MT6332_STRUP_CON16 0x8C24 +#define MT6332_STRUP_CON17 0x8C26 +#define MT6332_FGADC_CON0 0x8C28 +#define MT6332_FGADC_CON1 0x8C2A +#define MT6332_FGADC_CON2 0x8C2C +#define MT6332_FGADC_CON3 0x8C2E +#define MT6332_FGADC_CON4 0x8C30 +#define MT6332_FGADC_CON5 0x8C32 +#define MT6332_FGADC_CON6 0x8C34 +#define MT6332_FGADC_CON7 0x8C36 +#define MT6332_FGADC_CON8 0x8C38 +#define MT6332_FGADC_CON9 0x8C3A +#define MT6332_FGADC_CON10 0x8C3C +#define MT6332_FGADC_CON11 0x8C3E +#define MT6332_FGADC_CON12 0x8C40 +#define MT6332_FGADC_CON13 0x8C42 +#define MT6332_FGADC_CON14 0x8C44 +#define MT6332_FGADC_CON15 0x8C46 +#define MT6332_FGADC_CON16 0x8C48 +#define MT6332_FGADC_CON17 0x8C4A +#define MT6332_FGADC_CON18 0x8C4C +#define 
MT6332_FGADC_CON19 0x8C4E +#define MT6332_FGADC_CON20 0x8C50 +#define MT6332_FGADC_CON21 0x8C52 +#define MT6332_FGADC_CON22 0x8C54 +#define MT6332_OTP_CON0 0x8C56 +#define MT6332_OTP_CON1 0x8C58 +#define MT6332_OTP_CON2 0x8C5A +#define MT6332_OTP_CON3 0x8C5C +#define MT6332_OTP_CON4 0x8C5E +#define MT6332_OTP_CON5 0x8C60 +#define MT6332_OTP_CON6 0x8C62 +#define MT6332_OTP_CON7 0x8C64 +#define MT6332_OTP_CON8 0x8C66 +#define MT6332_OTP_CON9 0x8C68 +#define MT6332_OTP_CON10 0x8C6A +#define MT6332_OTP_CON11 0x8C6C +#define MT6332_OTP_CON12 0x8C6E +#define MT6332_OTP_CON13 0x8C70 +#define MT6332_OTP_CON14 0x8C72 +#define MT6332_OTP_DOUT_0_15 0x8C74 +#define MT6332_OTP_DOUT_16_31 0x8C76 +#define MT6332_OTP_DOUT_32_47 0x8C78 +#define MT6332_OTP_DOUT_48_63 0x8C7A +#define MT6332_OTP_DOUT_64_79 0x8C7C +#define MT6332_OTP_DOUT_80_95 0x8C7E +#define MT6332_OTP_DOUT_96_111 0x8C80 +#define MT6332_OTP_DOUT_112_127 0x8C82 +#define MT6332_OTP_DOUT_128_143 0x8C84 +#define MT6332_OTP_DOUT_144_159 0x8C86 +#define MT6332_OTP_DOUT_160_175 0x8C88 +#define MT6332_OTP_DOUT_176_191 0x8C8A +#define MT6332_OTP_DOUT_192_207 0x8C8C +#define MT6332_OTP_DOUT_208_223 0x8C8E +#define MT6332_OTP_DOUT_224_239 0x8C90 +#define MT6332_OTP_DOUT_240_255 0x8C92 +#define MT6332_OTP_VAL_0_15 0x8C94 +#define MT6332_OTP_VAL_16_31 0x8C96 +#define MT6332_OTP_VAL_32_47 0x8C98 +#define MT6332_OTP_VAL_48_63 0x8C9A +#define MT6332_OTP_VAL_64_79 0x8C9C +#define MT6332_OTP_VAL_80_95 0x8C9E +#define MT6332_OTP_VAL_96_111 0x8CA0 +#define MT6332_OTP_VAL_112_127 0x8CA2 +#define MT6332_OTP_VAL_128_143 0x8CA4 +#define MT6332_OTP_VAL_144_159 0x8CA6 +#define MT6332_OTP_VAL_160_175 0x8CA8 +#define MT6332_OTP_VAL_176_191 0x8CAA +#define MT6332_OTP_VAL_192_207 0x8CAC +#define MT6332_OTP_VAL_208_223 0x8CAE +#define MT6332_OTP_VAL_224_239 0x8CB0 +#define MT6332_OTP_VAL_240_255 0x8CB2 +#define MT6332_LDO_CON0 0x8CB4 +#define MT6332_LDO_CON1 0x8CB6 +#define MT6332_LDO_CON2 0x8CB8 +#define MT6332_LDO_CON3 0x8CBA +#define MT6332_LDO_CON5 0x8CBC +#define MT6332_LDO_CON6 0x8CBE +#define MT6332_LDO_CON7 0x8CC0 +#define MT6332_LDO_CON8 0x8CC2 +#define MT6332_LDO_CON9 0x8CC4 +#define MT6332_LDO_CON10 0x8CC6 +#define MT6332_LDO_CON11 0x8CC8 +#define MT6332_LDO_CON12 0x8CCA +#define MT6332_LDO_CON13 0x8CCC +#define MT6332_FQMTR_CON0 0x8CCE +#define MT6332_FQMTR_CON1 0x8CD0 +#define MT6332_FQMTR_CON2 0x8CD2 +#define MT6332_IWLED_CON0 0x8CD4 +#define MT6332_IWLED_DEG 0x8CD6 +#define MT6332_IWLED_STATUS 0x8CD8 +#define MT6332_IWLED_EN_CTRL 0x8CDA +#define MT6332_IWLED_CON1 0x8CDC +#define MT6332_IWLED_CON2 0x8CDE +#define MT6332_IWLED_TRIM0 0x8CE0 +#define MT6332_IWLED_TRIM1 0x8CE2 +#define MT6332_IWLED_CON3 0x8CE4 +#define MT6332_IWLED_CON4 0x8CE6 +#define MT6332_IWLED_CON5 0x8CE8 +#define MT6332_IWLED_CON6 0x8CEA +#define MT6332_IWLED_CON7 0x8CEC +#define MT6332_IWLED_CON8 0x8CEE +#define MT6332_IWLED_CON9 0x8CF0 +#define MT6332_SPK_CON0 0x8CF2 +#define MT6332_SPK_CON1 0x8CF4 +#define MT6332_SPK_CON2 0x8CF6 +#define MT6332_SPK_CON3 0x8CF8 +#define MT6332_SPK_CON4 0x8CFA +#define MT6332_SPK_CON5 0x8CFC +#define MT6332_SPK_CON6 0x8CFE +#define MT6332_SPK_CON7 0x8D00 +#define MT6332_SPK_CON8 0x8D02 +#define MT6332_SPK_CON9 0x8D04 +#define MT6332_SPK_CON10 0x8D06 +#define MT6332_SPK_CON11 0x8D08 +#define MT6332_SPK_CON12 0x8D0A +#define MT6332_SPK_CON13 0x8D0C +#define MT6332_SPK_CON14 0x8D0E +#define MT6332_SPK_CON15 0x8D10 +#define MT6332_SPK_CON16 0x8D12 +#define MT6332_TESTI_CON0 0x8D14 +#define MT6332_TESTI_CON1 0x8D16 +#define MT6332_TESTI_CON2 0x8D18 +#define 
MT6332_TESTI_CON3 0x8D1A +#define MT6332_TESTI_CON4 0x8D1C +#define MT6332_TESTI_CON5 0x8D1E +#define MT6332_TESTI_CON6 0x8D20 +#define MT6332_TESTI_MUX_CON0 0x8D22 +#define MT6332_TESTI_MUX_CON1 0x8D24 +#define MT6332_TESTI_MUX_CON2 0x8D26 +#define MT6332_TESTI_MUX_CON3 0x8D28 +#define MT6332_TESTI_MUX_CON4 0x8D2A +#define MT6332_TESTI_MUX_CON5 0x8D2C +#define MT6332_TESTI_MUX_CON6 0x8D2E +#define MT6332_TESTO_CON0 0x8D30 +#define MT6332_TESTO_CON1 0x8D32 +#define MT6332_TEST_OMUX_CON0 0x8D34 +#define MT6332_TEST_OMUX_CON1 0x8D36 +#define MT6332_DEBUG_CON0 0x8D38 +#define MT6332_DEBUG_CON1 0x8D3A +#define MT6332_DEBUG_CON2 0x8D3C +#define MT6332_FGADC_CON23 0x8D3E +#define MT6332_FGADC_CON24 0x8D40 +#define MT6332_FGADC_CON25 0x8D42 +#define MT6332_TOP_RST_STATUS 0x8D44 +#define MT6332_TOP_RST_STATUS_SET 0x8D46 +#define MT6332_TOP_RST_STATUS_CLR 0x8D48 +#define MT6332_VDVFS2_CON28 0x8D4A + +#endif /* __MFD_MT6332_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6357/core.h b/include/linux/mfd/mt6357/core.h new file mode 100644 index 000000000000..2441611264fd --- /dev/null +++ b/include/linux/mfd/mt6357/core.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 BayLibre, SAS + * Author: Fabien Parent <fparent@baylibre.com> + */ + +#ifndef __MFD_MT6357_CORE_H__ +#define __MFD_MT6357_CORE_H__ + +enum mt6357_irq_top_status_shift { + MT6357_BUCK_TOP = 0, + MT6357_LDO_TOP, + MT6357_PSC_TOP, + MT6357_SCK_TOP, + MT6357_BM_TOP, + MT6357_HK_TOP, + MT6357_XPP_TOP, + MT6357_AUD_TOP, + MT6357_MISC_TOP, +}; + +enum mt6357_irq_numbers { + MT6357_IRQ_VPROC_OC = 0, + MT6357_IRQ_VCORE_OC, + MT6357_IRQ_VMODEM_OC, + MT6357_IRQ_VS1_OC, + MT6357_IRQ_VPA_OC, + MT6357_IRQ_VCORE_PREOC, + MT6357_IRQ_VFE28_OC = 16, + MT6357_IRQ_VXO22_OC, + MT6357_IRQ_VRF18_OC, + MT6357_IRQ_VRF12_OC, + MT6357_IRQ_VEFUSE_OC, + MT6357_IRQ_VCN33_OC, + MT6357_IRQ_VCN28_OC, + MT6357_IRQ_VCN18_OC, + MT6357_IRQ_VCAMA_OC, + MT6357_IRQ_VCAMD_OC, + MT6357_IRQ_VCAMIO_OC, + MT6357_IRQ_VLDO28_OC, + MT6357_IRQ_VUSB33_OC, + MT6357_IRQ_VAUX18_OC, + MT6357_IRQ_VAUD28_OC, + MT6357_IRQ_VIO28_OC, + MT6357_IRQ_VIO18_OC, + MT6357_IRQ_VSRAM_PROC_OC, + MT6357_IRQ_VSRAM_OTHERS_OC, + MT6357_IRQ_VIBR_OC, + MT6357_IRQ_VDRAM_OC, + MT6357_IRQ_VMC_OC, + MT6357_IRQ_VMCH_OC, + MT6357_IRQ_VEMC_OC, + MT6357_IRQ_VSIM1_OC, + MT6357_IRQ_VSIM2_OC, + MT6357_IRQ_PWRKEY = 48, + MT6357_IRQ_HOMEKEY, + MT6357_IRQ_PWRKEY_R, + MT6357_IRQ_HOMEKEY_R, + MT6357_IRQ_NI_LBAT_INT, + MT6357_IRQ_CHRDET, + MT6357_IRQ_CHRDET_EDGE, + MT6357_IRQ_VCDT_HV_DET, + MT6357_IRQ_WATCHDOG, + MT6357_IRQ_VBATON_UNDET, + MT6357_IRQ_BVALID_DET, + MT6357_IRQ_OV, + MT6357_IRQ_RTC = 64, + MT6357_IRQ_FG_BAT0_H = 80, + MT6357_IRQ_FG_BAT0_L, + MT6357_IRQ_FG_CUR_H, + MT6357_IRQ_FG_CUR_L, + MT6357_IRQ_FG_ZCV, + MT6357_IRQ_BATON_LV = 96, + MT6357_IRQ_BATON_HT, + MT6357_IRQ_BAT_H = 112, + MT6357_IRQ_BAT_L, + MT6357_IRQ_AUXADC_IMP, + MT6357_IRQ_NAG_C_DLTV, + MT6357_IRQ_AUDIO = 128, + MT6357_IRQ_ACCDET = 133, + MT6357_IRQ_ACCDET_EINT0, + MT6357_IRQ_ACCDET_EINT1, + MT6357_IRQ_SPI_CMD_ALERT = 144, + MT6357_IRQ_NR, +}; + +#define MT6357_IRQ_BUCK_BASE MT6357_IRQ_VPROC_OC +#define MT6357_IRQ_LDO_BASE MT6357_IRQ_VFE28_OC +#define MT6357_IRQ_PSC_BASE MT6357_IRQ_PWRKEY +#define MT6357_IRQ_SCK_BASE MT6357_IRQ_RTC +#define MT6357_IRQ_BM_BASE MT6357_IRQ_FG_BAT0_H +#define MT6357_IRQ_HK_BASE MT6357_IRQ_BAT_H +#define MT6357_IRQ_AUD_BASE MT6357_IRQ_AUDIO +#define MT6357_IRQ_MISC_BASE MT6357_IRQ_SPI_CMD_ALERT + +#define MT6357_IRQ_BUCK_BITS (MT6357_IRQ_VCORE_PREOC - MT6357_IRQ_BUCK_BASE + 
1) +#define MT6357_IRQ_LDO_BITS (MT6357_IRQ_VSIM2_OC - MT6357_IRQ_LDO_BASE + 1) +#define MT6357_IRQ_PSC_BITS (MT6357_IRQ_VCDT_HV_DET - MT6357_IRQ_PSC_BASE + 1) +#define MT6357_IRQ_SCK_BITS (MT6357_IRQ_RTC - MT6357_IRQ_SCK_BASE + 1) +#define MT6357_IRQ_BM_BITS (MT6357_IRQ_BATON_HT - MT6357_IRQ_BM_BASE + 1) +#define MT6357_IRQ_HK_BITS (MT6357_IRQ_NAG_C_DLTV - MT6357_IRQ_HK_BASE + 1) +#define MT6357_IRQ_AUD_BITS (MT6357_IRQ_ACCDET_EINT1 - MT6357_IRQ_AUD_BASE + 1) +#define MT6357_IRQ_MISC_BITS \ + (MT6357_IRQ_SPI_CMD_ALERT - MT6357_IRQ_MISC_BASE + 1) + +#define MT6357_TOP_GEN(sp) \ +{ \ + .hwirq_base = MT6357_IRQ_##sp##_BASE, \ + .num_int_regs = \ + ((MT6357_IRQ_##sp##_BITS - 1) / \ + MTK_PMIC_REG_WIDTH) + 1, \ + .en_reg = MT6357_##sp##_TOP_INT_CON0, \ + .en_reg_shift = 0x6, \ + .sta_reg = MT6357_##sp##_TOP_INT_STATUS0, \ + .sta_reg_shift = 0x2, \ + .top_offset = MT6357_##sp##_TOP, \ +} + +#endif /* __MFD_MT6357_CORE_H__ */ diff --git a/include/linux/mfd/mt6357/registers.h b/include/linux/mfd/mt6357/registers.h new file mode 100644 index 000000000000..e24af83b618d --- /dev/null +++ b/include/linux/mfd/mt6357/registers.h @@ -0,0 +1,1574 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 MediaTek Inc. + */ + +#ifndef __MFD_MT6357_REGISTERS_H__ +#define __MFD_MT6357_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6357_TOP0_ID 0x0 +#define MT6357_TOP0_REV0 0x2 +#define MT6357_TOP0_DSN_DBI 0x4 +#define MT6357_TOP0_DSN_DXI 0x6 +#define MT6357_HWCID 0x8 +#define MT6357_SWCID 0xa +#define MT6357_PONSTS 0xc +#define MT6357_POFFSTS 0xe +#define MT6357_PSTSCTL 0x10 +#define MT6357_PG_DEB_STS0 0x12 +#define MT6357_PG_SDN_STS0 0x14 +#define MT6357_OC_SDN_STS0 0x16 +#define MT6357_THERMALSTATUS 0x18 +#define MT6357_TOP_CON 0x1a +#define MT6357_TEST_OUT 0x1c +#define MT6357_TEST_CON0 0x1e +#define MT6357_TEST_CON1 0x20 +#define MT6357_TESTMODE_SW 0x22 +#define MT6357_TOPSTATUS 0x24 +#define MT6357_TDSEL_CON 0x26 +#define MT6357_RDSEL_CON 0x28 +#define MT6357_SMT_CON0 0x2a +#define MT6357_SMT_CON1 0x2c +#define MT6357_TOP_RSV0 0x2e +#define MT6357_TOP_RSV1 0x30 +#define MT6357_DRV_CON0 0x32 +#define MT6357_DRV_CON1 0x34 +#define MT6357_DRV_CON2 0x36 +#define MT6357_DRV_CON3 0x38 +#define MT6357_FILTER_CON0 0x3a +#define MT6357_FILTER_CON1 0x3c +#define MT6357_FILTER_CON2 0x3e +#define MT6357_FILTER_CON3 0x40 +#define MT6357_TOP_STATUS 0x42 +#define MT6357_TOP_STATUS_SET 0x44 +#define MT6357_TOP_STATUS_CLR 0x46 +#define MT6357_TOP_TRAP 0x48 +#define MT6357_TOP1_ID 0x80 +#define MT6357_TOP1_REV0 0x82 +#define MT6357_TOP1_DSN_DBI 0x84 +#define MT6357_TOP1_DSN_DXI 0x86 +#define MT6357_GPIO_DIR0 0x88 +#define MT6357_GPIO_DIR0_SET 0x8a +#define MT6357_GPIO_DIR0_CLR 0x8c +#define MT6357_GPIO_PULLEN0 0x8e +#define MT6357_GPIO_PULLEN0_SET 0x90 +#define MT6357_GPIO_PULLEN0_CLR 0x92 +#define MT6357_GPIO_PULLSEL0 0x94 +#define MT6357_GPIO_PULLSEL0_SET 0x96 +#define MT6357_GPIO_PULLSEL0_CLR 0x98 +#define MT6357_GPIO_DINV0 0x9a +#define MT6357_GPIO_DINV0_SET 0x9c +#define MT6357_GPIO_DINV0_CLR 0x9e +#define MT6357_GPIO_DOUT0 0xa0 +#define MT6357_GPIO_DOUT0_SET 0xa2 +#define MT6357_GPIO_DOUT0_CLR 0xa4 +#define MT6357_GPIO_PI0 0xa6 +#define MT6357_GPIO_POE0 0xa8 +#define MT6357_GPIO_MODE0 0xaa +#define MT6357_GPIO_MODE0_SET 0xac +#define MT6357_GPIO_MODE0_CLR 0xae +#define MT6357_GPIO_MODE1 0xb0 +#define MT6357_GPIO_MODE1_SET 0xb2 +#define MT6357_GPIO_MODE1_CLR 0xb4 +#define MT6357_GPIO_MODE2 0xb6 +#define MT6357_GPIO_MODE2_SET 0xb8 +#define MT6357_GPIO_MODE2_CLR 0xba +#define MT6357_GPIO_MODE3 0xbc 
+#define MT6357_GPIO_MODE3_SET 0xbe +#define MT6357_GPIO_MODE3_CLR 0xc0 +#define MT6357_GPIO_RSV 0xc2 +#define MT6357_TOP2_ID 0x100 +#define MT6357_TOP2_REV0 0x102 +#define MT6357_TOP2_DSN_DBI 0x104 +#define MT6357_TOP2_DSN_DXI 0x106 +#define MT6357_TOP_PAM0 0x108 +#define MT6357_TOP_PAM1 0x10a +#define MT6357_TOP_CKPDN_CON0 0x10c +#define MT6357_TOP_CKPDN_CON0_SET 0x10e +#define MT6357_TOP_CKPDN_CON0_CLR 0x110 +#define MT6357_TOP_CKPDN_CON1 0x112 +#define MT6357_TOP_CKPDN_CON1_SET 0x114 +#define MT6357_TOP_CKPDN_CON1_CLR 0x116 +#define MT6357_TOP_CKSEL_CON0 0x118 +#define MT6357_TOP_CKSEL_CON0_SET 0x11a +#define MT6357_TOP_CKSEL_CON0_CLR 0x11c +#define MT6357_TOP_CKSEL_CON1 0x11e +#define MT6357_TOP_CKSEL_CON1_SET 0x120 +#define MT6357_TOP_CKSEL_CON1_CLR 0x122 +#define MT6357_TOP_CKDIVSEL_CON0 0x124 +#define MT6357_TOP_CKDIVSEL_CON0_SET 0x126 +#define MT6357_TOP_CKDIVSEL_CON0_CLR 0x128 +#define MT6357_TOP_CKHWEN_CON0 0x12a +#define MT6357_TOP_CKHWEN_CON0_SET 0x12c +#define MT6357_TOP_CKHWEN_CON0_CLR 0x12e +#define MT6357_TOP_CKTST_CON0 0x130 +#define MT6357_TOP_CKTST_CON1 0x132 +#define MT6357_TOP_CLK_CON0 0x134 +#define MT6357_TOP_CLK_CON0_SET 0x136 +#define MT6357_TOP_CLK_CON0_CLR 0x138 +#define MT6357_TOP_DCM_CON0 0x13a +#define MT6357_TOP_HANDOVER_DEBUG0 0x13c +#define MT6357_TOP_RST_CON0 0x13e +#define MT6357_TOP_RST_CON0_SET 0x140 +#define MT6357_TOP_RST_CON0_CLR 0x142 +#define MT6357_TOP_RST_CON1 0x144 +#define MT6357_TOP_RST_CON1_SET 0x146 +#define MT6357_TOP_RST_CON1_CLR 0x148 +#define MT6357_TOP_RST_CON2 0x14a +#define MT6357_TOP_RST_MISC 0x14c +#define MT6357_TOP_RST_MISC_SET 0x14e +#define MT6357_TOP_RST_MISC_CLR 0x150 +#define MT6357_TOP_RST_STATUS 0x152 +#define MT6357_TOP_RST_STATUS_SET 0x154 +#define MT6357_TOP_RST_STATUS_CLR 0x156 +#define MT6357_TOP2_ELR_NUM 0x158 +#define MT6357_TOP2_ELR0 0x15a +#define MT6357_TOP2_ELR1 0x15c +#define MT6357_TOP3_ID 0x180 +#define MT6357_TOP3_REV0 0x182 +#define MT6357_TOP3_DSN_DBI 0x184 +#define MT6357_TOP3_DSN_DXI 0x186 +#define MT6357_MISC_TOP_INT_CON0 0x188 +#define MT6357_MISC_TOP_INT_CON0_SET 0x18a +#define MT6357_MISC_TOP_INT_CON0_CLR 0x18c +#define MT6357_MISC_TOP_INT_MASK_CON0 0x18e +#define MT6357_MISC_TOP_INT_MASK_CON0_SET 0x190 +#define MT6357_MISC_TOP_INT_MASK_CON0_CLR 0x192 +#define MT6357_MISC_TOP_INT_STATUS0 0x194 +#define MT6357_MISC_TOP_INT_RAW_STATUS0 0x196 +#define MT6357_TOP_INT_MASK_CON0 0x198 +#define MT6357_TOP_INT_MASK_CON0_SET 0x19a +#define MT6357_TOP_INT_MASK_CON0_CLR 0x19c +#define MT6357_TOP_INT_STATUS0 0x19e +#define MT6357_TOP_INT_RAW_STATUS0 0x1a0 +#define MT6357_TOP_INT_CON0 0x1a2 +#define MT6357_PLT0_ID 0x380 +#define MT6357_PLT0_REV0 0x382 +#define MT6357_PLT0_REV1 0x384 +#define MT6357_PLT0_DSN_DXI 0x386 +#define MT6357_FQMTR_CON0 0x388 +#define MT6357_FQMTR_CON1 0x38a +#define MT6357_FQMTR_CON2 0x38c +#define MT6357_TOP_CLK_TRIM 0x38e +#define MT6357_OTP_CON0 0x390 +#define MT6357_OTP_CON1 0x392 +#define MT6357_OTP_CON2 0x394 +#define MT6357_OTP_CON3 0x396 +#define MT6357_OTP_CON4 0x398 +#define MT6357_OTP_CON5 0x39a +#define MT6357_OTP_CON6 0x39c +#define MT6357_OTP_CON7 0x39e +#define MT6357_OTP_CON8 0x3a0 +#define MT6357_OTP_CON9 0x3a2 +#define MT6357_OTP_CON10 0x3a4 +#define MT6357_OTP_CON11 0x3a6 +#define MT6357_OTP_CON12 0x3a8 +#define MT6357_OTP_CON13 0x3aa +#define MT6357_OTP_CON14 0x3ac +#define MT6357_TOP_TMA_KEY 0x3ae +#define MT6357_TOP_MDB_CONF0 0x3b0 +#define MT6357_TOP_MDB_CONF1 0x3b2 +#define MT6357_TOP_MDB_CONF2 0x3b4 +#define MT6357_PLT0_ELR_NUM 0x3b6 +#define MT6357_PLT0_ELR0 
0x3b8 +#define MT6357_PLT0_ELR1 0x3ba +#define MT6357_SPISLV_ID 0x400 +#define MT6357_SPISLV_REV0 0x402 +#define MT6357_SPISLV_REV1 0x404 +#define MT6357_SPISLV_DSN_DXI 0x406 +#define MT6357_RG_SPI_CON0 0x408 +#define MT6357_DEW_DIO_EN 0x40a +#define MT6357_DEW_READ_TEST 0x40c +#define MT6357_DEW_WRITE_TEST 0x40e +#define MT6357_DEW_CRC_SWRST 0x410 +#define MT6357_DEW_CRC_EN 0x412 +#define MT6357_DEW_CRC_VAL 0x414 +#define MT6357_DEW_DBG_MON_SEL 0x416 +#define MT6357_DEW_CIPHER_KEY_SEL 0x418 +#define MT6357_DEW_CIPHER_IV_SEL 0x41a +#define MT6357_DEW_CIPHER_EN 0x41c +#define MT6357_DEW_CIPHER_RDY 0x41e +#define MT6357_DEW_CIPHER_MODE 0x420 +#define MT6357_DEW_CIPHER_SWRST 0x422 +#define MT6357_DEW_RDDMY_NO 0x424 +#define MT6357_INT_TYPE_CON0 0x426 +#define MT6357_INT_TYPE_CON0_SET 0x428 +#define MT6357_INT_TYPE_CON0_CLR 0x42a +#define MT6357_INT_STA 0x42c +#define MT6357_RG_SPI_CON1 0x42e +#define MT6357_RG_SPI_CON2 0x430 +#define MT6357_RG_SPI_CON3 0x432 +#define MT6357_RG_SPI_CON4 0x434 +#define MT6357_RG_SPI_CON5 0x436 +#define MT6357_RG_SPI_CON6 0x438 +#define MT6357_RG_SPI_CON7 0x43a +#define MT6357_RG_SPI_CON8 0x43c +#define MT6357_RG_SPI_CON9 0x43e +#define MT6357_RG_SPI_CON10 0x440 +#define MT6357_RG_SPI_CON11 0x442 +#define MT6357_RG_SPI_CON12 0x444 +#define MT6357_RG_SPI_CON13 0x446 +#define MT6357_TOP_SPI_CON0 0x448 +#define MT6357_TOP_SPI_CON1 0x44a +#define MT6357_SCK_TOP_DSN_ID 0x500 +#define MT6357_SCK_TOP_DSN_REV0 0x502 +#define MT6357_SCK_TOP_DBI 0x504 +#define MT6357_SCK_TOP_DXI 0x506 +#define MT6357_SCK_TOP_TPM0 0x508 +#define MT6357_SCK_TOP_TPM1 0x50a +#define MT6357_SCK_TOP_CON0 0x50c +#define MT6357_SCK_TOP_CON1 0x50e +#define MT6357_SCK_TOP_TEST_OUT 0x510 +#define MT6357_SCK_TOP_TEST_CON0 0x512 +#define MT6357_SCK_TOP_CKPDN_CON0 0x514 +#define MT6357_SCK_TOP_CKPDN_CON0_SET 0x516 +#define MT6357_SCK_TOP_CKPDN_CON0_CLR 0x518 +#define MT6357_SCK_TOP_CKHWEN_CON0 0x51a +#define MT6357_SCK_TOP_CKHWEN_CON0_SET 0x51c +#define MT6357_SCK_TOP_CKHWEN_CON0_CLR 0x51e +#define MT6357_SCK_TOP_CKTST_CON 0x520 +#define MT6357_SCK_TOP_RST_CON0 0x522 +#define MT6357_SCK_TOP_RST_CON0_SET 0x524 +#define MT6357_SCK_TOP_RST_CON0_CLR 0x526 +#define MT6357_SCK_TOP_INT_CON0 0x528 +#define MT6357_SCK_TOP_INT_CON0_SET 0x52a +#define MT6357_SCK_TOP_INT_CON0_CLR 0x52c +#define MT6357_SCK_TOP_INT_MASK_CON0 0x52e +#define MT6357_SCK_TOP_INT_MASK_CON0_SET 0x530 +#define MT6357_SCK_TOP_INT_MASK_CON0_CLR 0x532 +#define MT6357_SCK_TOP_INT_STATUS0 0x534 +#define MT6357_SCK_TOP_INT_RAW_STATUS0 0x536 +#define MT6357_SCK_TOP_INT_MISC_CON 0x538 +#define MT6357_EOSC_CALI_CON0 0x53a +#define MT6357_EOSC_CALI_CON1 0x53c +#define MT6357_RTC_MIX_CON0 0x53e +#define MT6357_RTC_MIX_CON1 0x540 +#define MT6357_RTC_MIX_CON2 0x542 +#define MT6357_RTC_DSN_ID 0x580 +#define MT6357_RTC_DSN_REV0 0x582 +#define MT6357_RTC_DBI 0x584 +#define MT6357_RTC_DXI 0x586 +#define MT6357_RTC_BBPU 0x588 +#define MT6357_RTC_IRQ_STA 0x58a +#define MT6357_RTC_IRQ_EN 0x58c +#define MT6357_RTC_CII_EN 0x58e +#define MT6357_RTC_AL_MASK 0x590 +#define MT6357_RTC_TC_SEC 0x592 +#define MT6357_RTC_TC_MIN 0x594 +#define MT6357_RTC_TC_HOU 0x596 +#define MT6357_RTC_TC_DOM 0x598 +#define MT6357_RTC_TC_DOW 0x59a +#define MT6357_RTC_TC_MTH 0x59c +#define MT6357_RTC_TC_YEA 0x59e +#define MT6357_RTC_AL_SEC 0x5a0 +#define MT6357_RTC_AL_MIN 0x5a2 +#define MT6357_RTC_AL_HOU 0x5a4 +#define MT6357_RTC_AL_DOM 0x5a6 +#define MT6357_RTC_AL_DOW 0x5a8 +#define MT6357_RTC_AL_MTH 0x5aa +#define MT6357_RTC_AL_YEA 0x5ac +#define MT6357_RTC_OSC32CON 0x5ae +#define 
MT6357_RTC_POWERKEY1 0x5b0 +#define MT6357_RTC_POWERKEY2 0x5b2 +#define MT6357_RTC_PDN1 0x5b4 +#define MT6357_RTC_PDN2 0x5b6 +#define MT6357_RTC_SPAR0 0x5b8 +#define MT6357_RTC_SPAR1 0x5ba +#define MT6357_RTC_PROT 0x5bc +#define MT6357_RTC_DIFF 0x5be +#define MT6357_RTC_CALI 0x5c0 +#define MT6357_RTC_WRTGR 0x5c2 +#define MT6357_RTC_CON 0x5c4 +#define MT6357_RTC_SEC_CTRL 0x5c6 +#define MT6357_RTC_INT_CNT 0x5c8 +#define MT6357_RTC_SEC_DAT0 0x5ca +#define MT6357_RTC_SEC_DAT1 0x5cc +#define MT6357_RTC_SEC_DAT2 0x5ce +#define MT6357_RTC_SEC_DSN_ID 0x600 +#define MT6357_RTC_SEC_DSN_REV0 0x602 +#define MT6357_RTC_SEC_DBI 0x604 +#define MT6357_RTC_SEC_DXI 0x606 +#define MT6357_RTC_TC_SEC_SEC 0x608 +#define MT6357_RTC_TC_MIN_SEC 0x60a +#define MT6357_RTC_TC_HOU_SEC 0x60c +#define MT6357_RTC_TC_DOM_SEC 0x60e +#define MT6357_RTC_TC_DOW_SEC 0x610 +#define MT6357_RTC_TC_MTH_SEC 0x612 +#define MT6357_RTC_TC_YEA_SEC 0x614 +#define MT6357_RTC_SEC_CK_PDN 0x616 +#define MT6357_RTC_SEC_WRTGR 0x618 +#define MT6357_DCXO_DSN_ID 0x780 +#define MT6357_DCXO_DSN_REV0 0x782 +#define MT6357_DCXO_DSN_DBI 0x784 +#define MT6357_DCXO_DSN_DXI 0x786 +#define MT6357_DCXO_CW00 0x788 +#define MT6357_DCXO_CW00_SET 0x78a +#define MT6357_DCXO_CW00_CLR 0x78c +#define MT6357_DCXO_CW01 0x78e +#define MT6357_DCXO_CW02 0x790 +#define MT6357_DCXO_CW03 0x792 +#define MT6357_DCXO_CW04 0x794 +#define MT6357_DCXO_CW05 0x796 +#define MT6357_DCXO_CW06 0x798 +#define MT6357_DCXO_CW07 0x79a +#define MT6357_DCXO_CW08 0x79c +#define MT6357_DCXO_CW09 0x79e +#define MT6357_DCXO_CW10 0x7a0 +#define MT6357_DCXO_CW11 0x7a2 +#define MT6357_DCXO_CW11_SET 0x7a4 +#define MT6357_DCXO_CW11_CLR 0x7a6 +#define MT6357_DCXO_CW12 0x7a8 +#define MT6357_DCXO_CW13 0x7aa +#define MT6357_DCXO_CW14 0x7ac +#define MT6357_DCXO_CW15 0x7ae +#define MT6357_DCXO_CW16 0x7b0 +#define MT6357_DCXO_CW17 0x7b2 +#define MT6357_DCXO_CW18 0x7b4 +#define MT6357_DCXO_CW19 0x7b6 +#define MT6357_DCXO_CW20 0x7b8 +#define MT6357_DCXO_CW21 0x7ba +#define MT6357_DCXO_CW22 0x7bc +#define MT6357_DCXO_ELR_NUM 0x7be +#define MT6357_DCXO_ELR0 0x7c0 +#define MT6357_PSC_TOP_ID 0x900 +#define MT6357_PSC_TOP_REV0 0x902 +#define MT6357_PSC_TOP_DBI 0x904 +#define MT6357_PSC_TOP_DXI 0x906 +#define MT6357_PSC_TPM0 0x908 +#define MT6357_PSC_TPM1 0x90a +#define MT6357_PSC_TOP_RSTCTL_0 0x90c +#define MT6357_PSC_TOP_INT_CON0 0x90e +#define MT6357_PSC_TOP_INT_CON0_SET 0x910 +#define MT6357_PSC_TOP_INT_CON0_CLR 0x912 +#define MT6357_PSC_TOP_INT_MASK_CON0 0x914 +#define MT6357_PSC_TOP_INT_MASK_CON0_SET 0x916 +#define MT6357_PSC_TOP_INT_MASK_CON0_CLR 0x918 +#define MT6357_PSC_TOP_INT_STATUS0 0x91a +#define MT6357_PSC_TOP_INT_RAW_STATUS0 0x91c +#define MT6357_PSC_TOP_INT_MISC_CON 0x91e +#define MT6357_PSC_TOP_INT_MISC_CON_SET 0x920 +#define MT6357_PSC_TOP_INT_MISC_CON_CLR 0x922 +#define MT6357_PSC_TOP_MON_CTL 0x924 +#define MT6357_STRUP_ID 0x980 +#define MT6357_STRUP_REV0 0x982 +#define MT6357_STRUP_DBI 0x984 +#define MT6357_STRUP_DXI 0x986 +#define MT6357_STRUP_ANA_CON0 0x988 +#define MT6357_STRUP_ANA_CON1 0x98a +#define MT6357_STRUP_ANA_CON2 0x98c +#define MT6357_STRUP_ELR_NUM 0x98e +#define MT6357_STRUP_ELR_0 0x990 +#define MT6357_PSEQ_ID 0xa00 +#define MT6357_PSEQ_REV0 0xa02 +#define MT6357_PSEQ_DBI 0xa04 +#define MT6357_PSEQ_DXI 0xa06 +#define MT6357_PPCCTL0 0xa08 +#define MT6357_PPCCTL1 0xa0a +#define MT6357_PPCCTL2 0xa0c +#define MT6357_PPCCFG0 0xa0e +#define MT6357_PPCTST0 0xa10 +#define MT6357_PORFLAG 0xa12 +#define MT6357_STRUP_CON0 0xa14 +#define MT6357_STRUP_CON1 0xa16 +#define MT6357_STRUP_CON2 
0xa18 +#define MT6357_STRUP_CON3 0xa1a +#define MT6357_STRUP_CON4 0xa1c +#define MT6357_STRUP_CON5 0xa1e +#define MT6357_STRUP_CON6 0xa20 +#define MT6357_STRUP_CON7 0xa22 +#define MT6357_CPSCFG0 0xa24 +#define MT6357_STRUP_CON9 0xa26 +#define MT6357_STRUP_CON10 0xa28 +#define MT6357_STRUP_CON11 0xa2a +#define MT6357_STRUP_CON12 0xa2c +#define MT6357_STRUP_CON13 0xa2e +#define MT6357_STRUP_CON14 0xa30 +#define MT6357_STRUP_CON15 0xa32 +#define MT6357_STRUP_CON16 0xa34 +#define MT6357_STRUP_CON19 0xa36 +#define MT6357_PSEQ_ELR_NUM 0xa38 +#define MT6357_PSEQ_ELR7 0xa3a +#define MT6357_PSEQ_ELR8 0xa3c +#define MT6357_PCHR_DIG_DSN_ID 0xa80 +#define MT6357_PCHR_DIG_DSN_REV0 0xa82 +#define MT6357_PCHR_DIG_DSN_DBI 0xa84 +#define MT6357_PCHR_DIG_DSN_DXI 0xa86 +#define MT6357_CHR_TOP_CON0 0xa88 +#define MT6357_CHR_TOP_CON1 0xa8a +#define MT6357_CHR_TOP_CON2 0xa8c +#define MT6357_CHR_TOP_CON3 0xa8e +#define MT6357_CHR_TOP_CON4 0xa90 +#define MT6357_CHR_TOP_CON5 0xa92 +#define MT6357_CHR_TOP_CON6 0xa94 +#define MT6357_PCHR_DIG_ELR_NUM 0xa96 +#define MT6357_PCHR_ELR0 0xa98 +#define MT6357_PCHR_ELR1 0xa9a +#define MT6357_PCHR_MACRO_DSN_ID 0xb80 +#define MT6357_PCHR_MACRO_DSN_REV0 0xb82 +#define MT6357_PCHR_MACRO_DSN_DBI 0xb84 +#define MT6357_PCHR_MACRO_DSN_DXI 0xb86 +#define MT6357_CHR_CON0 0xb88 +#define MT6357_CHR_CON1 0xb8a +#define MT6357_CHR_CON2 0xb8c +#define MT6357_CHR_CON3 0xb8e +#define MT6357_CHR_CON4 0xb90 +#define MT6357_CHR_CON5 0xb92 +#define MT6357_CHR_CON6 0xb94 +#define MT6357_CHR_CON7 0xb96 +#define MT6357_CHR_CON8 0xb98 +#define MT6357_CHR_CON9 0xb9a +#define MT6357_BM_TOP_DSN_ID 0xc00 +#define MT6357_BM_TOP_DSN_REV0 0xc02 +#define MT6357_BM_TOP_DBI 0xc04 +#define MT6357_BM_TOP_DXI 0xc06 +#define MT6357_BM_TPM0 0xc08 +#define MT6357_BM_TPM1 0xc0a +#define MT6357_BM_TOP_CKPDN_CON0 0xc0c +#define MT6357_BM_TOP_CKPDN_CON0_SET 0xc0e +#define MT6357_BM_TOP_CKPDN_CON0_CLR 0xc10 +#define MT6357_BM_TOP_CKSEL_CON0 0xc12 +#define MT6357_BM_TOP_CKSEL_CON0_SET 0xc14 +#define MT6357_BM_TOP_CKSEL_CON0_CLR 0xc16 +#define MT6357_BM_TOP_CKTST_CON0 0xc18 +#define MT6357_BM_TOP_RST_CON0 0xc1a +#define MT6357_BM_TOP_RST_CON0_SET 0xc1c +#define MT6357_BM_TOP_RST_CON0_CLR 0xc1e +#define MT6357_BM_TOP_INT_CON0 0xc20 +#define MT6357_BM_TOP_INT_CON0_SET 0xc22 +#define MT6357_BM_TOP_INT_CON0_CLR 0xc24 +#define MT6357_BM_TOP_INT_CON1 0xc26 +#define MT6357_BM_TOP_INT_CON1_SET 0xc28 +#define MT6357_BM_TOP_INT_CON1_CLR 0xc2a +#define MT6357_BM_TOP_INT_MASK_CON0 0xc2c +#define MT6357_BM_TOP_INT_MASK_CON0_SET 0xc2e +#define MT6357_BM_TOP_INT_MASK_CON0_CLR 0xc30 +#define MT6357_BM_TOP_INT_MASK_CON1 0xc32 +#define MT6357_BM_TOP_INT_MASK_CON1_SET 0xc34 +#define MT6357_BM_TOP_INT_MASK_CON1_CLR 0xc36 +#define MT6357_BM_TOP_INT_STATUS0 0xc38 +#define MT6357_BM_TOP_INT_STATUS1 0xc3a +#define MT6357_BM_TOP_INT_RAW_STATUS0 0xc3c +#define MT6357_BM_TOP_INT_RAW_STATUS1 0xc3e +#define MT6357_BM_TOP_INT_MISC_CON 0xc40 +#define MT6357_BM_TOP_DBG_CON 0xc42 +#define MT6357_BM_TOP_RSV0 0xc44 +#define MT6357_FGADC_ANA_DSN_ID 0xc80 +#define MT6357_FGADC_ANA_DSN_REV0 0xc82 +#define MT6357_FGADC_ANA_DSN_DBI 0xc84 +#define MT6357_FGADC_ANA_DSN_DXI 0xc86 +#define MT6357_FGADC_ANA_CON0 0xc88 +#define MT6357_FGADC_ANA_TEST_CON0 0xc8a +#define MT6357_FGADC_ANA_ELR_NUM 0xc8c +#define MT6357_FGADC_ANA_ELR0 0xc8e +#define MT6357_FGADC_ANA_ELR1 0xc90 +#define MT6357_FGADC0_DSN_ID 0xd00 +#define MT6357_FGADC0_DSN_REV0 0xd02 +#define MT6357_FGADC0_DSN_DBI 0xd04 +#define MT6357_FGADC0_DSN_DXI 0xd06 +#define MT6357_FGADC_CON0 0xd08 +#define 
MT6357_FGADC_CON1 0xd0a +#define MT6357_FGADC_CON2 0xd0c +#define MT6357_FGADC_CON3 0xd0e +#define MT6357_FGADC_CON4 0xd10 +#define MT6357_FGADC_CAR_CON0 0xd12 +#define MT6357_FGADC_CAR_CON1 0xd14 +#define MT6357_FGADC_CAR_CON2 0xd16 +#define MT6357_FGADC_CARTH_CON0 0xd18 +#define MT6357_FGADC_CARTH_CON1 0xd1a +#define MT6357_FGADC_CARTH_CON2 0xd1c +#define MT6357_FGADC_CARTH_CON3 0xd1e +#define MT6357_FGADC_NTER_CON0 0xd20 +#define MT6357_FGADC_NTER_CON1 0xd22 +#define MT6357_FGADC_NTER_CON2 0xd24 +#define MT6357_FGADC_SON_CON0 0xd26 +#define MT6357_FGADC_SON_CON1 0xd28 +#define MT6357_FGADC_SON_CON2 0xd2a +#define MT6357_FGADC_SON_CON3 0xd2c +#define MT6357_FGADC_ZCV_CON0 0xd2e +#define MT6357_FGADC_ZCV_CON1 0xd30 +#define MT6357_FGADC_ZCV_CON2 0xd32 +#define MT6357_FGADC_ZCV_CON3 0xd34 +#define MT6357_FGADC_ZCV_CON4 0xd36 +#define MT6357_FGADC_ZCVTH_CON0 0xd38 +#define MT6357_FGADC_ZCVTH_CON1 0xd3a +#define MT6357_FGADC_ZCVTH_CON2 0xd3c +#define MT6357_FGADC1_DSN_ID 0xd80 +#define MT6357_FGADC1_DSN_REV0 0xd82 +#define MT6357_FGADC1_DSN_DBI 0xd84 +#define MT6357_FGADC1_DSN_DXI 0xd86 +#define MT6357_FGADC_R_CON0 0xd88 +#define MT6357_FGADC_CUR_CON0 0xd8a +#define MT6357_FGADC_CUR_CON1 0xd8c +#define MT6357_FGADC_CUR_CON2 0xd8e +#define MT6357_FGADC_CUR_CON3 0xd90 +#define MT6357_FGADC_OFFSET_CON0 0xd92 +#define MT6357_FGADC_OFFSET_CON1 0xd94 +#define MT6357_FGADC_GAIN_CON0 0xd96 +#define MT6357_FGADC_TEST_CON0 0xd98 +#define MT6357_SYSTEM_INFO_CON0 0xd9a +#define MT6357_SYSTEM_INFO_CON1 0xd9c +#define MT6357_SYSTEM_INFO_CON2 0xd9e +#define MT6357_SYSTEM_INFO_CON3 0xda0 +#define MT6357_SYSTEM_INFO_CON4 0xda2 +#define MT6357_BATON_ANA_DSN_ID 0xe00 +#define MT6357_BATON_ANA_DSN_REV0 0xe02 +#define MT6357_BATON_ANA_DSN_DBI 0xe04 +#define MT6357_BATON_ANA_DSN_DXI 0xe06 +#define MT6357_BATON_ANA_CON0 0xe08 +#define MT6357_BATON_ANA_ELR_NUM 0xe0a +#define MT6357_BATON_ANA_ELR0 0xe0c +#define MT6357_HK_TOP_ID 0xf80 +#define MT6357_HK_TOP_REV0 0xf82 +#define MT6357_HK_TOP_DBI 0xf84 +#define MT6357_HK_TOP_DXI 0xf86 +#define MT6357_HK_TPM0 0xf88 +#define MT6357_HK_TPM1 0xf8a +#define MT6357_HK_TOP_CLK_CON0 0xf8c +#define MT6357_HK_TOP_CLK_CON1 0xf8e +#define MT6357_HK_TOP_RST_CON0 0xf90 +#define MT6357_HK_TOP_INT_CON0 0xf92 +#define MT6357_HK_TOP_INT_CON0_SET 0xf94 +#define MT6357_HK_TOP_INT_CON0_CLR 0xf96 +#define MT6357_HK_TOP_INT_MASK_CON0 0xf98 +#define MT6357_HK_TOP_INT_MASK_CON0_SET 0xf9a +#define MT6357_HK_TOP_INT_MASK_CON0_CLR 0xf9c +#define MT6357_HK_TOP_INT_STATUS0 0xf9e +#define MT6357_HK_TOP_INT_RAW_STATUS0 0xfa0 +#define MT6357_HK_TOP_MON_CON0 0xfa2 +#define MT6357_HK_TOP_MON_CON1 0xfa4 +#define MT6357_HK_TOP_MON_CON2 0xfa6 +#define MT6357_AUXADC_DSN_ID 0x1000 +#define MT6357_AUXADC_DSN_REV0 0x1002 +#define MT6357_AUXADC_DSN_DBI 0x1004 +#define MT6357_AUXADC_DSN_DXI 0x1006 +#define MT6357_AUXADC_ANA_CON0 0x1008 +#define MT6357_AUXADC_DIG_1_DSN_ID 0x1080 +#define MT6357_AUXADC_DIG_1_DSN_REV0 0x1082 +#define MT6357_AUXADC_DIG_1_DSN_DBI 0x1084 +#define MT6357_AUXADC_DIG_1_DSN_DXI 0x1086 +#define MT6357_AUXADC_ADC0 0x1088 +#define MT6357_AUXADC_ADC1 0x108a +#define MT6357_AUXADC_ADC2 0x108c +#define MT6357_AUXADC_ADC3 0x108e +#define MT6357_AUXADC_ADC4 0x1090 +#define MT6357_AUXADC_ADC5 0x1092 +#define MT6357_AUXADC_ADC6 0x1094 +#define MT6357_AUXADC_ADC7 0x1096 +#define MT6357_AUXADC_ADC8 0x1098 +#define MT6357_AUXADC_ADC9 0x109a +#define MT6357_AUXADC_ADC10 0x109c +#define MT6357_AUXADC_ADC11 0x109e +#define MT6357_AUXADC_ADC12 0x10a0 +#define MT6357_AUXADC_ADC14 0x10a2 +#define 
MT6357_AUXADC_ADC16 0x10a4 +#define MT6357_AUXADC_ADC17 0x10a6 +#define MT6357_AUXADC_ADC18 0x10a8 +#define MT6357_AUXADC_ADC19 0x10aa +#define MT6357_AUXADC_ADC20 0x10ac +#define MT6357_AUXADC_ADC21 0x10ae +#define MT6357_AUXADC_ADC22 0x10b0 +#define MT6357_AUXADC_ADC23 0x10b2 +#define MT6357_AUXADC_ADC24 0x10b4 +#define MT6357_AUXADC_ADC25 0x10b6 +#define MT6357_AUXADC_ADC26 0x10b8 +#define MT6357_AUXADC_ADC27 0x10ba +#define MT6357_AUXADC_ADC29 0x10bc +#define MT6357_AUXADC_ADC30 0x10be +#define MT6357_AUXADC_ADC31 0x10c0 +#define MT6357_AUXADC_ADC32 0x10c2 +#define MT6357_AUXADC_ADC33 0x10c4 +#define MT6357_AUXADC_ADC34 0x10c6 +#define MT6357_AUXADC_ADC35 0x10c8 +#define MT6357_AUXADC_ADC36 0x10ca +#define MT6357_AUXADC_ADC38 0x10cc +#define MT6357_AUXADC_ADC39 0x10ce +#define MT6357_AUXADC_ADC40 0x10d0 +#define MT6357_AUXADC_ADC41 0x10d2 +#define MT6357_AUXADC_ADC42 0x10d4 +#define MT6357_AUXADC_ADC43 0x10d6 +#define MT6357_AUXADC_ADC46 0x10d8 +#define MT6357_AUXADC_ADC47 0x10da +#define MT6357_AUXADC_DIG_1_ELR_NUM 0x10dc +#define MT6357_AUXADC_DIG_1_ELR0 0x10de +#define MT6357_AUXADC_DIG_1_ELR1 0x10e0 +#define MT6357_AUXADC_DIG_2_DSN_ID 0x1100 +#define MT6357_AUXADC_DIG_2_DSN_REV0 0x1102 +#define MT6357_AUXADC_DIG_2_DSN_DBI 0x1104 +#define MT6357_AUXADC_DIG_2_DSN_DXI 0x1106 +#define MT6357_AUXADC_STA0 0x1108 +#define MT6357_AUXADC_STA1 0x110a +#define MT6357_AUXADC_STA2 0x110c +#define MT6357_AUXADC_RQST0 0x110e +#define MT6357_AUXADC_RQST0_SET 0x1110 +#define MT6357_AUXADC_RQST0_CLR 0x1112 +#define MT6357_AUXADC_RQST2 0x1114 +#define MT6357_AUXADC_RQST2_SET 0x1116 +#define MT6357_AUXADC_RQST2_CLR 0x1118 +#define MT6357_AUXADC_RQST1 0x111a +#define MT6357_AUXADC_RQST1_SET 0x111c +#define MT6357_AUXADC_RQST1_CLR 0x111e +#define MT6357_AUXADC_CON0 0x1120 +#define MT6357_AUXADC_CON0_SET 0x1122 +#define MT6357_AUXADC_CON0_CLR 0x1124 +#define MT6357_AUXADC_CON1 0x1126 +#define MT6357_AUXADC_CON2 0x1128 +#define MT6357_AUXADC_CON3 0x112a +#define MT6357_AUXADC_CON4 0x112c +#define MT6357_AUXADC_CON5 0x112e +#define MT6357_AUXADC_CON6 0x1130 +#define MT6357_AUXADC_CON7 0x1132 +#define MT6357_AUXADC_CON8 0x1134 +#define MT6357_AUXADC_CON9 0x1136 +#define MT6357_AUXADC_CON10 0x1138 +#define MT6357_AUXADC_CON11 0x113a +#define MT6357_AUXADC_CON12 0x113c +#define MT6357_AUXADC_CON13 0x113e +#define MT6357_AUXADC_CON14 0x1140 +#define MT6357_AUXADC_CON15 0x1142 +#define MT6357_AUXADC_CON16 0x1144 +#define MT6357_AUXADC_CON17 0x1146 +#define MT6357_AUXADC_CON18 0x1148 +#define MT6357_AUXADC_CON19 0x114a +#define MT6357_AUXADC_CON20 0x114c +#define MT6357_AUXADC_DIG_3_DSN_ID 0x1180 +#define MT6357_AUXADC_DIG_3_DSN_REV0 0x1182 +#define MT6357_AUXADC_DIG_3_DSN_DBI 0x1184 +#define MT6357_AUXADC_DIG_3_DSN_DXI 0x1186 +#define MT6357_AUXADC_AUTORPT0 0x1188 +#define MT6357_AUXADC_LBAT0 0x118a +#define MT6357_AUXADC_LBAT1 0x118c +#define MT6357_AUXADC_LBAT2 0x118e +#define MT6357_AUXADC_LBAT3 0x1190 +#define MT6357_AUXADC_LBAT4 0x1192 +#define MT6357_AUXADC_LBAT5 0x1194 +#define MT6357_AUXADC_LBAT6 0x1196 +#define MT6357_AUXADC_ACCDET 0x1198 +#define MT6357_AUXADC_DBG0 0x119a +#define MT6357_AUXADC_IMP0 0x119c +#define MT6357_AUXADC_IMP1 0x119e +#define MT6357_AUXADC_DIG_3_ELR_NUM 0x11a0 +#define MT6357_AUXADC_DIG_3_ELR0 0x11a2 +#define MT6357_AUXADC_DIG_3_ELR1 0x11a4 +#define MT6357_AUXADC_DIG_3_ELR2 0x11a6 +#define MT6357_AUXADC_DIG_3_ELR3 0x11a8 +#define MT6357_AUXADC_DIG_3_ELR4 0x11aa +#define MT6357_AUXADC_DIG_3_ELR5 0x11ac +#define MT6357_AUXADC_DIG_3_ELR6 0x11ae +#define MT6357_AUXADC_DIG_3_ELR7 
0x11b0 +#define MT6357_AUXADC_DIG_3_ELR8 0x11b2 +#define MT6357_AUXADC_DIG_3_ELR9 0x11b4 +#define MT6357_AUXADC_DIG_3_ELR10 0x11b6 +#define MT6357_AUXADC_DIG_3_ELR11 0x11b8 +#define MT6357_AUXADC_DIG_4_DSN_ID 0x1200 +#define MT6357_AUXADC_DIG_4_DSN_REV0 0x1202 +#define MT6357_AUXADC_DIG_4_DSN_DBI 0x1204 +#define MT6357_AUXADC_DIG_4_DSN_DXI 0x1206 +#define MT6357_AUXADC_MDRT_0 0x1208 +#define MT6357_AUXADC_MDRT_1 0x120a +#define MT6357_AUXADC_MDRT_2 0x120c +#define MT6357_AUXADC_MDRT_3 0x120e +#define MT6357_AUXADC_MDRT_4 0x1210 +#define MT6357_AUXADC_DCXO_MDRT_0 0x1212 +#define MT6357_AUXADC_DCXO_MDRT_1 0x1214 +#define MT6357_AUXADC_DCXO_MDRT_2 0x1216 +#define MT6357_AUXADC_NAG_0 0x1218 +#define MT6357_AUXADC_NAG_1 0x121a +#define MT6357_AUXADC_NAG_2 0x121c +#define MT6357_AUXADC_NAG_3 0x121e +#define MT6357_AUXADC_NAG_4 0x1220 +#define MT6357_AUXADC_NAG_5 0x1222 +#define MT6357_AUXADC_NAG_6 0x1224 +#define MT6357_AUXADC_NAG_7 0x1226 +#define MT6357_AUXADC_NAG_8 0x1228 +#define MT6357_AUXADC_RSV_1 0x122a +#define MT6357_AUXADC_ANA_0 0x122c +#define MT6357_AUXADC_IMP_CG0 0x122e +#define MT6357_AUXADC_LBAT_CG0 0x1230 +#define MT6357_AUXADC_NAG_CG0 0x1232 +#define MT6357_AUXADC_PRI_NEW 0x1234 +#define MT6357_AUXADC_CHR_TOP_CON2 0x1236 +#define MT6357_BUCK_TOP_DSN_ID 0x1400 +#define MT6357_BUCK_TOP_DSN_REV0 0x1402 +#define MT6357_BUCK_TOP_DBI 0x1404 +#define MT6357_BUCK_TOP_DXI 0x1406 +#define MT6357_BUCK_TOP_PAM0 0x1408 +#define MT6357_BUCK_TOP_PAM1 0x140a +#define MT6357_BUCK_TOP_CLK_CON0 0x140c +#define MT6357_BUCK_TOP_CLK_CON0_SET 0x140e +#define MT6357_BUCK_TOP_CLK_CON0_CLR 0x1410 +#define MT6357_BUCK_TOP_CLK_HWEN_CON0 0x1412 +#define MT6357_BUCK_TOP_CLK_HWEN_CON0_SET 0x1414 +#define MT6357_BUCK_TOP_CLK_HWEN_CON0_CLR 0x1416 +#define MT6357_BUCK_TOP_CLK_MISC_CON0 0x1418 +#define MT6357_BUCK_TOP_INT_CON0 0x141a +#define MT6357_BUCK_TOP_INT_CON0_SET 0x141c +#define MT6357_BUCK_TOP_INT_CON0_CLR 0x141e +#define MT6357_BUCK_TOP_INT_MASK_CON0 0x1420 +#define MT6357_BUCK_TOP_INT_MASK_CON0_SET 0x1422 +#define MT6357_BUCK_TOP_INT_MASK_CON0_CLR 0x1424 +#define MT6357_BUCK_TOP_INT_STATUS0 0x1426 +#define MT6357_BUCK_TOP_INT_RAW_STATUS0 0x1428 +#define MT6357_BUCK_TOP_STB_CON 0x142a +#define MT6357_BUCK_TOP_SLP_CON0 0x142c +#define MT6357_BUCK_TOP_SLP_CON1 0x142e +#define MT6357_BUCK_TOP_SLP_CON2 0x1430 +#define MT6357_BUCK_TOP_MINFREQ_CON 0x1432 +#define MT6357_BUCK_TOP_OC_CON0 0x1434 +#define MT6357_BUCK_TOP_K_CON0 0x1436 +#define MT6357_BUCK_TOP_K_CON1 0x1438 +#define MT6357_BUCK_TOP_K_CON2 0x143a +#define MT6357_BUCK_TOP_WDTDBG0 0x143c +#define MT6357_BUCK_TOP_WDTDBG1 0x143e +#define MT6357_BUCK_TOP_WDTDBG2 0x1440 +#define MT6357_BUCK_TOP_ELR_NUM 0x1442 +#define MT6357_BUCK_TOP_ELR0 0x1444 +#define MT6357_BUCK_TOP_ELR1 0x1446 +#define MT6357_BUCK_VPROC_DSN_ID 0x1480 +#define MT6357_BUCK_VPROC_DSN_REV0 0x1482 +#define MT6357_BUCK_VPROC_DSN_DBI 0x1484 +#define MT6357_BUCK_VPROC_DSN_DXI 0x1486 +#define MT6357_BUCK_VPROC_CON0 0x1488 +#define MT6357_BUCK_VPROC_CON1 0x148a +#define MT6357_BUCK_VPROC_CFG0 0x148c +#define MT6357_BUCK_VPROC_CFG1 0x148e +#define MT6357_BUCK_VPROC_OP_EN 0x1490 +#define MT6357_BUCK_VPROC_OP_EN_SET 0x1492 +#define MT6357_BUCK_VPROC_OP_EN_CLR 0x1494 +#define MT6357_BUCK_VPROC_OP_CFG 0x1496 +#define MT6357_BUCK_VPROC_OP_CFG_SET 0x1498 +#define MT6357_BUCK_VPROC_OP_CFG_CLR 0x149a +#define MT6357_BUCK_VPROC_SP_CON 0x149c +#define MT6357_BUCK_VPROC_SP_CFG 0x149e +#define MT6357_BUCK_VPROC_OC_CFG 0x14a0 +#define MT6357_BUCK_VPROC_DBG0 0x14a2 +#define MT6357_BUCK_VPROC_DBG1 0x14a4 
+#define MT6357_BUCK_VPROC_DBG2 0x14a6 +#define MT6357_BUCK_VPROC_ELR_NUM 0x14a8 +#define MT6357_BUCK_VPROC_ELR0 0x14aa +#define MT6357_BUCK_VCORE_DSN_ID 0x1500 +#define MT6357_BUCK_VCORE_DSN_REV0 0x1502 +#define MT6357_BUCK_VCORE_DSN_DBI 0x1504 +#define MT6357_BUCK_VCORE_DSN_DXI 0x1506 +#define MT6357_BUCK_VCORE_CON0 0x1508 +#define MT6357_BUCK_VCORE_CON1 0x150a +#define MT6357_BUCK_VCORE_CFG0 0x150c +#define MT6357_BUCK_VCORE_CFG1 0x150e +#define MT6357_BUCK_VCORE_OP_EN 0x1510 +#define MT6357_BUCK_VCORE_OP_EN_SET 0x1512 +#define MT6357_BUCK_VCORE_OP_EN_CLR 0x1514 +#define MT6357_BUCK_VCORE_OP_CFG 0x1516 +#define MT6357_BUCK_VCORE_OP_CFG_SET 0x1518 +#define MT6357_BUCK_VCORE_OP_CFG_CLR 0x151a +#define MT6357_BUCK_VCORE_SP_CON 0x151c +#define MT6357_BUCK_VCORE_SP_CFG 0x151e +#define MT6357_BUCK_VCORE_OC_CFG 0x1520 +#define MT6357_BUCK_VCORE_DBG0 0x1522 +#define MT6357_BUCK_VCORE_DBG1 0x1524 +#define MT6357_BUCK_VCORE_DBG2 0x1526 +#define MT6357_BUCK_VCORE_ELR_NUM 0x1528 +#define MT6357_BUCK_VCORE_ELR0 0x152a +#define MT6357_BUCK_VMODEM_DSN_ID 0x1580 +#define MT6357_BUCK_VMODEM_DSN_REV0 0x1582 +#define MT6357_BUCK_VMODEM_DSN_DBI 0x1584 +#define MT6357_BUCK_VMODEM_DSN_DXI 0x1586 +#define MT6357_BUCK_VMODEM_CON0 0x1588 +#define MT6357_BUCK_VMODEM_CON1 0x158a +#define MT6357_BUCK_VMODEM_CFG0 0x158c +#define MT6357_BUCK_VMODEM_CFG1 0x158e +#define MT6357_BUCK_VMODEM_OP_EN 0x1590 +#define MT6357_BUCK_VMODEM_OP_EN_SET 0x1592 +#define MT6357_BUCK_VMODEM_OP_EN_CLR 0x1594 +#define MT6357_BUCK_VMODEM_OP_CFG 0x1596 +#define MT6357_BUCK_VMODEM_OP_CFG_SET 0x1598 +#define MT6357_BUCK_VMODEM_OP_CFG_CLR 0x159a +#define MT6357_BUCK_VMODEM_SP_CON 0x159c +#define MT6357_BUCK_VMODEM_SP_CFG 0x159e +#define MT6357_BUCK_VMODEM_OC_CFG 0x15a0 +#define MT6357_BUCK_VMODEM_DBG0 0x15a2 +#define MT6357_BUCK_VMODEM_DBG1 0x15a4 +#define MT6357_BUCK_VMODEM_DBG2 0x15a6 +#define MT6357_BUCK_VMODEM_ELR_NUM 0x15a8 +#define MT6357_BUCK_VMODEM_ELR0 0x15aa +#define MT6357_BUCK_VS1_DSN_ID 0x1600 +#define MT6357_BUCK_VS1_DSN_REV0 0x1602 +#define MT6357_BUCK_VS1_DSN_DBI 0x1604 +#define MT6357_BUCK_VS1_DSN_DXI 0x1606 +#define MT6357_BUCK_VS1_CON0 0x1608 +#define MT6357_BUCK_VS1_CON1 0x160a +#define MT6357_BUCK_VS1_CFG0 0x160c +#define MT6357_BUCK_VS1_CFG1 0x160e +#define MT6357_BUCK_VS1_OP_EN 0x1610 +#define MT6357_BUCK_VS1_OP_EN_SET 0x1612 +#define MT6357_BUCK_VS1_OP_EN_CLR 0x1614 +#define MT6357_BUCK_VS1_OP_CFG 0x1616 +#define MT6357_BUCK_VS1_OP_CFG_SET 0x1618 +#define MT6357_BUCK_VS1_OP_CFG_CLR 0x161a +#define MT6357_BUCK_VS1_SP_CON 0x161c +#define MT6357_BUCK_VS1_SP_CFG 0x161e +#define MT6357_BUCK_VS1_OC_CFG 0x1620 +#define MT6357_BUCK_VS1_DBG0 0x1622 +#define MT6357_BUCK_VS1_DBG1 0x1624 +#define MT6357_BUCK_VS1_DBG2 0x1626 +#define MT6357_BUCK_VS1_VOTER 0x1628 +#define MT6357_BUCK_VS1_VOTER_SET 0x162a +#define MT6357_BUCK_VS1_VOTER_CLR 0x162c +#define MT6357_BUCK_VS1_VOTER_CFG 0x162e +#define MT6357_BUCK_VS1_ELR_NUM 0x1630 +#define MT6357_BUCK_VS1_ELR0 0x1632 +#define MT6357_BUCK_VPA_DSN_ID 0x1680 +#define MT6357_BUCK_VPA_DSN_REV0 0x1682 +#define MT6357_BUCK_VPA_DSN_DBI 0x1684 +#define MT6357_BUCK_VPA_DSN_DXI 0x1686 +#define MT6357_BUCK_VPA_CON0 0x1688 +#define MT6357_BUCK_VPA_CON1 0x168a +#define MT6357_BUCK_VPA_CFG0 0x168c +#define MT6357_BUCK_VPA_CFG1 0x168e +#define MT6357_BUCK_VPA_OC_CFG 0x1690 +#define MT6357_BUCK_VPA_DBG0 0x1692 +#define MT6357_BUCK_VPA_DBG1 0x1694 +#define MT6357_BUCK_VPA_DBG2 0x1696 +#define MT6357_BUCK_VPA_DLC_CON0 0x1698 +#define MT6357_BUCK_VPA_DLC_CON1 0x169a +#define MT6357_BUCK_VPA_DLC_CON2 0x169c 
+#define MT6357_BUCK_VPA_MSFG_CON0 0x169e +#define MT6357_BUCK_VPA_MSFG_CON1 0x16a0 +#define MT6357_BUCK_VPA_MSFG_RRATE0 0x16a2 +#define MT6357_BUCK_VPA_MSFG_RRATE1 0x16a4 +#define MT6357_BUCK_VPA_MSFG_RRATE2 0x16a6 +#define MT6357_BUCK_VPA_MSFG_RTHD0 0x16a8 +#define MT6357_BUCK_VPA_MSFG_RTHD1 0x16aa +#define MT6357_BUCK_VPA_MSFG_RTHD2 0x16ac +#define MT6357_BUCK_VPA_MSFG_FRATE0 0x16ae +#define MT6357_BUCK_VPA_MSFG_FRATE1 0x16b0 +#define MT6357_BUCK_VPA_MSFG_FRATE2 0x16b2 +#define MT6357_BUCK_VPA_MSFG_FTHD0 0x16b4 +#define MT6357_BUCK_VPA_MSFG_FTHD1 0x16b6 +#define MT6357_BUCK_VPA_MSFG_FTHD2 0x16b8 +#define MT6357_BUCK_ANA_DSN_ID 0x1700 +#define MT6357_BUCK_ANA_DSN_REV0 0x1702 +#define MT6357_BUCK_ANA_DSN_DBI 0x1704 +#define MT6357_BUCK_ANA_DSN_FPI 0x1706 +#define MT6357_SMPS_ANA_CON0 0x1708 +#define MT6357_SMPS_ANA_CON1 0x170a +#define MT6357_SMPS_ANA_CON2 0x170c +#define MT6357_VCORE_VPROC_ANA_CON0 0x170e +#define MT6357_VCORE_VPROC_ANA_CON1 0x1710 +#define MT6357_VCORE_VPROC_ANA_CON2 0x1712 +#define MT6357_VCORE_VPROC_ANA_CON3 0x1714 +#define MT6357_VCORE_VPROC_ANA_CON4 0x1716 +#define MT6357_VCORE_VPROC_ANA_CON5 0x1718 +#define MT6357_VCORE_VPROC_ANA_CON6 0x171a +#define MT6357_VCORE_VPROC_ANA_CON7 0x171c +#define MT6357_VCORE_VPROC_ANA_CON8 0x171e +#define MT6357_VCORE_VPROC_ANA_CON9 0x1720 +#define MT6357_VCORE_VPROC_ANA_CON10 0x1722 +#define MT6357_VCORE_VPROC_ANA_CON11 0x1724 +#define MT6357_VMODEM_ANA_CON0 0x1726 +#define MT6357_VMODEM_ANA_CON1 0x1728 +#define MT6357_VMODEM_ANA_CON2 0x172a +#define MT6357_VMODEM_ANA_CON3 0x172c +#define MT6357_VMODEM_ANA_CON4 0x172e +#define MT6357_VMODEM_ANA_CON5 0x1730 +#define MT6357_VS1_ANA_CON0 0x1732 +#define MT6357_VS1_ANA_CON1 0x1734 +#define MT6357_VS1_ANA_CON2 0x1736 +#define MT6357_VS1_ANA_CON3 0x1738 +#define MT6357_VS1_ANA_CON4 0x173a +#define MT6357_VS1_ANA_CON5 0x173c +#define MT6357_VPA_ANA_CON0 0x173e +#define MT6357_VPA_ANA_CON1 0x1740 +#define MT6357_VPA_ANA_CON2 0x1742 +#define MT6357_VPA_ANA_CON3 0x1744 +#define MT6357_VPA_ANA_CON4 0x1746 +#define MT6357_VPA_ANA_CON5 0x1748 +#define MT6357_BUCK_ANA_ELR_NUM 0x174a +#define MT6357_SMPS_ELR_0 0x174c +#define MT6357_SMPS_ELR_1 0x174e +#define MT6357_SMPS_ELR_2 0x1750 +#define MT6357_SMPS_ELR_3 0x1752 +#define MT6357_SMPS_ELR_4 0x1754 +#define MT6357_SMPS_ELR_5 0x1756 +#define MT6357_VCORE_VPROC_ELR_0 0x1758 +#define MT6357_VCORE_VPROC_ELR_1 0x175a +#define MT6357_VCORE_VPROC_ELR_2 0x175c +#define MT6357_VCORE_VPROC_ELR_3 0x175e +#define MT6357_VCORE_VPROC_ELR_4 0x1760 +#define MT6357_VMODEM_ELR_0 0x1762 +#define MT6357_VMODEM_ELR_1 0x1764 +#define MT6357_VMODEM_ELR_2 0x1766 +#define MT6357_VS1_ELR_0 0x1768 +#define MT6357_VS1_ELR_1 0x176a +#define MT6357_VPA_ELR_0 0x176c +#define MT6357_LDO_TOP_ID 0x1880 +#define MT6357_LDO_TOP_REV0 0x1882 +#define MT6357_LDO_TOP_DBI 0x1884 +#define MT6357_LDO_TOP_DXI 0x1886 +#define MT6357_LDO_TPM0 0x1888 +#define MT6357_LDO_TPM1 0x188a +#define MT6357_LDO_TOP_CLK_DCM_CON0 0x188c +#define MT6357_LDO_TOP_CLK_VIO28_CON0 0x188e +#define MT6357_LDO_TOP_CLK_VIO18_CON0 0x1890 +#define MT6357_LDO_TOP_CLK_VAUD28_CON0 0x1892 +#define MT6357_LDO_TOP_CLK_VDRAM_CON0 0x1894 +#define MT6357_LDO_TOP_CLK_VSRAM_PROC_CON0 0x1896 +#define MT6357_LDO_TOP_CLK_VSRAM_OTHERS_CON0 0x1898 +#define MT6357_LDO_TOP_CLK_VAUX18_CON0 0x189a +#define MT6357_LDO_TOP_CLK_VUSB33_CON0 0x189c +#define MT6357_LDO_TOP_CLK_VEMC_CON0 0x189e +#define MT6357_LDO_TOP_CLK_VXO22_CON0 0x18a0 +#define MT6357_LDO_TOP_CLK_VSIM1_CON0 0x18a2 +#define MT6357_LDO_TOP_CLK_VSIM2_CON0 0x18a4 +#define 
MT6357_LDO_TOP_CLK_VCAMD_CON0 0x18a6 +#define MT6357_LDO_TOP_CLK_VCAMIO_CON0 0x18a8 +#define MT6357_LDO_TOP_CLK_VEFUSE_CON0 0x18aa +#define MT6357_LDO_TOP_CLK_VCN33_CON0 0x18ac +#define MT6357_LDO_TOP_CLK_VCN18_CON0 0x18ae +#define MT6357_LDO_TOP_CLK_VCN28_CON0 0x18b0 +#define MT6357_LDO_TOP_CLK_VIBR_CON0 0x18b2 +#define MT6357_LDO_TOP_CLK_VFE28_CON0 0x18b4 +#define MT6357_LDO_TOP_CLK_VMCH_CON0 0x18b6 +#define MT6357_LDO_TOP_CLK_VMC_CON0 0x18b8 +#define MT6357_LDO_TOP_CLK_VRF18_CON0 0x18ba +#define MT6357_LDO_TOP_CLK_VLDO28_CON0 0x18bc +#define MT6357_LDO_TOP_CLK_VRF12_CON0 0x18be +#define MT6357_LDO_TOP_CLK_VCAMA_CON0 0x18c0 +#define MT6357_LDO_TOP_CLK_TREF_CON0 0x18c2 +#define MT6357_LDO_TOP_INT_CON0 0x18c4 +#define MT6357_LDO_TOP_INT_CON0_SET 0x18c6 +#define MT6357_LDO_TOP_INT_CON0_CLR 0x18c8 +#define MT6357_LDO_TOP_INT_CON1 0x18ca +#define MT6357_LDO_TOP_INT_CON1_SET 0x18cc +#define MT6357_LDO_TOP_INT_CON1_CLR 0x18ce +#define MT6357_LDO_TOP_INT_MASK_CON0 0x18d0 +#define MT6357_LDO_TOP_INT_MASK_CON0_SET 0x18d2 +#define MT6357_LDO_TOP_INT_MASK_CON0_CLR 0x18d4 +#define MT6357_LDO_TOP_INT_MASK_CON1 0x18d6 +#define MT6357_LDO_TOP_INT_MASK_CON1_SET 0x18d8 +#define MT6357_LDO_TOP_INT_MASK_CON1_CLR 0x18da +#define MT6357_LDO_TOP_INT_STATUS0 0x18dc +#define MT6357_LDO_TOP_INT_STATUS1 0x18de +#define MT6357_LDO_TOP_INT_RAW_STATUS0 0x18e0 +#define MT6357_LDO_TOP_INT_RAW_STATUS1 0x18e2 +#define MT6357_LDO_TEST_CON0 0x18e4 +#define MT6357_LDO_TOP_WDT_CON0 0x18e6 +#define MT6357_LDO_TOP_RSV_CON0 0x18e8 +#define MT6357_LDO_TOP_RSV_CON1 0x18ea +#define MT6357_LDO_OCFB0 0x18ec +#define MT6357_LDO_LP_PROTECTION 0x18ee +#define MT6357_LDO_DUMMY_LOAD_GATED 0x18f0 +#define MT6357_LDO_GON0_DSN_ID 0x1900 +#define MT6357_LDO_GON0_DSN_REV0 0x1902 +#define MT6357_LDO_GON0_DSN_DBI 0x1904 +#define MT6357_LDO_GON0_DSN_DXI 0x1906 +#define MT6357_LDO_VXO22_CON0 0x1908 +#define MT6357_LDO_VXO22_OP_EN 0x190a +#define MT6357_LDO_VXO22_OP_EN_SET 0x190c +#define MT6357_LDO_VXO22_OP_EN_CLR 0x190e +#define MT6357_LDO_VXO22_OP_CFG 0x1910 +#define MT6357_LDO_VXO22_OP_CFG_SET 0x1912 +#define MT6357_LDO_VXO22_OP_CFG_CLR 0x1914 +#define MT6357_LDO_VXO22_CON1 0x1916 +#define MT6357_LDO_VXO22_CON2 0x1918 +#define MT6357_LDO_VXO22_CON3 0x191a +#define MT6357_LDO_VAUX18_CON0 0x191c +#define MT6357_LDO_VAUX18_OP_EN 0x191e +#define MT6357_LDO_VAUX18_OP_EN_SET 0x1920 +#define MT6357_LDO_VAUX18_OP_EN_CLR 0x1922 +#define MT6357_LDO_VAUX18_OP_CFG 0x1924 +#define MT6357_LDO_VAUX18_OP_CFG_SET 0x1926 +#define MT6357_LDO_VAUX18_OP_CFG_CLR 0x1928 +#define MT6357_LDO_VAUX18_CON1 0x192a +#define MT6357_LDO_VAUX18_CON2 0x192c +#define MT6357_LDO_VAUX18_CON3 0x192e +#define MT6357_LDO_VAUD28_CON0 0x1930 +#define MT6357_LDO_VAUD28_OP_EN 0x1932 +#define MT6357_LDO_VAUD28_OP_EN_SET 0x1934 +#define MT6357_LDO_VAUD28_OP_EN_CLR 0x1936 +#define MT6357_LDO_VAUD28_OP_CFG 0x1938 +#define MT6357_LDO_VAUD28_OP_CFG_SET 0x193a +#define MT6357_LDO_VAUD28_OP_CFG_CLR 0x193c +#define MT6357_LDO_VAUD28_CON1 0x193e +#define MT6357_LDO_VAUD28_CON2 0x1940 +#define MT6357_LDO_VAUD28_CON3 0x1942 +#define MT6357_LDO_VIO28_CON0 0x1944 +#define MT6357_LDO_VIO28_OP_EN 0x1946 +#define MT6357_LDO_VIO28_OP_EN_SET 0x1948 +#define MT6357_LDO_VIO28_OP_EN_CLR 0x194a +#define MT6357_LDO_VIO28_OP_CFG 0x194c +#define MT6357_LDO_VIO28_OP_CFG_SET 0x194e +#define MT6357_LDO_VIO28_OP_CFG_CLR 0x1950 +#define MT6357_LDO_VIO28_CON1 0x1952 +#define MT6357_LDO_VIO28_CON2 0x1954 +#define MT6357_LDO_VIO28_CON3 0x1956 +#define MT6357_LDO_VIO18_CON0 0x1958 +#define MT6357_LDO_VIO18_OP_EN 0x195a 
+#define MT6357_LDO_VIO18_OP_EN_SET 0x195c +#define MT6357_LDO_VIO18_OP_EN_CLR 0x195e +#define MT6357_LDO_VIO18_OP_CFG 0x1960 +#define MT6357_LDO_VIO18_OP_CFG_SET 0x1962 +#define MT6357_LDO_VIO18_OP_CFG_CLR 0x1964 +#define MT6357_LDO_VIO18_CON1 0x1966 +#define MT6357_LDO_VIO18_CON2 0x1968 +#define MT6357_LDO_VIO18_CON3 0x196a +#define MT6357_LDO_VDRAM_CON0 0x196c +#define MT6357_LDO_VDRAM_OP_EN 0x196e +#define MT6357_LDO_VDRAM_OP_EN_SET 0x1970 +#define MT6357_LDO_VDRAM_OP_EN_CLR 0x1972 +#define MT6357_LDO_VDRAM_OP_CFG 0x1974 +#define MT6357_LDO_VDRAM_OP_CFG_SET 0x1976 +#define MT6357_LDO_VDRAM_OP_CFG_CLR 0x1978 +#define MT6357_LDO_VDRAM_CON1 0x197a +#define MT6357_LDO_VDRAM_CON2 0x197c +#define MT6357_LDO_VDRAM_CON3 0x197e +#define MT6357_LDO_GON1_DSN_ID 0x1980 +#define MT6357_LDO_GON1_DSN_REV0 0x1982 +#define MT6357_LDO_GON1_DSN_DBI 0x1984 +#define MT6357_LDO_GON1_DSN_DXI 0x1986 +#define MT6357_LDO_VEMC_CON0 0x1988 +#define MT6357_LDO_VEMC_OP_EN 0x198a +#define MT6357_LDO_VEMC_OP_EN_SET 0x198c +#define MT6357_LDO_VEMC_OP_EN_CLR 0x198e +#define MT6357_LDO_VEMC_OP_CFG 0x1990 +#define MT6357_LDO_VEMC_OP_CFG_SET 0x1992 +#define MT6357_LDO_VEMC_OP_CFG_CLR 0x1994 +#define MT6357_LDO_VEMC_CON1 0x1996 +#define MT6357_LDO_VEMC_CON2 0x1998 +#define MT6357_LDO_VEMC_CON3 0x199a +#define MT6357_LDO_VUSB33_CON0_0 0x199c +#define MT6357_LDO_VUSB33_OP_EN 0x199e +#define MT6357_LDO_VUSB33_OP_EN_SET 0x19a0 +#define MT6357_LDO_VUSB33_OP_EN_CLR 0x19a2 +#define MT6357_LDO_VUSB33_OP_CFG 0x19a4 +#define MT6357_LDO_VUSB33_OP_CFG_SET 0x19a6 +#define MT6357_LDO_VUSB33_OP_CFG_CLR 0x19a8 +#define MT6357_LDO_VUSB33_CON0_1 0x19aa +#define MT6357_LDO_VUSB33_CON1 0x19ac +#define MT6357_LDO_VUSB33_CON2 0x19ae +#define MT6357_LDO_VUSB33_CON3 0x19b0 +#define MT6357_LDO_VSRAM_PROC_CON0 0x19b2 +#define MT6357_LDO_VSRAM_PROC_CON2 0x19b4 +#define MT6357_LDO_VSRAM_PROC_CFG0 0x19b6 +#define MT6357_LDO_VSRAM_PROC_CFG1 0x19b8 +#define MT6357_LDO_VSRAM_PROC_OP_EN 0x19ba +#define MT6357_LDO_VSRAM_PROC_OP_EN_SET 0x19bc +#define MT6357_LDO_VSRAM_PROC_OP_EN_CLR 0x19be +#define MT6357_LDO_VSRAM_PROC_OP_CFG 0x19c0 +#define MT6357_LDO_VSRAM_PROC_OP_CFG_SET 0x19c2 +#define MT6357_LDO_VSRAM_PROC_OP_CFG_CLR 0x19c4 +#define MT6357_LDO_VSRAM_PROC_CON3 0x19c6 +#define MT6357_LDO_VSRAM_PROC_CON4 0x19c8 +#define MT6357_LDO_VSRAM_PROC_CON5 0x19ca +#define MT6357_LDO_VSRAM_PROC_DBG0 0x19cc +#define MT6357_LDO_VSRAM_PROC_DBG1 0x19ce +#define MT6357_LDO_VSRAM_OTHERS_CON0 0x19d0 +#define MT6357_LDO_VSRAM_OTHERS_CON2 0x19d2 +#define MT6357_LDO_VSRAM_OTHERS_CFG0 0x19d4 +#define MT6357_LDO_VSRAM_OTHERS_CFG1 0x19d6 +#define MT6357_LDO_VSRAM_OTHERS_OP_EN 0x19d8 +#define MT6357_LDO_VSRAM_OTHERS_OP_EN_SET 0x19da +#define MT6357_LDO_VSRAM_OTHERS_OP_EN_CLR 0x19dc +#define MT6357_LDO_VSRAM_OTHERS_OP_CFG 0x19de +#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_SET 0x19e0 +#define MT6357_LDO_VSRAM_OTHERS_OP_CFG_CLR 0x19e2 +#define MT6357_LDO_VSRAM_OTHERS_CON3 0x19e4 +#define MT6357_LDO_VSRAM_OTHERS_CON4 0x19e6 +#define MT6357_LDO_VSRAM_OTHERS_CON5 0x19e8 +#define MT6357_LDO_VSRAM_OTHERS_DBG0 0x19ea +#define MT6357_LDO_VSRAM_OTHERS_DBG1 0x19ec +#define MT6357_LDO_VSRAM_PROC_SP 0x19ee +#define MT6357_LDO_VSRAM_OTHERS_SP 0x19f0 +#define MT6357_LDO_VSRAM_PROC_R2R_PDN_DIS 0x19f2 +#define MT6357_LDO_VSRAM_OTHERS_R2R_PDN_DIS 0x19f4 +#define MT6357_LDO_VSRAM_WDT_DBG0 0x19f6 +#define MT6357_LDO_GON1_ELR_NUM 0x19f8 +#define MT6357_LDO_VSRAM_CON0 0x19fa +#define MT6357_LDO_VSRAM_CON1 0x19fc +#define MT6357_LDO_VSRAM_CON2 0x19fe +#define MT6357_LDO_GOFF0_DSN_ID 0x1a00 +#define 
MT6357_LDO_GOFF0_DSN_REV0 0x1a02 +#define MT6357_LDO_GOFF0_DSN_DBI 0x1a04 +#define MT6357_LDO_GOFF0_DSN_DXI 0x1a06 +#define MT6357_LDO_VFE28_CON0 0x1a08 +#define MT6357_LDO_VFE28_OP_EN 0x1a0a +#define MT6357_LDO_VFE28_OP_EN_SET 0x1a0c +#define MT6357_LDO_VFE28_OP_EN_CLR 0x1a0e +#define MT6357_LDO_VFE28_OP_CFG 0x1a10 +#define MT6357_LDO_VFE28_OP_CFG_SET 0x1a12 +#define MT6357_LDO_VFE28_OP_CFG_CLR 0x1a14 +#define MT6357_LDO_VFE28_CON1 0x1a16 +#define MT6357_LDO_VFE28_CON2 0x1a18 +#define MT6357_LDO_VFE28_CON3 0x1a1a +#define MT6357_LDO_VRF18_CON0 0x1a1c +#define MT6357_LDO_VRF18_OP_EN 0x1a1e +#define MT6357_LDO_VRF18_OP_EN_SET 0x1a20 +#define MT6357_LDO_VRF18_OP_EN_CLR 0x1a22 +#define MT6357_LDO_VRF18_OP_CFG 0x1a24 +#define MT6357_LDO_VRF18_OP_CFG_SET 0x1a26 +#define MT6357_LDO_VRF18_OP_CFG_CLR 0x1a28 +#define MT6357_LDO_VRF18_CON1 0x1a2a +#define MT6357_LDO_VRF18_CON2 0x1a2c +#define MT6357_LDO_VRF18_CON3 0x1a2e +#define MT6357_LDO_VRF12_CON0 0x1a30 +#define MT6357_LDO_VRF12_OP_EN 0x1a32 +#define MT6357_LDO_VRF12_OP_EN_SET 0x1a34 +#define MT6357_LDO_VRF12_OP_EN_CLR 0x1a36 +#define MT6357_LDO_VRF12_OP_CFG 0x1a38 +#define MT6357_LDO_VRF12_OP_CFG_SET 0x1a3a +#define MT6357_LDO_VRF12_OP_CFG_CLR 0x1a3c +#define MT6357_LDO_VRF12_CON1 0x1a3e +#define MT6357_LDO_VRF12_CON2 0x1a40 +#define MT6357_LDO_VRF12_CON3 0x1a42 +#define MT6357_LDO_VEFUSE_CON0 0x1a44 +#define MT6357_LDO_VEFUSE_OP_EN 0x1a46 +#define MT6357_LDO_VEFUSE_OP_EN_SET 0x1a48 +#define MT6357_LDO_VEFUSE_OP_EN_CLR 0x1a4a +#define MT6357_LDO_VEFUSE_OP_CFG 0x1a4c +#define MT6357_LDO_VEFUSE_OP_CFG_SET 0x1a4e +#define MT6357_LDO_VEFUSE_OP_CFG_CLR 0x1a50 +#define MT6357_LDO_VEFUSE_CON1 0x1a52 +#define MT6357_LDO_VEFUSE_CON2 0x1a54 +#define MT6357_LDO_VEFUSE_CON3 0x1a56 +#define MT6357_LDO_VCN18_CON0 0x1a58 +#define MT6357_LDO_VCN18_OP_EN 0x1a5a +#define MT6357_LDO_VCN18_OP_EN_SET 0x1a5c +#define MT6357_LDO_VCN18_OP_EN_CLR 0x1a5e +#define MT6357_LDO_VCN18_OP_CFG 0x1a60 +#define MT6357_LDO_VCN18_OP_CFG_SET 0x1a62 +#define MT6357_LDO_VCN18_OP_CFG_CLR 0x1a64 +#define MT6357_LDO_VCN18_CON1 0x1a66 +#define MT6357_LDO_VCN18_CON2 0x1a68 +#define MT6357_LDO_VCN18_CON3 0x1a6a +#define MT6357_LDO_VCAMA_CON0 0x1a6c +#define MT6357_LDO_VCAMA_OP_EN 0x1a6e +#define MT6357_LDO_VCAMA_OP_EN_SET 0x1a70 +#define MT6357_LDO_VCAMA_OP_EN_CLR 0x1a72 +#define MT6357_LDO_VCAMA_OP_CFG 0x1a74 +#define MT6357_LDO_VCAMA_OP_CFG_SET 0x1a76 +#define MT6357_LDO_VCAMA_OP_CFG_CLR 0x1a78 +#define MT6357_LDO_VCAMA_CON1 0x1a7a +#define MT6357_LDO_VCAMA_CON2 0x1a7c +#define MT6357_LDO_VCAMA_CON3 0x1a7e +#define MT6357_LDO_GOFF1_DSN_ID 0x1a80 +#define MT6357_LDO_GOFF1_DSN_REV0 0x1a82 +#define MT6357_LDO_GOFF1_DSN_DBI 0x1a84 +#define MT6357_LDO_GOFF1_DSN_DXI 0x1a86 +#define MT6357_LDO_VCAMD_CON0 0x1a88 +#define MT6357_LDO_VCAMD_OP_EN 0x1a8a +#define MT6357_LDO_VCAMD_OP_EN_SET 0x1a8c +#define MT6357_LDO_VCAMD_OP_EN_CLR 0x1a8e +#define MT6357_LDO_VCAMD_OP_CFG 0x1a90 +#define MT6357_LDO_VCAMD_OP_CFG_SET 0x1a92 +#define MT6357_LDO_VCAMD_OP_CFG_CLR 0x1a94 +#define MT6357_LDO_VCAMD_CON1 0x1a96 +#define MT6357_LDO_VCAMD_CON2 0x1a98 +#define MT6357_LDO_VCAMD_CON3 0x1a9a +#define MT6357_LDO_VCAMIO_CON0 0x1a9c +#define MT6357_LDO_VCAMIO_OP_EN 0x1a9e +#define MT6357_LDO_VCAMIO_OP_EN_SET 0x1aa0 +#define MT6357_LDO_VCAMIO_OP_EN_CLR 0x1aa2 +#define MT6357_LDO_VCAMIO_OP_CFG 0x1aa4 +#define MT6357_LDO_VCAMIO_OP_CFG_SET 0x1aa6 +#define MT6357_LDO_VCAMIO_OP_CFG_CLR 0x1aa8 +#define MT6357_LDO_VCAMIO_CON1 0x1aaa +#define MT6357_LDO_VCAMIO_CON2 0x1aac +#define MT6357_LDO_VCAMIO_CON3 0x1aae +#define 
MT6357_LDO_VMC_CON0 0x1ab0 +#define MT6357_LDO_VMC_OP_EN 0x1ab2 +#define MT6357_LDO_VMC_OP_EN_SET 0x1ab4 +#define MT6357_LDO_VMC_OP_EN_CLR 0x1ab6 +#define MT6357_LDO_VMC_OP_CFG 0x1ab8 +#define MT6357_LDO_VMC_OP_CFG_SET 0x1aba +#define MT6357_LDO_VMC_OP_CFG_CLR 0x1abc +#define MT6357_LDO_VMC_CON1 0x1abe +#define MT6357_LDO_VMC_CON2 0x1ac0 +#define MT6357_LDO_VMC_CON3 0x1ac2 +#define MT6357_LDO_VMCH_CON0 0x1ac4 +#define MT6357_LDO_VMCH_OP_EN 0x1ac6 +#define MT6357_LDO_VMCH_OP_EN_SET 0x1ac8 +#define MT6357_LDO_VMCH_OP_EN_CLR 0x1aca +#define MT6357_LDO_VMCH_OP_CFG 0x1acc +#define MT6357_LDO_VMCH_OP_CFG_SET 0x1ace +#define MT6357_LDO_VMCH_OP_CFG_CLR 0x1ad0 +#define MT6357_LDO_VMCH_CON1 0x1ad2 +#define MT6357_LDO_VMCH_CON2 0x1ad4 +#define MT6357_LDO_VMCH_CON3 0x1ad6 +#define MT6357_LDO_VSIM1_CON0 0x1ad8 +#define MT6357_LDO_VSIM1_OP_EN 0x1ada +#define MT6357_LDO_VSIM1_OP_EN_SET 0x1adc +#define MT6357_LDO_VSIM1_OP_EN_CLR 0x1ade +#define MT6357_LDO_VSIM1_OP_CFG 0x1ae0 +#define MT6357_LDO_VSIM1_OP_CFG_SET 0x1ae2 +#define MT6357_LDO_VSIM1_OP_CFG_CLR 0x1ae4 +#define MT6357_LDO_VSIM1_CON1 0x1ae6 +#define MT6357_LDO_VSIM1_CON2 0x1ae8 +#define MT6357_LDO_VSIM1_CON3 0x1aea +#define MT6357_LDO_VSIM2_CON0 0x1aec +#define MT6357_LDO_VSIM2_OP_EN 0x1aee +#define MT6357_LDO_VSIM2_OP_EN_SET 0x1af0 +#define MT6357_LDO_VSIM2_OP_EN_CLR 0x1af2 +#define MT6357_LDO_VSIM2_OP_CFG 0x1af4 +#define MT6357_LDO_VSIM2_OP_CFG_SET 0x1af6 +#define MT6357_LDO_VSIM2_OP_CFG_CLR 0x1af8 +#define MT6357_LDO_VSIM2_CON1 0x1afa +#define MT6357_LDO_VSIM2_CON2 0x1afc +#define MT6357_LDO_VSIM2_CON3 0x1afe +#define MT6357_LDO_GOFF2_DSN_ID 0x1b00 +#define MT6357_LDO_GOFF2_DSN_REV0 0x1b02 +#define MT6357_LDO_GOFF2_DSN_DBI 0x1b04 +#define MT6357_LDO_GOFF2_DSN_DXI 0x1b06 +#define MT6357_LDO_VIBR_CON0 0x1b08 +#define MT6357_LDO_VIBR_OP_EN 0x1b0a +#define MT6357_LDO_VIBR_OP_EN_SET 0x1b0c +#define MT6357_LDO_VIBR_OP_EN_CLR 0x1b0e +#define MT6357_LDO_VIBR_OP_CFG 0x1b10 +#define MT6357_LDO_VIBR_OP_CFG_SET 0x1b12 +#define MT6357_LDO_VIBR_OP_CFG_CLR 0x1b14 +#define MT6357_LDO_VIBR_CON1 0x1b16 +#define MT6357_LDO_VIBR_CON2 0x1b18 +#define MT6357_LDO_VIBR_CON3 0x1b1a +#define MT6357_LDO_VCN33_CON0_0 0x1b1c +#define MT6357_LDO_VCN33_OP_EN 0x1b1e +#define MT6357_LDO_VCN33_OP_EN_SET 0x1b20 +#define MT6357_LDO_VCN33_OP_EN_CLR 0x1b22 +#define MT6357_LDO_VCN33_OP_CFG 0x1b24 +#define MT6357_LDO_VCN33_OP_CFG_SET 0x1b26 +#define MT6357_LDO_VCN33_OP_CFG_CLR 0x1b28 +#define MT6357_LDO_VCN33_CON0_1 0x1b2a +#define MT6357_LDO_VCN33_CON1 0x1b2c +#define MT6357_LDO_VCN33_CON2 0x1b2e +#define MT6357_LDO_VCN33_CON3 0x1b30 +#define MT6357_LDO_VLDO28_CON0_0 0x1b32 +#define MT6357_LDO_VLDO28_OP_EN 0x1b34 +#define MT6357_LDO_VLDO28_OP_EN_SET 0x1b36 +#define MT6357_LDO_VLDO28_OP_EN_CLR 0x1b38 +#define MT6357_LDO_VLDO28_OP_CFG 0x1b3a +#define MT6357_LDO_VLDO28_OP_CFG_SET 0x1b3c +#define MT6357_LDO_VLDO28_OP_CFG_CLR 0x1b3e +#define MT6357_LDO_VLDO28_CON0_1 0x1b40 +#define MT6357_LDO_VLDO28_CON1 0x1b42 +#define MT6357_LDO_VLDO28_CON2 0x1b44 +#define MT6357_LDO_VLDO28_CON3 0x1b46 +#define MT6357_LDO_GOFF2_RSV_CON0 0x1b48 +#define MT6357_LDO_GOFF2_RSV_CON1 0x1b4a +#define MT6357_LDO_GOFF3_DSN_ID 0x1b80 +#define MT6357_LDO_GOFF3_DSN_REV0 0x1b82 +#define MT6357_LDO_GOFF3_DSN_DBI 0x1b84 +#define MT6357_LDO_GOFF3_DSN_DXI 0x1b86 +#define MT6357_LDO_VCN28_CON0 0x1b88 +#define MT6357_LDO_VCN28_OP_EN 0x1b8a +#define MT6357_LDO_VCN28_OP_EN_SET 0x1b8c +#define MT6357_LDO_VCN28_OP_EN_CLR 0x1b8e +#define MT6357_LDO_VCN28_OP_CFG 0x1b90 +#define MT6357_LDO_VCN28_OP_CFG_SET 0x1b92 +#define 
MT6357_LDO_VCN28_OP_CFG_CLR 0x1b94 +#define MT6357_LDO_VCN28_CON1 0x1b96 +#define MT6357_LDO_VCN28_CON2 0x1b98 +#define MT6357_LDO_VCN28_CON3 0x1b9a +#define MT6357_VRTC_CON0 0x1b9c +#define MT6357_LDO_TREF_CON0 0x1b9e +#define MT6357_LDO_TREF_OP_EN 0x1ba0 +#define MT6357_LDO_TREF_OP_EN_SET 0x1ba2 +#define MT6357_LDO_TREF_OP_EN_CLR 0x1ba4 +#define MT6357_LDO_TREF_OP_CFG 0x1ba6 +#define MT6357_LDO_TREF_OP_CFG_SET 0x1ba8 +#define MT6357_LDO_TREF_OP_CFG_CLR 0x1baa +#define MT6357_LDO_TREF_CON1 0x1bac +#define MT6357_LDO_GOFF3_RSV_CON0 0x1bae +#define MT6357_LDO_GOFF3_RSV_CON1 0x1bb0 +#define MT6357_LDO_ANA0_DSN_ID 0x1c00 +#define MT6357_LDO_ANA0_DSN_REV0 0x1c02 +#define MT6357_LDO_ANA0_DSN_DBI 0x1c04 +#define MT6357_LDO_ANA0_DSN_DXI 0x1c06 +#define MT6357_VFE28_ANA_CON0 0x1c08 +#define MT6357_VFE28_ANA_CON1 0x1c0a +#define MT6357_VCN28_ANA_CON0 0x1c0c +#define MT6357_VCN28_ANA_CON1 0x1c0e +#define MT6357_VAUD28_ANA_CON0 0x1c10 +#define MT6357_VAUD28_ANA_CON1 0x1c12 +#define MT6357_VAUX18_ANA_CON0 0x1c14 +#define MT6357_VAUX18_ANA_CON1 0x1c16 +#define MT6357_VXO22_ANA_CON0 0x1c18 +#define MT6357_VXO22_ANA_CON1 0x1c1a +#define MT6357_VCN33_ANA_CON0 0x1c1c +#define MT6357_VCN33_ANA_CON1 0x1c1e +#define MT6357_VEMC_ANA_CON0 0x1c20 +#define MT6357_VEMC_ANA_CON1 0x1c22 +#define MT6357_VLDO28_ANA_CON0 0x1c24 +#define MT6357_VLDO28_ANA_CON1 0x1c26 +#define MT6357_VIO28_ANA_CON0 0x1c28 +#define MT6357_VIO28_ANA_CON1 0x1c2a +#define MT6357_VIBR_ANA_CON0 0x1c2c +#define MT6357_VIBR_ANA_CON1 0x1c2e +#define MT6357_VSIM1_ANA_CON0 0x1c30 +#define MT6357_VSIM1_ANA_CON1 0x1c32 +#define MT6357_VSIM2_ANA_CON0 0x1c34 +#define MT6357_VSIM2_ANA_CON1 0x1c36 +#define MT6357_VMCH_ANA_CON0 0x1c38 +#define MT6357_VMCH_ANA_CON1 0x1c3a +#define MT6357_VMC_ANA_CON0 0x1c3c +#define MT6357_VMC_ANA_CON1 0x1c3e +#define MT6357_VCAMIO_ANA_CON0 0x1c40 +#define MT6357_VCAMIO_ANA_CON1 0x1c42 +#define MT6357_VCN18_ANA_CON0 0x1c44 +#define MT6357_VCN18_ANA_CON1 0x1c46 +#define MT6357_VRF18_ANA_CON0 0x1c48 +#define MT6357_VRF18_ANA_CON1 0x1c4a +#define MT6357_VIO18_ANA_CON0 0x1c4c +#define MT6357_VIO18_ANA_CON1 0x1c4e +#define MT6357_VDRAM_ANA_CON1 0x1c50 +#define MT6357_VRF12_ANA_CON0 0x1c52 +#define MT6357_VRF12_ANA_CON1 0x1c54 +#define MT6357_VSRAM_PROC_ANA_CON0 0x1c56 +#define MT6357_VSRAM_OTHERS_ANA_CON0 0x1c58 +#define MT6357_LDO_ANA0_ELR_NUM 0x1c5a +#define MT6357_VFE28_ELR_0 0x1c5c +#define MT6357_VCN28_ELR_0 0x1c5e +#define MT6357_VAUD28_ELR_0 0x1c60 +#define MT6357_VAUX18_ELR_0 0x1c62 +#define MT6357_VXO22_ELR_0 0x1c64 +#define MT6357_VCN33_ELR_0 0x1c66 +#define MT6357_VEMC_ELR_0 0x1c68 +#define MT6357_VLDO28_ELR_0 0x1c6a +#define MT6357_VIO28_ELR_0 0x1c6c +#define MT6357_VIBR_ELR_0 0x1c6e +#define MT6357_VSIM1_ELR_0 0x1c70 +#define MT6357_VSIM2_ELR_0 0x1c72 +#define MT6357_VMCH_ELR_0 0x1c74 +#define MT6357_VMC_ELR_0 0x1c76 +#define MT6357_VCAMIO_ELR_0 0x1c78 +#define MT6357_VCN18_ELR_0 0x1c7a +#define MT6357_VRF18_ELR_0 0x1c7c +#define MT6357_LDO_ANA1_DSN_ID 0x1c80 +#define MT6357_LDO_ANA1_DSN_REV0 0x1c82 +#define MT6357_LDO_ANA1_DSN_DBI 0x1c84 +#define MT6357_LDO_ANA1_DSN_DXI 0x1c86 +#define MT6357_VUSB33_ANA_CON0 0x1c88 +#define MT6357_VUSB33_ANA_CON1 0x1c8a +#define MT6357_VCAMA_ANA_CON0 0x1c8c +#define MT6357_VCAMA_ANA_CON1 0x1c8e +#define MT6357_VEFUSE_ANA_CON0 0x1c90 +#define MT6357_VEFUSE_ANA_CON1 0x1c92 +#define MT6357_VCAMD_ANA_CON0 0x1c94 +#define MT6357_VCAMD_ANA_CON1 0x1c96 +#define MT6357_LDO_ANA1_ELR_NUM 0x1c98 +#define MT6357_VUSB33_ELR_0 0x1c9a +#define MT6357_VCAMA_ELR_0 0x1c9c +#define 
MT6357_VEFUSE_ELR_0 0x1c9e +#define MT6357_VCAMD_ELR_0 0x1ca0 +#define MT6357_VIO18_ELR_0 0x1ca2 +#define MT6357_VDRAM_ELR_0 0x1ca4 +#define MT6357_VRF12_ELR_0 0x1ca6 +#define MT6357_VRTC_ELR_0 0x1ca8 +#define MT6357_VDRAM_ELR_1 0x1caa +#define MT6357_VDRAM_ELR_2 0x1cac +#define MT6357_XPP_TOP_ID 0x1e00 +#define MT6357_XPP_TOP_REV0 0x1e02 +#define MT6357_XPP_TOP_DBI 0x1e04 +#define MT6357_XPP_TOP_DXI 0x1e06 +#define MT6357_XPP_TPM0 0x1e08 +#define MT6357_XPP_TPM1 0x1e0a +#define MT6357_XPP_TOP_TEST_OUT 0x1e0c +#define MT6357_XPP_TOP_TEST_CON0 0x1e0e +#define MT6357_XPP_TOP_CKPDN_CON0 0x1e10 +#define MT6357_XPP_TOP_CKPDN_CON0_SET 0x1e12 +#define MT6357_XPP_TOP_CKPDN_CON0_CLR 0x1e14 +#define MT6357_XPP_TOP_CKSEL_CON0 0x1e16 +#define MT6357_XPP_TOP_CKSEL_CON0_SET 0x1e18 +#define MT6357_XPP_TOP_CKSEL_CON0_CLR 0x1e1a +#define MT6357_XPP_TOP_RST_CON0 0x1e1c +#define MT6357_XPP_TOP_RST_CON0_SET 0x1e1e +#define MT6357_XPP_TOP_RST_CON0_CLR 0x1e20 +#define MT6357_XPP_TOP_RST_BANK_CON0 0x1e22 +#define MT6357_XPP_TOP_RST_BANK_CON0_SET 0x1e24 +#define MT6357_XPP_TOP_RST_BANK_CON0_CLR 0x1e26 +#define MT6357_DRIVER_BL_DSN_ID 0x1e80 +#define MT6357_DRIVER_BL_DSN_REV0 0x1e82 +#define MT6357_DRIVER_BL_DSN_DBI 0x1e84 +#define MT6357_DRIVER_BL_DSN_DXI 0x1e86 +#define MT6357_ISINK1_CON0 0x1e88 +#define MT6357_ISINK1_CON1 0x1e8a +#define MT6357_ISINK1_CON2 0x1e8c +#define MT6357_ISINK1_CON3 0x1e8e +#define MT6357_ISINK_ANA1 0x1e90 +#define MT6357_ISINK_PHASE_DLY 0x1e92 +#define MT6357_ISINK_SFSTR 0x1e94 +#define MT6357_ISINK_EN_CTRL 0x1e96 +#define MT6357_ISINK_MODE_CTRL 0x1e98 +#define MT6357_DRIVER_ANA_CON0 0x1e9a +#define MT6357_ISINK_ANA_CON0 0x1e9c +#define MT6357_ISINK_ANA_CON1 0x1e9e +#define MT6357_DRIVER_BL_ELR_NUM 0x1ea0 +#define MT6357_DRIVER_BL_ELR_0 0x1ea2 +#define MT6357_DRIVER_CI_DSN_ID 0x1f00 +#define MT6357_DRIVER_CI_DSN_REV0 0x1f02 +#define MT6357_DRIVER_CI_DSN_DBI 0x1f04 +#define MT6357_DRIVER_CI_DSN_DXI 0x1f06 +#define MT6357_CHRIND_CON0 0x1f08 +#define MT6357_CHRIND_CON1 0x1f0a +#define MT6357_CHRIND_CON2 0x1f0c +#define MT6357_CHRIND_CON3 0x1f0e +#define MT6357_CHRIND_CON4 0x1f10 +#define MT6357_CHRIND_EN_CTRL 0x1f12 +#define MT6357_CHRIND_ANA_CON0 0x1f14 +#define MT6357_DRIVER_DL_DSN_ID 0x1f80 +#define MT6357_DRIVER_DL_DSN_REV0 0x1f82 +#define MT6357_DRIVER_DL_DSN_DBI 0x1f84 +#define MT6357_DRIVER_DL_DSN_DXI 0x1f86 +#define MT6357_ISINK2_CON0 0x1f88 +#define MT6357_ISINK3_CON0 0x1f8a +#define MT6357_ISINK_EN_CTRL_SMPL 0x1f8c +#define MT6357_AUD_TOP_ID 0x2080 +#define MT6357_AUD_TOP_REV0 0x2082 +#define MT6357_AUD_TOP_DBI 0x2084 +#define MT6357_AUD_TOP_DXI 0x2086 +#define MT6357_AUD_TOP_CKPDN_TPM0 0x2088 +#define MT6357_AUD_TOP_CKPDN_TPM1 0x208a +#define MT6357_AUD_TOP_CKPDN_CON0 0x208c +#define MT6357_AUD_TOP_CKPDN_CON0_SET 0x208e +#define MT6357_AUD_TOP_CKPDN_CON0_CLR 0x2090 +#define MT6357_AUD_TOP_CKSEL_CON0 0x2092 +#define MT6357_AUD_TOP_CKSEL_CON0_SET 0x2094 +#define MT6357_AUD_TOP_CKSEL_CON0_CLR 0x2096 +#define MT6357_AUD_TOP_CKTST_CON0 0x2098 +#define MT6357_AUD_TOP_RST_CON0 0x209a +#define MT6357_AUD_TOP_RST_CON0_SET 0x209c +#define MT6357_AUD_TOP_RST_CON0_CLR 0x209e +#define MT6357_AUD_TOP_RST_BANK_CON0 0x20a0 +#define MT6357_AUD_TOP_INT_CON0 0x20a2 +#define MT6357_AUD_TOP_INT_CON0_SET 0x20a4 +#define MT6357_AUD_TOP_INT_CON0_CLR 0x20a6 +#define MT6357_AUD_TOP_INT_MASK_CON0 0x20a8 +#define MT6357_AUD_TOP_INT_MASK_CON0_SET 0x20aa +#define MT6357_AUD_TOP_INT_MASK_CON0_CLR 0x20ac +#define MT6357_AUD_TOP_INT_STATUS0 0x20ae +#define MT6357_AUD_TOP_INT_RAW_STATUS0 0x20b0 +#define 
MT6357_AUD_TOP_INT_MISC_CON0 0x20b2 +#define MT6357_AUDNCP_CLKDIV_CON0 0x20b4 +#define MT6357_AUDNCP_CLKDIV_CON1 0x20b6 +#define MT6357_AUDNCP_CLKDIV_CON2 0x20b8 +#define MT6357_AUDNCP_CLKDIV_CON3 0x20ba +#define MT6357_AUDNCP_CLKDIV_CON4 0x20bc +#define MT6357_AUD_TOP_MON_CON0 0x20be +#define MT6357_AUDIO_DIG_DSN_ID 0x2100 +#define MT6357_AUDIO_DIG_DSN_REV0 0x2102 +#define MT6357_AUDIO_DIG_DSN_DBI 0x2104 +#define MT6357_AUDIO_DIG_DSN_DXI 0x2106 +#define MT6357_AFE_UL_DL_CON0 0x2108 +#define MT6357_AFE_DL_SRC2_CON0_L 0x210a +#define MT6357_AFE_UL_SRC_CON0_H 0x210c +#define MT6357_AFE_UL_SRC_CON0_L 0x210e +#define MT6357_AFE_TOP_CON0 0x2110 +#define MT6357_AUDIO_TOP_CON0 0x2112 +#define MT6357_AFE_MON_DEBUG0 0x2114 +#define MT6357_AFUNC_AUD_CON0 0x2116 +#define MT6357_AFUNC_AUD_CON1 0x2118 +#define MT6357_AFUNC_AUD_CON2 0x211a +#define MT6357_AFUNC_AUD_CON3 0x211c +#define MT6357_AFUNC_AUD_CON4 0x211e +#define MT6357_AFUNC_AUD_CON5 0x2120 +#define MT6357_AFUNC_AUD_CON6 0x2122 +#define MT6357_AFUNC_AUD_MON0 0x2124 +#define MT6357_AUDRC_TUNE_MON0 0x2126 +#define MT6357_AFE_ADDA_MTKAIF_FIFO_CFG0 0x2128 +#define MT6357_AFE_ADDA_MTKAIF_FIFO_LOG_MON1 0x212a +#define MT6357_AFE_ADDA_MTKAIF_MON0 0x212c +#define MT6357_AFE_ADDA_MTKAIF_MON1 0x212e +#define MT6357_AFE_ADDA_MTKAIF_MON2 0x2130 +#define MT6357_AFE_ADDA_MTKAIF_MON3 0x2132 +#define MT6357_AFE_ADDA_MTKAIF_CFG0 0x2134 +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG0 0x2136 +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG1 0x2138 +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG2 0x213a +#define MT6357_AFE_ADDA_MTKAIF_RX_CFG3 0x213c +#define MT6357_AFE_ADDA_MTKAIF_TX_CFG1 0x213e +#define MT6357_AFE_SGEN_CFG0 0x2140 +#define MT6357_AFE_SGEN_CFG1 0x2142 +#define MT6357_AFE_ADC_ASYNC_FIFO_CFG 0x2144 +#define MT6357_AFE_DCCLK_CFG0 0x2146 +#define MT6357_AFE_DCCLK_CFG1 0x2148 +#define MT6357_AUDIO_DIG_CFG 0x214a +#define MT6357_AFE_AUD_PAD_TOP 0x214c +#define MT6357_AFE_AUD_PAD_TOP_MON 0x214e +#define MT6357_AFE_AUD_PAD_TOP_MON1 0x2150 +#define MT6357_AUDENC_DSN_ID 0x2180 +#define MT6357_AUDENC_DSN_REV0 0x2182 +#define MT6357_AUDENC_DSN_DBI 0x2184 +#define MT6357_AUDENC_DSN_FPI 0x2186 +#define MT6357_AUDENC_ANA_CON0 0x2188 +#define MT6357_AUDENC_ANA_CON1 0x218a +#define MT6357_AUDENC_ANA_CON2 0x218c +#define MT6357_AUDENC_ANA_CON3 0x218e +#define MT6357_AUDENC_ANA_CON4 0x2190 +#define MT6357_AUDENC_ANA_CON5 0x2192 +#define MT6357_AUDENC_ANA_CON6 0x2194 +#define MT6357_AUDENC_ANA_CON7 0x2196 +#define MT6357_AUDENC_ANA_CON8 0x2198 +#define MT6357_AUDENC_ANA_CON9 0x219a +#define MT6357_AUDENC_ANA_CON10 0x219c +#define MT6357_AUDENC_ANA_CON11 0x219e +#define MT6357_AUDDEC_DSN_ID 0x2200 +#define MT6357_AUDDEC_DSN_REV0 0x2202 +#define MT6357_AUDDEC_DSN_DBI 0x2204 +#define MT6357_AUDDEC_DSN_FPI 0x2206 +#define MT6357_AUDDEC_ANA_CON0 0x2208 +#define MT6357_AUDDEC_ANA_CON1 0x220a +#define MT6357_AUDDEC_ANA_CON2 0x220c +#define MT6357_AUDDEC_ANA_CON3 0x220e +#define MT6357_AUDDEC_ANA_CON4 0x2210 +#define MT6357_AUDDEC_ANA_CON5 0x2212 +#define MT6357_AUDDEC_ANA_CON6 0x2214 +#define MT6357_AUDDEC_ANA_CON7 0x2216 +#define MT6357_AUDDEC_ANA_CON8 0x2218 +#define MT6357_AUDDEC_ANA_CON9 0x221a +#define MT6357_AUDDEC_ANA_CON10 0x221c +#define MT6357_AUDDEC_ANA_CON11 0x221e +#define MT6357_AUDDEC_ANA_CON12 0x2220 +#define MT6357_AUDDEC_ANA_CON13 0x2222 +#define MT6357_AUDDEC_ELR_NUM 0x2224 +#define MT6357_AUDDEC_ELR_0 0x2226 +#define MT6357_AUDZCD_DSN_ID 0x2280 +#define MT6357_AUDZCD_DSN_REV0 0x2282 +#define MT6357_AUDZCD_DSN_DBI 0x2284 +#define MT6357_AUDZCD_DSN_FPI 0x2286 +#define 
MT6357_ZCD_CON0 0x2288 +#define MT6357_ZCD_CON1 0x228a +#define MT6357_ZCD_CON2 0x228c +#define MT6357_ZCD_CON3 0x228e +#define MT6357_ZCD_CON4 0x2290 +#define MT6357_ZCD_CON5 0x2292 +#define MT6357_ACCDET_DSN_DIG_ID 0x2300 +#define MT6357_ACCDET_DSN_DIG_REV0 0x2302 +#define MT6357_ACCDET_DSN_DBI 0x2304 +#define MT6357_ACCDET_DSN_FPI 0x2306 +#define MT6357_ACCDET_CON0 0x2308 +#define MT6357_ACCDET_CON1 0x230a +#define MT6357_ACCDET_CON2 0x230c +#define MT6357_ACCDET_CON3 0x230e +#define MT6357_ACCDET_CON4 0x2310 +#define MT6357_ACCDET_CON5 0x2312 +#define MT6357_ACCDET_CON6 0x2314 +#define MT6357_ACCDET_CON7 0x2316 +#define MT6357_ACCDET_CON8 0x2318 +#define MT6357_ACCDET_CON9 0x231a +#define MT6357_ACCDET_CON10 0x231c +#define MT6357_ACCDET_CON11 0x231e +#define MT6357_ACCDET_CON12 0x2320 +#define MT6357_ACCDET_CON13 0x2322 +#define MT6357_ACCDET_CON14 0x2324 +#define MT6357_ACCDET_CON15 0x2326 +#define MT6357_ACCDET_CON16 0x2328 +#define MT6357_ACCDET_CON17 0x232a +#define MT6357_ACCDET_CON18 0x232c +#define MT6357_ACCDET_CON19 0x232e +#define MT6357_ACCDET_CON20 0x2330 +#define MT6357_ACCDET_CON21 0x2332 +#define MT6357_ACCDET_CON22 0x2334 +#define MT6357_ACCDET_CON23 0x2336 +#define MT6357_ACCDET_CON24 0x2338 +#define MT6357_ACCDET_CON25 0x233a +#define MT6357_ACCDET_CON26 0x233c +#define MT6357_ACCDET_CON27 0x233e +#define MT6357_ACCDET_CON28 0x2340 + +#endif /* __MFD_MT6357_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 1cf78726503b..627487e26287 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -12,6 +12,9 @@ enum chip_id { MT6323_CHIP_ID = 0x23, + MT6331_CHIP_ID = 0x20, + MT6332_CHIP_ID = 0x20, + MT6357_CHIP_ID = 0x57, MT6358_CHIP_ID = 0x58, MT6359_CHIP_ID = 0x59, MT6366_CHIP_ID = 0x66, diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h index 69632c1b07bd..ae3e7a5c5219 100644 --- a/include/linux/mfd/t7l66xb.h +++ b/include/linux/mfd/t7l66xb.h @@ -12,7 +12,6 @@ struct t7l66xb_platform_data { int (*enable)(struct platform_device *dev); - int (*disable)(struct platform_device *dev); int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h index b4888209494a..aacf1dcc86b9 100644 --- a/include/linux/mfd/tc6387xb.h +++ b/include/linux/mfd/tc6387xb.h @@ -12,7 +12,6 @@ struct tc6387xb_platform_data { int (*enable)(struct platform_device *dev); - int (*disable)(struct platform_device *dev); int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); }; diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h index d336c541b7df..d17807f2d0c9 100644 --- a/include/linux/mfd/tc6393xb.h +++ b/include/linux/mfd/tc6393xb.h @@ -22,7 +22,7 @@ struct tc6393xb_platform_data { u16 scr_gper; /* GP Enable */ int (*enable)(struct platform_device *dev); - int (*disable)(struct platform_device *dev); + void (*disable)(struct platform_device *dev); int (*suspend)(struct platform_device *dev); int (*resume)(struct platform_device *dev); diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h index e0a417e53766..16f87cccc003 100644 --- a/include/linux/mfd/tps65086.h +++ b/include/linux/mfd/tps65086.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. 
Davis <afd@ti.com> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether expressed or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. - * * Based on the TPS65912 driver */ diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index db7091824ed0..877d9c41c53d 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -1,18 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/mfd/tps65217.h * * Functions to access TPS65217 power management chip. * * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __LINUX_MFD_TPS65217_H diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index 122e24ddbd4b..2946be2f15f3 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -1,18 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * linux/mfd/tps65218.h * * Functions to access TPS65218 power management chip. * * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether expressed or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. */ #ifndef __LINUX_MFD_TPS65218_H diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index 8a61386cb8c1..860ec0a16c96 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis <afd@ti.com> * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether expressed or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License version 2 for more details. 
- * * Based on the TPS65218 driver and the previous TPS65912 driver by * Margarita Olaya Cabrera <magi@slimlogic.co.uk> */ diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h index 8871cc5188a0..eaa233038254 100644 --- a/include/linux/mfd/twl.h +++ b/include/linux/mfd/twl.h @@ -594,8 +594,6 @@ struct twl4030_gpio_platform_data { int (*setup)(struct device *dev, unsigned gpio, unsigned ngpio); - int (*teardown)(struct device *dev, - unsigned gpio, unsigned ngpio); }; struct twl4030_madc_platform_data { @@ -694,61 +692,6 @@ struct twl4030_audio_data { unsigned int irq_base; }; -struct twl4030_platform_data { - struct twl4030_clock_init_data *clock; - struct twl4030_bci_platform_data *bci; - struct twl4030_gpio_platform_data *gpio; - struct twl4030_madc_platform_data *madc; - struct twl4030_keypad_data *keypad; - struct twl4030_usb_data *usb; - struct twl4030_power_data *power; - struct twl4030_audio_data *audio; - - /* Common LDO regulators for TWL4030/TWL6030 */ - struct regulator_init_data *vdac; - struct regulator_init_data *vaux1; - struct regulator_init_data *vaux2; - struct regulator_init_data *vaux3; - struct regulator_init_data *vdd1; - struct regulator_init_data *vdd2; - struct regulator_init_data *vdd3; - /* TWL4030 LDO regulators */ - struct regulator_init_data *vpll1; - struct regulator_init_data *vpll2; - struct regulator_init_data *vmmc1; - struct regulator_init_data *vmmc2; - struct regulator_init_data *vsim; - struct regulator_init_data *vaux4; - struct regulator_init_data *vio; - struct regulator_init_data *vintana1; - struct regulator_init_data *vintana2; - struct regulator_init_data *vintdig; - /* TWL6030 LDO regulators */ - struct regulator_init_data *vmmc; - struct regulator_init_data *vpp; - struct regulator_init_data *vusim; - struct regulator_init_data *vana; - struct regulator_init_data *vcxio; - struct regulator_init_data *vusb; - struct regulator_init_data *clk32kg; - struct regulator_init_data *v1v8; - struct regulator_init_data *v2v1; - /* TWL6032 LDO regulators */ - struct regulator_init_data *ldo1; - struct regulator_init_data *ldo2; - struct regulator_init_data *ldo3; - struct regulator_init_data *ldo4; - struct regulator_init_data *ldo5; - struct regulator_init_data *ldo6; - struct regulator_init_data *ldo7; - struct regulator_init_data *ldoln; - struct regulator_init_data *ldousb; - /* TWL6032 DCDC regulators */ - struct regulator_init_data *smps3; - struct regulator_init_data *smps4; - struct regulator_init_data *vio6025; -}; - struct twl_regulator_driver_data { int (*set_voltage)(void *data, int target_uV); int (*get_voltage)(void *data); diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 069a89e847f3..22c0a0cf5e0c 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -19,27 +19,59 @@ struct migration_target_control; */ #define MIGRATEPAGE_SUCCESS 0 +/** + * struct movable_operations - Driver page migration + * @isolate_page: + * The VM calls this function to prepare the page to be moved. The page + * is locked and the driver should not unlock it. The driver should + * return ``true`` if the page is movable and ``false`` if it is not + * currently movable. After this function returns, the VM uses the + * page->lru field, so the driver must preserve any information which + * is usually stored here. + * + * @migrate_page: + * After isolation, the VM calls this function with the isolated + * @src page. The driver should copy the contents of the + * @src page to the @dst page and set up the fields of @dst page. 
+ * Both pages are locked. + * If page migration is successful, the driver should call + * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS. + * If the driver cannot migrate the page at the moment, it can return + * -EAGAIN. The VM interprets this as a temporary migration failure and + * will retry it later. Any other error value is a permanent migration + * failure and migration will not be retried. + * The driver shouldn't touch the @src->lru field while in the + * migrate_page() function. It may write to @dst->lru. + * + * @putback_page: + * If migration fails on the isolated page, the VM informs the driver + * that the page is no longer a candidate for migration by calling + * this function. The driver should put the isolated page back into + * its own data structure. + */ +struct movable_operations { + bool (*isolate_page)(struct page *, isolate_mode_t); + int (*migrate_page)(struct page *dst, struct page *src, + enum migrate_mode); + void (*putback_page)(struct page *); +}; + /* Defined in mm/debug.c: */ extern const char *migrate_reason_names[MR_TYPES]; #ifdef CONFIG_MIGRATION extern void putback_movable_pages(struct list_head *l); -extern int migrate_page(struct address_space *mapping, - struct page *newpage, struct page *page, - enum migrate_mode mode); +int migrate_folio(struct address_space *mapping, struct folio *dst, + struct folio *src, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded); extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); -extern void migrate_page_states(struct page *newpage, struct page *page); -extern void migrate_page_copy(struct page *newpage, struct page *page); -extern int migrate_huge_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page); -extern int migrate_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page, int extra_count); +int migrate_huge_page_move_mapping(struct address_space *mapping, + struct folio *dst, struct folio *src); void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, spinlock_t *ptl); void folio_migrate_flags(struct folio *newfolio, struct folio *folio); @@ -60,15 +92,8 @@ static inline struct page *alloc_migration_target(struct page *page, static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } -static inline void migrate_page_states(struct page *newpage, struct page *page) -{ -} - -static inline void migrate_page_copy(struct page *newpage, - struct page *page) {} - static inline int migrate_huge_page_move_mapping(struct address_space *mapping, - struct page *newpage, struct page *page) + struct folio *dst, struct folio *src) { return -ENOSYS; } @@ -91,13 +116,13 @@ static inline int next_demotion_node(int node) #endif #ifdef CONFIG_COMPACTION -extern int PageMovable(struct page *page); -extern void __SetPageMovable(struct page *page, struct address_space *mapping); -extern void __ClearPageMovable(struct page *page); +bool PageMovable(struct page *page); +void __SetPageMovable(struct page *page, const struct movable_operations *ops); +void __ClearPageMovable(struct page *page); #else -static inline int PageMovable(struct page *page) { return 0; } +static inline bool PageMovable(struct page *page) { return false; } static inline void 
__SetPageMovable(struct page *page, - struct address_space *mapping) + const struct movable_operations *ops) { } static inline void __ClearPageMovable(struct page *page) @@ -110,6 +135,15 @@ static inline bool folio_test_movable(struct folio *folio) return PageMovable(&folio->page); } +static inline +const struct movable_operations *page_movable_ops(struct page *page) +{ + VM_BUG_ON(!__PageMovable(page)); + + return (const struct movable_operations *) + ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE); +} + #ifdef CONFIG_NUMA_BALANCING extern int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node); @@ -148,6 +182,7 @@ static inline unsigned long migrate_pfn(unsigned long pfn) enum migrate_vma_direction { MIGRATE_VMA_SELECT_SYSTEM = 1 << 0, MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1, + MIGRATE_VMA_SELECT_DEVICE_COHERENT = 1 << 2, }; struct migrate_vma { diff --git a/include/linux/mii.h b/include/linux/mii.h index 5ee13083cec7..d5a959ce4877 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -545,4 +545,39 @@ static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) return cap; } +/** + * mii_bmcr_encode_fixed - encode fixed speed/duplex settings to a BMCR value + * @speed: a SPEED_* value + * @duplex: a DUPLEX_* value + * + * Encode the speed and duplex to a BMCR value. 2500, 1000, 100 and 10 Mbps are + * supported. 2500Mbps is encoded to 1000Mbps. Other speeds are encoded as 10 + * Mbps. Unknown duplex values are encoded to half-duplex. + */ +static inline u16 mii_bmcr_encode_fixed(int speed, int duplex) +{ + u16 bmcr; + + switch (speed) { + case SPEED_2500: + case SPEED_1000: + bmcr = BMCR_SPEED1000; + break; + + case SPEED_100: + bmcr = BMCR_SPEED100; + break; + + case SPEED_10: + default: + bmcr = BMCR_SPEED10; + break; + } + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + + return bmcr; +} + #endif /* __LINUX_MII_H__ */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 0676f18093f9..c0fea6ca5076 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -44,7 +44,7 @@ #define AGPGART_MINOR 175 #define TOSH_MINOR_DEV 181 #define HWRNG_MINOR 183 -#define MICROCODE_MINOR 184 +/*#define MICROCODE_MINOR 184 unused */ #define KEYPAD_MINOR 185 #define IRNET_MINOR 187 #define D7S_MINOR 193 diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 604b85dd770a..b5f58fd37a0f 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -387,21 +387,6 @@ enum { }; enum { - MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, - MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, - MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, - MLX5_DEV_CAP_FLAG_APM = 1LL << 17, - MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, - MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, - MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, - MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, - MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, - MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, - MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, - MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, -}; - -enum { MLX5_ROCE_VERSION_1 = 0, MLX5_ROCE_VERSION_2 = 2, }; @@ -455,6 +440,7 @@ enum { MLX5_OPCODE_UMR = 0x25, + MLX5_OPCODE_ACCESS_ASO = 0x2d, }; enum { @@ -496,10 +482,6 @@ enum { }; enum { - MLX5_CAP_OFF_CMDIF_CSUM = 46, -}; - -enum { /* * Max wqe size for rdma read is 512 bytes, so this * limits our max_sge_rd as the wqe needs to fit: @@ -840,7 +822,10 @@ struct mlx5_cqe64 { __be32 timestamp_l; __be32 sop_drop_qpn; __be16 wqe_counter; - u8 signature; + union { + u8 signature; + u8 
validity_iteration_count; + }; u8 op_own; }; @@ -872,6 +857,11 @@ enum { MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3, }; +enum { + MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0, + MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1, +}; + #define MLX5_MINI_CQE_ARRAY_SIZE 8 static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) @@ -884,6 +874,12 @@ static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) return cqe->op_own >> 4; } +static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe) +{ + /* num_of_mini_cqes is zero based */ + return get_cqe_opcode(cqe) + 1; +} + static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro.tcppsh_abort_dupack >> 6) & 1; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 5040cd774c5a..96b16fbe1aa4 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -551,6 +551,10 @@ enum { * creation/deletion on drivers rescan. Unset during device attach. */ MLX5_PRIV_FLAGS_DETACH = 1 << 2, + /* Distinguish between mlx5e_probe/remove called by module init/cleanup + * and called by other flows which can already hold devlink lock + */ + MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW = 1 << 3, }; struct mlx5_adev { @@ -606,6 +610,7 @@ struct mlx5_priv { spinlock_t ctx_lock; struct mlx5_adev **adev; int adev_idx; + int sw_vhca_id; struct mlx5_events *events; struct mlx5_flow_steering *steering; @@ -676,6 +681,7 @@ struct mlx5e_resources { enum mlx5_sw_icm_type { MLX5_SW_ICM_TYPE_STEERING, MLX5_SW_ICM_TYPE_HEADER_MODIFY, + MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN, }; #define MLX5_MAX_RESERVED_GIDS 8 @@ -727,10 +733,10 @@ enum { }; enum { - MR_CACHE_LAST_STD_ENTRY = 20, + MKEY_CACHE_LAST_STD_ENTRY = 20, MLX5_IMR_MTT_CACHE_ENTRY, MLX5_IMR_KSM_CACHE_ENTRY, - MAX_MR_CACHE_ENTRIES + MAX_MKEY_CACHE_ENTRIES }; struct mlx5_profile { @@ -739,7 +745,7 @@ struct mlx5_profile { struct { int size; int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; + } mr_cache[MAX_MKEY_CACHE_ENTRIES]; }; struct mlx5_hca_cap { diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index 8b18fe9771f9..e2701ed0200e 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -12,7 +12,6 @@ #define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager) enum { - MLX5_ESWITCH_NONE, MLX5_ESWITCH_LEGACY, MLX5_ESWITCH_OFFLOADS }; @@ -153,7 +152,7 @@ struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw); static inline u8 mlx5_eswitch_mode(const struct mlx5_core_dev *dev) { - return MLX5_ESWITCH_NONE; + return MLX5_ESWITCH_LEGACY; } static inline enum devlink_eswitch_encap_mode @@ -198,6 +197,11 @@ static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitc #endif /* CONFIG_MLX5_ESWITCH */ +static inline bool is_mdev_legacy_mode(struct mlx5_core_dev *dev) +{ + return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_LEGACY; +} + static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) { return mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 8135713b0d2d..8e73c377da2c 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -178,6 +178,7 @@ struct mlx5_flow_table_attr { int max_fte; u32 level; u32 flags; + u16 uid; struct mlx5_flow_table *next_ft; struct { @@ -212,6 +213,19 @@ struct mlx5_flow_group * mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in); void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); +struct mlx5_exe_aso { + u32 object_id; + u8 type; + u8 return_reg_id; + union { + u32 ctrl_data; 
+ struct { + u8 meter_idx; + u8 init_color; + } flow_meter; + }; +}; + struct mlx5_fs_vlan { u16 ethtype; u16 vid; @@ -237,6 +251,7 @@ struct mlx5_flow_act { struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; struct mlx5_flow_group *fg; + struct mlx5_exe_aso exe_aso; }; #define MLX5_DECLARE_FLOW_ACT(name) \ @@ -301,4 +316,5 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev, void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *reformat); +u32 mlx5_flow_table_id(struct mlx5_flow_table *ft); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index fd7d083a34d3..4acd5610e96b 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -442,7 +442,9 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 max_modify_header_actions[0x8]; u8 max_ft_level[0x8]; - u8 reserved_at_40[0x20]; + u8 reserved_at_40[0x6]; + u8 execute_aso[0x1]; + u8 reserved_at_47[0x19]; u8 reserved_at_60[0x2]; u8 reformat_insert[0x1]; @@ -940,7 +942,17 @@ struct mlx5_ifc_qos_cap_bits { u8 max_tsar_bw_share[0x20]; - u8 reserved_at_100[0x700]; + u8 reserved_at_100[0x20]; + + u8 reserved_at_120[0x3]; + u8 log_meter_aso_granularity[0x5]; + u8 reserved_at_128[0x3]; + u8 log_meter_aso_max_alloc[0x5]; + u8 reserved_at_130[0x3]; + u8 log_max_num_meter_aso[0x5]; + u8 reserved_at_138[0x8]; + + u8 reserved_at_140[0x6c0]; }; struct mlx5_ifc_debug_cap_bits { @@ -1086,11 +1098,14 @@ struct mlx5_ifc_device_mem_cap_bits { u8 log_sw_icm_alloc_granularity[0x6]; u8 log_steering_sw_icm_size[0x8]; - u8 reserved_at_120[0x20]; + u8 reserved_at_120[0x18]; + u8 log_header_modify_pattern_sw_icm_size[0x8]; u8 header_modify_sw_icm_start_address[0x40]; - u8 reserved_at_180[0x80]; + u8 reserved_at_180[0x40]; + + u8 header_modify_pattern_sw_icm_start_address[0x40]; u8 memic_operations[0x20]; @@ -1356,7 +1371,9 @@ enum { }; struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x1f]; + u8 reserved_at_0[0x10]; + u8 shared_object_to_user_object_allowed[0x1]; + u8 reserved_at_13[0xe]; u8 vhca_resource_manager[0x1]; u8 hca_cap_2[0x1]; @@ -1426,7 +1443,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_120[0xa]; u8 log_max_ra_req_dc[0x6]; - u8 reserved_at_130[0xa]; + u8 reserved_at_130[0x9]; + u8 vnic_env_cq_overrun[0x1]; u8 log_max_ra_res_dc[0x6]; u8 reserved_at_140[0x5]; @@ -1621,7 +1639,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 nic_receive_steering_discard[0x1]; u8 receive_discard_vport_down[0x1]; u8 transmit_discard_vport_down[0x1]; - u8 reserved_at_343[0x5]; + u8 eq_overrun_count[0x1]; + u8 reserved_at_344[0x1]; + u8 invalid_command_count[0x1]; + u8 quota_exceeded_count[0x1]; + u8 reserved_at_347[0x1]; u8 log_max_flow_counter_bulk[0x8]; u8 max_flow_counter_15_0[0x10]; @@ -1719,7 +1741,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_dci_errored_streams[0x5]; u8 reserved_at_598[0x8]; - u8 reserved_at_5a0[0x13]; + u8 reserved_at_5a0[0x10]; + u8 enhanced_cqe_compression[0x1]; + u8 reserved_at_5b1[0x2]; u8 log_max_dek[0x5]; u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; @@ -1804,7 +1828,18 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 max_reformat_remove_size[0x8]; u8 max_reformat_remove_offset[0x8]; - u8 reserved_at_c0[0x740]; + u8 reserved_at_c0[0x160]; + + u8 reserved_at_220[0x1]; + u8 sw_vhca_id_valid[0x1]; + u8 sw_vhca_id[0xe]; + u8 reserved_at_230[0x10]; + + u8 reserved_at_240[0xb]; + u8 ts_cqe_metadata_size2wqe_counter[0x5]; + u8 reserved_at_250[0x10]; + + u8 reserved_at_260[0x5a0]; }; enum 
mlx5_ifc_flow_destination_type { @@ -3277,6 +3312,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000, MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000, + MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000, }; enum { @@ -3292,6 +3328,38 @@ struct mlx5_ifc_vlan_bits { u8 vid[0xc]; }; +enum { + MLX5_FLOW_METER_COLOR_RED = 0x0, + MLX5_FLOW_METER_COLOR_YELLOW = 0x1, + MLX5_FLOW_METER_COLOR_GREEN = 0x2, + MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3, +}; + +enum { + MLX5_EXE_ASO_FLOW_METER = 0x2, +}; + +struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits { + u8 return_reg_id[0x4]; + u8 aso_type[0x4]; + u8 reserved_at_8[0x14]; + u8 action[0x1]; + u8 init_color[0x2]; + u8 meter_id[0x1]; +}; + +union mlx5_ifc_exe_aso_ctrl { + struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter; +}; + +struct mlx5_ifc_execute_aso_bits { + u8 valid[0x1]; + u8 reserved_at_1[0x7]; + u8 aso_object_id[0x18]; + + union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl; +}; + struct mlx5_ifc_flow_context_bits { struct mlx5_ifc_vlan_bits push_vlan; @@ -3323,7 +3391,9 @@ struct mlx5_ifc_flow_context_bits { struct mlx5_ifc_fte_match_param_bits match_value; - u8 reserved_at_1200[0x600]; + struct mlx5_ifc_execute_aso_bits execute_aso[4]; + + u8 reserved_at_1300[0x500]; union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[]; }; @@ -3391,11 +3461,21 @@ struct mlx5_ifc_vnic_diagnostic_statistics_bits { u8 transmit_discard_vport_down[0x40]; - u8 reserved_at_140[0xa0]; + u8 async_eq_overrun[0x20]; + + u8 comp_eq_overrun[0x20]; + + u8 reserved_at_180[0x20]; + + u8 invalid_command[0x20]; + + u8 quota_exceeded_command[0x20]; u8 internal_rq_out_of_buffer[0x20]; - u8 reserved_at_200[0xe00]; + u8 cq_overrun[0x20]; + + u8 reserved_at_220[0xde0]; }; struct mlx5_ifc_traffic_counter_bits { @@ -3715,6 +3795,11 @@ struct mlx5_ifc_rmpc_bits { struct mlx5_ifc_wq_bits wq; }; +enum { + VHCA_ID_TYPE_HW = 0, + VHCA_ID_TYPE_SW = 1, +}; + struct mlx5_ifc_nic_vport_context_bits { u8 reserved_at_0[0x5]; u8 min_wqe_inline_mode[0x3]; @@ -3731,8 +3816,8 @@ struct mlx5_ifc_nic_vport_context_bits { u8 event_on_mc_address_change[0x1]; u8 event_on_uc_address_change[0x1]; - u8 reserved_at_40[0xc]; - + u8 vhca_id_type[0x1]; + u8 reserved_at_41[0xb]; u8 affiliation_criteria[0x4]; u8 affiliated_vhca_id[0x10]; @@ -4074,7 +4159,8 @@ struct mlx5_ifc_cqc_bits { u8 cqe_comp_en[0x1]; u8 mini_cqe_res_format[0x2]; u8 st[0x4]; - u8 reserved_at_18[0x8]; + u8 reserved_at_18[0x6]; + u8 cqe_compression_layout[0x2]; u8 reserved_at_20[0x20]; @@ -5970,7 +6056,9 @@ struct mlx5_ifc_general_obj_in_cmd_hdr_bits { u8 obj_id[0x20]; - u8 reserved_at_60[0x20]; + u8 reserved_at_60[0x3]; + u8 log_obj_range[0x5]; + u8 reserved_at_68[0x18]; }; struct mlx5_ifc_general_obj_out_cmd_hdr_bits { @@ -7189,7 +7277,12 @@ struct mlx5_ifc_init_hca_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x2]; + u8 sw_vhca_id[0xe]; + u8 reserved_at_70[0x10]; + u8 sw_owner_id[4][0x20]; }; @@ -8437,7 +8530,7 @@ struct mlx5_ifc_create_flow_table_out_bits { struct mlx5_ifc_create_flow_table_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -11370,12 +11463,14 @@ enum { MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT_ULL(0xc), MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT_ULL(0x13), MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_SAMPLER = BIT_ULL(0x20), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = BIT_ULL(0x24), }; 
enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, MLX5_GENERAL_OBJECT_TYPES_SAMPLER = 0x20, + MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24, }; enum { @@ -11448,6 +11543,61 @@ struct mlx5_ifc_create_encryption_key_in_bits { struct mlx5_ifc_encryption_key_obj_bits encryption_key_object; }; +enum { + MLX5_FLOW_METER_MODE_BYTES_IP_LENGTH = 0x0, + MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2 = 0x1, + MLX5_FLOW_METER_MODE_BYTES_CALC_WITH_L2_IPG = 0x2, + MLX5_FLOW_METER_MODE_NUM_PACKETS = 0x3, +}; + +struct mlx5_ifc_flow_meter_parameters_bits { + u8 valid[0x1]; + u8 bucket_overflow[0x1]; + u8 start_color[0x2]; + u8 both_buckets_on_green[0x1]; + u8 reserved_at_5[0x1]; + u8 meter_mode[0x2]; + u8 reserved_at_8[0x18]; + + u8 reserved_at_20[0x20]; + + u8 reserved_at_40[0x3]; + u8 cbs_exponent[0x5]; + u8 cbs_mantissa[0x8]; + u8 reserved_at_50[0x3]; + u8 cir_exponent[0x5]; + u8 cir_mantissa[0x8]; + + u8 reserved_at_60[0x20]; + + u8 reserved_at_80[0x3]; + u8 ebs_exponent[0x5]; + u8 ebs_mantissa[0x8]; + u8 reserved_at_90[0x3]; + u8 eir_exponent[0x5]; + u8 eir_mantissa[0x8]; + + u8 reserved_at_a0[0x60]; +}; + +struct mlx5_ifc_flow_meter_aso_obj_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x40]; + + u8 reserved_at_80[0x8]; + u8 meter_aso_access_pd[0x18]; + + u8 reserved_at_a0[0x160]; + + struct mlx5_ifc_flow_meter_parameters_bits flow_meter_parameters[2]; +}; + +struct mlx5_ifc_create_flow_meter_aso_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; + struct mlx5_ifc_flow_meter_aso_obj_bits flow_meter_aso_obj; +}; + struct mlx5_ifc_sampler_obj_bits { u8 modify_field_select[0x40]; diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 4414ed5b6ed2..9becdc3fa503 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -150,6 +150,14 @@ enum { MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3, }; +/* This indicates that the object was not created or has already + * been desroyed. It is very safe to assume that this object will never + * have so many states + */ +enum { + MLX5_VIRTIO_NET_Q_OBJECT_NONE = 0xffffffff +}; + enum { MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1, diff --git a/include/linux/mm.h b/include/linux/mm.h index 7898e29bcfb5..3bedc449c14d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -28,6 +28,7 @@ #include <linux/sched.h> #include <linux/pgtable.h> #include <linux/kasan.h> +#include <linux/memremap.h> struct mempolicy; struct anon_vma; @@ -221,6 +222,9 @@ int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, /* to align the pointer to the (next) page boundary */ #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) +/* to align the pointer to the (prev) page boundary */ +#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE) + /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */ #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE) @@ -424,7 +428,6 @@ extern unsigned int kobjsize(const void *objp); * mapping from the currently active vm_flags protection bits (the * low four bits) to a page protection mask.. 
*/ -extern pgprot_t protection_map[16]; /* * The default fault flags that should be used by most of the @@ -855,7 +858,7 @@ static inline struct folio *virt_to_folio(const void *x) return page_folio(page); } -void __put_page(struct page *page); +void __folio_put(struct folio *folio); void put_pages_list(struct list_head *pages); @@ -892,11 +895,7 @@ static inline void set_compound_page_dtor(struct page *page, page[1].compound_dtor = compound_dtor; } -static inline void destroy_compound_page(struct page *page) -{ - VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); - compound_page_dtors[page[1].compound_dtor](page); -} +void destroy_large_folio(struct folio *folio); static inline int head_compound_pincount(struct page *head) { @@ -1049,84 +1048,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); * back into memory. */ -/* - * The zone field is never updated after free_area_init_core() - * sets it, so none of the operations on it need to be atomic. - */ - -/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ -#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) -#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) -#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) -#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) -#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) - -/* - * Define the bit shifts to access each section. For non-existent - * sections we define the shift as 0; that plus a 0 mask ensures - * the compiler will optimise away reference to them. - */ -#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) -#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) -#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) -#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) -#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) - -/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ -#ifdef NODE_NOT_IN_PAGE_FLAGS -#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) -#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ - SECTIONS_PGOFF : ZONES_PGOFF) -#else -#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) -#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? 
\ - NODES_PGOFF : ZONES_PGOFF) -#endif - -#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) - -#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) -#define NODES_MASK ((1UL << NODES_WIDTH) - 1) -#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) -#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) -#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) -#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) - -static inline enum zone_type page_zonenum(const struct page *page) -{ - ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); - return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; -} - -static inline enum zone_type folio_zonenum(const struct folio *folio) -{ - return page_zonenum(&folio->page); -} - -#ifdef CONFIG_ZONE_DEVICE -static inline bool is_zone_device_page(const struct page *page) -{ - return page_zonenum(page) == ZONE_DEVICE; -} -extern void memmap_init_zone_device(struct zone *, unsigned long, - unsigned long, struct dev_pagemap *); -#else -static inline bool is_zone_device_page(const struct page *page) -{ - return false; -} -#endif - -static inline bool folio_is_zone_device(const struct folio *folio) -{ - return is_zone_device_page(&folio->page); -} - -static inline bool is_zone_movable_page(const struct page *page) -{ - return page_zonenum(page) == ZONE_MOVABLE; -} - #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX) DECLARE_STATIC_KEY_FALSE(devmap_managed_key); @@ -1201,7 +1122,7 @@ static inline __must_check bool try_get_page(struct page *page) static inline void folio_put(struct folio *folio) { if (folio_put_testzero(folio)) - __put_page(&folio->page); + __folio_put(folio); } /** @@ -1221,7 +1142,26 @@ static inline void folio_put(struct folio *folio) static inline void folio_put_refs(struct folio *folio, int refs) { if (folio_ref_sub_and_test(folio, refs)) - __put_page(&folio->page); + __folio_put(folio); +} + +void release_pages(struct page **pages, int nr); + +/** + * folios_put - Decrement the reference count on an array of folios. + * @folios: The folios. + * @nr: How many folios there are. + * + * Like folio_put(), but for an array of folios. This is more efficient + * than writing the loop yourself as it will optimise the locks which + * need to be taken if the folios are freed. + * + * Context: May be called in process or interrupt context, but not in NMI + * context. May be called while holding a spinlock. 
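/*
 * Editor's note: an illustrative caller (not part of the patch) for the
 * batched folio release path documented above. The array, its length and
 * the function names are assumptions made only for this sketch.
 */
#include <linux/mm.h>

static void example_drop_folio_refs(struct folio **folios, unsigned int nr)
{
	/* One call drops all references and batches the lock taking... */
	folios_put(folios, nr);
}

static void example_drop_one(struct folio *folio)
{
	/* ...whereas the single-folio path now ends in __folio_put(). */
	folio_put(folio);
}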
+ */ +static inline void folios_put(struct folio **folios, unsigned int nr) +{ + release_pages((struct page **)folios, nr); } static inline void put_page(struct page *page) @@ -1596,7 +1536,7 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION -static inline bool is_pinnable_page(struct page *page) +static inline bool is_longterm_pinnable_page(struct page *page) { #ifdef CONFIG_CMA int mt = get_pageblock_migratetype(page); @@ -1604,18 +1544,20 @@ static inline bool is_pinnable_page(struct page *page) if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) return false; #endif - return !is_zone_movable_page(page) || is_zero_pfn(page_to_pfn(page)); + return !(is_device_coherent_page(page) || + is_zone_movable_page(page) || + is_zero_pfn(page_to_pfn(page))); } #else -static inline bool is_pinnable_page(struct page *page) +static inline bool is_longterm_pinnable_page(struct page *page) { return true; } #endif -static inline bool folio_is_pinnable(struct folio *folio) +static inline bool folio_is_longterm_pinnable(struct folio *folio) { - return is_pinnable_page(&folio->page); + return is_longterm_pinnable_page(&folio->page); } static inline void set_page_zone(struct page *page, enum zone_type zone) @@ -1966,8 +1908,12 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, * for now all the callers are only use one of the flags at the same * time. */ -/* Whether we should allow dirty bit accounting */ -#define MM_CP_DIRTY_ACCT (1UL << 0) +/* + * Whether we should manually check if we can map individual PTEs writable, + * because something (e.g., COW, uffd-wp) blocks that from happening for all + * PTEs automatically in a writable mapping. + */ +#define MM_CP_TRY_CHANGE_WRITABLE (1UL << 0) /* Whether this protection change is for NUMA hints */ #define MM_CP_PROT_NUMA (1UL << 1) /* Whether this change is for write protecting */ @@ -3196,13 +3142,6 @@ static inline void print_vma_addr(char *prefix, unsigned long rip) } #endif -#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP -int vmemmap_remap_free(unsigned long start, unsigned long end, - unsigned long reuse); -int vmemmap_remap_alloc(unsigned long start, unsigned long end, - unsigned long reuse, gfp_t gfp_mask); -#endif - void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, @@ -3237,7 +3176,10 @@ enum mf_flags { MF_SOFT_OFFLINE = 1 << 3, MF_UNPOISON = 1 << 4, MF_SW_SIMULATED = 1 << 5, + MF_NO_RETRY = 1 << 6, }; +int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, + unsigned long count, int mf_flags); extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); extern void memory_failure_queue_kick(int cpu); @@ -3287,7 +3229,6 @@ enum mf_action_page_type { MF_MSG_DIFFERENT_COMPOUND, MF_MSG_HUGE, MF_MSG_FREE_HUGE, - MF_MSG_NON_PMD_HUGE, MF_MSG_UNMAP_FAILED, MF_MSG_DIRTY_SWAPCACHE, MF_MSG_CLEAN_SWAPCACHE, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c29ab4c0cd5c..cf97f3884fda 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -87,6 +87,7 @@ struct page { */ union { struct list_head lru; + /* Or, for the Unevictable "LRU list" slot */ struct { /* Always even, to negate PageTail */ @@ -94,6 +95,10 @@ struct page { /* Count page's or folio's mlocks */ unsigned int mlock_count; }; + + /* Or, free page */ + struct 
list_head buddy_list; + struct list_head pcp_list; }; /* See page-flags.h for PAGE_MAPPING_FLAGS */ struct address_space *mapping; @@ -729,6 +734,7 @@ typedef __bitwise unsigned int vm_fault_t; * @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs * fsync() to complete (for synchronous page faults * in DAX) + * @VM_FAULT_COMPLETED: ->fault completed, meanwhile mmap lock released * @VM_FAULT_HINDEX_MASK: mask HINDEX value * */ @@ -746,6 +752,7 @@ enum vm_fault_reason { VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800, VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000, VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000, + VM_FAULT_COMPLETED = (__force vm_fault_t)0x004000, VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000, }; diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 37f975875102..8a30de08e913 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -219,7 +219,8 @@ struct sdio_cccr { wide_bus:1, high_power:1, high_speed:1, - disable_cd:1; + disable_cd:1, + enable_async_irq:1; }; struct sdio_cis { @@ -343,10 +344,16 @@ static inline bool mmc_large_sector(struct mmc_card *card) return card->ext_csd.data_sector_size == 4096; } +static inline int mmc_card_enable_async_irq(struct mmc_card *card) +{ + return card->cccr.enable_async_irq; +} + bool mmc_card_is_blockaddr(struct mmc_card *card); #define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC) #define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD) #define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO) +#define mmc_card_sd_combo(c) ((c)->type == MMC_TYPE_SD_COMBO) #endif /* LINUX_MMC_CARD_H */ diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c193c50ccd78..eb8bc5b9b0b7 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -93,6 +93,25 @@ struct mmc_clk_phase_map { struct mmc_host; +enum mmc_err_stat { + MMC_ERR_CMD_TIMEOUT, + MMC_ERR_CMD_CRC, + MMC_ERR_DAT_TIMEOUT, + MMC_ERR_DAT_CRC, + MMC_ERR_AUTO_CMD, + MMC_ERR_ADMA, + MMC_ERR_TUNING, + MMC_ERR_CMDQ_RED, + MMC_ERR_CMDQ_GCE, + MMC_ERR_CMDQ_ICCE, + MMC_ERR_REQ_TIMEOUT, + MMC_ERR_CMDQ_REQ_TIMEOUT, + MMC_ERR_ICE_CFG, + MMC_ERR_CTRL_TIMEOUT, + MMC_ERR_UNEXPECTED_IRQ, + MMC_ERR_MAX, +}; + struct mmc_host_ops { /* * It is optional for the host to implement pre_req and post_req in @@ -501,6 +520,7 @@ struct mmc_host { /* Host Software Queue support */ bool hsq_enabled; + u32 err_stats[MMC_ERR_MAX]; unsigned long private[] ____cacheline_aligned; }; @@ -635,6 +655,12 @@ static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data) return data->flags & MMC_DATA_WRITE ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; } +static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host, + enum mmc_err_stat stat) +{ + host->err_stats[stat] += 1; +} + int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode); int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd); diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index d9a65c6a8816..9c50bc40f8ff 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -99,6 +99,12 @@ static inline bool mmc_op_multi(u32 opcode) opcode == MMC_READ_MULTIPLE_BLOCK; } +static inline bool mmc_op_tuning(u32 opcode) +{ + return opcode == MMC_SEND_TUNING_BLOCK || + opcode == MMC_SEND_TUNING_BLOCK_HS200; +} + /* * MMC_SWITCH argument format: * diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 2a05d1ac4f0e..1ef400f28642 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h @@ -159,6 +159,11 @@ #define SDIO_DTSx_SET_TYPE_A (1 << SDIO_DRIVE_DTSx_SHIFT) #define SDIO_DTSx_SET_TYPE_C (2 << SDIO_DRIVE_DTSx_SHIFT) #define SDIO_DTSx_SET_TYPE_D (3 << SDIO_DRIVE_DTSx_SHIFT) + +#define SDIO_CCCR_INTERRUPT_EXT 0x16 +#define SDIO_INTERRUPT_EXT_SAI (1 << 0) +#define SDIO_INTERRUPT_EXT_EAI (1 << 1) + /* * Function Basic Registers (FBR) */ diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index d7285f8148a3..15ae78cd2853 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -54,6 +54,15 @@ void dump_mm(const struct mm_struct *mm); } \ unlikely(__ret_warn_once); \ }) +#define VM_WARN_ON_FOLIO(cond, folio) ({ \ + int __ret_warn = !!(cond); \ + \ + if (unlikely(__ret_warn)) { \ + dump_page(&folio->page, "VM_WARN_ON_FOLIO(" __stringify(cond)")");\ + WARN_ON(1); \ + } \ + unlikely(__ret_warn); \ +}) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \ static bool __section(".data.once") __warned; \ int __ret_warn_once = !!(cond); \ @@ -79,6 +88,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 45fc2c81e370..d6c06e140277 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -198,7 +198,7 @@ struct mmu_notifier_ops { * invalidate_range_start()/end() notifiers, as * invalidate_range() already catches the points in time when an * external TLB range needs to be flushed. For more in depth - * discussion on this see Documentation/vm/mmu_notifier.rst + * discussion on this see Documentation/mm/mmu_notifier.rst * * Note that this function might be called with just a sub-range * of what was passed to invalidate_range_start()/end(), if diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index aab70355d64f..e24b40c52468 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -355,15 +355,18 @@ enum zone_watermarks { }; /* - * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER plus one additional - * for pageblock size for THP if configured. + * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. One additional list + * for THP which will usually be GFP_MOVABLE. 
Even if it is another type, + * it should not contribute to serious fragmentation causing THP allocation + * failures. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define NR_PCP_THP 1 #else #define NR_PCP_THP 0 #endif -#define NR_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1 + NR_PCP_THP)) +#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1)) +#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP) /* * Shift to encode migratetype and order in the same integer, with order @@ -379,6 +382,7 @@ enum zone_watermarks { /* Fields and list protected by pagesets local_lock in page_alloc.c */ struct per_cpu_pages { + spinlock_t lock; /* Protects lists field */ int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int batch; /* chunk size for buddy add/remove */ @@ -389,7 +393,7 @@ struct per_cpu_pages { /* Lists of pages, one per migrate type stored on the pcp-lists */ struct list_head lists[NR_PCP_LISTS]; -}; +} ____cacheline_aligned_in_smp; struct per_cpu_zonestat { #ifdef CONFIG_SMP @@ -591,8 +595,8 @@ struct zone { * give them a chance of being in the same cacheline. * * Write access to present_pages at runtime should be protected by - * mem_hotplug_begin/end(). Any reader who can't tolerant drift of - * present_pages should get_online_mems() to get a stable value. + * mem_hotplug_begin/done(). Any reader who can't tolerant drift of + * present_pages should use get_online_mems() to get a stable value. */ atomic_long_t managed_pages; unsigned long spanned_pages; @@ -730,6 +734,86 @@ static inline bool zone_is_empty(struct zone *zone) return zone->spanned_pages == 0; } +#ifndef BUILD_VDSO32_64 +/* + * The zone field is never updated after free_area_init_core() + * sets it, so none of the operations on it need to be atomic. + */ + +/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ +#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) +#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) +#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) +#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) +#define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) + +/* + * Define the bit shifts to access each section. For non-existent + * sections we define the shift as 0; that plus a 0 mask ensures + * the compiler will optimise away reference to them. + */ +#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) +#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) +#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) +#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) +#define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) + +/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ +#ifdef NODE_NOT_IN_PAGE_FLAGS +#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \ + SECTIONS_PGOFF : ZONES_PGOFF) +#else +#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) +#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? 
\ + NODES_PGOFF : ZONES_PGOFF) +#endif + +#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) + +#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) +#define NODES_MASK ((1UL << NODES_WIDTH) - 1) +#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) +#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) +#define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) +#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) + +static inline enum zone_type page_zonenum(const struct page *page) +{ + ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); + return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; +} + +static inline enum zone_type folio_zonenum(const struct folio *folio) +{ + return page_zonenum(&folio->page); +} + +#ifdef CONFIG_ZONE_DEVICE +static inline bool is_zone_device_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_DEVICE; +} +extern void memmap_init_zone_device(struct zone *, unsigned long, + unsigned long, struct dev_pagemap *); +#else +static inline bool is_zone_device_page(const struct page *page) +{ + return false; +} +#endif + +static inline bool folio_is_zone_device(const struct folio *folio) +{ + return is_zone_device_page(&folio->page); +} + +static inline bool is_zone_movable_page(const struct page *page) +{ + return page_zonenum(page) == ZONE_MOVABLE; +} +#endif + /* * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty * intersection with the given zone @@ -870,7 +954,7 @@ typedef struct pglist_data { unsigned long nr_reclaim_start; /* nr pages written while throttled * when throttling started. */ struct task_struct *kswapd; /* Protected by - mem_hotplug_begin/end() */ + mem_hotplug_begin/done() */ int kswapd_order; enum zone_type kswapd_highest_zoneidx; @@ -1053,15 +1137,6 @@ static inline int is_highmem_idx(enum zone_type idx) #endif } -#ifdef CONFIG_ZONE_DMA -bool has_managed_dma(void); -#else -static inline bool has_managed_dma(void) -{ - return false; -} -#endif - /** * is_highmem - helper function to quickly check if a struct zone is a * highmem zone or not. This is an attempt to keep references @@ -1071,12 +1146,17 @@ static inline bool has_managed_dma(void) */ static inline int is_highmem(struct zone *zone) { -#ifdef CONFIG_HIGHMEM return is_highmem_idx(zone_idx(zone)); +} + +#ifdef CONFIG_ZONE_DMA +bool has_managed_dma(void); #else - return 0; -#endif +static inline bool has_managed_dma(void) +{ + return false; } +#endif /* These two functions are used to setup the per zone pages min values */ struct ctl_table; @@ -1418,16 +1498,32 @@ extern size_t mem_section_usage_size(void); * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the * worst combination is powerpc with 256k pages, * which results in PFN_SECTION_SHIFT equal 6. - * To sum it up, at least 6 bits are available. + * To sum it up, at least 6 bits are available on all architectures. + * However, we can exceed 6 bits on some other architectures except + * powerpc (e.g. 15 bits are available on x86_64, 13 bits are available + * with the worst case of 64K pages on arm64) if we make sure the + * exceeded bit is not applicable to powerpc. 
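/*
 * Editor's note: an illustrative (not in-tree) pair of helpers showing how
 * the section_mem_map word discussed above is split by the masks defined
 * just below: the low SECTION_* state bits, plus, during early boot, a
 * node id stored from SECTION_NID_SHIFT upwards (mirroring what
 * mm/sparse.c does with sparse_encode_early_nid()). The names are invented
 * for this sketch.
 */
static inline unsigned long example_section_state(unsigned long section_mem_map)
{
	return section_mem_map & ~SECTION_MAP_MASK;	/* SECTION_MARKED_PRESENT, ... */
}

static inline int example_section_early_nid(unsigned long section_mem_map)
{
	return (int)(section_mem_map >> SECTION_NID_SHIFT);	/* valid only before the mem_map is encoded */
}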
*/ -#define SECTION_MARKED_PRESENT (1UL<<0) -#define SECTION_HAS_MEM_MAP (1UL<<1) -#define SECTION_IS_ONLINE (1UL<<2) -#define SECTION_IS_EARLY (1UL<<3) -#define SECTION_TAINT_ZONE_DEVICE (1UL<<4) -#define SECTION_MAP_LAST_BIT (1UL<<5) -#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) -#define SECTION_NID_SHIFT 6 +enum { + SECTION_MARKED_PRESENT_BIT, + SECTION_HAS_MEM_MAP_BIT, + SECTION_IS_ONLINE_BIT, + SECTION_IS_EARLY_BIT, +#ifdef CONFIG_ZONE_DEVICE + SECTION_TAINT_ZONE_DEVICE_BIT, +#endif + SECTION_MAP_LAST_BIT, +}; + +#define SECTION_MARKED_PRESENT BIT(SECTION_MARKED_PRESENT_BIT) +#define SECTION_HAS_MEM_MAP BIT(SECTION_HAS_MEM_MAP_BIT) +#define SECTION_IS_ONLINE BIT(SECTION_IS_ONLINE_BIT) +#define SECTION_IS_EARLY BIT(SECTION_IS_EARLY_BIT) +#ifdef CONFIG_ZONE_DEVICE +#define SECTION_TAINT_ZONE_DEVICE BIT(SECTION_TAINT_ZONE_DEVICE_BIT) +#endif +#define SECTION_MAP_MASK (~(BIT(SECTION_MAP_LAST_BIT) - 1)) +#define SECTION_NID_SHIFT SECTION_MAP_LAST_BIT static inline struct page *__section_mem_map_addr(struct mem_section *section) { @@ -1466,12 +1562,19 @@ static inline int online_section(struct mem_section *section) return (section && (section->section_mem_map & SECTION_IS_ONLINE)); } +#ifdef CONFIG_ZONE_DEVICE static inline int online_device_section(struct mem_section *section) { unsigned long flags = SECTION_IS_ONLINE | SECTION_TAINT_ZONE_DEVICE; return section && ((section->section_mem_map & flags) == flags); } +#else +static inline int online_device_section(struct mem_section *section) +{ + return 0; +} +#endif static inline int online_section_nr(unsigned long nr) { diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 43986f7ec4dd..1bdc39daac0a 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -19,7 +19,5 @@ void mpage_readahead(struct readahead_control *, get_block_t get_block); int mpage_read_folio(struct folio *folio, get_block_t get_block); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); -int mpage_writepage(struct page *page, get_block_t *get_block, - struct writeback_control *wbc); #endif diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h index e05ee9f001ff..9dd4bf157255 100644 --- a/include/linux/mroute_base.h +++ b/include/linux/mroute_base.h @@ -26,7 +26,7 @@ * @remote: Remote address for tunnels */ struct vif_device { - struct net_device *dev; + struct net_device __rcu *dev; netdevice_tracker dev_tracker; unsigned long bytes_in, bytes_out; unsigned long pkt_in, pkt_out; @@ -52,6 +52,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb, unsigned short family, enum fib_event_type event_type, struct vif_device *vif, + struct net_device *vif_dev, unsigned short vif_index, u32 tb_id, struct netlink_ext_ack *extack) { @@ -60,7 +61,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb, .family = family, .extack = extack, }, - .dev = vif->dev, + .dev = vif_dev, .vif_index = vif_index, .vif_flags = vif->flags, .tb_id = tb_id, @@ -73,6 +74,7 @@ static inline int mr_call_vif_notifiers(struct net *net, unsigned short family, enum fib_event_type event_type, struct vif_device *vif, + struct net_device *vif_dev, unsigned short vif_index, u32 tb_id, unsigned int *ipmr_seq) { @@ -80,7 +82,7 @@ static inline int mr_call_vif_notifiers(struct net *net, .info = { .family = family, }, - .dev = vif->dev, + .dev = vif_dev, .vif_index = vif_index, .vif_flags = vif->flags, .tb_id = tb_id, @@ -98,7 +100,8 @@ static inline int mr_call_vif_notifiers(struct net 
*net, #define MAXVIFS 32 #endif -#define VIF_EXISTS(_mrt, _idx) (!!((_mrt)->vif_table[_idx].dev)) +/* Note: This helper is deprecated. */ +#define VIF_EXISTS(_mrt, _idx) (!!rcu_access_pointer((_mrt)->vif_table[_idx].dev)) /* mfc_flags: * MFC_STATIC - the entry was added statically (not by a routing daemon) @@ -305,7 +308,7 @@ int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family, struct netlink_ext_ack *extack), struct mr_table *(*mr_iter)(struct net *net, struct mr_table *mrt), - rwlock_t *mrt_lock, struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack); #else static inline void vif_device_init(struct vif_device *v, struct net_device *dev, @@ -360,7 +363,7 @@ static inline int mr_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack), struct mr_table *(*mr_iter)(struct net *net, struct mr_table *mrt), - rwlock_t *mrt_lock, struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack) { return -EINVAL; } diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h index 0ce612428aea..bb6b7121a542 100644 --- a/include/linux/mtd/hyperbus.h +++ b/include/linux/mtd/hyperbus.h @@ -89,9 +89,7 @@ int hyperbus_register_device(struct hyperbus_device *hbdev); /** * hyperbus_unregister_device - deregister HyperBus slave memory device * @hbdev: hyperbus_device to be unregistered - * - * Return: 0 for success, others for failure. */ -int hyperbus_unregister_device(struct hyperbus_device *hbdev); +void hyperbus_unregister_device(struct hyperbus_device *hbdev); #endif /* __LINUX_MTD_HYPERBUS_H__ */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1ede4c89805a..42218a1164f6 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -351,7 +351,7 @@ struct spi_nor_flash_parameter; * @bouncebuf_size: size of the bounce buffer * @info: SPI NOR part JEDEC MFR ID and other info * @manufacturer: SPI NOR manufacturer - * @addr_width: number of address bytes + * @addr_nbytes: number of address bytes * @erase_opcode: the opcode for erasing a sector * @read_opcode: the read opcode * @read_dummy: the dummy needed by the read operation @@ -381,7 +381,7 @@ struct spi_nor { size_t bouncebuf_size; const struct flash_info *info; const struct spi_nor_manufacturer *manufacturer; - u8 addr_width; + u8 addr_nbytes; u8 erase_opcode; u8 read_opcode; u8 read_dummy; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 5584d3bb6556..6d3392a7edc6 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -260,6 +260,7 @@ struct spinand_manufacturer { }; /* SPI NAND manufacturers */ +extern const struct spinand_manufacturer ato_spinand_manufacturer; extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; diff --git a/include/linux/net.h b/include/linux/net.h index 12093f4db50c..711c3593c3b8 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -152,6 +152,8 @@ struct module; struct sk_buff; typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, unsigned int, size_t); +typedef int (*skb_read_actor_t)(struct sock *, struct sk_buff *); + struct proto_ops { int family; @@ -214,6 +216,8 @@ struct proto_ops { */ int (*read_sock)(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); + /* This is different from read_sock(), it reads an entire skb at a time. 
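/*
 * Editor's note: a sketch (not part of the patch) of a consumer of the new
 * ->read_skb() hook declared just below. The callback and wrapper names are
 * invented for illustration, and the return convention is only hedged here,
 * not specified by this header.
 */
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int example_skb_actor(struct sock *sk, struct sk_buff *skb)
{
	/* Handle one complete skb; by convention the actor reports how much
	 * it consumed (or a negative errno), which ->read_skb() propagates.
	 */
	return skb->len;
}

static int example_drain_socket(struct socket *sock)
{
	if (!sock->ops->read_skb)
		return -EOPNOTSUPP;
	return sock->ops->read_skb(sock->sk, example_skb_actor);
}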
*/ + int (*read_skb)(struct sock *sk, skb_read_actor_t recv_actor); int (*sendpage_locked)(struct sock *sk, struct page *page, int offset, size_t size, int flags); int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, @@ -303,8 +307,6 @@ do { \ #define net_get_random_once(buf, nbytes) \ get_random_once((buf), (nbytes)) -#define net_get_random_once_wait(buf, nbytes) \ - get_random_once_wait((buf), (nbytes)) /* * E.g. XFS meta- & log-data is in slab pages, or bcache meta diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2563d30736e9..1a3cb93c3dcc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2636,10 +2636,10 @@ struct packet_offload { /* often modified stats are per-CPU, other are shared (netdev->stats) */ struct pcpu_sw_netstats { - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; struct u64_stats_sync syncp; } __aligned(4 * sizeof(u64)); @@ -2656,8 +2656,8 @@ static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int l struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); u64_stats_update_begin(&tstats->syncp); - tstats->rx_bytes += len; - tstats->rx_packets++; + u64_stats_add(&tstats->rx_bytes, len); + u64_stats_inc(&tstats->rx_packets); u64_stats_update_end(&tstats->syncp); } @@ -2668,8 +2668,8 @@ static inline void dev_sw_netstats_tx_add(struct net_device *dev, struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); u64_stats_update_begin(&tstats->syncp); - tstats->tx_bytes += len; - tstats->tx_packets += packets; + u64_stats_add(&tstats->tx_bytes, len); + u64_stats_add(&tstats->tx_packets, packets); u64_stats_update_end(&tstats->syncp); } @@ -3981,8 +3981,8 @@ static inline void netdev_tracker_free(struct net_device *dev, #endif } -static inline void dev_hold_track(struct net_device *dev, - netdevice_tracker *tracker, gfp_t gfp) +static inline void netdev_hold(struct net_device *dev, + netdevice_tracker *tracker, gfp_t gfp) { if (dev) { __dev_hold(dev); @@ -3990,8 +3990,8 @@ static inline void dev_hold_track(struct net_device *dev, } } -static inline void dev_put_track(struct net_device *dev, - netdevice_tracker *tracker) +static inline void netdev_put(struct net_device *dev, + netdevice_tracker *tracker) { if (dev) { netdev_tracker_free(dev, tracker); @@ -4004,11 +4004,11 @@ static inline void dev_put_track(struct net_device *dev, * @dev: network device * * Hold reference to device to keep it from being freed. - * Try using dev_hold_track() instead. + * Try using netdev_hold() instead. */ static inline void dev_hold(struct net_device *dev) { - dev_hold_track(dev, NULL, GFP_ATOMIC); + netdev_hold(dev, NULL, GFP_ATOMIC); } /** @@ -4016,17 +4016,17 @@ static inline void dev_hold(struct net_device *dev) * @dev: network device * * Release reference to device to allow it to be freed. - * Try using dev_put_track() instead. + * Try using netdev_put() instead. 
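/*
 * Editor's note: a minimal sketch (not part of the patch) of the renamed
 * tracked-reference helpers, assuming a driver-private structure that holds
 * a reference to a lower device; the struct and field names are
 * illustrative only.
 */
#include <linux/netdevice.h>

struct example_priv {
	struct net_device *lower;
	netdevice_tracker lower_tracker;
};

static void example_attach(struct example_priv *p, struct net_device *dev)
{
	p->lower = dev;
	netdev_hold(dev, &p->lower_tracker, GFP_KERNEL);	/* was dev_hold_track() */
}

static void example_detach(struct example_priv *p)
{
	netdev_put(p->lower, &p->lower_tracker);		/* was dev_put_track() */
	p->lower = NULL;
}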
*/ static inline void dev_put(struct net_device *dev) { - dev_put_track(dev, NULL); + netdev_put(dev, NULL); } -static inline void dev_replace_track(struct net_device *odev, - struct net_device *ndev, - netdevice_tracker *tracker, - gfp_t gfp) +static inline void netdev_ref_replace(struct net_device *odev, + struct net_device *ndev, + netdevice_tracker *tracker, + gfp_t gfp) { if (odev) netdev_tracker_free(odev, tracker); diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h index 4561ec0fcea4..9e937f64a1ad 100644 --- a/include/linux/netfilter/nf_conntrack_h323.h +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -38,60 +38,63 @@ void nf_conntrack_h245_expect(struct nf_conn *new, struct nf_conntrack_expect *this); void nf_conntrack_q931_expect(struct nf_conn *new, struct nf_conntrack_expect *this); -extern int (*set_h245_addr_hook) (struct sk_buff *skb, unsigned int protoff, - unsigned char **data, int dataoff, - H245_TransportAddress *taddr, - union nf_inet_addr *addr, - __be16 port); -extern int (*set_h225_addr_hook) (struct sk_buff *skb, unsigned int protoff, - unsigned char **data, int dataoff, - TransportAddress *taddr, - union nf_inet_addr *addr, - __be16 port); -extern int (*set_sig_addr_hook) (struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, unsigned char **data, - TransportAddress *taddr, int count); -extern int (*set_ras_addr_hook) (struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, unsigned char **data, - TransportAddress *taddr, int count); -extern int (*nat_rtp_rtcp_hook) (struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, unsigned char **data, - int dataoff, - H245_TransportAddress *taddr, - __be16 port, __be16 rtp_port, - struct nf_conntrack_expect *rtp_exp, - struct nf_conntrack_expect *rtcp_exp); -extern int (*nat_t120_hook) (struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, + +struct nfct_h323_nat_hooks { + int (*set_h245_addr)(struct sk_buff *skb, unsigned int protoff, unsigned char **data, int dataoff, - H245_TransportAddress *taddr, __be16 port, - struct nf_conntrack_expect *exp); -extern int (*nat_h245_hook) (struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, + H245_TransportAddress *taddr, + union nf_inet_addr *addr, __be16 port); + int (*set_h225_addr)(struct sk_buff *skb, unsigned int protoff, unsigned char **data, int dataoff, - TransportAddress *taddr, __be16 port, - struct nf_conntrack_expect *exp); -extern int (*nat_callforwarding_hook) (struct sk_buff *skb, - struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, - unsigned char **data, int dataoff, - TransportAddress *taddr, - __be16 port, - struct nf_conntrack_expect *exp); -extern int (*nat_q931_hook) (struct sk_buff *skb, struct nf_conn *ct, - enum ip_conntrack_info ctinfo, - unsigned int protoff, - unsigned char **data, TransportAddress *taddr, - int idx, __be16 port, - struct nf_conntrack_expect *exp); + TransportAddress *taddr, + union nf_inet_addr *addr, __be16 port); + int (*set_sig_addr)(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned char **data, + TransportAddress *taddr, int count); + int (*set_ras_addr)(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, unsigned char **data, + 
TransportAddress *taddr, int count); + int (*nat_rtp_rtcp)(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, int dataoff, + H245_TransportAddress *taddr, + __be16 port, __be16 rtp_port, + struct nf_conntrack_expect *rtp_exp, + struct nf_conntrack_expect *rtcp_exp); + int (*nat_t120)(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, int dataoff, + H245_TransportAddress *taddr, __be16 port, + struct nf_conntrack_expect *exp); + int (*nat_h245)(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, int dataoff, + TransportAddress *taddr, __be16 port, + struct nf_conntrack_expect *exp); + int (*nat_callforwarding)(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, int dataoff, + TransportAddress *taddr, __be16 port, + struct nf_conntrack_expect *exp); + int (*nat_q931)(struct sk_buff *skb, + struct nf_conn *ct, + enum ip_conntrack_info ctinfo, + unsigned int protoff, + unsigned char **data, TransportAddress *taddr, int idx, + __be16 port, struct nf_conntrack_expect *exp); +}; +extern const struct nfct_h323_nat_hooks __rcu *nfct_h323_nat_hook; #endif diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index c620521c42bc..dbc614dfe0d5 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -164,7 +164,7 @@ struct nf_nat_sip_hooks { unsigned int medialen, union nf_inet_addr *rtp_addr); }; -extern const struct nf_nat_sip_hooks *nf_nat_sip_hooks; +extern const struct nf_nat_sip_hooks __rcu *nf_nat_sip_hooks; int ct_sip_parse_request(const struct nf_conn *ct, const char *dptr, unsigned int datalen, unsigned int *matchoff, diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 1b18dfa52e48..f2402ddeafbf 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -276,19 +276,18 @@ struct netfs_cache_ops { }; struct readahead_control; -extern void netfs_readahead(struct readahead_control *); +void netfs_readahead(struct readahead_control *); int netfs_read_folio(struct file *, struct folio *); -extern int netfs_write_begin(struct netfs_inode *, - struct file *, struct address_space *, - loff_t, unsigned int, struct folio **, - void **); - -extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); -extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq, - enum netfs_sreq_ref_trace what); -extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq, - bool was_async, enum netfs_sreq_ref_trace what); -extern void netfs_stats_show(struct seq_file *); +int netfs_write_begin(struct netfs_inode *, struct file *, + struct address_space *, loff_t pos, unsigned int len, + struct folio **, void **fsdata); + +void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); +void netfs_get_subrequest(struct netfs_io_subrequest *subreq, + enum netfs_sreq_ref_trace what); +void netfs_put_subrequest(struct netfs_io_subrequest *subreq, + bool was_async, enum netfs_sreq_ref_trace what); +void netfs_stats_show(struct seq_file *); /** * netfs_inode - Get the netfs inode context from the inode diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index a17c337dbdf1..b32ed68e7dc4 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -617,6 +617,15 
@@ nfs_fileid_to_ino_t(u64 fileid) #define NFS_JUKEBOX_RETRY_TIME (5 * HZ) +/* We need to block new opens while a file is being unlinked. + * If it is opened *before* we decide to unlink, we will silly-rename + * instead. If it is opened *after*, then we need to create or will fail. + * If we allow the two to race, we could end up with a file that is open + * but deleted on the server resulting in ESTALE. + * So use ->d_fsdata to record when the unlink is happening + * and block dentry revalidation while it is set. + */ +#define NFS_FSDATA_BLOCKED ((void*)1) # undef ifdebug # ifdef NFS_DEBUG diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index f0373a6cb5fb..ba7e2e4b0926 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -202,8 +202,7 @@ nfs_list_entry(struct list_head *head) return list_entry(head, struct nfs_page, wb_list); } -static inline -loff_t req_offset(struct nfs_page *req) +static inline loff_t req_offset(const struct nfs_page *req) { return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset; } diff --git a/include/linux/nfs_ssc.h b/include/linux/nfs_ssc.h index 222ae8883e85..75843c00f326 100644 --- a/include/linux/nfs_ssc.h +++ b/include/linux/nfs_ssc.h @@ -64,7 +64,7 @@ struct nfsd4_ssc_umount_item { refcount_t nsui_refcnt; unsigned long nsui_expire; struct vfsmount *nsui_vfsmount; - char nsui_ipaddr[RPC_MAX_ADDRBUFLEN]; + char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1]; }; #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 0e3aa0f5f324..e86cf6642d21 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1600,6 +1600,7 @@ enum { NFS_IOHDR_STAT, NFS_IOHDR_RESEND_PNFS, NFS_IOHDR_RESEND_MDS, + NFS_IOHDR_UNSTABLE_WRITES, }; struct nfs_io_completion; diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h index b22782225f27..cbe5fd1dd2e7 100644 --- a/include/linux/nl802154.h +++ b/include/linux/nl802154.h @@ -8,6 +8,8 @@ #ifndef NL802154_H #define NL802154_H +#include <net/netlink.h> + #define IEEE802154_NL_NAME "802.15.4 MAC" #define IEEE802154_MCAST_COORD_NAME "coordinator" #define IEEE802154_MCAST_BEACON_NAME "beacon" diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 750c7f395ca9..f700ff2df074 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -122,6 +122,8 @@ int watchdog_nmi_probe(void); int watchdog_nmi_enable(unsigned int cpu); void watchdog_nmi_disable(unsigned int cpu); +void lockup_detector_reconfigure(void); + /** * touch_nmi_watchdog - restart NMI watchdog timeout. * diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 0f233b76c9ce..4b71a96190a8 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -94,6 +94,7 @@ #include <linux/bitmap.h> #include <linux/minmax.h> #include <linux/numa.h> +#include <linux/random.h> typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t; extern nodemask_t _unused_nodemask_arg_; @@ -276,7 +277,14 @@ static inline unsigned int __next_node(int n, const nodemask_t *srcp) * the first node in src if needed. Returns MAX_NUMNODES if src is empty. 
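/*
 * Editor's note: a short usage sketch (not part of the patch) for the
 * wrap-around iterator documented above, which this hunk turns into a
 * static inline; the round-robin helper name is invented for illustration.
 */
#include <linux/nodemask.h>

static int example_next_target_node(int prev)
{
	/* Pick the next online node, wrapping back to the first one. */
	int node = next_node_in(prev, node_states[N_ONLINE]);

	return node == MAX_NUMNODES ? NUMA_NO_NODE : node;
}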
*/ #define next_node_in(n, src) __next_node_in((n), &(src)) -unsigned int __next_node_in(int node, const nodemask_t *srcp); +static inline unsigned int __next_node_in(int node, const nodemask_t *srcp) +{ + unsigned int ret = __next_node(node, srcp); + + if (ret == MAX_NUMNODES) + ret = __first_node(srcp); + return ret; +} static inline void init_nodemask_of_node(nodemask_t *mask, int node) { @@ -493,14 +501,20 @@ static inline int num_node_state(enum node_states state) #endif +static inline int node_random(const nodemask_t *maskp) +{ #if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) -extern int node_random(const nodemask_t *maskp); + int w, bit = NUMA_NO_NODE; + + w = nodes_weight(*maskp); + if (w) + bit = bitmap_ord_to_pos(maskp->bits, + get_random_int() % w, MAX_NUMNODES); + return bit; #else -static inline int node_random(const nodemask_t *mask) -{ return 0; -} #endif +} #define node_online_map node_states[N_ONLINE] #define node_possible_map node_states[N_POSSIBLE] diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h new file mode 100644 index 000000000000..dcb8030062dd --- /dev/null +++ b/include/linux/nvme-auth.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021 Hannes Reinecke, SUSE Software Solutions + */ + +#ifndef _NVME_AUTH_H +#define _NVME_AUTH_H + +#include <crypto/kpp.h> + +struct nvme_dhchap_key { + u8 *key; + size_t len; + u8 hash; +}; + +u32 nvme_auth_get_seqnum(void); +const char *nvme_auth_dhgroup_name(u8 dhgroup_id); +const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id); +u8 nvme_auth_dhgroup_id(const char *dhgroup_name); + +const char *nvme_auth_hmac_name(u8 hmac_id); +const char *nvme_auth_digest_name(u8 hmac_id); +size_t nvme_auth_hmac_hash_len(u8 hmac_id); +u8 nvme_auth_hmac_id(const char *hmac_name); + +struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, + u8 key_hash); +void nvme_auth_free_key(struct nvme_dhchap_key *key); +u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn); +int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key); +int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len, + u8 *challenge, u8 *aug, size_t hlen); +int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid); +int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm, + u8 *host_key, size_t host_key_len); +int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm, + u8 *ctrl_key, size_t ctrl_key_len, + u8 *sess_key, size_t sess_key_len); + +#endif /* _NVME_AUTH_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 07cfc922f8e4..ae53d74f3696 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -19,6 +19,7 @@ #define NVMF_TRSVCID_SIZE 32 #define NVMF_TRADDR_SIZE 256 #define NVMF_TSAS_SIZE 256 +#define NVMF_AUTH_HASH_LEN 64 #define NVME_DISC_SUBSYS_NAME "nqn.2014-08.org.nvmexpress.discovery" @@ -712,6 +713,10 @@ enum { }; enum { + NVME_AER_ERROR_PERSIST_INT_ERR = 0x03, +}; + +enum { NVME_AER_NOTICE_NS_CHANGED = 0x00, NVME_AER_NOTICE_FW_ACT_STARTING = 0x01, NVME_AER_NOTICE_ANA = 0x03, @@ -1369,6 +1374,8 @@ enum nvmf_capsule_command { nvme_fabrics_type_property_set = 0x00, nvme_fabrics_type_connect = 0x01, nvme_fabrics_type_property_get = 0x04, + nvme_fabrics_type_auth_send = 0x05, + nvme_fabrics_type_auth_receive = 0x06, }; #define nvme_fabrics_type_name(type) { type, #type } @@ -1376,7 +1383,9 @@ enum nvmf_capsule_command { __print_symbolic(type, \ nvme_fabrics_type_name(nvme_fabrics_type_property_set), \ nvme_fabrics_type_name(nvme_fabrics_type_connect), \ - 
nvme_fabrics_type_name(nvme_fabrics_type_property_get)) + nvme_fabrics_type_name(nvme_fabrics_type_property_get), \ + nvme_fabrics_type_name(nvme_fabrics_type_auth_send), \ + nvme_fabrics_type_name(nvme_fabrics_type_auth_receive)) /* * If not fabrics command, fctype will be ignored. @@ -1472,6 +1481,11 @@ struct nvmf_connect_command { __u8 resv4[12]; }; +enum { + NVME_CONNECT_AUTHREQ_ASCR = (1 << 2), + NVME_CONNECT_AUTHREQ_ATR = (1 << 1), +}; + struct nvmf_connect_data { uuid_t hostid; __le16 cntlid; @@ -1506,6 +1520,200 @@ struct nvmf_property_get_command { __u8 resv4[16]; }; +struct nvmf_auth_common_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al_tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_send_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 tl; + __u8 resv4[16]; +}; + +struct nvmf_auth_receive_command { + __u8 opcode; + __u8 resv1; + __u16 command_id; + __u8 fctype; + __u8 resv2[19]; + union nvme_data_ptr dptr; + __u8 resv3; + __u8 spsp0; + __u8 spsp1; + __u8 secp; + __le32 al; + __u8 resv4[16]; +}; + +/* Value for secp */ +enum { + NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER = 0xe9, +}; + +/* Defined value for auth_type */ +enum { + NVME_AUTH_COMMON_MESSAGES = 0x00, + NVME_AUTH_DHCHAP_MESSAGES = 0x01, +}; + +/* Defined messages for auth_id */ +enum { + NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE = 0x00, + NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE = 0x01, + NVME_AUTH_DHCHAP_MESSAGE_REPLY = 0x02, + NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1 = 0x03, + NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 = 0x04, + NVME_AUTH_DHCHAP_MESSAGE_FAILURE2 = 0xf0, + NVME_AUTH_DHCHAP_MESSAGE_FAILURE1 = 0xf1, +}; + +struct nvmf_auth_dhchap_protocol_descriptor { + __u8 authid; + __u8 rsvd; + __u8 halen; + __u8 dhlen; + __u8 idlist[60]; +}; + +enum { + NVME_AUTH_DHCHAP_AUTH_ID = 0x01, +}; + +/* Defined hash functions for DH-HMAC-CHAP authentication */ +enum { + NVME_AUTH_HASH_SHA256 = 0x01, + NVME_AUTH_HASH_SHA384 = 0x02, + NVME_AUTH_HASH_SHA512 = 0x03, + NVME_AUTH_HASH_INVALID = 0xff, +}; + +/* Defined Diffie-Hellman group identifiers for DH-HMAC-CHAP authentication */ +enum { + NVME_AUTH_DHGROUP_NULL = 0x00, + NVME_AUTH_DHGROUP_2048 = 0x01, + NVME_AUTH_DHGROUP_3072 = 0x02, + NVME_AUTH_DHGROUP_4096 = 0x03, + NVME_AUTH_DHGROUP_6144 = 0x04, + NVME_AUTH_DHGROUP_8192 = 0x05, + NVME_AUTH_DHGROUP_INVALID = 0xff, +}; + +union nvmf_auth_protocol { + struct nvmf_auth_dhchap_protocol_descriptor dhchap; +}; + +struct nvmf_auth_dhchap_negotiate_data { + __u8 auth_type; + __u8 auth_id; + __le16 rsvd; + __le16 t_id; + __u8 sc_c; + __u8 napd; + union nvmf_auth_protocol auth_protocol[]; +}; + +struct nvmf_auth_dhchap_challenge_data { + __u8 auth_type; + __u8 auth_id; + __u16 rsvd1; + __le16 t_id; + __u8 hl; + __u8 rsvd2; + __u8 hashid; + __u8 dhgid; + __le16 dhvlen; + __le32 seqnum; + /* 'hl' bytes of challenge value */ + __u8 cval[]; + /* followed by 'dhvlen' bytes of DH value */ +}; + +struct nvmf_auth_dhchap_reply_data { + __u8 auth_type; + __u8 auth_id; + __le16 rsvd1; + __le16 t_id; + __u8 hl; + __u8 rsvd2; + __u8 cvalid; + __u8 rsvd3; + __le16 dhvlen; + __le32 seqnum; + /* 'hl' bytes of response data */ + __u8 rval[]; + /* followed by 'hl' bytes of Challenge value */ + /* followed by 'dhvlen' bytes of DH value */ +}; + +enum { + NVME_AUTH_DHCHAP_RESPONSE_VALID = (1 << 0), +}; + +struct 
nvmf_auth_dhchap_success1_data { + __u8 auth_type; + __u8 auth_id; + __le16 rsvd1; + __le16 t_id; + __u8 hl; + __u8 rsvd2; + __u8 rvalid; + __u8 rsvd3[7]; + /* 'hl' bytes of response value if 'rvalid' is set */ + __u8 rval[]; +}; + +struct nvmf_auth_dhchap_success2_data { + __u8 auth_type; + __u8 auth_id; + __le16 rsvd1; + __le16 t_id; + __u8 rsvd2[10]; +}; + +struct nvmf_auth_dhchap_failure_data { + __u8 auth_type; + __u8 auth_id; + __le16 rsvd1; + __le16 t_id; + __u8 rescode; + __u8 rescode_exp; +}; + +enum { + NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED = 0x01, +}; + +enum { + NVME_AUTH_DHCHAP_FAILURE_FAILED = 0x01, + NVME_AUTH_DHCHAP_FAILURE_NOT_USABLE = 0x02, + NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH = 0x03, + NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE = 0x04, + NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE = 0x05, + NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD = 0x06, + NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE = 0x07, +}; + + struct nvme_dbbuf { __u8 opcode; __u8 flags; @@ -1549,6 +1757,9 @@ struct nvme_command { struct nvmf_connect_command connect; struct nvmf_property_set_command prop_set; struct nvmf_property_get_command prop_get; + struct nvmf_auth_common_command auth_common; + struct nvmf_auth_send_command auth_send; + struct nvmf_auth_receive_command auth_receive; struct nvme_dbbuf dbbuf; struct nvme_directive_cmd directive; }; diff --git a/include/linux/of.h b/include/linux/of.h index 20a4e7cb7afe..766d002bddb9 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -207,7 +207,7 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) } #if defined(CONFIG_OF_DYNAMIC) || defined(CONFIG_SPARC) -static inline int of_property_check_flag(struct property *p, unsigned long flag) +static inline int of_property_check_flag(const struct property *p, unsigned long flag) { return test_bit(flag, &p->_flags); } @@ -812,7 +812,8 @@ static inline void of_node_clear_flag(struct device_node *n, unsigned long flag) { } -static inline int of_property_check_flag(struct property *p, unsigned long flag) +static inline int of_property_check_flag(const struct property *p, + unsigned long flag) { return 0; } diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 8bf2ea859653..a5166eb93437 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -29,6 +29,7 @@ enum of_gpio_flags { OF_GPIO_TRANSITORY = 0x8, OF_GPIO_PULL_UP = 0x10, OF_GPIO_PULL_DOWN = 0x20, + OF_GPIO_PULL_DISABLE = 0x40, }; #ifdef CONFIG_OF_GPIO diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 84a966623e78..d15b6cd5e1c3 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h @@ -61,16 +61,18 @@ static inline struct platform_device *of_find_device_by_node(struct device_node } #endif +extern int of_platform_bus_probe(struct device_node *root, + const struct of_device_id *matches, + struct device *parent); + +#ifdef CONFIG_OF_ADDRESS /* Platform devices and busses creation */ extern struct platform_device *of_platform_device_create(struct device_node *np, const char *bus_id, struct device *parent); extern int of_platform_device_destroy(struct device *dev, void *data); -extern int of_platform_bus_probe(struct device_node *root, - const struct of_device_id *matches, - struct device *parent); -#ifdef CONFIG_OF_ADDRESS + extern int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, @@ -84,6 +86,18 @@ extern int devm_of_platform_populate(struct device *dev); extern void 
devm_of_platform_depopulate(struct device *dev); #else +/* Platform devices and busses creation */ +static inline struct platform_device *of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent) +{ + return NULL; +} +static inline int of_platform_device_destroy(struct device *dev, void *data) +{ + return -ENODEV; +} + static inline int of_platform_populate(struct device_node *root, const struct of_device_id *matches, const struct of_dev_auxdata *lookup, diff --git a/include/linux/once.h b/include/linux/once.h index f54523052bbc..b14d8b309d52 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -54,7 +54,5 @@ void __do_once_done(bool *done, struct static_key_true *once_key, #define get_random_once(buf, nbytes) \ DO_ONCE(get_random_bytes, (buf), (nbytes)) -#define get_random_once_wait(buf, nbytes) \ - DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \ #endif /* _LINUX_ONCE_H */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index e66f7aa3191d..465ff35a8c00 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -193,6 +193,11 @@ enum pageflags { /* Only valid for buddy pages. Used to track pages that are reported */ PG_reported = PG_uptodate, + +#ifdef CONFIG_MEMORY_HOTPLUG + /* For self-hosted memmap pages */ + PG_vmemmap_self_hosted = PG_owner_priv_1, +#endif }; #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) @@ -200,34 +205,15 @@ enum pageflags { #ifndef __GENERATING_BOUNDS_H #ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP -DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, - hugetlb_optimize_vmemmap_key); - -static __always_inline bool hugetlb_optimize_vmemmap_enabled(void) -{ - return static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON, - &hugetlb_optimize_vmemmap_key); -} +DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key); /* - * If the feature of optimizing vmemmap pages associated with each HugeTLB - * page is enabled, the head vmemmap page frame is reused and all of the tail - * vmemmap addresses map to the head vmemmap page frame (furture details can - * refer to the figure at the head of the mm/hugetlb_vmemmap.c). In other - * words, there are more than one page struct with PG_head associated with each - * HugeTLB page. We __know__ that there is only one head page struct, the tail - * page structs with PG_head are fake head page structs. We need an approach - * to distinguish between those two different types of page structs so that - * compound_head() can return the real head page struct when the parameter is - * the tail page struct but with PG_head. - * - * The page_fixed_fake_head() returns the real head page struct if the @page is - * fake page head, otherwise, returns @page which can either be a true page - * head or tail. + * Return the real head page struct iff the @page is a fake head page, otherwise + * return the @page itself. See Documentation/mm/vmemmap_dedup.rst. 
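/*
 * Editor's note: a sketch (not part of the patch) of how the new
 * PG_vmemmap_self_hosted flag added earlier in this page-flags.h hunk is
 * typically consumed, via the accessors generated by the PAGEFLAG() wrapper
 * later in the same hunk; the loop and function names are illustrative only.
 */
static void example_mark_self_hosted(struct page *start, unsigned long nr)
{
	unsigned long i;

	/* The memmap for this range lives inside the hot-added range itself. */
	for (i = 0; i < nr; i++)
		SetPageVmemmapSelfHosted(start + i);
}

static bool example_is_self_hosted(struct page *page)
{
	return PageVmemmapSelfHosted(page);
}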
*/ static __always_inline const struct page *page_fixed_fake_head(const struct page *page) { - if (!hugetlb_optimize_vmemmap_enabled()) + if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key)) return page; /* @@ -255,11 +241,6 @@ static inline const struct page *page_fixed_fake_head(const struct page *page) { return page; } - -static inline bool hugetlb_optimize_vmemmap_enabled(void) -{ - return false; -} #endif static __always_inline int page_is_fake_head(struct page *page) @@ -628,6 +609,12 @@ PAGEFLAG_FALSE(SkipKASanPoison, skip_kasan_poison) */ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND) +#ifdef CONFIG_MEMORY_HOTPLUG +PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) +#else +PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) +#endif + /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; @@ -639,7 +626,7 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND) * structure which KSM associates with that merged page. See ksm.h. * * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable - * page and then page->mapping points a struct address_space. + * page and then page->mapping points to a struct movable_operations. * * Please note that, confusingly, "page_mapping" refers to the inode * address_space which maps the page from disk; whereas "page_mapped" @@ -650,6 +637,12 @@ __PAGEFLAG(Reported, reported, PF_NO_COMPOUND) #define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) +/* + * Different with flags above, this flag is used only for fsdax mode. It + * indicates that this page->mapping is now under reflink case. + */ +#define PAGE_MAPPING_DAX_COW 0x1 + static __always_inline bool folio_mapping_flags(struct folio *folio) { return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; @@ -670,6 +663,12 @@ static __always_inline bool PageAnon(struct page *page) return folio_test_anon(page_folio(page)); } +static __always_inline bool __folio_test_movable(const struct folio *folio) +{ + return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == + PAGE_MAPPING_MOVABLE; +} + static __always_inline int __PageMovable(struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ce96866fbec4..0178b2040ea3 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -345,8 +345,6 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping) #endif } -void release_pages(struct page **pages, int nr); - struct address_space *page_mapping(struct page *); struct address_space *folio_mapping(struct folio *); struct address_space *swapcache_mapping(struct folio *); @@ -718,9 +716,8 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) return head + (index & (thp_nr_pages(head) - 1)); } -unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, - pgoff_t end, unsigned int nr_pages, - struct page **pages); +unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, + pgoff_t end, struct folio_batch *fbatch); unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned int nr_pages, struct page **pages); unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, @@ -1079,6 +1076,12 @@ static inline int __must_check write_one_page(struct page *page) int __set_page_dirty_nobuffers(struct page 
*page); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); +#ifdef CONFIG_MIGRATION +int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, + struct folio *src, enum migrate_mode mode); +#else +#define filemap_migrate_folio NULL +#endif void page_endio(struct page *page, bool is_write, int err); void folio_end_private_2(struct folio *folio); @@ -1098,8 +1101,6 @@ size_t fault_in_subpage_writeable(char __user *uaddr, size_t size); size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); size_t fault_in_readable(const char __user *uaddr, size_t size); -int add_to_page_cache_locked(struct page *page, struct address_space *mapping, - pgoff_t index, gfp_t gfp); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp); int filemap_add_folio(struct address_space *mapping, struct folio *folio, @@ -1107,10 +1108,6 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio, void filemap_remove_folio(struct folio *folio); void delete_from_page_cache(struct page *page); void __filemap_remove_folio(struct folio *folio, void *shadow); -static inline void __delete_from_page_cache(struct page *page, void *shadow) -{ - __filemap_remove_folio(page_folio(page), shadow); -} void replace_page_cache_page(struct page *old, struct page *new); void delete_from_page_cache_batch(struct address_space *mapping, struct folio_batch *fbatch); @@ -1119,22 +1116,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp); loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end, int whence); -/* - * Like add_to_page_cache_locked, but used to add newly allocated pages: - * the page is new, so we can just run __SetPageLocked() against it. - */ -static inline int add_to_page_cache(struct page *page, - struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) -{ - int error; - - __SetPageLocked(page); - error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); - if (unlikely(error)) - __ClearPageLocked(page); - return error; -} - /* Must be non-static for BPF error injection */ int __filemap_add_folio(struct address_space *mapping, struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp); diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 67b1246f136b..215eb6c3bdc9 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -26,17 +26,6 @@ struct pagevec { }; void __pagevec_release(struct pagevec *pvec); -void __pagevec_lru_add(struct pagevec *pvec); -unsigned pagevec_lookup_range(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t *start, pgoff_t end); -static inline unsigned pagevec_lookup(struct pagevec *pvec, - struct address_space *mapping, - pgoff_t *start) -{ - return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); -} - unsigned pagevec_lookup_range_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, pgoff_t end, xa_mark_t tag); diff --git a/include/linux/pci-doe.h b/include/linux/pci-doe.h new file mode 100644 index 000000000000..ed9b4df792b8 --- /dev/null +++ b/include/linux/pci-doe.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Data Object Exchange + * PCIe r6.0, sec 6.30 DOE + * + * Copyright (C) 2021 Huawei + * Jonathan Cameron <Jonathan.Cameron@huawei.com> + * + * Copyright (C) 2022 Intel Corporation + * Ira Weiny <ira.weiny@intel.com> + */ + +#ifndef LINUX_PCI_DOE_H +#define LINUX_PCI_DOE_H + +struct pci_doe_protocol { + u16 vid; + u8 type; +}; + +struct 
pci_doe_mb; + +/** + * struct pci_doe_task - represents a single query/response + * + * @prot: DOE Protocol + * @request_pl: The request payload + * @request_pl_sz: Size of the request payload (bytes) + * @response_pl: The response payload + * @response_pl_sz: Size of the response payload (bytes) + * @rv: Return value. Length of received response or error (bytes) + * @complete: Called when task is complete + * @private: Private data for the consumer + * @work: Used internally by the mailbox + * @doe_mb: Used internally by the mailbox + * + * The payload sizes and rv are specified in bytes with the following + * restrictions concerning the protocol. + * + * 1) The request_pl_sz must be a multiple of double words (4 bytes) + * 2) The response_pl_sz must be >= a single double word (4 bytes) + * 3) rv is returned as bytes but it will be a multiple of double words + * + * NOTE there is no need for the caller to initialize work or doe_mb. + */ +struct pci_doe_task { + struct pci_doe_protocol prot; + u32 *request_pl; + size_t request_pl_sz; + u32 *response_pl; + size_t response_pl_sz; + int rv; + void (*complete)(struct pci_doe_task *task); + void *private; + + /* No need for the user to initialize these fields */ + struct work_struct work; + struct pci_doe_mb *doe_mb; +}; + +/** + * pci_doe_for_each_off - Iterate each DOE capability + * @pdev: struct pci_dev to iterate + * @off: u16 of config space offset of each mailbox capability found + */ +#define pci_doe_for_each_off(pdev, off) \ + for (off = pci_find_next_ext_capability(pdev, off, \ + PCI_EXT_CAP_ID_DOE); \ + off > 0; \ + off = pci_find_next_ext_capability(pdev, off, \ + PCI_EXT_CAP_ID_DOE)) + +struct pci_doe_mb *pcim_doe_create_mb(struct pci_dev *pdev, u16 cap_offset); +bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type); +int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task); + +#endif diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index adea5a4771cf..6b1301e2498e 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -87,6 +87,7 @@ extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 * extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ +extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h index 8318a97c9c61..2c07aa6b7665 100644 --- a/include/linux/pci-p2pdma.h +++ b/include/linux/pci-p2pdma.h @@ -30,10 +30,6 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length); void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl); void pci_p2pmem_publish(struct pci_dev *pdev, bool publish); -int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs); -void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs); int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma); ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, @@ -83,17 +79,6 @@ static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev, static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) { 
} -static inline int pci_p2pdma_map_sg_attrs(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - return 0; -} -static inline void pci_p2pdma_unmap_sg_attrs(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ -} static inline int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma) { @@ -119,16 +104,4 @@ static inline struct pci_dev *pci_p2pmem_find(struct device *client) return pci_p2pmem_find_many(&client, 1); } -static inline int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - return pci_p2pdma_map_sg_attrs(dev, sg, nents, dir, 0); -} - -static inline void pci_p2pdma_unmap_sg(struct device *dev, - struct scatterlist *sg, int nents, enum dma_data_direction dir) -{ - pci_p2pdma_unmap_sg_attrs(dev, sg, nents, dir, 0); -} - #endif /* _LINUX_PCI_P2P_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 81a57b498f22..060af91bafcd 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1909,24 +1909,14 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, #include <asm/pci.h> -/* These two functions provide almost identical functionality. Depending - * on the architecture, one will be implemented as a wrapper around the - * other (in drivers/pci/mmap.c). - * +/* * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff * is expected to be an offset within that region. * - * pci_mmap_page_range() is the legacy architecture-specific interface, - * which accepts a "user visible" resource address converted by - * pci_resource_to_user(), as used in the legacy mmap() interface in - * /proc/bus/pci/. */ int pci_mmap_resource_range(struct pci_dev *dev, int bar, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine); -int pci_mmap_page_range(struct pci_dev *pdev, int bar, - struct vm_area_struct *vma, - enum pci_mmap_state mmap_state, int write_combine); #ifndef arch_can_pci_mmap_wc #define arch_can_pci_mmap_wc() 0 diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 7fa460ccf7fa..6feade66efdb 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -151,6 +151,7 @@ #define PCI_CLASS_OTHERS 0xff /* Vendors and devices. Sort key: vendor first, device next. 
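As context for the DOE mailbox interface added in <linux/pci-doe.h> above, here is a minimal sketch of how a driver might submit a query; the protocol vendor/type, payload sizes and the completion-based wait are illustrative assumptions, not an in-tree user.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>

/* Completion callback: task->private is assumed to point at a completion. */
static void demo_doe_done(struct pci_doe_task *task)
{
	complete(task->private);
}

static int demo_doe_query(struct pci_dev *pdev, u32 *req, size_t req_sz,
			  u32 *rsp, size_t rsp_sz)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct pci_doe_task task = {
		.prot.vid = PCI_VENDOR_ID_PCI_SIG,	/* example protocol owner */
		.prot.type = 0,				/* assumed protocol type */
		.request_pl = req,
		.request_pl_sz = req_sz,		/* multiple of 4 bytes */
		.response_pl = rsp,
		.response_pl_sz = rsp_sz,		/* at least 4 bytes */
		.complete = demo_doe_done,
		.private = &done,
	};
	struct pci_doe_mb *mb = NULL;
	u16 off = 0;
	int rc;

	/* Walk every DOE capability and pick one that speaks our protocol. */
	pci_doe_for_each_off(pdev, off) {
		struct pci_doe_mb *cand = pcim_doe_create_mb(pdev, off);

		if (!IS_ERR_OR_NULL(cand) &&
		    pci_doe_supports_prot(cand, task.prot.vid, task.prot.type)) {
			mb = cand;
			break;
		}
	}
	if (!mb)
		return -ENODEV;

	rc = pci_doe_submit_task(mb, &task);
	if (rc)
		return rc;

	wait_for_completion(&done);
	return task.rv < 0 ? task.rv : 0;	/* rv: response length in bytes, or error */
}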
*/ +#define PCI_VENDOR_ID_PCI_SIG 0x0001 #define PCI_VENDOR_ID_LOONGSON 0x0014 diff --git a/include/linux/pcs-rzn1-miic.h b/include/linux/pcs-rzn1-miic.h new file mode 100644 index 000000000000..56d12b21365d --- /dev/null +++ b/include/linux/pcs-rzn1-miic.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2022 Schneider Electric + * + * Clément Léger <clement.leger@bootlin.com> + */ + +#ifndef __LINUX_PCS_MIIC_H +#define __LINUX_PCS_MIIC_H + +struct phylink; +struct device_node; + +struct phylink_pcs *miic_create(struct device *dev, struct device_node *np); + +void miic_destroy(struct phylink_pcs *pcs); + +#endif /* __LINUX_PCS_MIIC_H */ diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index 266eb26fb029..d2da1e0b4a92 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -17,6 +17,7 @@ #define DW_AN_C73 1 #define DW_AN_C37_SGMII 2 #define DW_2500BASEX 3 +#define DW_AN_C37_1000BASEX 4 struct xpcs_id; @@ -30,7 +31,7 @@ int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex); int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, - unsigned int mode); + unsigned int mode, const unsigned long *advertising); void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 3cdc16cfd867..014ee8f0fbaa 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1689,4 +1689,32 @@ typedef unsigned int pgtbl_mod_mask; #define MAX_PTRS_PER_P4D PTRS_PER_P4D #endif +/* description of effects of mapping type and prot in current implementation. + * this is due to the limited x86 page protection hardware. The expected + * behavior is in parens: + * + * map_type prot + * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC + * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes + * w: (no) no w: (no) no w: (yes) yes w: (no) no + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes + * w: (no) no w: (no) no w: (copy) copy w: (no) no + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and + * MAP_PRIVATE (with Enhanced PAN supported): + * r: (no) no + * w: (no) no + * x: (yes) yes + */ +#define DECLARE_VM_GET_PAGE_PROT \ +pgprot_t vm_get_page_prot(unsigned long vm_flags) \ +{ \ + return protection_map[vm_flags & \ + (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \ +} \ +EXPORT_SYMBOL(vm_get_page_prot); + #endif /* _LINUX_PGTABLE_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index b09f7d36cff2..87638c55d844 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1545,6 +1545,9 @@ static inline void phy_device_reset(struct phy_device *phydev, int value) #define phydev_err(_phydev, format, args...) \ dev_err(&_phydev->mdio.dev, format, ##args) +#define phydev_err_probe(_phydev, err, format, args...) \ + dev_err_probe(&_phydev->mdio.dev, err, format, ##args) + #define phydev_info(_phydev, format, args...) 
\ dev_info(&_phydev->mdio.dev, format, ##args) diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 52bc8e487ef7..1acafd86ab13 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -2,6 +2,8 @@ #ifndef __PHY_FIXED_H #define __PHY_FIXED_H +#include <linux/types.h> + struct fixed_phy_status { int link; int speed; @@ -12,6 +14,7 @@ struct fixed_phy_status { struct device_node; struct gpio_desc; +struct net_device; #if IS_ENABLED(CONFIG_FIXED_PHY) extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 70b45d28e7a9..487117ccb1bc 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -27,6 +27,26 @@ struct gpio_chip; struct device_node; /** + * struct pingroup - provides information on pingroup + * @name: a name for pingroup + * @pins: an array of pins in the pingroup + * @npins: number of pins in the pingroup + */ +struct pingroup { + const char *name; + const unsigned int *pins; + size_t npins; +}; + +/* Convenience macro to define a single named or anonymous pingroup */ +#define PINCTRL_PINGROUP(_name, _pins, _npins) \ +(struct pingroup){ \ + .name = _name, \ + .pins = _pins, \ + .npins = _npins, \ +} + +/** * struct pinctrl_pin_desc - boards/machines provide information on their * pins, pads or other muxable units in this struct * @number: unique pin number from the global pin number space diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index cb0fd633a610..6cb65df3e3ba 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -157,26 +157,6 @@ static inline bool pipe_full(unsigned int head, unsigned int tail, } /** - * pipe_space_for_user - Return number of slots available to userspace - * @head: The pipe ring head pointer - * @tail: The pipe ring tail pointer - * @pipe: The pipe info structure - */ -static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail, - struct pipe_inode_info *pipe) -{ - unsigned int p_occupancy, p_space; - - p_occupancy = pipe_occupancy(head, tail); - if (p_occupancy >= pipe->max_usage) - return 0; - p_space = pipe->ring_size - p_occupancy; - if (p_space > pipe->max_usage) - p_space = pipe->max_usage; - return p_space; -} - -/** * pipe_buf_get - get a reference to a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to get a reference to @@ -229,6 +209,15 @@ static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe, return buf->ops->try_steal(pipe, buf); } +static inline void pipe_discard_from(struct pipe_inode_info *pipe, + unsigned int old_head) +{ + unsigned int mask = pipe->ring_size - 1; + + while (pipe->head > old_head) + pipe_buf_release(pipe, &pipe->bufs[--pipe->head & mask]); +} + /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ #define PIPE_SIZE PAGE_SIZE diff --git a/include/linux/platform-feature.h b/include/linux/platform-feature.h deleted file mode 100644 index b2f48be999fa..000000000000 --- a/include/linux/platform-feature.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _PLATFORM_FEATURE_H -#define _PLATFORM_FEATURE_H - -#include <linux/bitops.h> -#include <asm/platform-feature.h> - -/* The platform features are starting with the architecture specific ones. */ - -/* Used to enable platform specific DMA handling for virtio devices. 
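As a quick illustration of the PINCTRL_PINGROUP() helper introduced above, the sketch below builds an anonymous group from a made-up pin list; the pin numbers and group name are placeholders.

#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>

static const unsigned int demo_i2c_pins[] = { 4, 5 };	/* hypothetical pins */

static size_t demo_i2c_npins(void)
{
	/* The macro expands to a compound literal carrying name, pins and npins. */
	struct pingroup grp = PINCTRL_PINGROUP("i2c0_grp", demo_i2c_pins,
					       ARRAY_SIZE(demo_i2c_pins));

	return grp.npins;
}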
*/ -#define PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS (0 + PLATFORM_ARCH_FEAT_N) - -#define PLATFORM_FEAT_N (1 + PLATFORM_ARCH_FEAT_N) - -void platform_set(unsigned int feature); -void platform_clear(unsigned int feature); -bool platform_has(unsigned int feature); - -#endif /* _PLATFORM_FEATURE_H */ diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 8cfa8cfca77e..8b1b795867a1 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -13,8 +13,8 @@ #ifndef __CROS_EC_COMMANDS_H #define __CROS_EC_COMMANDS_H - - +#include <linux/bits.h> +#include <linux/types.h> #define BUILD_ASSERT(_cond) @@ -787,7 +787,7 @@ struct ec_host_response { * * Packets always start with a request or response header. They are followed * by data_len bytes of data. If the data_crc_present flag is set, the data - * bytes are followed by a CRC-8 of that data, using using x^8 + x^2 + x + 1 + * bytes are followed by a CRC-8 of that data, using x^8 + x^2 + x + 1 * polynomial. * * Host algorithm when sending a request q: @@ -1300,6 +1300,8 @@ enum ec_feature_code { * mux. */ EC_FEATURE_TYPEC_MUX_REQUIRE_AP_ACK = 43, + /* The MCU is a System Companion Processor (SCP) 2nd Core. */ + EC_FEATURE_SCP_C1 = 45, }; #define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 138fd912c808..408b29ca4004 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -19,8 +19,12 @@ #define CROS_EC_DEV_ISH_NAME "cros_ish" #define CROS_EC_DEV_PD_NAME "cros_pd" #define CROS_EC_DEV_SCP_NAME "cros_scp" +#define CROS_EC_DEV_SCP_C1_NAME "cros_scp_c1" #define CROS_EC_DEV_TP_NAME "cros_tp" +#define CROS_EC_DEV_EC_INDEX 0 +#define CROS_EC_DEV_PD_INDEX 1 + /* * The EC is unresponsive for a time after a reboot command. Add a * simple delay to make sure that the bus stays locked. @@ -231,8 +235,8 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature); int cros_ec_get_sensor_count(struct cros_ec_dev *ec); -int cros_ec_command(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata, - int outsize, void *indata, int insize); +int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata, + size_t outsize, void *indata, size_t insize); /** * cros_ec_get_time_ns() - Return time in ns. diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 76b13ef67562..c8645b2ed3c0 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI DaVinci Audio Serial Port support * * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
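A short usage sketch for the renamed cros_ec_cmd() helper above, assuming the standard EC_CMD_GET_FEATURES command; the surrounding driver context is invented.

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>

static int demo_read_ec_features(struct cros_ec_device *ec_dev, u32 *flags)
{
	struct ec_response_get_features r;
	int ret;

	/* Version 0 request with no outgoing payload; response copied to &r. */
	ret = cros_ec_cmd(ec_dev, 0, EC_CMD_GET_FEATURES, NULL, 0, &r, sizeof(r));
	if (ret < 0)
		return ret;

	*flags = r.flags[0];
	return 0;
}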
*/ #ifndef __DAVINCI_ASP_H diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index e182a46e609f..b82e44662efe 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * DaVinci GPIO Platform Related Defines * * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __DAVINCI_GPIO_PLATFORM_H diff --git a/include/linux/platform_data/uio_dmem_genirq.h b/include/linux/platform_data/uio_dmem_genirq.h index 973c1bb32168..c8f6de685306 100644 --- a/include/linux/platform_data/uio_dmem_genirq.h +++ b/include/linux/platform_data/uio_dmem_genirq.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/platform_data/uio_dmem_genirq.h * * Copyright (C) 2012 Damian Hobson-Garcia - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _UIO_DMEM_GENIRQ_H diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h index 31f2e22661bc..f76fa393b802 100644 --- a/include/linux/platform_data/uio_pruss.h +++ b/include/linux/platform_data/uio_pruss.h @@ -1,18 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * include/linux/platform_data/uio_pruss.h * * Platform data for uio_pruss driver * * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _UIO_PRUSS_H_ diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h index 5e70d667031c..580978e468f8 100644 --- a/include/linux/platform_data/usb-omap.h +++ b/include/linux/platform_data/usb-omap.h @@ -1,22 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * usb-omap.h - Platform data for the various OMAP USB IPs * * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com - * - * This software is distributed under the terms of the GNU General Public - * License ("GPL") version 2, as published by the Free Software Foundation. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE - * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. */ #define OMAP3_HS_USB_PORTS 3 diff --git a/include/linux/platform_data/video-imxfb.h b/include/linux/platform_data/video-imxfb.h deleted file mode 100644 index 02812651af7d..000000000000 --- a/include/linux/platform_data/video-imxfb.h +++ /dev/null @@ -1,70 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This structure describes the machine which we are running on. - */ -#ifndef __MACH_IMXFB_H__ -#define __MACH_IMXFB_H__ - -#include <linux/fb.h> - -#define PCR_TFT (1 << 31) -#define PCR_COLOR (1 << 30) -#define PCR_PBSIZ_1 (0 << 28) -#define PCR_PBSIZ_2 (1 << 28) -#define PCR_PBSIZ_4 (2 << 28) -#define PCR_PBSIZ_8 (3 << 28) -#define PCR_BPIX_1 (0 << 25) -#define PCR_BPIX_2 (1 << 25) -#define PCR_BPIX_4 (2 << 25) -#define PCR_BPIX_8 (3 << 25) -#define PCR_BPIX_12 (4 << 25) -#define PCR_BPIX_16 (5 << 25) -#define PCR_BPIX_18 (6 << 25) -#define PCR_PIXPOL (1 << 24) -#define PCR_FLMPOL (1 << 23) -#define PCR_LPPOL (1 << 22) -#define PCR_CLKPOL (1 << 21) -#define PCR_OEPOL (1 << 20) -#define PCR_SCLKIDLE (1 << 19) -#define PCR_END_SEL (1 << 18) -#define PCR_END_BYTE_SWAP (1 << 17) -#define PCR_REV_VS (1 << 16) -#define PCR_ACD_SEL (1 << 15) -#define PCR_ACD(x) (((x) & 0x7f) << 8) -#define PCR_SCLK_SEL (1 << 7) -#define PCR_SHARP (1 << 6) -#define PCR_PCD(x) ((x) & 0x3f) - -#define PWMR_CLS(x) (((x) & 0x1ff) << 16) -#define PWMR_LDMSK (1 << 15) -#define PWMR_SCR1 (1 << 10) -#define PWMR_SCR0 (1 << 9) -#define PWMR_CC_EN (1 << 8) -#define PWMR_PW(x) ((x) & 0xff) - -#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26) -#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16) -#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8) -#define LSCR1_GRAY2(x) (((x) & 0xf) << 4) -#define LSCR1_GRAY1(x) (((x) & 0xf)) - -struct imx_fb_videomode { - struct fb_videomode mode; - u32 pcr; - bool aus_mode; - unsigned char bpp; -}; - -struct imx_fb_platform_data { - struct imx_fb_videomode *mode; - int num_modes; - - u_int pwmr; - u_int lscr1; - u_int dmacr; - - int (*init)(struct platform_device *); - void (*exit)(struct platform_device *); -}; - -#endif /* ifndef __MACH_IMXFB_H__ */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index a571b47ff362..98f2b2f20f3e 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -49,6 +49,7 @@ #define ASUS_WMI_DEVID_LED4 0x00020014 #define ASUS_WMI_DEVID_LED5 0x00020015 #define ASUS_WMI_DEVID_LED6 0x00020016 +#define ASUS_WMI_DEVID_MICMUTE_LED 0x00040017 /* Backlight and Brightness */ #define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */ diff --git a/include/linux/platform_data/x86/p2sb.h b/include/linux/platform_data/x86/p2sb.h new file mode 100644 index 
000000000000..a1d5fddc8f13 --- /dev/null +++ b/include/linux/platform_data/x86/p2sb.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Primary to Sideband (P2SB) bridge access support + */ + +#ifndef _PLATFORM_DATA_X86_P2SB_H +#define _PLATFORM_DATA_X86_P2SB_H + +#include <linux/errno.h> +#include <linux/kconfig.h> + +struct pci_bus; +struct resource; + +#if IS_BUILTIN(CONFIG_P2SB) + +int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem); + +#else /* CONFIG_P2SB */ + +static inline int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem) +{ + return -ENODEV; +} + +#endif /* CONFIG_P2SB is not set */ + +#endif /* _PLATFORM_DATA_X86_P2SB_H */ diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h index 6807839c718b..3edfb6d4e67a 100644 --- a/include/linux/platform_data/x86/pmc_atom.h +++ b/include/linux/platform_data/x86/pmc_atom.h @@ -47,7 +47,7 @@ #define PMC_S0I2_TMR 0x88 #define PMC_S0I3_TMR 0x8C #define PMC_S0_TMR 0x90 -/* Sleep state counter is in units of of 32us */ +/* Sleep state counter is in units of 32us */ #define PMC_TMR_SHIFT 5 /* Power status of power islands */ diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h index 62d2bc774067..39fefd48cf4d 100644 --- a/include/linux/platform_data/x86/simatic-ipc-base.h +++ b/include/linux/platform_data/x86/simatic-ipc-base.h @@ -24,6 +24,4 @@ struct simatic_ipc_platform { u8 devmode; }; -u32 simatic_ipc_get_membase0(unsigned int p2sb); - #endif /* __PLATFORM_DATA_X86_SIMATIC_IPC_BASE_H */ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 6708b4ec244d..dc1fb5890792 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -57,37 +57,39 @@ struct dev_pm_opp_icc_bw { u32 peak; }; -/** - * struct dev_pm_opp_info - OPP freq/voltage/current values - * @rate: Target clk rate in hz - * @supplies: Array of voltage/current values for all power supplies - * - * This structure stores the freq/voltage/current values for a single OPP. - */ -struct dev_pm_opp_info { - unsigned long rate; - struct dev_pm_opp_supply *supplies; -}; +typedef int (*config_regulators_t)(struct device *dev, + struct dev_pm_opp *old_opp, struct dev_pm_opp *new_opp, + struct regulator **regulators, unsigned int count); + +typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table, + struct dev_pm_opp *opp, void *data, bool scaling_down); /** - * struct dev_pm_set_opp_data - Set OPP data - * @old_opp: Old OPP info - * @new_opp: New OPP info - * @regulators: Array of regulator pointers - * @regulator_count: Number of regulators - * @clk: Pointer to clk - * @dev: Pointer to the struct device + * struct dev_pm_opp_config - Device OPP configuration values + * @clk_names: Clk names, NULL terminated array. + * @config_clks: Custom set clk helper. + * @prop_name: Name to postfix to properties. + * @config_regulators: Custom set regulator helper. + * @supported_hw: Array of hierarchy of versions to match. + * @supported_hw_count: Number of elements in the array. + * @regulator_names: Array of pointers to the names of the regulator, NULL terminated. + * @genpd_names: Null terminated array of pointers containing names of genpd to + * attach. + * @virt_devs: Pointer to return the array of virtual devices. * - * This structure contains all information required for setting an OPP. + * This structure contains platform specific OPP configurations for the device. 
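The new p2sb_bar() helper above can be exercised as in the following sketch; the NULL bus and zero devfn are assumed to select the default (possibly hidden) P2SB device.

#include <linux/ioport.h>
#include <linux/platform_data/x86/p2sb.h>

static int demo_locate_p2sb(struct resource *mem)
{
	/* Fills *mem with the hidden BAR on success, -ENODEV without P2SB support. */
	return p2sb_bar(NULL, 0, mem);
}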
*/ -struct dev_pm_set_opp_data { - struct dev_pm_opp_info old_opp; - struct dev_pm_opp_info new_opp; - - struct regulator **regulators; - unsigned int regulator_count; - struct clk *clk; - struct device *dev; +struct dev_pm_opp_config { + /* NULL terminated */ + const char * const *clk_names; + config_clks_t config_clks; + const char *prop_name; + config_regulators_t config_regulators; + const unsigned int *supported_hw; + unsigned int supported_hw_count; + const char * const *regulator_names; + const char * const *genpd_names; + struct device ***virt_devs; }; #if defined(CONFIG_PM_OPP) @@ -97,6 +99,8 @@ void dev_pm_opp_put_opp_table(struct opp_table *opp_table); unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); +int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies); + unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); @@ -119,8 +123,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, bool available); struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); -struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, - unsigned long u_volt); struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, unsigned int level); @@ -154,23 +156,13 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq); int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb); int dev_pm_opp_unregister_notifier(struct device *dev, struct notifier_block *nb); -struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count); -void dev_pm_opp_put_supported_hw(struct opp_table *opp_table); -int devm_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count); -struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name); -void dev_pm_opp_put_prop_name(struct opp_table *opp_table); -struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); -void dev_pm_opp_put_regulators(struct opp_table *opp_table); -int devm_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count); -struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name); -void dev_pm_opp_put_clkname(struct opp_table *opp_table); -int devm_pm_opp_set_clkname(struct device *dev, const char *name); -struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); -void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); -int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); -struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs); -void dev_pm_opp_detach_genpd(struct opp_table *opp_table); -int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs); +int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config); +int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config); +void dev_pm_opp_clear_config(int token); +int dev_pm_opp_config_clks_simple(struct device *dev, + struct opp_table *opp_table, struct dev_pm_opp *opp, void *data, + bool scaling_down); + struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp); 
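To illustrate the consolidated interface above, here is a rough sketch of a driver switching to dev_pm_opp_set_config(); the clock, regulator and supported-hw values are invented and error handling is reduced to the minimum.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static int demo_opp_setup(struct device *dev)
{
	static const char * const clk_names[] = { "core", NULL };
	static const char * const reg_names[] = { "vdd-core", NULL };
	static const u32 versions[] = { 0x2 };		/* assumed hw revision */
	struct dev_pm_opp_config config = {
		.clk_names = clk_names,
		.regulator_names = reg_names,
		.supported_hw = versions,
		.supported_hw_count = ARRAY_SIZE(versions),
	};
	int token;

	/* One call replaces the old per-resource set_clkname/set_regulators helpers. */
	token = dev_pm_opp_set_config(dev, &config);
	if (token < 0)
		return token;

	/* ... add OPPs, call dev_pm_opp_set_rate(), ... */

	dev_pm_opp_clear_config(token);
	return 0;
}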
int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); @@ -198,6 +190,11 @@ static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) return 0; } +static inline int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies) +{ + return -EOPNOTSUPP; +} + static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) { return 0; @@ -274,12 +271,6 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, return ERR_PTR(-EOPNOTSUPP); } -static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev, - unsigned long u_volt) -{ - return ERR_PTR(-EOPNOTSUPP); -} - static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { @@ -342,79 +333,21 @@ static inline int dev_pm_opp_unregister_notifier(struct device *dev, struct noti return -EOPNOTSUPP; } -static inline struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, - const u32 *versions, - unsigned int count) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_set_supported_hw(struct device *dev, - const u32 *versions, - unsigned int count) -{ - return -EOPNOTSUPP; -} - -static inline struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, - int (*set_opp)(struct dev_pm_set_opp_data *data)) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_register_set_opp_helper(struct device *dev, - int (*set_opp)(struct dev_pm_set_opp_data *data)) -{ - return -EOPNOTSUPP; -} - -static inline struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_prop_name(struct opp_table *opp_table) {} - -static inline struct opp_table *dev_pm_opp_set_regulators(struct device *dev, const char * const names[], unsigned int count) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_regulators(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_set_regulators(struct device *dev, - const char * const names[], - unsigned int count) +static inline int dev_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) { return -EOPNOTSUPP; } -static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char *name) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} - -static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name) +static inline int devm_pm_opp_set_config(struct device *dev, struct dev_pm_opp_config *config) { return -EOPNOTSUPP; } -static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} +static inline void dev_pm_opp_clear_config(int token) {} -static inline int devm_pm_opp_attach_genpd(struct device *dev, - const char * const *names, - struct device ***virt_devs) +static inline int dev_pm_opp_config_clks_simple(struct device *dev, + struct opp_table *opp_table, struct dev_pm_opp *opp, void *data, + bool scaling_down) { return 
-EOPNOTSUPP; } @@ -469,8 +402,6 @@ static inline int dev_pm_opp_sync_regulators(struct device *dev) int dev_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); int devm_pm_opp_of_add_table_indexed(struct device *dev, int index); -int dev_pm_opp_of_add_table_noclk(struct device *dev, int index); -int devm_pm_opp_of_add_table_noclk(struct device *dev, int index); void dev_pm_opp_of_remove_table(struct device *dev); int devm_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); @@ -501,16 +432,6 @@ static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index return -EOPNOTSUPP; } -static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) -{ - return -EOPNOTSUPP; -} - -static inline int devm_pm_opp_of_add_table_noclk(struct device *dev, int index) -{ - return -EOPNOTSUPP; -} - static inline void dev_pm_opp_of_remove_table(struct device *dev) { } @@ -565,4 +486,149 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta } #endif +/* OPP Configuration helpers */ + +/* Regulators helpers */ +static inline int dev_pm_opp_set_regulators(struct device *dev, + const char * const names[]) +{ + struct dev_pm_opp_config config = { + .regulator_names = names, + }; + + return dev_pm_opp_set_config(dev, &config); +} + +static inline void dev_pm_opp_put_regulators(int token) +{ + dev_pm_opp_clear_config(token); +} + +static inline int devm_pm_opp_set_regulators(struct device *dev, + const char * const names[]) +{ + struct dev_pm_opp_config config = { + .regulator_names = names, + }; + + return devm_pm_opp_set_config(dev, &config); +} + +/* Supported-hw helpers */ +static inline int dev_pm_opp_set_supported_hw(struct device *dev, + const u32 *versions, + unsigned int count) +{ + struct dev_pm_opp_config config = { + .supported_hw = versions, + .supported_hw_count = count, + }; + + return dev_pm_opp_set_config(dev, &config); +} + +static inline void dev_pm_opp_put_supported_hw(int token) +{ + dev_pm_opp_clear_config(token); +} + +static inline int devm_pm_opp_set_supported_hw(struct device *dev, + const u32 *versions, + unsigned int count) +{ + struct dev_pm_opp_config config = { + .supported_hw = versions, + .supported_hw_count = count, + }; + + return devm_pm_opp_set_config(dev, &config); +} + +/* clkname helpers */ +static inline int dev_pm_opp_set_clkname(struct device *dev, const char *name) +{ + const char *names[] = { name, NULL }; + struct dev_pm_opp_config config = { + .clk_names = names, + }; + + return dev_pm_opp_set_config(dev, &config); +} + +static inline void dev_pm_opp_put_clkname(int token) +{ + dev_pm_opp_clear_config(token); +} + +static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name) +{ + const char *names[] = { name, NULL }; + struct dev_pm_opp_config config = { + .clk_names = names, + }; + + return devm_pm_opp_set_config(dev, &config); +} + +/* config-regulators helpers */ +static inline int dev_pm_opp_set_config_regulators(struct device *dev, + config_regulators_t helper) +{ + struct dev_pm_opp_config config = { + .config_regulators = helper, + }; + + return dev_pm_opp_set_config(dev, &config); +} + +static inline void dev_pm_opp_put_config_regulators(int token) +{ + dev_pm_opp_clear_config(token); +} + +/* genpd helpers */ +static inline int dev_pm_opp_attach_genpd(struct device *dev, + const char * const *names, + struct device ***virt_devs) +{ + struct dev_pm_opp_config config = { 
+ .genpd_names = names, + .virt_devs = virt_devs, + }; + + return dev_pm_opp_set_config(dev, &config); +} + +static inline void dev_pm_opp_detach_genpd(int token) +{ + dev_pm_opp_clear_config(token); +} + +static inline int devm_pm_opp_attach_genpd(struct device *dev, + const char * const *names, + struct device ***virt_devs) +{ + struct dev_pm_opp_config config = { + .genpd_names = names, + .virt_devs = virt_devs, + }; + + return devm_pm_opp_set_config(dev, &config); +} + +/* prop-name helpers */ +static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) +{ + struct dev_pm_opp_config config = { + .prop_name = name, + }; + + return dev_pm_opp_set_config(dev, &config); +} + +static inline void dev_pm_opp_put_prop_name(int token) +{ + dev_pm_opp_clear_config(token); +} + #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h index e63a63aa47a3..dd42d16945d0 100644 --- a/include/linux/pm_wakeirq.h +++ b/include/linux/pm_wakeirq.h @@ -1,15 +1,5 @@ -/* - * pm_wakeirq.h - Device wakeirq helper functions - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* pm_wakeirq.h - Device wakeirq helper functions */ #ifndef _LINUX_PM_WAKEIRQ_H #define _LINUX_PM_WAKEIRQ_H diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h index 9d3ffc8f5ea6..fb847e47f148 100644 --- a/include/linux/ppp-comp.h +++ b/include/linux/ppp-comp.h @@ -9,7 +9,7 @@ #include <uapi/linux/ppp-comp.h> - +struct compstat; struct module; /* diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index 91f9a928344e..45e6e427ceb8 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h @@ -20,6 +20,8 @@ #include <linux/poll.h> #include <net/net_namespace.h> +struct net_device_path; +struct net_device_path_ctx; struct ppp_channel; struct ppp_channel_ops { diff --git a/include/linux/ppp_defs.h b/include/linux/ppp_defs.h index 9d2b388fae1a..b7e57fdbd413 100644 --- a/include/linux/ppp_defs.h +++ b/include/linux/ppp_defs.h @@ -11,4 +11,18 @@ #include <uapi/linux/ppp_defs.h> #define PPP_FCS(fcs, c) crc_ccitt_byte(fcs, c) + +/** + * ppp_proto_is_valid - checks if PPP protocol is valid + * @proto: PPP protocol + * + * Assumes proto is not compressed. + * Protocol is valid if the value is odd and the least significant bit of the + * most significant octet is 0 (see RFC 1661, section 2). + */ +static inline bool ppp_proto_is_valid(u16 proto) +{ + return !!((proto & 0x0101) == 0x0001); +} + #endif /* _PPP_DEFS_H_ */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index e97a8188f0fd..638507a3c8ff 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -57,6 +57,9 @@ struct pstore_info; * @size: size of @buf * @ecc_notice_size: * ECC information for @buf + * @priv: pointer for backend specific use, will be + * kfree()d by the pstore core if non-NULL + * when the record is freed. 
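A brief sketch of a pstore backend attaching per-record state through the new @priv member documented above; the backend structure and the flash offset are placeholders.

#include <linux/pstore.h>
#include <linux/slab.h>

struct demo_record_state {		/* hypothetical backend bookkeeping */
	u32 flash_offset;
};

static void demo_tag_record(struct pstore_record *record, u32 offset)
{
	struct demo_record_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return;

	st->flash_offset = offset;
	/* The pstore core kfree()s this pointer when the record is freed. */
	record->priv = st;
}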
* * Valid for PSTORE_TYPE_DMESG @type: * @@ -74,6 +77,7 @@ struct pstore_record { char *buf; ssize_t size; ssize_t ecc_notice_size; + void *priv; int count; enum kmsg_dump_reason reason; diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h index f960a719f0d5..c2e28deef33a 100644 --- a/include/linux/ptp_kvm.h +++ b/include/linux/ptp_kvm.h @@ -8,6 +8,8 @@ #ifndef _PTP_KVM_H_ #define _PTP_KVM_H_ +#include <linux/types.h> + struct timespec64; struct clocksource; diff --git a/include/linux/ptp_pch.h b/include/linux/ptp_pch.h index 51818198c292..7ba643b62c15 100644 --- a/include/linux/ptp_pch.h +++ b/include/linux/ptp_pch.h @@ -10,6 +10,10 @@ #ifndef _PTP_PCH_H_ #define _PTP_PCH_H_ +#include <linux/types.h> + +struct pci_dev; + void pch_ch_control_write(struct pci_dev *pdev, u32 val); u32 pch_ch_event_read(struct pci_dev *pdev); void pch_ch_event_write(struct pci_dev *pdev, u32 val); diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 235047d7a1b5..f7edca369eda 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -17,9 +17,9 @@ #ifndef _LINUX_RBTREE_H #define _LINUX_RBTREE_H +#include <linux/container_of.h> #include <linux/rbtree_types.h> -#include <linux/kernel.h> #include <linux/stddef.h> #include <linux/rcupdate.h> diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 7c943f0a2fc4..aea79c77db0f 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -597,7 +597,7 @@ struct rproc_subdev { /** * struct rproc_vring - remoteproc vring state * @va: virtual address - * @len: length, in bytes + * @num: vring size * @da: device address * @align: vring alignment * @notifyid: rproc-specific unique vring index @@ -606,7 +606,7 @@ struct rproc_subdev { */ struct rproc_vring { void *va; - int len; + int num; u32 da; u32 align; int notifyid; diff --git a/include/linux/reset/bcm63xx_pmb.h b/include/linux/reset/bcm63xx_pmb.h index bb4af7b5eb36..c77b6999518a 100644 --- a/include/linux/reset/bcm63xx_pmb.h +++ b/include/linux/reset/bcm63xx_pmb.h @@ -1,17 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Broadcom BCM63xx Processor Monitor Bus shared routines (SMP and reset) * * Copyright (C) 2015, Broadcom Corporation * Author: Florian Fainelli <f.fainelli@gmail.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __BCM63XX_PMB_H #define __BCM63XX_PMB_H diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 9ec23138e410..bf80adca980b 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -325,8 +325,8 @@ struct page_vma_mapped_walk { #define DEFINE_PAGE_VMA_WALK(name, _page, _vma, _address, _flags) \ struct page_vma_mapped_walk name = { \ .pfn = page_to_pfn(_page), \ - .nr_pages = compound_nr(page), \ - .pgoff = page_to_pgoff(page), \ + .nr_pages = compound_nr(_page), \ + .pgoff = page_to_pgoff(_page), \ .vma = _vma, \ .address = _address, \ .flags = _flags, \ diff --git a/include/linux/rv.h b/include/linux/rv.h new file mode 100644 index 000000000000..8883b41d88ec --- /dev/null +++ b/include/linux/rv.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Runtime Verification. 
+ * + * For futher information, see: kernel/trace/rv/rv.c. + */ +#ifndef _LINUX_RV_H +#define _LINUX_RV_H + +#define MAX_DA_NAME_LEN 24 + +#ifdef CONFIG_RV +/* + * Deterministic automaton per-object variables. + */ +struct da_monitor { + bool monitoring; + unsigned int curr_state; +}; + +/* + * Per-task RV monitors count. Nowadays fixed in RV_PER_TASK_MONITORS. + * If we find justification for more monitors, we can think about + * adding more or developing a dynamic method. So far, none of + * these are justified. + */ +#define RV_PER_TASK_MONITORS 1 +#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS) + +/* + * Futher monitor types are expected, so make this a union. + */ +union rv_task_monitor { + struct da_monitor da_mon; +}; + +#ifdef CONFIG_RV_REACTORS +struct rv_reactor { + const char *name; + const char *description; + void (*react)(char *msg); +}; +#endif + +struct rv_monitor { + const char *name; + const char *description; + bool enabled; + int (*enable)(void); + void (*disable)(void); + void (*reset)(void); +#ifdef CONFIG_RV_REACTORS + void (*react)(char *msg); +#endif +}; + +bool rv_monitoring_on(void); +int rv_unregister_monitor(struct rv_monitor *monitor); +int rv_register_monitor(struct rv_monitor *monitor); +int rv_get_task_monitor_slot(void); +void rv_put_task_monitor_slot(int slot); + +#ifdef CONFIG_RV_REACTORS +bool rv_reacting_on(void); +int rv_unregister_reactor(struct rv_reactor *reactor); +int rv_register_reactor(struct rv_reactor *reactor); +#endif /* CONFIG_RV_REACTORS */ + +#endif /* CONFIG_RV */ +#endif /* _LINUX_RV_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 7ff9d6386c12..375a5e90d86a 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -16,6 +16,9 @@ struct scatterlist { #ifdef CONFIG_NEED_SG_DMA_LENGTH unsigned int dma_length; #endif +#ifdef CONFIG_PCI_P2PDMA + unsigned int dma_flags; +#endif }; /* @@ -245,6 +248,72 @@ static inline void sg_unmark_end(struct scatterlist *sg) sg->page_link &= ~SG_END; } +/* + * CONFGI_PCI_P2PDMA depends on CONFIG_64BIT which means there is 4 bytes + * in struct scatterlist (assuming also CONFIG_NEED_SG_DMA_LENGTH is set). + * Use this padding for DMA flags bits to indicate when a specific + * dma address is a bus address. + */ +#ifdef CONFIG_PCI_P2PDMA + +#define SG_DMA_BUS_ADDRESS (1 << 0) + +/** + * sg_dma_is_bus address - Return whether a given segment was marked + * as a bus address + * @sg: SG entry + * + * Description: + * Returns true if sg_dma_mark_bus_address() has been called on + * this segment. + **/ +static inline bool sg_is_dma_bus_address(struct scatterlist *sg) +{ + return sg->dma_flags & SG_DMA_BUS_ADDRESS; +} + +/** + * sg_dma_mark_bus address - Mark the scatterlist entry as a bus address + * @sg: SG entry + * + * Description: + * Marks the passed in sg entry to indicate that the dma_address is + * a bus address and doesn't need to be unmapped. This should only be + * used by dma_map_sg() implementations to mark bus addresses + * so they can be properly cleaned up in dma_unmap_sg(). + **/ +static inline void sg_dma_mark_bus_address(struct scatterlist *sg) +{ + sg->dma_flags |= SG_DMA_BUS_ADDRESS; +} + +/** + * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address + * @sg: SG entry + * + * Description: + * Clears the bus address mark. 
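The following sketch registers a do-nothing monitor against the Runtime Verification interface added above; the callbacks and names are illustrative and assume CONFIG_RV is enabled.

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/rv.h>

static int demo_mon_enable(void)
{
	pr_info("demo RV monitor enabled\n");
	return 0;
}

static void demo_mon_disable(void)
{
	pr_info("demo RV monitor disabled\n");
}

static void demo_mon_reset(void)
{
	/* Reset per-object monitor state here. */
}

static struct rv_monitor demo_monitor = {
	.name		= "demo",
	.description	= "illustrative monitor, performs no real checking",
	.enable		= demo_mon_enable,
	.disable	= demo_mon_disable,
	.reset		= demo_mon_reset,
};

static int __init demo_rv_init(void)
{
	return rv_register_monitor(&demo_monitor);
}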
+ **/ +static inline void sg_dma_unmark_bus_address(struct scatterlist *sg) +{ + sg->dma_flags &= ~SG_DMA_BUS_ADDRESS; +} + +#else + +static inline bool sg_is_dma_bus_address(struct scatterlist *sg) +{ + return false; +} +static inline void sg_dma_mark_bus_address(struct scatterlist *sg) +{ +} +static inline void sg_dma_unmark_bus_address(struct scatterlist *sg) +{ +} + +#endif + /** * sg_phys - Return physical address of an sg entry * @sg: SG entry diff --git a/include/linux/sched.h b/include/linux/sched.h index d6b0866c71ed..e7b2f8a5c711 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -34,6 +34,7 @@ #include <linux/rseq.h> #include <linux/seqlock.h> #include <linux/kcsan.h> +#include <linux/rv.h> #include <asm/kmap_size.h> /* task_struct member predeclarations (sorted alphabetically): */ @@ -1501,6 +1502,16 @@ struct task_struct { struct callback_head l1d_flush_kill; #endif +#ifdef CONFIG_RV + /* + * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS. + * If we find justification for more monitors, we can think + * about adding more or developing a dynamic method. So far, + * none of these are justified. + */ + union rv_task_monitor rv[RV_PER_TASK_MONITORS]; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. @@ -1814,7 +1825,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags) } extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); +extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 8cd975a8bfeb..2a243616f222 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -29,7 +29,7 @@ extern struct mm_struct *mm_alloc(void); * * Use mmdrop() to release the reference acquired by mmgrab(). * - * See also <Documentation/vm/active_mm.rst> for an in-depth explanation + * See also <Documentation/mm/active_mm.rst> for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users. */ static inline void mmgrab(struct mm_struct *mm) @@ -92,7 +92,7 @@ static inline void mmdrop_sched(struct mm_struct *mm) * * Use mmput() to release the reference acquired by mmget(). * - * See also <Documentation/vm/active_mm.rst> for an in-depth explanation + * See also <Documentation/mm/active_mm.rst> for an in-depth explanation * of &mm_struct.mm_count vs &mm_struct.mm_users. 
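As a sketch of how a dma_map_sg()-style implementation might consume the sg_dma_*_bus_address() helpers completed above, the unmap path below skips entries that were mapped as PCI P2PDMA bus addresses; it is illustrative only.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void demo_unmap_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_dma_bus_address(sg)) {
			/* Bus addresses were never IOMMU mapped: only clear the mark. */
			sg_dma_unmark_bus_address(sg);
			continue;
		}
		/* ... tear down the regular mapping for sg_dma_address(sg) ... */
	}
}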
*/ static inline void mmget(struct mm_struct *mm) diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 00ed419dd464..f054d0360a75 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -24,7 +24,8 @@ struct user_struct { kuid_t uid; #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \ - defined(CONFIG_NET) || defined(CONFIG_IO_URING) + defined(CONFIG_NET) || defined(CONFIG_IO_URING) || \ + defined(CONFIG_VFIO_PCI_ZDEV_KVM) atomic_long_t locked_vm; #endif #ifdef CONFIG_WATCH_QUEUE diff --git a/include/linux/seq_file_net.h b/include/linux/seq_file_net.h index b97912fdbae7..79638395bc32 100644 --- a/include/linux/seq_file_net.h +++ b/include/linux/seq_file_net.h @@ -3,6 +3,7 @@ #define __SEQ_FILE_NET_H__ #include <linux/seq_file.h> +#include <net/net_trackers.h> struct net; extern struct net init_net; diff --git a/include/linux/serial.h b/include/linux/serial.h index 0b8b7d7c8f33..3d6fe3ef92cf 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h @@ -9,12 +9,20 @@ #ifndef _LINUX_SERIAL_H #define _LINUX_SERIAL_H -#include <asm/page.h> #include <uapi/linux/serial.h> +#include <uapi/linux/serial_reg.h> /* Helper for dealing with UART_LCR_WLEN* defines */ #define UART_LCR_WLEN(x) ((x) - 5) +/* FIFO and shifting register empty */ +#define UART_LSR_BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + +static inline bool uart_lsr_tx_empty(u16 lsr) +{ + return (lsr & UART_LSR_BOTH_EMPTY) == UART_LSR_BOTH_EMPTY; +} + /* * Counters of the input lines (CTS, DSR, RI, CD) interrupts */ @@ -25,11 +33,6 @@ struct async_icount { __u32 buf_overrun; }; -/* - * The size of the serial xmit buffer is 1 page, or 4096 bytes - */ -#define SERIAL_XMIT_SIZE PAGE_SIZE - #include <linux/compiler.h> #endif /* _LINUX_SERIAL_H */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index ff84a3ed10ea..8c7b793aa4d7 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -119,7 +119,8 @@ struct uart_8250_port { * be immediately processed. */ #define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS - unsigned char lsr_saved_flags; + u16 lsr_saved_flags; + u16 lsr_save_mask; #define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA unsigned char msr_saved_flags; @@ -170,8 +171,8 @@ extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud, unsigned int quot_frac); extern int fsl8250_handle_irq(struct uart_port *port); int serial8250_handle_irq(struct uart_port *port, unsigned int iir); -unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr); -void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr); +u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr); +void serial8250_read_char(struct uart_8250_port *up, u16 lsr); void serial8250_tx_chars(struct uart_8250_port *up); unsigned int serial8250_modem_status(struct uart_8250_port *up); void serial8250_init_port(struct uart_8250_port *up); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index fde258b3decd..aef3145f2032 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -31,9 +31,336 @@ struct serial_struct; struct device; struct gpio_desc; -/* +/** + * struct uart_ops -- interface between serial_core and the driver + * * This structure describes all the operations that can be done on the - * physical hardware. See Documentation/driver-api/serial/driver.rst for details. + * physical hardware. 
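A condensed variant of the classic 8250 wait-for-transmitter loop using the new u16 LSR handling and the uart_lsr_tx_empty() helper above; the direct serial_in() call and the saved-flags update only mirror, and do not reproduce, the in-tree driver.

#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>

static void demo_wait_for_xmitr(struct uart_8250_port *up)
{
	u16 lsr;

	do {
		lsr = up->port.serial_in(&up->port, UART_LSR);
		/* Latch break/error bits so a later ISR pass still sees them. */
		up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
	} while (!uart_lsr_tx_empty(lsr));
}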
+ * + * @tx_empty: ``unsigned int ()(struct uart_port *port)`` + * + * This function tests whether the transmitter fifo and shifter for the + * @port is empty. If it is empty, this function should return + * %TIOCSER_TEMT, otherwise return 0. If the port does not support this + * operation, then it should return %TIOCSER_TEMT. + * + * Locking: none. + * Interrupts: caller dependent. + * This call must not sleep + * + * @set_mctrl: ``void ()(struct uart_port *port, unsigned int mctrl)`` + * + * This function sets the modem control lines for @port to the state + * described by @mctrl. The relevant bits of @mctrl are: + * + * - %TIOCM_RTS RTS signal. + * - %TIOCM_DTR DTR signal. + * - %TIOCM_OUT1 OUT1 signal. + * - %TIOCM_OUT2 OUT2 signal. + * - %TIOCM_LOOP Set the port into loopback mode. + * + * If the appropriate bit is set, the signal should be driven + * active. If the bit is clear, the signal should be driven + * inactive. + * + * Locking: @port->lock taken. + * Interrupts: locally disabled. + * This call must not sleep + * + * @get_mctrl: ``unsigned int ()(struct uart_port *port)`` + * + * Returns the current state of modem control inputs of @port. The state + * of the outputs should not be returned, since the core keeps track of + * their state. The state information should include: + * + * - %TIOCM_CAR state of DCD signal + * - %TIOCM_CTS state of CTS signal + * - %TIOCM_DSR state of DSR signal + * - %TIOCM_RI state of RI signal + * + * The bit is set if the signal is currently driven active. If + * the port does not support CTS, DCD or DSR, the driver should + * indicate that the signal is permanently active. If RI is + * not available, the signal should not be indicated as active. + * + * Locking: @port->lock taken. + * Interrupts: locally disabled. + * This call must not sleep + * + * @stop_tx: ``void ()(struct uart_port *port)`` + * + * Stop transmitting characters. This might be due to the CTS line + * becoming inactive or the tty layer indicating we want to stop + * transmission due to an %XOFF character. + * + * The driver should stop transmitting characters as soon as possible. + * + * Locking: @port->lock taken. + * Interrupts: locally disabled. + * This call must not sleep + * + * @start_tx: ``void ()(struct uart_port *port)`` + * + * Start transmitting characters. + * + * Locking: @port->lock taken. + * Interrupts: locally disabled. + * This call must not sleep + * + * @throttle: ``void ()(struct uart_port *port)`` + * + * Notify the serial driver that input buffers for the line discipline are + * close to full, and it should somehow signal that no more characters + * should be sent to the serial port. + * This will be called only if hardware assisted flow control is enabled. + * + * Locking: serialized with @unthrottle() and termios modification by the + * tty layer. + * + * @unthrottle: ``void ()(struct uart_port *port)`` + * + * Notify the serial driver that characters can now be sent to the serial + * port without fear of overrunning the input buffers of the line + * disciplines. + * + * This will be called only if hardware assisted flow control is enabled. + * + * Locking: serialized with @throttle() and termios modification by the + * tty layer. + * + * @send_xchar: ``void ()(struct uart_port *port, char ch)`` + * + * Transmit a high priority character, even if the port is stopped. This + * is used to implement XON/XOFF flow control and tcflow(). 
If the serial + * driver does not implement this function, the tty core will append the + * character to the circular buffer and then call start_tx() / stop_tx() + * to flush the data out. + * + * Do not transmit if @ch == '\0' (%__DISABLED_CHAR). + * + * Locking: none. + * Interrupts: caller dependent. + * + * @stop_rx: ``void ()(struct uart_port *port)`` + * + * Stop receiving characters; the @port is in the process of being closed. + * + * Locking: @port->lock taken. + * Interrupts: locally disabled. + * This call must not sleep + * + * @enable_ms: ``void ()(struct uart_port *port)`` + * + * Enable the modem status interrupts. + * + * This method may be called multiple times. Modem status interrupts + * should be disabled when the @shutdown() method is called. + * + * Locking: @port->lock taken. + * Interrupts: locally disabled. + * This call must not sleep + * + * @break_ctl: ``void ()(struct uart_port *port, int ctl)`` + * + * Control the transmission of a break signal. If @ctl is nonzero, the + * break signal should be transmitted. The signal should be terminated + * when another call is made with a zero @ctl. + * + * Locking: caller holds tty_port->mutex + * + * @startup: ``int ()(struct uart_port *port)`` + * + * Grab any interrupt resources and initialise any low level driver state. + * Enable the port for reception. It should not activate RTS nor DTR; + * this will be done via a separate call to @set_mctrl(). + * + * This method will only be called when the port is initially opened. + * + * Locking: port_sem taken. + * Interrupts: globally disabled. + * + * @shutdown: ``void ()(struct uart_port *port)`` + * + * Disable the @port, disable any break condition that may be in effect, + * and free any interrupt resources. It should not disable RTS nor DTR; + * this will have already been done via a separate call to @set_mctrl(). + * + * Drivers must not access @port->state once this call has completed. + * + * This method will only be called when there are no more users of this + * @port. + * + * Locking: port_sem taken. + * Interrupts: caller dependent. + * + * @flush_buffer: ``void ()(struct uart_port *port)`` + * + * Flush any write buffers, reset any DMA state and stop any ongoing DMA + * transfers. + * + * This will be called whenever the @port->state->xmit circular buffer is + * cleared. + * + * Locking: @port->lock taken. + * Interrupts: locally disabled. + * This call must not sleep + * + * @set_termios: ``void ()(struct uart_port *port, struct ktermios *new, + * struct ktermios *old)`` + * + * Change the @port parameters, including word length, parity, stop bits. + * Update @port->read_status_mask and @port->ignore_status_mask to + * indicate the types of events we are interested in receiving. Relevant + * ktermios::c_cflag bits are: + * + * - %CSIZE - word size + * - %CSTOPB - 2 stop bits + * - %PARENB - parity enable + * - %PARODD - odd parity (when %PARENB is in force) + * - %ADDRB - address bit (changed through uart_port::rs485_config()). + * - %CREAD - enable reception of characters (if not set, still receive + * characters from the port, but throw them away). + * - %CRTSCTS - if set, enable CTS status change reporting. + * - %CLOCAL - if not set, enable modem status change reporting. + * + * Relevant ktermios::c_iflag bits are: + * + * - %INPCK - enable frame and parity error events to be passed to the TTY + * layer. + * - %BRKINT / %PARMRK - both of these enable break events to be passed to + * the TTY layer. + * - %IGNPAR - ignore parity and framing errors. 
+ * - %IGNBRK - ignore break errors. If %IGNPAR is also set, ignore overrun + * errors as well. + * + * The interaction of the ktermios::c_iflag bits is as follows (parity + * error given as an example): + * + * ============ ======= ======= ========================================= + * Parity error INPCK IGNPAR + * ============ ======= ======= ========================================= + * n/a 0 n/a character received, marked as %TTY_NORMAL + * None 1 n/a character received, marked as %TTY_NORMAL + * Yes 1 0 character received, marked as %TTY_PARITY + * Yes 1 1 character discarded + * ============ ======= ======= ========================================= + * + * Other flags may be used (eg, xon/xoff characters) if your hardware + * supports hardware "soft" flow control. + * + * Locking: caller holds tty_port->mutex + * Interrupts: caller dependent. + * This call must not sleep + * + * @set_ldisc: ``void ()(struct uart_port *port, struct ktermios *termios)`` + * + * Notifier for discipline change. See + * Documentation/driver-api/tty/tty_ldisc.rst. + * + * Locking: caller holds tty_port->mutex + * + * @pm: ``void ()(struct uart_port *port, unsigned int state, + * unsigned int oldstate)`` + * + * Perform any power management related activities on the specified @port. + * @state indicates the new state (defined by enum uart_pm_state), + * @oldstate indicates the previous state. + * + * This function should not be used to grab any resources. + * + * This will be called when the @port is initially opened and finally + * closed, except when the @port is also the system console. This will + * occur even if %CONFIG_PM is not set. + * + * Locking: none. + * Interrupts: caller dependent. + * + * @type: ``const char *()(struct uart_port *port)`` + * + * Return a pointer to a string constant describing the specified @port, + * or return %NULL, in which case the string 'unknown' is substituted. + * + * Locking: none. + * Interrupts: caller dependent. + * + * @release_port: ``void ()(struct uart_port *port)`` + * + * Release any memory and IO region resources currently in use by the + * @port. + * + * Locking: none. + * Interrupts: caller dependent. + * + * @request_port: ``int ()(struct uart_port *port)`` + * + * Request any memory and IO region resources required by the port. If any + * fail, no resources should be registered when this function returns, and + * it should return -%EBUSY on failure. + * + * Locking: none. + * Interrupts: caller dependent. + * + * @config_port: ``void ()(struct uart_port *port, int type)`` + * + * Perform any autoconfiguration steps required for the @port. @type + * contains a bit mask of the required configuration. %UART_CONFIG_TYPE + * indicates that the port requires detection and identification. + * @port->type should be set to the type found, or %PORT_UNKNOWN if no + * port was detected. + * + * %UART_CONFIG_IRQ indicates autoconfiguration of the interrupt signal, + * which should be probed using standard kernel autoprobing techniques. + * This is not necessary on platforms where ports have interrupts + * internally hard wired (eg, system on a chip implementations). + * + * Locking: none. + * Interrupts: caller dependent. + * + * @verify_port: ``int ()(struct uart_port *port, + * struct serial_struct *serinfo)`` + * + * Verify the new serial port information contained within @serinfo is + * suitable for this port type. + * + * Locking: none. + * Interrupts: caller dependent. 
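The kernel-doc above now carries the full contract for each uart_ops callback. As a rough, hypothetical illustration (the driver name, register access and the subset of callbacks shown are assumptions, not taken from this patch), a minimal driver could wire up the TX-empty and modem-control hooks along these lines, reusing the uart_lsr_tx_empty() helper added to <linux/serial.h> above:

/* Hypothetical sketch, not part of this patch. */
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>

static unsigned int foo_tx_empty(struct uart_port *port)
{
	u16 lsr = port->serial_in(port, UART_LSR);

	/* Report %TIOCSER_TEMT once both FIFO and shift register have drained. */
	return uart_lsr_tx_empty(lsr) ? TIOCSER_TEMT : 0;
}

static void foo_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* Drive RTS/DTR according to @mctrl; hardware specific, omitted here. */
}

static unsigned int foo_get_mctrl(struct uart_port *port)
{
	/* No modem-status inputs wired up: report CTS/DCD/DSR as permanently active. */
	return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR;
}

static const struct uart_ops foo_uart_ops = {
	.tx_empty	= foo_tx_empty,
	.set_mctrl	= foo_set_mctrl,
	.get_mctrl	= foo_get_mctrl,
	/* .startup, .shutdown, .set_termios, ... omitted for brevity. */
};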
+ * + * @ioctl: ``int ()(struct uart_port *port, unsigned int cmd, + * unsigned long arg)`` + * + * Perform any port specific IOCTLs. IOCTL commands must be defined using + * the standard numbering system found in <asm/ioctl.h>. + * + * Locking: none. + * Interrupts: caller dependent. + * + * @poll_init: ``int ()(struct uart_port *port)`` + * + * Called by kgdb to perform the minimal hardware initialization needed to + * support @poll_put_char() and @poll_get_char(). Unlike @startup(), this + * should not request interrupts. + * + * Locking: %tty_mutex and tty_port->mutex taken. + * Interrupts: n/a. + * + * @poll_put_char: ``void ()(struct uart_port *port, unsigned char ch)`` + * + * Called by kgdb to write a single character @ch directly to the serial + * @port. It can and should block until there is space in the TX FIFO. + * + * Locking: none. + * Interrupts: caller dependent. + * This call must not sleep + * + * @poll_get_char: ``int ()(struct uart_port *port)`` + * + * Called by kgdb to read a single character directly from the serial + * port. If data is available, it should be returned; otherwise the + * function should return %NO_POLL_CHAR immediately. + * + * Locking: none. + * Interrupts: caller dependent. + * This call must not sleep */ struct uart_ops { unsigned int (*tx_empty)(struct uart_port *); @@ -56,22 +383,8 @@ struct uart_ops { void (*set_ldisc)(struct uart_port *, struct ktermios *); void (*pm)(struct uart_port *, unsigned int state, unsigned int oldstate); - - /* - * Return a string describing the type of the port - */ const char *(*type)(struct uart_port *); - - /* - * Release IO and memory resources used by the port. - * This includes iounmap if necessary. - */ void (*release_port)(struct uart_port *); - - /* - * Request IO and memory resources used by the port. - * This includes iomapping the port if necessary. - */ int (*request_port)(struct uart_port *); void (*config_port)(struct uart_port *, int); int (*verify_port)(struct uart_port *, struct serial_struct *); @@ -133,6 +446,7 @@ struct uart_port { unsigned int old); void (*handle_break)(struct uart_port *); int (*rs485_config)(struct uart_port *, + struct ktermios *termios, struct serial_rs485 *rs485); int (*iso7816_config)(struct uart_port *, struct serial_iso7816 *iso7816); @@ -232,7 +546,6 @@ struct uart_port { int hw_stopped; /* sw-assisted CTS flow state */ unsigned int mctrl; /* current modem ctrl settings */ - unsigned int timeout; /* character-based timeout */ unsigned int frame_time; /* frame timing in ns */ unsigned int type; /* port type */ const struct uart_ops *ops; @@ -255,6 +568,7 @@ struct uart_port { struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; + struct serial_rs485 rs485_supported; /* Supported mask for serial_rs485 */ struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */ struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ @@ -334,10 +648,23 @@ unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios unsigned int max); unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); +/* + * Calculates FIFO drain time. 
+ */ +static inline unsigned long uart_fifo_timeout(struct uart_port *port) +{ + u64 fifo_timeout = (u64)READ_ONCE(port->frame_time) * port->fifosize; + + /* Add .02 seconds of slop */ + fifo_timeout += 20 * NSEC_PER_MSEC; + + return max(nsecs_to_jiffies(fifo_timeout), 1UL); +} + /* Base timer interval for polling */ static inline int uart_poll_timeout(struct uart_port *port) { - int timeout = port->timeout; + int timeout = uart_fifo_timeout(port); return timeout > 6 ? (timeout / 2 - 2) : 1; } @@ -598,4 +925,5 @@ static inline int uart_handle_break(struct uart_port *port) !((cflag) & CLOCAL)) int uart_get_rs485_mode(struct uart_port *port); +int uart_rs485_config(struct uart_port *port); #endif /* LINUX_SERIAL_CORE_H */ diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index dec15f5b3dec..1672cf0810ef 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -83,7 +83,7 @@ #define S3C2410_UCON_RXIRQMODE (1<<0) #define S3C2410_UCON_RXFIFO_TOI (1<<7) #define S3C2443_UCON_RXERR_IRQEN (1<<6) -#define S3C2443_UCON_LOOPBACK (1<<5) +#define S3C2410_UCON_LOOPBACK (1<<5) #define S3C2410_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ S3C2410_UCON_RXILEVEL | \ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index a68f982f22d1..1b6c4013f691 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -25,9 +25,20 @@ struct shmem_inode_info { struct simple_xattrs xattrs; /* list of xattrs */ atomic_t stop_eviction; /* hold when working on inode */ struct timespec64 i_crtime; /* file creation time */ + unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */ struct inode vfs_inode; }; +#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE +#define SHMEM_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE +#define SHMEM_FL_INHERITED FS_FL_USER_MODIFIABLE + +/* Flags that are appropriate for regular files (all but dir-specific ones). */ +#define SHMEM_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL)) + +/* Flags that are appropriate for non-directories/regular files. 
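With uart_port::timeout gone, the drain interval is derived on demand from frame_time and fifosize via uart_fifo_timeout(). A hedged sketch of the resulting pattern; the wrapper function and its polling policy are hypothetical, not part of this patch:

/* Hypothetical sketch, not part of this patch. */
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/serial_core.h>

static void foo_wait_until_sent(struct uart_port *port)
{
	unsigned long deadline = jiffies + uart_fifo_timeout(port);

	/* Poll the driver's tx_empty() until the FIFO drains or time runs out. */
	while (!(port->ops->tx_empty(port) & TIOCSER_TEMT)) {
		if (time_after(jiffies, deadline))
			break;
		msleep(1);
	}
}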
*/ +#define SHMEM_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL) + struct shmem_sb_info { unsigned long max_blocks; /* How many blocks are allowed */ struct percpu_counter used_blocks; /* How many are allocated */ diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 76fbf92b04d9..08e6054e061f 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -73,6 +73,11 @@ struct shrinker { /* ID in shrinker_idr */ int id; #endif +#ifdef CONFIG_SHRINKER_DEBUG + int debugfs_id; + const char *name; + struct dentry *debugfs_entry; +#endif /* objs pending delete, per node */ atomic_long_t *nr_deferred; }; @@ -88,10 +93,32 @@ struct shrinker { */ #define SHRINKER_NONSLAB (1 << 3) -extern int prealloc_shrinker(struct shrinker *shrinker); +extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker, + const char *fmt, ...); extern void register_shrinker_prepared(struct shrinker *shrinker); -extern int register_shrinker(struct shrinker *shrinker); +extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker, + const char *fmt, ...); extern void unregister_shrinker(struct shrinker *shrinker); extern void free_prealloced_shrinker(struct shrinker *shrinker); extern void synchronize_shrinkers(void); -#endif + +#ifdef CONFIG_SHRINKER_DEBUG +extern int shrinker_debugfs_add(struct shrinker *shrinker); +extern void shrinker_debugfs_remove(struct shrinker *shrinker); +extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker, + const char *fmt, ...); +#else /* CONFIG_SHRINKER_DEBUG */ +static inline int shrinker_debugfs_add(struct shrinker *shrinker) +{ + return 0; +} +static inline void shrinker_debugfs_remove(struct shrinker *shrinker) +{ +} +static inline __printf(2, 3) +int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...) +{ + return 0; +} +#endif /* CONFIG_SHRINKER_DEBUG */ +#endif /* _LINUX_SHRINKER_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1111adefd906..ca8afa382bf2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -43,6 +43,7 @@ #include <linux/netfilter/nf_conntrack_common.h> #endif #include <net/net_debug.h> +#include <net/dropreason.h> /** * DOC: skb checksums @@ -337,184 +338,6 @@ struct sk_buff_head { struct sk_buff; -/* The reason of skb drop, which is used in kfree_skb_reason(). - * en...maybe they should be splited by group? - * - * Each item here should also be in 'TRACE_SKB_DROP_REASON', which is - * used to translate the reason to string. - */ -enum skb_drop_reason { - SKB_NOT_DROPPED_YET = 0, - SKB_DROP_REASON_NOT_SPECIFIED, /* drop reason is not specified */ - SKB_DROP_REASON_NO_SOCKET, /* socket not found */ - SKB_DROP_REASON_PKT_TOO_SMALL, /* packet size is too small */ - SKB_DROP_REASON_TCP_CSUM, /* TCP checksum error */ - SKB_DROP_REASON_SOCKET_FILTER, /* dropped by socket filter */ - SKB_DROP_REASON_UDP_CSUM, /* UDP checksum error */ - SKB_DROP_REASON_NETFILTER_DROP, /* dropped by netfilter */ - SKB_DROP_REASON_OTHERHOST, /* packet don't belong to current - * host (interface is in promisc - * mode) - */ - SKB_DROP_REASON_IP_CSUM, /* IP checksum error */ - SKB_DROP_REASON_IP_INHDR, /* there is something wrong with - * IP header (see - * IPSTATS_MIB_INHDRERRORS) - */ - SKB_DROP_REASON_IP_RPFILTER, /* IP rpfilter validate failed. - * see the document for rp_filter - * in ip-sysctl.rst for more - * information - */ - SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, /* destination address of L2 - * is multicast, but L3 is - * unicast. 
- */ - SKB_DROP_REASON_XFRM_POLICY, /* xfrm policy check failed */ - SKB_DROP_REASON_IP_NOPROTO, /* no support for IP protocol */ - SKB_DROP_REASON_SOCKET_RCVBUFF, /* socket receive buff is full */ - SKB_DROP_REASON_PROTO_MEM, /* proto memory limition, such as - * udp packet drop out of - * udp_memory_allocated. - */ - SKB_DROP_REASON_TCP_MD5NOTFOUND, /* no MD5 hash and one - * expected, corresponding - * to LINUX_MIB_TCPMD5NOTFOUND - */ - SKB_DROP_REASON_TCP_MD5UNEXPECTED, /* MD5 hash and we're not - * expecting one, corresponding - * to LINUX_MIB_TCPMD5UNEXPECTED - */ - SKB_DROP_REASON_TCP_MD5FAILURE, /* MD5 hash and its wrong, - * corresponding to - * LINUX_MIB_TCPMD5FAILURE - */ - SKB_DROP_REASON_SOCKET_BACKLOG, /* failed to add skb to socket - * backlog (see - * LINUX_MIB_TCPBACKLOGDROP) - */ - SKB_DROP_REASON_TCP_FLAGS, /* TCP flags invalid */ - SKB_DROP_REASON_TCP_ZEROWINDOW, /* TCP receive window size is zero, - * see LINUX_MIB_TCPZEROWINDOWDROP - */ - SKB_DROP_REASON_TCP_OLD_DATA, /* the TCP data reveived is already - * received before (spurious retrans - * may happened), see - * LINUX_MIB_DELAYEDACKLOST - */ - SKB_DROP_REASON_TCP_OVERWINDOW, /* the TCP data is out of window, - * the seq of the first byte exceed - * the right edges of receive - * window - */ - SKB_DROP_REASON_TCP_OFOMERGE, /* the data of skb is already in - * the ofo queue, corresponding to - * LINUX_MIB_TCPOFOMERGE - */ - SKB_DROP_REASON_TCP_RFC7323_PAWS, /* PAWS check, corresponding to - * LINUX_MIB_PAWSESTABREJECTED - */ - SKB_DROP_REASON_TCP_INVALID_SEQUENCE, /* Not acceptable SEQ field */ - SKB_DROP_REASON_TCP_RESET, /* Invalid RST packet */ - SKB_DROP_REASON_TCP_INVALID_SYN, /* Incoming packet has unexpected SYN flag */ - SKB_DROP_REASON_TCP_CLOSE, /* TCP socket in CLOSE state */ - SKB_DROP_REASON_TCP_FASTOPEN, /* dropped by FASTOPEN request socket */ - SKB_DROP_REASON_TCP_OLD_ACK, /* TCP ACK is old, but in window */ - SKB_DROP_REASON_TCP_TOO_OLD_ACK, /* TCP ACK is too old */ - SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, /* TCP ACK for data we haven't sent yet */ - SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE, /* pruned from TCP OFO queue */ - SKB_DROP_REASON_TCP_OFO_DROP, /* data already in receive queue */ - SKB_DROP_REASON_IP_OUTNOROUTES, /* route lookup failed */ - SKB_DROP_REASON_BPF_CGROUP_EGRESS, /* dropped by - * BPF_PROG_TYPE_CGROUP_SKB - * eBPF program - */ - SKB_DROP_REASON_IPV6DISABLED, /* IPv6 is disabled on the device */ - SKB_DROP_REASON_NEIGH_CREATEFAIL, /* failed to create neigh - * entry - */ - SKB_DROP_REASON_NEIGH_FAILED, /* neigh entry in failed state */ - SKB_DROP_REASON_NEIGH_QUEUEFULL, /* arp_queue for neigh - * entry is full - */ - SKB_DROP_REASON_NEIGH_DEAD, /* neigh entry is dead */ - SKB_DROP_REASON_TC_EGRESS, /* dropped in TC egress HOOK */ - SKB_DROP_REASON_QDISC_DROP, /* dropped by qdisc when packet - * outputting (failed to enqueue to - * current qdisc) - */ - SKB_DROP_REASON_CPU_BACKLOG, /* failed to enqueue the skb to - * the per CPU backlog queue. 
This - * can be caused by backlog queue - * full (see netdev_max_backlog in - * net.rst) or RPS flow limit - */ - SKB_DROP_REASON_XDP, /* dropped by XDP in input path */ - SKB_DROP_REASON_TC_INGRESS, /* dropped in TC ingress HOOK */ - SKB_DROP_REASON_UNHANDLED_PROTO, /* protocol not implemented - * or not supported - */ - SKB_DROP_REASON_SKB_CSUM, /* sk_buff checksum computation - * error - */ - SKB_DROP_REASON_SKB_GSO_SEG, /* gso segmentation error */ - SKB_DROP_REASON_SKB_UCOPY_FAULT, /* failed to copy data from - * user space, e.g., via - * zerocopy_sg_from_iter() - * or skb_orphan_frags_rx() - */ - SKB_DROP_REASON_DEV_HDR, /* device driver specific - * header/metadata is invalid - */ - /* the device is not ready to xmit/recv due to any of its data - * structure that is not up/ready/initialized, e.g., the IFF_UP is - * not set, or driver specific tun->tfiles[txq] is not initialized - */ - SKB_DROP_REASON_DEV_READY, - SKB_DROP_REASON_FULL_RING, /* ring buffer is full */ - SKB_DROP_REASON_NOMEM, /* error due to OOM */ - SKB_DROP_REASON_HDR_TRUNC, /* failed to trunc/extract the header - * from networking data, e.g., failed - * to pull the protocol header from - * frags via pskb_may_pull() - */ - SKB_DROP_REASON_TAP_FILTER, /* dropped by (ebpf) filter directly - * attached to tun/tap, e.g., via - * TUNSETFILTEREBPF - */ - SKB_DROP_REASON_TAP_TXFILTER, /* dropped by tx filter implemented - * at tun/tap, e.g., check_filter() - */ - SKB_DROP_REASON_ICMP_CSUM, /* ICMP checksum error */ - SKB_DROP_REASON_INVALID_PROTO, /* the packet doesn't follow RFC - * 2211, such as a broadcasts - * ICMP_TIMESTAMP - */ - SKB_DROP_REASON_IP_INADDRERRORS, /* host unreachable, corresponding - * to IPSTATS_MIB_INADDRERRORS - */ - SKB_DROP_REASON_IP_INNOROUTES, /* network unreachable, corresponding - * to IPSTATS_MIB_INADDRERRORS - */ - SKB_DROP_REASON_PKT_TOO_BIG, /* packet size is too big (maybe exceed - * the MTU) - */ - SKB_DROP_REASON_MAX, -}; - -#define SKB_DR_INIT(name, reason) \ - enum skb_drop_reason name = SKB_DROP_REASON_##reason -#define SKB_DR(name) \ - SKB_DR_INIT(name, NOT_SPECIFIED) -#define SKB_DR_SET(name, reason) \ - (name = SKB_DROP_REASON_##reason) -#define SKB_DR_OR(name, reason) \ - do { \ - if (name == SKB_DROP_REASON_NOT_SPECIFIED || \ - name == SKB_NOT_DROPPED_YET) \ - SKB_DR_SET(name, reason); \ - } while (0) - /* To allow 64K frame to be packed as single skb without frag_list we * require 64K/PAGE_SIZE pages plus 1 additional page to allow for * buffers which do not start on a page boundary. 
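The shrinker.h hunk above gives prealloc_shrinker() and register_shrinker() a printf-style name, which the new CONFIG_SHRINKER_DEBUG interface uses to label each instance in debugfs. A hedged sketch of the updated calling convention; the shrinker and its callbacks are placeholders, not part of this patch:

/* Hypothetical sketch, not part of this patch. */
#include <linux/shrinker.h>

static unsigned long foo_count(struct shrinker *s, struct shrink_control *sc)
{
	return 0;	/* nothing to reclaim in this sketch */
}

static unsigned long foo_scan(struct shrinker *s, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker foo_shrinker = {
	.count_objects	= foo_count,
	.scan_objects	= foo_scan,
	.seeks		= DEFAULT_SEEKS,
};

static int foo_register(void)
{
	/* The formatted name is used for the debugfs entry when
	 * CONFIG_SHRINKER_DEBUG is enabled.
	 */
	return register_shrinker(&foo_shrinker, "foo-%d", 0);
}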
@@ -2567,6 +2390,18 @@ static inline void __skb_fill_page_desc_noacc(struct skb_shared_info *shinfo, } /** + * skb_len_add - adds a number to len fields of skb + * @skb: buffer to add len to + * @delta: number of bytes to add + */ +static inline void skb_len_add(struct sk_buff *skb, int delta) +{ + skb->len += delta; + skb->data_len += delta; + skb->truesize += delta; +} + +/** * __skb_fill_page_desc - initialise a paged fragment in an skb * @skb: buffer containing fragment to be initialised * @i: paged fragment index to initialise @@ -2652,6 +2487,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) #endif /* NET_SKBUFF_DATA_USES_OFFSET */ +static inline void skb_assert_len(struct sk_buff *skb) +{ +#ifdef CONFIG_DEBUG_NET + if (WARN_ONCE(!skb->len, "%s\n", __func__)) + DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); +#endif /* CONFIG_DEBUG_NET */ +} + /* * Add data to an sk_buff */ @@ -2968,8 +2811,14 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset) skb->network_header += offset; } +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != (typeof(skb->mac_header))~0U; +} + static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { + DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); return skb->head + skb->mac_header; } @@ -2980,14 +2829,10 @@ static inline int skb_mac_offset(const struct sk_buff *skb) static inline u32 skb_mac_header_len(const struct sk_buff *skb) { + DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); return skb->network_header - skb->mac_header; } -static inline int skb_mac_header_was_set(const struct sk_buff *skb) -{ - return skb->mac_header != (typeof(skb->mac_header))~0U; -} - static inline void skb_unset_mac_header(struct sk_buff *skb) { skb->mac_header = (typeof(skb->mac_header))~0U; diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index c5a2d6f50f25..48f4b645193b 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -95,6 +95,7 @@ struct sk_psock { spinlock_t link_lock; refcount_t refcnt; void (*saved_unhash)(struct sock *sk); + void (*saved_destroy)(struct sock *sk); void (*saved_close)(struct sock *sk, long timeout); void (*saved_write_space)(struct sock *sk); void (*saved_data_ready)(struct sock *sk); @@ -277,7 +278,8 @@ static inline void sk_msg_sg_copy_clear(struct sk_msg *msg, u32 start) static inline struct sk_psock *sk_psock(const struct sock *sk) { - return rcu_dereference_sk_user_data(sk); + return __rcu_dereference_sk_user_data_with_flags(sk, + SK_USER_DATA_PSOCK); } static inline void sk_psock_set_state(struct sk_psock *psock, diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h index 7127ec301537..18d806a8e52c 100644 --- a/include/linux/soc/ti/knav_dma.h +++ b/include/linux/soc/ti/knav_dma.h @@ -1,17 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2014 Texas Instruments Incorporated * Authors: Sandeep Nair <sandeep_n@ti.com * Cyril Chemparathy <cyril@ti.com Santosh Shilimkar <santosh.shilimkar@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
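In the skbuff.h hunk above, skb_len_add() folds the usual triple update of len, data_len and truesize into one helper. A hedged sketch, assuming a fragment whose truesize contribution equals its data length; the surrounding function is hypothetical:

/* Hypothetical sketch, not part of this patch. */
#include <linux/skbuff.h>

static void foo_attach_frag(struct sk_buff *skb, int i, struct page *page,
			    int offset, int size)
{
	skb_fill_page_desc(skb, i, page, offset, size);

	/* Previously open-coded as three separate "+= size" statements. */
	skb_len_add(skb, size);
}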
*/ #ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h index c75ef99c99ca..175f466ebcc3 100644 --- a/include/linux/soc/ti/knav_qmss.h +++ b/include/linux/soc/ti/knav_qmss.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Keystone Navigator Queue Management Sub-System header * @@ -5,15 +6,6 @@ * Author: Sandeep Nair <sandeep_n@ti.com> * Cyril Chemparathy <cyril@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef __SOC_TI_KNAV_QMSS_H__ diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h index 69a8d7682c4b..543da257a5f2 100644 --- a/include/linux/soc/ti/ti-msgmgr.h +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -1,17 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Texas Instruments' Message Manager * * Copyright (C) 2015-2022 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #ifndef TI_MSGMGR_H diff --git a/include/linux/socket.h b/include/linux/socket.h index d4523974efbd..de3701a2a212 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -432,10 +432,6 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size, extern int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags, struct sockaddr __user *addr, int addr_len); -extern int __sys_accept4_file(struct file *file, unsigned file_flags, - struct sockaddr __user *upeer_sockaddr, - int __user *upeer_addrlen, int flags, - unsigned long nofile); extern struct file *do_accept(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index ea193414298b..d45902fb4cad 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -102,4 +102,12 @@ static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) return strncpy_from_user(dst, src.user, count); } +static inline int check_zeroed_sockptr(sockptr_t src, size_t offset, + size_t size) +{ + if (!sockptr_is_kernel(src)) + return check_zeroed_user(src.user + offset, size); + return memchr_inv(src.kernel + offset, 0, size) == NULL; +} + #endif /* _LINUX_SOCKPTR_H */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 76ce3f3ac0f2..39058c841469 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -637,7 +637,6 @@ struct sdw_slave_ops { * @dev: Linux device * @status: Status reported by the Slave * @bus: Bus handle - * @ops: Slave callback ops * @prop: Slave properties * @debugfs: Slave debugfs * @node: node for bus list @@ -646,9 +645,6 @@ struct sdw_slave_ops { * @dev_num: Current Device Number, values can be 0 or dev_num_sticky * @dev_num_sticky: one-time static Device Number assigned by Bus * @probed: boolean tracking driver state - * @probe_complete: completion utility to control potential races - * on startup between driver probe/initialization and SoundWire - * Slave state changes/implementation-defined interrupts * @enumeration_complete: completion utility to control potential races * on startup between device enumeration and read/write access to the * Slave device @@ -663,13 +659,13 @@ struct sdw_slave_ops { * for a Slave happens for the first time after enumeration * @is_mockup_device: status flag used to squelch errors in the command/control * protocol for SoundWire mockup devices + * @sdw_dev_lock: mutex used to protect callbacks/remove races */ struct sdw_slave { struct sdw_slave_id id; struct device dev; enum sdw_slave_status status; struct sdw_bus *bus; - const struct sdw_slave_ops *ops; struct sdw_slave_prop prop; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; @@ -680,12 +676,12 @@ struct sdw_slave { u16 dev_num; u16 dev_num_sticky; bool probed; - struct completion probe_complete; struct completion enumeration_complete; struct completion initialization_complete; u32 unattach_request; bool first_interrupt_done; bool is_mockup_device; + struct mutex sdw_dev_lock; /* protect callbacks/remove races */ }; #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 67e0d3e750b5..ec16ae49e6a4 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -9,6 +9,8 @@ #define SDW_SHIM_BASE 0x2C000 #define SDW_ALH_BASE 0x2C800 +#define SDW_SHIM_BASE_ACE 0x38000 +#define 
SDW_ALH_BASE_ACE 0x24000 #define SDW_LINK_BASE 0x30000 #define SDW_LINK_SIZE 0x10000 @@ -119,6 +121,7 @@ struct sdw_intel_ops { struct sdw_intel_stream_params_data *params_data); int (*free_stream)(struct device *dev, struct sdw_intel_stream_free_data *free_data); + int (*trigger)(struct snd_soc_dai *dai, int cmd, int stream); }; /** diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 729bcbf9f5ad..eac1956a8727 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -164,6 +164,9 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv) module_driver(__spmi_driver, spmi_driver_register, \ spmi_driver_unregister) +struct device_node; + +struct spmi_device *spmi_device_from_of(struct device_node *np); int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf); int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf, size_t len); diff --git a/include/linux/sram.h b/include/linux/sram.h index 4fb405fb0480..d7dee19505c6 100644 --- a/include/linux/sram.h +++ b/include/linux/sram.h @@ -1,15 +1,5 @@ -/* - * Generic SRAM Driver Interface - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Generic SRAM Driver Interface */ #ifndef __LINUX_SRAM_H__ #define __LINUX_SRAM_H__ diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h index 3a11fa41a131..c505f30e8b68 100644 --- a/include/linux/sungem_phy.h +++ b/include/linux/sungem_phy.h @@ -2,6 +2,8 @@ #ifndef __SUNGEM_PHY_H__ #define __SUNGEM_PHY_H__ +#include <linux/types.h> + struct mii_phy; /* Operations supported by any kind of PHY */ diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index f07c334c599f..db30a159f9d5 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -1,22 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** (c) 2008 NetApp. All Rights Reserved. -NetApp provides this source code under the GPL v2 License. -The GPL v2 license is available at -https://opensource.org/licenses/gpl-license.php. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
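check_zeroed_sockptr(), added in the sockptr.h hunk above, mirrors check_zeroed_user() for sockptr_t arguments. It is useful when an option struct may grow over time and any tail bytes beyond the known layout must be zero. A hedged sketch follows; the option struct and handler are hypothetical, and the 1/0/negative return convention is taken from check_zeroed_user():

/* Hypothetical sketch, not part of this patch. */
#include <linux/errno.h>
#include <linux/sockptr.h>
#include <linux/types.h>

struct foo_opts {
	__u32	flags;
	__u32	value;
	/* fields added later must be zero-filled by current userspace */
};

static int foo_setsockopt(sockptr_t optval, unsigned int optlen)
{
	struct foo_opts opts = {};
	int ret;

	if (optlen < sizeof(opts))
		return -EINVAL;

	if (optlen > sizeof(opts)) {
		/* 1: tail is zeroed, 0: non-zero tail, <0: fault */
		ret = check_zeroed_sockptr(optval, sizeof(opts),
					   optlen - sizeof(opts));
		if (ret < 0)
			return ret;
		if (!ret)
			return -E2BIG;
	}

	if (copy_from_sockptr(&opts, optval, sizeof(opts)))
		return -EFAULT;

	/* ... apply opts ... */
	return 0;
}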
******************************************************************************/ @@ -83,4 +69,3 @@ static inline void xprt_free_bc_request(struct rpc_rqst *req) } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #endif /* _LINUX_SUNRPC_BC_XPRT_H */ - diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 90501404fa49..75eea5ebb179 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -234,13 +234,18 @@ int rpc_clnt_setup_test_and_add_xprt(struct rpc_clnt *, struct rpc_xprt_switch *, struct rpc_xprt *, void *); +void rpc_clnt_manage_trunked_xprts(struct rpc_clnt *); +void rpc_clnt_probe_trunked_xprts(struct rpc_clnt *, + struct rpc_add_xprt_test *); const char *rpc_proc_name(const struct rpc_task *task); void rpc_clnt_xprt_switch_put(struct rpc_clnt *); void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *); +void rpc_clnt_xprt_switch_remove_xprt(struct rpc_clnt *, struct rpc_xprt *); bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt, const struct sockaddr *sap); +void rpc_clnt_xprt_set_online(struct rpc_clnt *clnt, struct rpc_xprt *xprt); void rpc_cleanup_clids(void); static inline int rpc_reply_expected(struct rpc_task *task) diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1d7a3e51b795..acc62647317c 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -61,8 +61,6 @@ struct rpc_task { struct rpc_wait tk_wait; /* RPC wait */ } u; - int tk_rpc_status; /* Result of last RPC operation */ - /* * RPC call state */ @@ -82,6 +80,8 @@ struct rpc_task { ktime_t tk_start; /* RPC task init timestamp */ pid_t tk_owner; /* Process id for batching tasks */ + + int tk_rpc_status; /* Result of last RPC operation */ unsigned short tk_flags; /* misc flags */ unsigned short tk_timeouts; /* maj timeouts */ diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 5860f32e3958..69175029abbb 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -258,10 +258,13 @@ extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes); extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len); extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len); extern int xdr_process_buf(const struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data); -extern unsigned int xdr_align_data(struct xdr_stream *, unsigned int offset, unsigned int length); -extern unsigned int xdr_expand_hole(struct xdr_stream *, unsigned int offset, unsigned int length); +extern void xdr_set_pagelen(struct xdr_stream *, unsigned int len); extern bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf, unsigned int len); +extern unsigned int xdr_stream_move_subsegment(struct xdr_stream *xdr, unsigned int offset, + unsigned int target, unsigned int length); +extern unsigned int xdr_stream_zero(struct xdr_stream *xdr, unsigned int offset, + unsigned int length); /** * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data. @@ -419,8 +422,8 @@ static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr) */ static inline __be32 *xdr_encode_bool(__be32 *p, u32 n) { - *p = n ? xdr_one : xdr_zero; - return p++; + *p++ = n ? 
xdr_one : xdr_zero; + return p; } /** diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 522bbf937957..b9f59aabee53 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -144,7 +144,8 @@ struct rpc_xprt_ops { unsigned short (*get_srcport)(struct rpc_xprt *xprt); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); - int (*prepare_request)(struct rpc_rqst *req); + int (*prepare_request)(struct rpc_rqst *req, + struct xdr_buf *buf); int (*send_request)(struct rpc_rqst *req); void (*wait_for_reply_request)(struct rpc_task *task); void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); @@ -505,4 +506,7 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) return test_and_set_bit(XPRT_BINDING, &xprt->state); } +void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps); +void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps); +void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps); #endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index bbb8a5fa0816..c0514c684b2c 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -55,7 +55,7 @@ extern void rpc_xprt_switch_set_roundrobin(struct rpc_xprt_switch *xps); extern void rpc_xprt_switch_add_xprt(struct rpc_xprt_switch *xps, struct rpc_xprt *xprt); extern void rpc_xprt_switch_remove_xprt(struct rpc_xprt_switch *xps, - struct rpc_xprt *xprt); + struct rpc_xprt *xprt, bool offline); extern void xprt_iter_init(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps); @@ -63,8 +63,13 @@ extern void xprt_iter_init(struct rpc_xprt_iter *xpi, extern void xprt_iter_init_listall(struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *xps); +extern void xprt_iter_init_listoffline(struct rpc_xprt_iter *xpi, + struct rpc_xprt_switch *xps); + extern void xprt_iter_destroy(struct rpc_xprt_iter *xpi); +extern void xprt_iter_rewind(struct rpc_xprt_iter *xpi); + extern struct rpc_xprt_switch *xprt_iter_xchg_switch( struct rpc_xprt_iter *xpi, struct rpc_xprt_switch *newswitch); diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index 74bfdffaf7b0..d11a1c6e3186 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -470,6 +470,67 @@ struct ssam_request_spec_md { } /** + * SSAM_DEFINE_SYNC_REQUEST_WR() - Define synchronous SAM request function with + * both argument and return value. + * @name: Name of the generated function. + * @atype: Type of the request's argument. + * @rtype: Type of the request's return value. + * @spec: Specification (&struct ssam_request_spec) defining the request. + * + * Defines a function executing the synchronous SAM request specified by @spec, + * with the request taking an argument of type @atype and having a return value + * of type @rtype. The generated function takes care of setting up the request + * and response structs, buffer allocation, as well as execution of the request + * itself, returning once the request has been fully completed. The required + * transport buffer will be allocated on the stack. + * + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, const atype *arg, rtype *ret)``, returning the status + * of the request, which is zero on success and negative on failure. 
The + * ``ctrl`` parameter is the controller via which the request is sent. The + * request argument is specified via the ``arg`` pointer. The request's return + * value is written to the memory pointed to by the ``ret`` parameter. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_WR(name, atype, rtype, spec...) \ + static int name(struct ssam_controller *ctrl, const atype *arg, rtype *ret) \ + { \ + struct ssam_request_spec s = (struct ssam_request_spec)spec; \ + struct ssam_request rqst; \ + struct ssam_response rsp; \ + int status; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = s.target_id; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = s.instance_id; \ + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \ + rqst.length = sizeof(atype); \ + rqst.payload = (u8 *)arg; \ + \ + rsp.capacity = sizeof(rtype); \ + rsp.length = 0; \ + rsp.pointer = (u8 *)ret; \ + \ + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \ + if (status) \ + return status; \ + \ + if (rsp.length != sizeof(rtype)) { \ + struct device *dev = ssam_controller_device(ctrl); \ + dev_err(dev, \ + "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \ + sizeof(rtype), rsp.length, rqst.target_category,\ + rqst.command_id); \ + return -EIO; \ + } \ + \ + return 0; \ + } + +/** * SSAM_DEFINE_SYNC_REQUEST_MD_N() - Define synchronous multi-device SAM * request function with neither argument nor return value. * @name: Name of the generated function. @@ -613,6 +674,70 @@ struct ssam_request_spec_md { return 0; \ } +/** + * SSAM_DEFINE_SYNC_REQUEST_MD_WR() - Define synchronous multi-device SAM + * request function with both argument and return value. + * @name: Name of the generated function. + * @atype: Type of the request's argument. + * @rtype: Type of the request's return value. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by @spec, + * with the request taking an argument of type @atype and having a return value + * of type @rtype. Device specifying parameters are not hard-coded, but instead + * must be provided to the function. The generated function takes care of + * setting up the request and response structs, buffer allocation, as well as + * execution of the request itself, returning once the request has been fully + * completed. The required transport buffer will be allocated on the stack. + * + * The generated function is defined as ``static int name(struct + * ssam_controller *ctrl, u8 tid, u8 iid, const atype *arg, rtype *ret)``, + * returning the status of the request, which is zero on success and negative + * on failure. The ``ctrl`` parameter is the controller via which the request + * is sent, ``tid`` the target ID for the request, and ``iid`` the instance ID. + * The request argument is specified via the ``arg`` pointer. The request's + * return value is written to the memory pointed to by the ``ret`` parameter. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_MD_WR(name, atype, rtype, spec...) 
\ + static int name(struct ssam_controller *ctrl, u8 tid, u8 iid, \ + const atype *arg, rtype *ret) \ + { \ + struct ssam_request_spec_md s = (struct ssam_request_spec_md)spec; \ + struct ssam_request rqst; \ + struct ssam_response rsp; \ + int status; \ + \ + rqst.target_category = s.target_category; \ + rqst.target_id = tid; \ + rqst.command_id = s.command_id; \ + rqst.instance_id = iid; \ + rqst.flags = s.flags | SSAM_REQUEST_HAS_RESPONSE; \ + rqst.length = sizeof(atype); \ + rqst.payload = (u8 *)arg; \ + \ + rsp.capacity = sizeof(rtype); \ + rsp.length = 0; \ + rsp.pointer = (u8 *)ret; \ + \ + status = ssam_request_sync_onstack(ctrl, &rqst, &rsp, sizeof(atype)); \ + if (status) \ + return status; \ + \ + if (rsp.length != sizeof(rtype)) { \ + struct device *dev = ssam_controller_device(ctrl); \ + dev_err(dev, \ + "rqst: invalid response length, expected %zu, got %zu (tc: %#04x, cid: %#04x)", \ + sizeof(rtype), rsp.length, rqst.target_category,\ + rqst.command_id); \ + return -EIO; \ + } \ + \ + return 0; \ + } + /* -- Event notifier/callbacks. --------------------------------------------- */ @@ -835,8 +960,28 @@ struct ssam_event_notifier { int ssam_notifier_register(struct ssam_controller *ctrl, struct ssam_event_notifier *n); -int ssam_notifier_unregister(struct ssam_controller *ctrl, - struct ssam_event_notifier *n); +int __ssam_notifier_unregister(struct ssam_controller *ctrl, + struct ssam_event_notifier *n, bool disable); + +/** + * ssam_notifier_unregister() - Unregister an event notifier. + * @ctrl: The controller the notifier has been registered on. + * @n: The event notifier to unregister. + * + * Unregister an event notifier. Decrement the usage counter of the associated + * SAM event if the notifier is not marked as an observer. If the usage counter + * reaches zero, the event will be disabled. + * + * Return: Returns zero on success, %-ENOENT if the given notifier block has + * not been registered on the controller. If the given notifier block was the + * last one associated with its specific event, returns the status of the + * event-disable EC-command. + */ +static inline int ssam_notifier_unregister(struct ssam_controller *ctrl, + struct ssam_event_notifier *n) +{ + return __ssam_notifier_unregister(ctrl, n, true); +} int ssam_controller_event_enable(struct ssam_controller *ctrl, struct ssam_event_registry reg, diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h index cc257097eb05..46c45d1b6368 100644 --- a/include/linux/surface_aggregator/device.h +++ b/include/linux/surface_aggregator/device.h @@ -15,6 +15,7 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> +#include <linux/property.h> #include <linux/types.h> #include <linux/surface_aggregator/controller.h> @@ -148,17 +149,30 @@ struct ssam_device_uid { #define SSAM_SDEV(cat, tid, iid, fun) \ SSAM_DEVICE(SSAM_DOMAIN_SERIALHUB, SSAM_SSH_TC_##cat, tid, iid, fun) +/* + * enum ssam_device_flags - Flags for SSAM client devices. + * @SSAM_DEVICE_HOT_REMOVED_BIT: + * The device has been hot-removed. Further communication with it may time + * out and should be avoided. + */ +enum ssam_device_flags { + SSAM_DEVICE_HOT_REMOVED_BIT = 0, +}; + /** * struct ssam_device - SSAM client device. - * @dev: Driver model representation of the device. - * @ctrl: SSAM controller managing this device. - * @uid: UID identifying the device. + * @dev: Driver model representation of the device. + * @ctrl: SSAM controller managing this device. + * @uid: UID identifying the device. 
+ * @flags: Device state flags, see &enum ssam_device_flags. */ struct ssam_device { struct device dev; struct ssam_controller *ctrl; struct ssam_device_uid uid; + + unsigned long flags; }; /** @@ -177,6 +191,8 @@ struct ssam_device_driver { void (*remove)(struct ssam_device *sdev); }; +#ifdef CONFIG_SURFACE_AGGREGATOR_BUS + extern struct bus_type ssam_bus_type; extern const struct device_type ssam_device_type; @@ -193,6 +209,15 @@ static inline bool is_ssam_device(struct device *d) return d->type == &ssam_device_type; } +#else /* CONFIG_SURFACE_AGGREGATOR_BUS */ + +static inline bool is_ssam_device(struct device *d) +{ + return false; +} + +#endif /* CONFIG_SURFACE_AGGREGATOR_BUS */ + /** * to_ssam_device() - Casts the given device to a SSAM client device. * @d: The device to cast. @@ -241,6 +266,35 @@ int ssam_device_add(struct ssam_device *sdev); void ssam_device_remove(struct ssam_device *sdev); /** + * ssam_device_mark_hot_removed() - Mark the given device as hot-removed. + * @sdev: The device to mark as hot-removed. + * + * Mark the device as having been hot-removed. This signals drivers using the + * device that communication with the device should be avoided and may lead to + * timeouts. + */ +static inline void ssam_device_mark_hot_removed(struct ssam_device *sdev) +{ + dev_dbg(&sdev->dev, "marking device as hot-removed\n"); + set_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags); +} + +/** + * ssam_device_is_hot_removed() - Check if the given device has been + * hot-removed. + * @sdev: The device to check. + * + * Checks if the given device has been marked as hot-removed. See + * ssam_device_mark_hot_removed() for more details. + * + * Return: Returns ``true`` if the device has been marked as hot-removed. + */ +static inline bool ssam_device_is_hot_removed(struct ssam_device *sdev) +{ + return test_bit(SSAM_DEVICE_HOT_REMOVED_BIT, &sdev->flags); +} + +/** * ssam_device_get() - Increment reference count of SSAM client device. * @sdev: The device to increment the reference count of. * @@ -322,11 +376,62 @@ void ssam_device_driver_unregister(struct ssam_device_driver *d); /* -- Helpers for controller and hub devices. ------------------------------- */ #ifdef CONFIG_SURFACE_AGGREGATOR_BUS + +int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl, + struct fwnode_handle *node); void ssam_remove_clients(struct device *dev); + #else /* CONFIG_SURFACE_AGGREGATOR_BUS */ + +static inline int __ssam_register_clients(struct device *parent, struct ssam_controller *ctrl, + struct fwnode_handle *node) +{ + return 0; +} + static inline void ssam_remove_clients(struct device *dev) {} + #endif /* CONFIG_SURFACE_AGGREGATOR_BUS */ +/** + * ssam_register_clients() - Register all client devices defined under the + * given parent device. + * @dev: The parent device under which clients should be registered. + * @ctrl: The controller with which client should be registered. + * + * Register all clients that have via firmware nodes been defined as children + * of the given (parent) device. The respective child firmware nodes will be + * associated with the correspondingly created child devices. + * + * The given controller will be used to instantiate the new devices. See + * ssam_device_add() for details. + * + * Return: Returns zero on success, nonzero on failure. 
+ */ +static inline int ssam_register_clients(struct device *dev, struct ssam_controller *ctrl) +{ + return __ssam_register_clients(dev, ctrl, dev_fwnode(dev)); +} + +/** + * ssam_device_register_clients() - Register all client devices defined under + * the given SSAM parent device. + * @sdev: The parent device under which clients should be registered. + * + * Register all clients that have via firmware nodes been defined as children + * of the given (parent) device. The respective child firmware nodes will be + * associated with the correspondingly created child devices. + * + * The controller used by the parent device will be used to instantiate the new + * devices. See ssam_device_add() for details. + * + * Return: Returns zero on success, nonzero on failure. + */ +static inline int ssam_device_register_clients(struct ssam_device *sdev) +{ + return ssam_register_clients(&sdev->dev, sdev->ctrl); +} + /* -- Helpers for client-device requests. ----------------------------------- */ @@ -430,4 +535,106 @@ static inline void ssam_remove_clients(struct device *dev) {} sdev->uid.instance, ret); \ } +/** + * SSAM_DEFINE_SYNC_REQUEST_CL_WR() - Define synchronous client-device SAM + * request function with argument and return value. + * @name: Name of the generated function. + * @atype: Type of the request's argument. + * @rtype: Type of the request's return value. + * @spec: Specification (&struct ssam_request_spec_md) defining the request. + * + * Defines a function executing the synchronous SAM request specified by @spec, + * with the request taking an argument of type @atype and having a return value + * of type @rtype. Device specifying parameters are not hard-coded, but instead + * are provided via the client device, specifically its UID, supplied when + * calling this function. The generated function takes care of setting up the + * request struct, buffer allocation, as well as execution of the request + * itself, returning once the request has been fully completed. The required + * transport buffer will be allocated on the stack. + * + * The generated function is defined as ``static int name(struct ssam_device + * *sdev, const atype *arg, rtype *ret)``, returning the status of the request, + * which is zero on success and negative on failure. The ``sdev`` parameter + * specifies both the target device of the request and by association the + * controller via which the request is sent. The request's argument is + * specified via the ``arg`` pointer. The request's return value is written to + * the memory pointed to by the ``ret`` parameter. + * + * Refer to ssam_request_sync_onstack() for more details on the behavior of + * the generated function. + */ +#define SSAM_DEFINE_SYNC_REQUEST_CL_WR(name, atype, rtype, spec...) \ + SSAM_DEFINE_SYNC_REQUEST_MD_WR(__raw_##name, atype, rtype, spec) \ + static int name(struct ssam_device *sdev, const atype *arg, rtype *ret) \ + { \ + return __raw_##name(sdev->ctrl, sdev->uid.target, \ + sdev->uid.instance, arg, ret); \ + } + + +/* -- Helpers for client-device notifiers. ---------------------------------- */ + +/** + * ssam_device_notifier_register() - Register an event notifier for the + * specified client device. + * @sdev: The device the notifier should be registered on. + * @n: The event notifier to register. + * + * Register an event notifier. Increment the usage counter of the associated + * SAM event if the notifier is not marked as an observer. 
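The client-device macro defined above, SSAM_DEFINE_SYNC_REQUEST_CL_WR(), generates a request function that both sends a payload and decodes a fixed-size response, with target and instance IDs taken from the device UID. A hedged usage sketch follows; the command ID, argument type and return type are invented for illustration and are not part of this patch:

/* Hypothetical sketch; the command ID and types are made up. */
#include <linux/surface_aggregator/device.h>

SSAM_DEFINE_SYNC_REQUEST_CL_WR(foo_get_state, u8, __le32, {
	.target_category = SSAM_SSH_TC_SAM,
	.command_id      = 0x42,
});

static int foo_query(struct ssam_device *sdev)
{
	u8 selector = 1;
	__le32 raw;
	int status;

	status = foo_get_state(sdev, &selector, &raw);
	if (status)
		return status;

	dev_info(&sdev->dev, "state: %u\n", le32_to_cpu(raw));
	return 0;
}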
If the event is not + * marked as an observer and is currently not enabled, it will be enabled + * during this call. If the notifier is marked as an observer, no attempt will + * be made at enabling any event and no reference count will be modified. + * + * Notifiers marked as observers do not need to be associated with one specific + * event, i.e. as long as no event matching is performed, only the event target + * category needs to be set. + * + * Return: Returns zero on success, %-ENOSPC if there have already been + * %INT_MAX notifiers for the event ID/type associated with the notifier block + * registered, %-ENOMEM if the corresponding event entry could not be + * allocated, %-ENODEV if the device is marked as hot-removed. If this is the + * first time that a notifier block is registered for the specific associated + * event, returns the status of the event-enable EC-command. + */ +static inline int ssam_device_notifier_register(struct ssam_device *sdev, + struct ssam_event_notifier *n) +{ + /* + * Note that this check does not provide any guarantees whatsoever as + * hot-removal could happen at any point and we can't protect against + * it. Nevertheless, if we can detect hot-removal, bail early to avoid + * communication timeouts. + */ + if (ssam_device_is_hot_removed(sdev)) + return -ENODEV; + + return ssam_notifier_register(sdev->ctrl, n); +} + +/** + * ssam_device_notifier_unregister() - Unregister an event notifier for the + * specified client device. + * @sdev: The device the notifier has been registered on. + * @n: The event notifier to unregister. + * + * Unregister an event notifier. Decrement the usage counter of the associated + * SAM event if the notifier is not marked as an observer. If the usage counter + * reaches zero, the event will be disabled. + * + * In case the device has been marked as hot-removed, the event will not be + * disabled on the EC, as in those cases any attempt at doing so may time out. + * + * Return: Returns zero on success, %-ENOENT if the given notifier block has + * not been registered on the controller. If the given notifier block was the + * last one associated with its specific event, returns the status of the + * event-disable EC-command. + */ +static inline int ssam_device_notifier_unregister(struct ssam_device *sdev, + struct ssam_event_notifier *n) +{ + return __ssam_notifier_unregister(sdev->ctrl, n, + !ssam_device_is_hot_removed(sdev)); +} + #endif /* _LINUX_SURFACE_AGGREGATOR_DEVICE_H */ diff --git a/include/linux/surface_aggregator/serial_hub.h b/include/linux/surface_aggregator/serial_hub.h index c3de43edcffa..45501b6e54e8 100644 --- a/include/linux/surface_aggregator/serial_hub.h +++ b/include/linux/surface_aggregator/serial_hub.h @@ -201,7 +201,7 @@ static inline u16 ssh_crc(const u8 *buf, size_t len) * exception of zero, which is not an event ID. Thus, this is also the * absolute maximum number of event handlers that can be registered. */ -#define SSH_NUM_EVENTS 34 +#define SSH_NUM_EVENTS 38 /* * SSH_NUM_TARGETS - The number of communication targets used in the protocol. @@ -292,40 +292,45 @@ struct ssam_span { * Windows driver. */ enum ssam_ssh_tc { - /* Category 0x00 is invalid for EC use. */ - SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */ - SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */ - SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. 
*/ - SSAM_SSH_TC_PMC = 0x04, - SSAM_SSH_TC_FAN = 0x05, - SSAM_SSH_TC_PoM = 0x06, - SSAM_SSH_TC_DBG = 0x07, - SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */ - SSAM_SSH_TC_FWU = 0x09, - SSAM_SSH_TC_UNI = 0x0a, - SSAM_SSH_TC_LPC = 0x0b, - SSAM_SSH_TC_TCL = 0x0c, - SSAM_SSH_TC_SFL = 0x0d, - SSAM_SSH_TC_KIP = 0x0e, - SSAM_SSH_TC_EXT = 0x0f, - SSAM_SSH_TC_BLD = 0x10, - SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */ - SSAM_SSH_TC_SEN = 0x12, - SSAM_SSH_TC_SRQ = 0x13, - SSAM_SSH_TC_MCU = 0x14, - SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */ - SSAM_SSH_TC_TCH = 0x16, - SSAM_SSH_TC_BKL = 0x17, - SSAM_SSH_TC_TAM = 0x18, - SSAM_SSH_TC_ACC = 0x19, - SSAM_SSH_TC_UFI = 0x1a, - SSAM_SSH_TC_USC = 0x1b, - SSAM_SSH_TC_PEN = 0x1c, - SSAM_SSH_TC_VID = 0x1d, - SSAM_SSH_TC_AUD = 0x1e, - SSAM_SSH_TC_SMC = 0x1f, - SSAM_SSH_TC_KPD = 0x20, - SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */ + /* Category 0x00 is invalid for EC use. */ + SSAM_SSH_TC_SAM = 0x01, /* Generic system functionality, real-time clock. */ + SSAM_SSH_TC_BAT = 0x02, /* Battery/power subsystem. */ + SSAM_SSH_TC_TMP = 0x03, /* Thermal subsystem. */ + SSAM_SSH_TC_PMC = 0x04, + SSAM_SSH_TC_FAN = 0x05, + SSAM_SSH_TC_PoM = 0x06, + SSAM_SSH_TC_DBG = 0x07, + SSAM_SSH_TC_KBD = 0x08, /* Legacy keyboard (Laptop 1/2). */ + SSAM_SSH_TC_FWU = 0x09, + SSAM_SSH_TC_UNI = 0x0a, + SSAM_SSH_TC_LPC = 0x0b, + SSAM_SSH_TC_TCL = 0x0c, + SSAM_SSH_TC_SFL = 0x0d, + SSAM_SSH_TC_KIP = 0x0e, /* Manages detachable peripherals (Pro X/8 keyboard cover) */ + SSAM_SSH_TC_EXT = 0x0f, + SSAM_SSH_TC_BLD = 0x10, + SSAM_SSH_TC_BAS = 0x11, /* Detachment system (Surface Book 2/3). */ + SSAM_SSH_TC_SEN = 0x12, + SSAM_SSH_TC_SRQ = 0x13, + SSAM_SSH_TC_MCU = 0x14, + SSAM_SSH_TC_HID = 0x15, /* Generic HID input subsystem. */ + SSAM_SSH_TC_TCH = 0x16, + SSAM_SSH_TC_BKL = 0x17, + SSAM_SSH_TC_TAM = 0x18, + SSAM_SSH_TC_ACC0 = 0x19, + SSAM_SSH_TC_UFI = 0x1a, + SSAM_SSH_TC_USC = 0x1b, + SSAM_SSH_TC_PEN = 0x1c, + SSAM_SSH_TC_VID = 0x1d, + SSAM_SSH_TC_AUD = 0x1e, + SSAM_SSH_TC_SMC = 0x1f, + SSAM_SSH_TC_KPD = 0x20, + SSAM_SSH_TC_REG = 0x21, /* Extended event registry. */ + SSAM_SSH_TC_SPT = 0x22, + SSAM_SSH_TC_SYS = 0x23, + SSAM_SSH_TC_ACC1 = 0x24, + SSAM_SSH_TC_SHB = 0x25, + SSAM_SSH_TC_POS = 0x26, /* For obtaining Laptop Studio screen position. */ }; diff --git a/include/linux/swap.h b/include/linux/swap.h index 0c0fed1b348f..43150b9bbc5c 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -74,7 +74,7 @@ static inline int current_is_kswapd(void) /* * Unaddressable device memory support. See include/linux/hmm.h and - * Documentation/vm/hmm.rst. Short description is we need struct pages for + * Documentation/mm/hmm.rst. Short description is we need struct pages for * device memory that is unaddressable (inaccessible) by CPU, so that we can * migrate part of a process memory to device memory. 
* @@ -411,10 +411,13 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page, extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); + +#define MEMCG_RECLAIM_MAY_SWAP (1 << 1) +#define MEMCG_RECLAIM_PROACTIVE (1 << 2) extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, unsigned long nr_pages, gfp_t gfp_mask, - bool may_swap); + unsigned int reclaim_options); extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, @@ -438,7 +441,8 @@ static inline bool node_reclaim_enabled(void) return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP); } -extern void check_move_unevictable_pages(struct pagevec *pvec); +void check_move_unevictable_folios(struct folio_batch *fbatch); +void check_move_unevictable_pages(struct pagevec *pvec); extern void kswapd_run(int nid); extern void kswapd_stop(int nid); @@ -455,6 +459,7 @@ static inline unsigned long total_swapcache_pages(void) return global_node_page_state(NR_SWAPCACHE); } +extern void free_swap_cache(struct page *page); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); /* linux/mm/swapfile.c */ @@ -539,6 +544,10 @@ static inline void put_swap_device(struct swap_info_struct *si) /* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ #define free_swap_and_cache(e) is_pfn_swap_entry(e) +static inline void free_swap_cache(struct page *page) +{ +} + static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) { return 0; diff --git a/include/linux/swapops.h b/include/linux/swapops.h index f24775b41880..a3d435bf9f97 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -244,8 +244,10 @@ extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, spinlock_t *ptl); extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -extern void migration_entry_wait_huge(struct vm_area_struct *vma, - struct mm_struct *mm, pte_t *pte); +#ifdef CONFIG_HUGETLB_PAGE +extern void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl); +extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte); +#endif #else static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { @@ -271,8 +273,10 @@ static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, spinlock_t *ptl) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } -static inline void migration_entry_wait_huge(struct vm_area_struct *vma, - struct mm_struct *mm, pte_t *pte) { } +#ifdef CONFIG_HUGETLB_PAGE +static inline void __migration_entry_wait_huge(pte_t *ptep, spinlock_t *ptl) { } +static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { } +#endif static inline int is_writable_migration_entry(swp_entry_t entry) { return 0; @@ -486,6 +490,11 @@ static inline void num_poisoned_pages_dec(void) atomic_long_dec(&num_poisoned_pages); } +static inline void num_poisoned_pages_sub(long i) +{ + atomic_long_sub(i, &num_poisoned_pages); +} + #else static inline swp_entry_t make_hwpoison_entry(struct page *page) @@ -501,6 +510,10 @@ static inline int is_hwpoison_entry(swp_entry_t swp) static inline void num_poisoned_pages_inc(void) { } + +static inline void num_poisoned_pages_sub(long i) +{ +} #endif static inline int 
non_swap_entry(swp_entry_t entry) diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 7ed35dd3de6e..35bc4e281c21 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -60,7 +60,6 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs); #ifdef CONFIG_SWIOTLB -extern enum swiotlb_force swiotlb_force; /** * struct io_tlb_mem - IO TLB Memory Pool Descriptor @@ -80,15 +79,14 @@ extern enum swiotlb_force swiotlb_force; * @used: The number of used IO TLB block. * @list: The free list describing the number of free entries available * from each index. - * @index: The index to start searching in the next round. * @orig_addr: The original address corresponding to a mapped entry. * @alloc_size: Size of the allocated buffer. - * @lock: The lock to protect the above data structures in the map and - * unmap calls. * @debugfs: The dentry to debugfs. * @late_alloc: %true if allocated using the page allocator * @force_bounce: %true if swiotlb bouncing is forced * @for_alloc: %true if the pool is used for memory allocation + * @nareas: The area number in the pool. + * @area_nslabs: The slot number in the area. */ struct io_tlb_mem { phys_addr_t start; @@ -96,17 +94,14 @@ struct io_tlb_mem { void *vaddr; unsigned long nslabs; unsigned long used; - unsigned int index; - spinlock_t lock; struct dentry *debugfs; bool late_alloc; bool force_bounce; bool for_alloc; - struct io_tlb_slot { - phys_addr_t orig_addr; - size_t alloc_size; - unsigned int list; - } *slots; + unsigned int nareas; + unsigned int area_nslabs; + struct io_tlb_area *areas; + struct io_tlb_slot *slots; }; extern struct io_tlb_mem io_tlb_default_mem; diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 80263f7cdb77..780690dc08cd 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -75,6 +75,8 @@ int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, int proc_dou8vec_minmax(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_ms_jiffies_minmax(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *, @@ -266,6 +268,10 @@ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * return NULL; } +static inline void register_sysctl_init(const char *path, struct ctl_table *table) +{ +} + static inline struct ctl_table_header *register_sysctl_mount_point(const char *path) { return NULL; diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index e3f1e8ac1f85..fd3fe5c8c17f 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -235,6 +235,22 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) #define BIN_ATTR_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) + +#define __BIN_ATTR_ADMIN_RO(_name, _size) { \ + .attr = { .name = __stringify(_name), .mode = 0400 }, \ + .read = _name##_read, \ + .size = _size, \ +} + +#define __BIN_ATTR_ADMIN_RW(_name, _size) \ + __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size) + +#define BIN_ATTR_ADMIN_RO(_name, _size) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size) + +#define BIN_ATTR_ADMIN_RW(_name, _size) \ +struct bin_attribute 
bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size) + struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); diff --git a/include/linux/tboot.h b/include/linux/tboot.h index 5146d2574e85..d2279160ef39 100644 --- a/include/linux/tboot.h +++ b/include/linux/tboot.h @@ -126,7 +126,6 @@ extern void tboot_probe(void); extern void tboot_shutdown(u32 shutdown_type); extern struct acpi_table_header *tboot_get_dmar_table( struct acpi_table_header *dmar_tbl); -extern int tboot_force_iommu(void); #else @@ -136,7 +135,6 @@ extern int tboot_force_iommu(void); #define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \ do { } while (0) #define tboot_get_dmar_table(dmar_tbl) (dmar_tbl) -#define tboot_force_iommu() 0 #endif /* !CONFIG_INTEL_TXT */ diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 1168302b7927..a9fbe22732c3 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -46,6 +46,36 @@ static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb) return inner_tcp_hdr(skb)->doff * 4; } +/** + * skb_tcp_all_headers - Returns size of all headers for a TCP packet + * @skb: buffer + * + * Used in TX path, for a packet known to be a TCP one. + * + * if (skb_is_gso(skb)) { + * int hlen = skb_tcp_all_headers(skb); + * ... + */ +static inline int skb_tcp_all_headers(const struct sk_buff *skb) +{ + return skb_transport_offset(skb) + tcp_hdrlen(skb); +} + +/** + * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet + * @skb: buffer + * + * Used in TX path, for a packet known to be a TCP one. + * + * if (skb_is_gso(skb) && skb->encapsulation) { + * int hlen = skb_inner_tcp_all_headers(skb); + * ... + */ +static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb) +{ + return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); +} + static inline unsigned int tcp_optlen(const struct sk_buff *skb) { return (tcp_hdr(skb)->doff - 5) * 4; diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h index 2fc854155c27..441b2988e66a 100644 --- a/include/linux/ti-emif-sram.h +++ b/include/linux/ti-emif-sram.h @@ -1,17 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI AM33XX EMIF Routines * * Copyright (C) 2016-2017 Texas Instruments Inc. * Dave Gerlach - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
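Stepping back to the BIN_ATTR_ADMIN_RO()/BIN_ATTR_ADMIN_RW() helpers added to include/linux/sysfs.h above, a brief hypothetical usage sketch (attribute name, size and read handler are invented): the macro wires the attribute to a <name>_read() callback and restricts the mode to root-only access.

#include <linux/kobject.h>
#include <linux/sysfs.h>

#define SAMPLE_BLOB_SIZE 256

/* The handler name must be <attribute name>_read for the macro to pick it up. */
static ssize_t blob_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *attr, char *buf,
			 loff_t off, size_t count)
{
	/* Copy up to 'count' bytes of device data into 'buf' starting at 'off'. */
	return 0;
}

/* Declares 'struct bin_attribute bin_attr_blob' with mode 0400. */
static BIN_ATTR_ADMIN_RO(blob, SAMPLE_BLOB_SIZE);

static struct bin_attribute *sample_bin_attrs[] = {
	&bin_attr_blob,
	NULL,
};

static const struct attribute_group sample_group = {
	.bin_attrs = sample_bin_attrs,
};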
*/ #ifndef __LINUX_TI_EMIF_H #define __LINUX_TI_EMIF_H diff --git a/include/linux/time64.h b/include/linux/time64.h index 81b9686a2079..2fb8232cff1d 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -20,6 +20,9 @@ struct itimerspec64 { struct timespec64 it_value; }; +/* Parameters used to convert the timespec values: */ +#define PSEC_PER_NSEC 1000L + /* Located here for timespec[64]_valid_strict */ #define TIME64_MAX ((s64)~((u64)1 << 63)) #define TIME64_MIN (-TIME64_MAX - 1) diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 739ba9a03ec1..20c0ff54b7a0 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -157,7 +157,7 @@ struct tcg_algorithm_info { * Return: size of the event on success, 0 on failure */ -static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, +static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, struct tcg_pcr_event *event_header, bool do_mapping) { diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index e6e95a9f07a5..b18759a673c6 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -916,6 +916,24 @@ perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type, #endif +#define TRACE_EVENT_STR_MAX 512 + +/* + * gcc warns that you can not use a va_list in an inlined + * function. But lets me make it into a macro :-/ + */ +#define __trace_event_vstr_len(fmt, va) \ +({ \ + va_list __ap; \ + int __ret; \ + \ + va_copy(__ap, *(va)); \ + __ret = vsnprintf(NULL, 0, fmt, __ap) + 1; \ + va_end(__ap); \ + \ + min(__ret, TRACE_EVENT_STR_MAX); \ +}) + #endif /* _LINUX_TRACE_EVENT_H */ /* diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 55717a2eda08..4b33b95eb8be 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -151,7 +151,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) /* * Individual subsystem my have a separate configuration to * enable their tracepoints. By default, this file will create - * the tracepoints if CONFIG_TRACEPOINT is defined. If a subsystem + * the tracepoints if CONFIG_TRACEPOINTS is defined. If a subsystem * wants to be able to disable its tracepoints from being created * it can define NOTRACE before including the tracepoint headers. */ diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h index 3b9d77604291..1796648c2907 100644 --- a/include/linux/tty_buffer.h +++ b/include/linux/tty_buffer.h @@ -15,6 +15,7 @@ struct tty_buffer { int used; int size; int commit; + int lookahead; /* Lazy update on recv, can become less than "read" */ int read; int flags; /* Data points here */ diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index e85002b56752..ede6f2157f32 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -186,6 +186,18 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, * indicate all data received is %TTY_NORMAL. If assigned, prefer this * function for automatic flow control. * + * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty, + * const unsigned char *cp, const char *fp, int count)`` + * + * This function is called by the low-level tty driver for characters + * not eaten by ->receive_buf() or ->receive_buf2(). It is useful for + * processing high-priority characters such as software flow-control + * characters that could otherwise get stuck into the intermediate + * buffer until tty has room to receive them. 
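To make the lookahead flow just described concrete, here is a hedged sketch of how a line discipline might implement the hook for software flow control; the ldisc name is invented and the actual transmitter start/stop actions are left as driver-specific comments.

#include <linux/tty.h>

static void sample_ldisc_lookahead_buf(struct tty_struct *tty,
				       const unsigned char *cp,
				       const unsigned char *fp,
				       unsigned int count)
{
	unsigned int i;

	if (!I_IXON(tty))
		return;

	for (i = 0; i < count; i++) {
		/* Ignore bytes the driver has flagged as errors, if any. */
		if (fp && fp[i] != TTY_NORMAL)
			continue;

		if (cp[i] == STOP_CHAR(tty)) {
			/* Pause the transmitter right away (driver specific). */
		} else if (cp[i] == START_CHAR(tty)) {
			/* Resume the transmitter (driver specific). */
		}
	}
}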
Ldisc must be able to + * handle later a ->receive_buf() or ->receive_buf2() call for the + * same characters (e.g. by skipping the actions for high-priority + * characters already handled by ->lookahead_buf()). + * * @owner: module containting this ldisc (for reference counting) * * This structure defines the interface between the tty line discipline @@ -229,6 +241,8 @@ struct tty_ldisc_ops { void (*dcd_change)(struct tty_struct *tty, unsigned int status); int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp, const char *fp, int count); + void (*lookahead_buf)(struct tty_struct *tty, const unsigned char *cp, + const unsigned char *fp, unsigned int count); struct module *owner; }; diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h index 58e9619116b7..fa3c3bdaa234 100644 --- a/include/linux/tty_port.h +++ b/include/linux/tty_port.h @@ -40,6 +40,8 @@ struct tty_port_operations { struct tty_port_client_operations { int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); + void (*lookahead_buf)(struct tty_port *port, const unsigned char *cp, + const unsigned char *fp, unsigned int count); void (*write_wakeup)(struct tty_port *port); }; diff --git a/include/linux/uacce.h b/include/linux/uacce.h index 48e319f40275..9ce88c28b0a8 100644 --- a/include/linux/uacce.h +++ b/include/linux/uacce.h @@ -70,6 +70,7 @@ enum uacce_q_state { * @wait: wait queue head * @list: index into uacce queues list * @qfrs: pointer of qfr regions + * @mutex: protects queue state * @state: queue state machine * @pasid: pasid associated to the mm * @handle: iommu_sva handle returned by iommu_sva_bind_device() @@ -80,6 +81,7 @@ struct uacce_queue { wait_queue_head_t wait; struct list_head list; struct uacce_qfile_region *qfrs[UACCE_MAX_REGION]; + struct mutex mutex; enum uacce_q_state state; u32 pasid; struct iommu_sva *handle; @@ -97,9 +99,9 @@ struct uacce_queue { * @dev_id: id of the uacce device * @cdev: cdev of the uacce * @dev: dev of the uacce + * @mutex: protects uacce operation * @priv: private pointer of the uacce * @queues: list of queues - * @queues_lock: lock for queues list * @inode: core vfs */ struct uacce_device { @@ -113,9 +115,9 @@ struct uacce_device { u32 dev_id; struct cdev *cdev; struct device dev; + struct mutex mutex; void *priv; struct list_head queues; - struct mutex queues_lock; struct inode *inode; }; diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 5a328cf02b75..47e5d374c7eb 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -148,7 +148,7 @@ _copy_to_user(void __user *, const void *, unsigned long); static __always_inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) { - if (likely(check_copy_size(to, n, false))) + if (check_copy_size(to, n, false)) n = _copy_from_user(to, from, n); return n; } @@ -156,7 +156,7 @@ copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) { - if (likely(check_copy_size(from, n, true))) + if (check_copy_size(from, n, true)) n = _copy_to_user(to, from, n); return n; } diff --git a/include/linux/ucb1400.h b/include/linux/ucb1400.h index 0968ef458447..22345391350b 100644 --- a/include/linux/ucb1400.h +++ b/include/linux/ucb1400.h @@ -84,8 +84,6 @@ struct ucb1400_gpio { struct gpio_chip gc; struct snd_ac97 *ac97; int gpio_offset; - int (*gpio_setup)(struct device *dev, int ngpio); - int 
(*gpio_teardown)(struct device *dev, int ngpio); }; struct ucb1400_ts { diff --git a/include/linux/uio.h b/include/linux/uio.h index 34ba4a731179..5896af36199c 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -26,6 +26,7 @@ enum iter_type { ITER_PIPE, ITER_XARRAY, ITER_DISCARD, + ITER_UBUF, }; struct iov_iter_state { @@ -38,7 +39,11 @@ struct iov_iter { u8 iter_type; bool nofault; bool data_source; - size_t iov_offset; + bool user_backed; + union { + size_t iov_offset; + int last_offset; + }; size_t count; union { const struct iovec *iov; @@ -46,6 +51,7 @@ struct iov_iter { const struct bio_vec *bvec; struct xarray *xarray; struct pipe_inode_info *pipe; + void __user *ubuf; }; union { unsigned long nr_segs; @@ -70,6 +76,11 @@ static inline void iov_iter_save_state(struct iov_iter *iter, state->nr_segs = iter->nr_segs; } +static inline bool iter_is_ubuf(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_UBUF; +} + static inline bool iter_is_iovec(const struct iov_iter *i) { return iov_iter_type(i) == ITER_IOVEC; @@ -105,6 +116,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i) return i->data_source ? WRITE : READ; } +static inline bool user_backed_iter(const struct iov_iter *i) +{ + return i->user_backed; +} + /* * Total number of bytes covered by an iovec. * @@ -156,19 +172,17 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset, static __always_inline __must_check size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, true))) - return 0; - else + if (check_copy_size(addr, bytes, true)) return _copy_to_iter(addr, bytes, i); + return 0; } static __always_inline __must_check size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return 0; - else + if (check_copy_size(addr, bytes, false)) return _copy_from_iter(addr, bytes, i); + return 0; } static __always_inline __must_check @@ -184,10 +198,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) static __always_inline __must_check size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) { - if (unlikely(!check_copy_size(addr, bytes, false))) - return 0; - else + if (check_copy_size(addr, bytes, false)) return _copy_from_iter_nocache(addr, bytes, i); + return 0; } static __always_inline __must_check @@ -234,9 +247,9 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, loff_t start, size_t count); -ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, +ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start); -ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, +ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages, size_t maxsize, size_t *start); int iov_iter_npages(const struct iov_iter *i, int maxpages); void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state); @@ -325,4 +338,17 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec, int import_single_range(int type, void __user *buf, size_t len, struct iovec *iov, struct iov_iter *i); +static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction, + void __user *buf, size_t count) +{ + 
WARN_ON(direction & ~(READ | WRITE)); + *i = (struct iov_iter) { + .iter_type = ITER_UBUF, + .user_backed = true, + .data_source = direction, + .ubuf = buf, + .count = count + }; +} + #endif diff --git a/include/linux/usb.h b/include/linux/usb.h index 60bee864d897..f7a9914fc97f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -584,6 +584,7 @@ struct usb3_lpm_parameters { * @authenticated: Crypto authentication passed * @wusb: device is Wireless USB * @lpm_capable: device supports LPM + * @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range * @usb2_hw_lpm_capable: device can perform USB2 hardware LPM * @usb2_hw_lpm_besl_capable: device can perform USB2 hardware BESL LPM * @usb2_hw_lpm_enabled: USB2 hardware LPM is enabled @@ -666,6 +667,7 @@ struct usb_device { unsigned authenticated:1; unsigned wusb:1; unsigned lpm_capable:1; + unsigned lpm_devinit_allow:1; unsigned usb2_hw_lpm_capable:1; unsigned usb2_hw_lpm_besl_capable:1; unsigned usb2_hw_lpm_enabled:1; diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 8fc2abd7aecb..ca796dc1a984 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -2,9 +2,6 @@ /* * Copyright (c) 2010 Daniel Mack <daniel@caiaq.de> * - * This software is distributed under the terms of the GNU General Public - * License ("GPL") version 2, as published by the Free Software Foundation. - * * This file holds USB constants and structures defined * by the USB Device Class Definition for Audio Devices in version 2.0. * Comments below reference relevant sections of the documents contained diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index 170acd500ea1..0747b24a1a7c 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h @@ -6,9 +6,6 @@ * Developed for Thumtronics by Grey Innovation * Ben Williamson <ben.williamson@greyinnovation.com> * - * This software is distributed under the terms of the GNU General Public - * License ("GPL") version 2, as published by the Free Software Foundation. - * * This file holds USB constants and structures defined * by the USB Device Class Definition for Audio Devices. * Comments below reference relevant sections of that document: diff --git a/include/linux/usb/c67x00.h b/include/linux/usb/c67x00.h index 2fc39e3b7281..45e0757e58f3 100644 --- a/include/linux/usb/c67x00.h +++ b/include/linux/usb/c67x00.h @@ -3,21 +3,6 @@ * usb_c67x00.h: platform definitions for the Cypress C67X00 USB chip * * Copyright (C) 2006-2008 Barco N.V. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301 USA. 
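Stepping back to the ITER_UBUF iterator type and the iov_iter_ubuf() initializer added to include/linux/uio.h above, a minimal sketch of how a read-style path could wrap a plain user buffer without building a single-entry iovec (function name and data are illustrative):

#include <linux/kernel.h>
#include <linux/uio.h>

static ssize_t sample_read_to_user(char __user *ubuf, size_t len)
{
	static const char msg[] = "hello";
	struct iov_iter iter;

	/* READ: data flows from the kernel into the user buffer. */
	iov_iter_ubuf(&iter, READ, ubuf, len);

	/* Returns the number of bytes actually copied into 'ubuf'. */
	return copy_to_iter(msg, min(len, sizeof(msg)), &iter);
}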
*/ #ifndef _LINUX_USB_C67X00_H diff --git a/include/linux/usb/cdc-wdm.h b/include/linux/usb/cdc-wdm.h index 9f5a51f79ba5..85417f00a89a 100644 --- a/include/linux/usb/cdc-wdm.h +++ b/include/linux/usb/cdc-wdm.h @@ -3,10 +3,6 @@ * USB CDC Device Management subdriver * * Copyright (c) 2012 Bjørn Mork <bjorn@mork.no> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. */ #ifndef __LINUX_USB_CDC_WDM_H diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 35d784cf32a4..0af0db51bc89 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h @@ -3,10 +3,6 @@ * USB CDC common helpers * * Copyright (c) 2015 Oliver Neukum <oneukum@suse.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. */ #ifndef __LINUX_USB_CDC_H #define __LINUX_USB_CDC_H diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index f7cb3ddce7fb..2d207cb4837d 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h @@ -53,8 +53,8 @@ #define USB_CDC_NCM_NDP32_LENGTH_MIN 0x20 /* Maximum NTB length */ -#define CDC_NCM_NTB_MAX_SIZE_TX 32768 /* bytes */ -#define CDC_NCM_NTB_MAX_SIZE_RX 32768 /* bytes */ +#define CDC_NCM_NTB_MAX_SIZE_TX 65536 /* bytes */ +#define CDC_NCM_NTB_MAX_SIZE_RX 65536 /* bytes */ /* Initial NTB length */ #define CDC_NCM_NTB_DEF_SIZE_TX 16384 /* bytes */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 9d2762279286..43ac3fa760db 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -3,20 +3,6 @@ * composite.h -- framework for usb gadgets which are composite devices * * Copyright (C) 2006-2008 David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __LINUX_USB_COMPOSITE_H diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index c892c5bc6638..fbabadd3b372 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -1,20 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __LINUX_USB_EHCI_DEF_H diff --git a/include/linux/usb/ehci_pdriver.h b/include/linux/usb/ehci_pdriver.h index 89fc901e778f..0f1b166f5aa0 100644 --- a/include/linux/usb/ehci_pdriver.h +++ b/include/linux/usb/ehci_pdriver.h @@ -1,20 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __USB_CORE_EHCI_PDRIVER_H diff --git a/include/linux/usb/g_hid.h b/include/linux/usb/g_hid.h index 7581e488c237..d56bfedeb079 100644 --- a/include/linux/usb/g_hid.h +++ b/include/linux/usb/g_hid.h @@ -3,20 +3,6 @@ * g_hid.h -- Header file for USB HID gadget driver * * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_G_HID_H diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 3ad58b7a0824..dc3092cea99e 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -10,8 +10,6 @@ * * (C) Copyright 2002-2004 by David Brownell * All Rights Reserved. - * - * This software is licensed under the GNU GPL version 2. */ #ifndef __LINUX_USB_GADGET_H diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 2c1fc9212cf2..67f8713d3fa3 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -1,20 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2001-2002 by David Brownell - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #ifndef __USB_CORE_HCD_H @@ -66,6 +52,7 @@ struct giveback_urb_bh { bool running; + bool high_prio; spinlock_t lock; struct list_head head; struct tasklet_struct bh; diff --git a/include/linux/usb/input.h b/include/linux/usb/input.h index 974befa72ac0..5e759b2cf551 100644 --- a/include/linux/usb/input.h +++ b/include/linux/usb/input.h @@ -1,10 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2005 Dmitry Torokhov - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. */ #ifndef __LINUX_USB_INPUT_H diff --git a/include/linux/usb/isp1301.h b/include/linux/usb/isp1301.h index dedb3b2473e8..fa986b926a12 100644 --- a/include/linux/usb/isp1301.h +++ b/include/linux/usb/isp1301.h @@ -3,16 +3,6 @@ * NXP ISP1301 USB transceiver driver * * Copyright (C) 2012 Roland Stigge <stigge@antcom.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef __LINUX_USB_ISP1301_H diff --git a/include/linux/usb/m66592.h b/include/linux/usb/m66592.h index 2dfe68183495..5f04de2b47fd 100644 --- a/include/linux/usb/m66592.h +++ b/include/linux/usb/m66592.h @@ -3,20 +3,6 @@ * M66592 driver platform data * * Copyright (C) 2009 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #ifndef __LINUX_USB_M66592_H diff --git a/include/linux/usb/musb-ux500.h b/include/linux/usb/musb-ux500.h index c4b7ad9850ca..d60dcfc56b5a 100644 --- a/include/linux/usb/musb-ux500.h +++ b/include/linux/usb/musb-ux500.h @@ -1,16 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2013 ST-Ericsson AB - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #ifndef __MUSB_UX500_H__ diff --git a/include/linux/usb/net2280.h b/include/linux/usb/net2280.h index 08b85caecfaf..f29fe6a1f415 100644 --- a/include/linux/usb/net2280.h +++ b/include/linux/usb/net2280.h @@ -5,20 +5,6 @@ * * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) * Copyright (C) 2003 David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_NET2280_H diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index dba55ccb9b53..98487fd7ab11 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * OF helpers for usb devices. - * - * This file is released under the GPLv2 */ #ifndef __LINUX_USB_OF_H diff --git a/include/linux/usb/ohci_pdriver.h b/include/linux/usb/ohci_pdriver.h index 7eb16cf587ee..2447c78b1766 100644 --- a/include/linux/usb/ohci_pdriver.h +++ b/include/linux/usb/ohci_pdriver.h @@ -1,20 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2012 Hauke Mehrtens <hauke@hauke-m.de> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software Foundation, - * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifndef __USB_CORE_OHCI_PDRIVER_H diff --git a/include/linux/usb/onboard_hub.h b/include/linux/usb/onboard_hub.h new file mode 100644 index 000000000000..d9373230556e --- /dev/null +++ b/include/linux/usb/onboard_hub.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_USB_ONBOARD_HUB_H +#define __LINUX_USB_ONBOARD_HUB_H + +struct usb_device; +struct list_head; + +#if IS_ENABLED(CONFIG_USB_ONBOARD_HUB) +void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list); +void onboard_hub_destroy_pdevs(struct list_head *pdev_list); +#else +static inline void onboard_hub_create_pdevs(struct usb_device *parent_hub, + struct list_head *pdev_list) {} +static inline void onboard_hub_destroy_pdevs(struct list_head *pdev_list) {} +#endif + +#endif /* __LINUX_USB_ONBOARD_HUB_H */ diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 784659d4dc99..6135d076c53d 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -1,19 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ -/* Copyright (C) 2007,2008 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 675 Mass Ave, Cambridge, MA 02139, USA. +/* + * Copyright (C) 2007,2008 Freescale Semiconductor, Inc. 
*/ #ifndef __LINUX_USB_OTG_FSM_H diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index 96b7ff66f074..c59fb79a42e8 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -495,4 +495,42 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_P_SNK_STDBY_MW 2500 /* 2500 mW */ +#if IS_ENABLED(CONFIG_TYPEC) + +struct usb_power_delivery; + +/** + * usb_power_delivery_desc - USB Power Delivery Descriptor + * @revision: USB Power Delivery Specification Revision + * @version: USB Power Delivery Specicication Version - optional + */ +struct usb_power_delivery_desc { + u16 revision; + u16 version; +}; + +/** + * usb_power_delivery_capabilities_desc - Description of USB Power Delivery Capabilities Message + * @pdo: The Power Data Objects in the Capability Message + * @role: Power role of the capabilities + */ +struct usb_power_delivery_capabilities_desc { + u32 pdo[PDO_MAX_OBJECTS]; + enum typec_role role; +}; + +struct usb_power_delivery_capabilities * +usb_power_delivery_register_capabilities(struct usb_power_delivery *pd, + struct usb_power_delivery_capabilities_desc *desc); +void usb_power_delivery_unregister_capabilities(struct usb_power_delivery_capabilities *cap); + +struct usb_power_delivery *usb_power_delivery_register(struct device *parent, + struct usb_power_delivery_desc *desc); +void usb_power_delivery_unregister(struct usb_power_delivery *pd); + +int usb_power_delivery_link_device(struct usb_power_delivery *pd, struct device *dev); +void usb_power_delivery_unlink_device(struct usb_power_delivery *pd, struct device *dev); + +#endif /* CONFIG_TYPEC */ + #endif /* __LINUX_USB_PD_H */ diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h index 263196f05015..862aaeca2319 100644 --- a/include/linux/usb/phy_companion.h +++ b/include/linux/usb/phy_companion.h @@ -3,18 +3,8 @@ * phy-companion.h -- phy companion to indicate the comparator part of PHY * * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. * * Author: Kishon Vijay Abraham I <kishon@ti.com> - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef __DRIVERS_PHY_COMPANION_H diff --git a/include/linux/usb/r8a66597.h b/include/linux/usb/r8a66597.h index c0753d026bbf..f0fa7ddadbaa 100644 --- a/include/linux/usb/r8a66597.h +++ b/include/linux/usb/r8a66597.h @@ -5,20 +5,6 @@ * Copyright (C) 2009 Renesas Solutions Corp. * * Author : Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
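As a hedged sketch of how a Type-C port driver might use the new USB Power Delivery registration interface declared in include/linux/usb/pd.h above; the PDO value, revision number and ERR_PTR-style error handling are illustrative assumptions, and a real driver would also keep the returned capabilities object around so it can unregister it later:

#include <linux/err.h>
#include <linux/usb/pd.h>
#include <linux/usb/typec.h>

static struct usb_power_delivery *sample_register_pd(struct device *parent)
{
	struct usb_power_delivery_desc desc = {
		.revision = 0x0300,		/* assumed BCD encoding for PD 3.0 */
	};
	struct usb_power_delivery_capabilities_desc src_caps = {
		.pdo = {
			/* 5 V / 3 A fixed supply; flags chosen for illustration. */
			PDO_FIXED(5000, 3000,
				  PDO_FIXED_DUAL_ROLE | PDO_FIXED_USB_COMM),
		},
		.role = TYPEC_SOURCE,
	};
	struct usb_power_delivery_capabilities *cap;
	struct usb_power_delivery *pd;

	pd = usb_power_delivery_register(parent, &desc);
	if (IS_ERR(pd))
		return pd;

	cap = usb_power_delivery_register_capabilities(pd, &src_caps);
	if (IS_ERR(cap)) {
		usb_power_delivery_unregister(pd);
		return ERR_CAST(cap);
	}

	return pd;
}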
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * */ #ifndef __LINUX_USB_R8A66597_H diff --git a/include/linux/usb/rndis_host.h b/include/linux/usb/rndis_host.h index cc42db51bbba..489cfb1d00f6 100644 --- a/include/linux/usb/rndis_host.h +++ b/include/linux/usb/rndis_host.h @@ -2,20 +2,6 @@ /* * Host Side support for RNDIS Networking Links * Copyright (C) 2005 by David Brownell - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_RNDIS_HOST_H diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 16ea5a4cc586..8ea319f89e1f 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -4,11 +4,6 @@ * * Copyright (C) 1999 - 2012 * Greg Kroah-Hartman (greg@kroah.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * */ #ifndef __LINUX_USB_SERIAL_H diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h index e0240f864548..2827ce72e502 100644 --- a/include/linux/usb/storage.h +++ b/include/linux/usb/storage.h @@ -9,8 +9,6 @@ * * This file contains definitions taken from the * USB Mass Storage Class Specification Overview - * - * Distributed under the terms of the GNU GPL, version two. */ /* Storage subclass codes */ diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h new file mode 100644 index 000000000000..20c0bedb8ec8 --- /dev/null +++ b/include/linux/usb/tcpci.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2015-2017 Google, Inc + * + * USB Type-C Port Controller Interface. 
+ */ + +#ifndef __LINUX_USB_TCPCI_H +#define __LINUX_USB_TCPCI_H + +#include <linux/usb/typec.h> +#include <linux/usb/tcpm.h> + +#define TCPC_VENDOR_ID 0x0 +#define TCPC_PRODUCT_ID 0x2 +#define TCPC_BCD_DEV 0x4 +#define TCPC_TC_REV 0x6 +#define TCPC_PD_REV 0x8 +#define TCPC_PD_INT_REV 0xa + +#define TCPC_ALERT 0x10 +#define TCPC_ALERT_EXTND BIT(14) +#define TCPC_ALERT_EXTENDED_STATUS BIT(13) +#define TCPC_ALERT_VBUS_DISCNCT BIT(11) +#define TCPC_ALERT_RX_BUF_OVF BIT(10) +#define TCPC_ALERT_FAULT BIT(9) +#define TCPC_ALERT_V_ALARM_LO BIT(8) +#define TCPC_ALERT_V_ALARM_HI BIT(7) +#define TCPC_ALERT_TX_SUCCESS BIT(6) +#define TCPC_ALERT_TX_DISCARDED BIT(5) +#define TCPC_ALERT_TX_FAILED BIT(4) +#define TCPC_ALERT_RX_HARD_RST BIT(3) +#define TCPC_ALERT_RX_STATUS BIT(2) +#define TCPC_ALERT_POWER_STATUS BIT(1) +#define TCPC_ALERT_CC_STATUS BIT(0) + +#define TCPC_ALERT_MASK 0x12 +#define TCPC_POWER_STATUS_MASK 0x14 +#define TCPC_FAULT_STATUS_MASK 0x15 + +#define TCPC_EXTENDED_STATUS_MASK 0x16 +#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0) + +#define TCPC_ALERT_EXTENDED_MASK 0x17 +#define TCPC_SINK_FAST_ROLE_SWAP BIT(0) + +#define TCPC_CONFIG_STD_OUTPUT 0x18 + +#define TCPC_TCPC_CTRL 0x19 +#define TCPC_TCPC_CTRL_ORIENTATION BIT(0) +#define PLUG_ORNT_CC1 0 +#define PLUG_ORNT_CC2 1 +#define TCPC_TCPC_CTRL_BIST_TM BIT(1) +#define TCPC_TCPC_CTRL_EN_LK4CONN_ALRT BIT(6) + +#define TCPC_EXTENDED_STATUS 0x20 +#define TCPC_EXTENDED_STATUS_VSAFE0V BIT(0) + +#define TCPC_ROLE_CTRL 0x1a +#define TCPC_ROLE_CTRL_DRP BIT(6) +#define TCPC_ROLE_CTRL_RP_VAL_SHIFT 4 +#define TCPC_ROLE_CTRL_RP_VAL_MASK 0x3 +#define TCPC_ROLE_CTRL_RP_VAL_DEF 0x0 +#define TCPC_ROLE_CTRL_RP_VAL_1_5 0x1 +#define TCPC_ROLE_CTRL_RP_VAL_3_0 0x2 +#define TCPC_ROLE_CTRL_CC2_SHIFT 2 +#define TCPC_ROLE_CTRL_CC2_MASK 0x3 +#define TCPC_ROLE_CTRL_CC1_SHIFT 0 +#define TCPC_ROLE_CTRL_CC1_MASK 0x3 +#define TCPC_ROLE_CTRL_CC_RA 0x0 +#define TCPC_ROLE_CTRL_CC_RP 0x1 +#define TCPC_ROLE_CTRL_CC_RD 0x2 +#define TCPC_ROLE_CTRL_CC_OPEN 0x3 + +#define TCPC_FAULT_CTRL 0x1b + +#define TCPC_POWER_CTRL 0x1c +#define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0) +#define TCPC_POWER_CTRL_BLEED_DISCHARGE BIT(3) +#define TCPC_POWER_CTRL_AUTO_DISCHARGE BIT(4) +#define TCPC_DIS_VOLT_ALRM BIT(5) +#define TCPC_POWER_CTRL_VBUS_VOLT_MON BIT(6) +#define TCPC_FAST_ROLE_SWAP_EN BIT(7) + +#define TCPC_CC_STATUS 0x1d +#define TCPC_CC_STATUS_TOGGLING BIT(5) +#define TCPC_CC_STATUS_TERM BIT(4) +#define TCPC_CC_STATUS_TERM_RP 0 +#define TCPC_CC_STATUS_TERM_RD 1 +#define TCPC_CC_STATE_SRC_OPEN 0 +#define TCPC_CC_STATUS_CC2_SHIFT 2 +#define TCPC_CC_STATUS_CC2_MASK 0x3 +#define TCPC_CC_STATUS_CC1_SHIFT 0 +#define TCPC_CC_STATUS_CC1_MASK 0x3 + +#define TCPC_POWER_STATUS 0x1e +#define TCPC_POWER_STATUS_DBG_ACC_CON BIT(7) +#define TCPC_POWER_STATUS_UNINIT BIT(6) +#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4) +#define TCPC_POWER_STATUS_VBUS_DET BIT(3) +#define TCPC_POWER_STATUS_VBUS_PRES BIT(2) +#define TCPC_POWER_STATUS_VCONN_PRES BIT(1) +#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0) + +#define TCPC_FAULT_STATUS 0x1f + +#define TCPC_ALERT_EXTENDED 0x21 + +#define TCPC_COMMAND 0x23 +#define TCPC_CMD_WAKE_I2C 0x11 +#define TCPC_CMD_DISABLE_VBUS_DETECT 0x22 +#define TCPC_CMD_ENABLE_VBUS_DETECT 0x33 +#define TCPC_CMD_DISABLE_SINK_VBUS 0x44 +#define TCPC_CMD_SINK_VBUS 0x55 +#define TCPC_CMD_DISABLE_SRC_VBUS 0x66 +#define TCPC_CMD_SRC_VBUS_DEFAULT 0x77 +#define TCPC_CMD_SRC_VBUS_HIGH 0x88 +#define TCPC_CMD_LOOK4CONNECTION 0x99 +#define TCPC_CMD_RXONEMORE 0xAA +#define TCPC_CMD_I2C_IDLE 0xFF + 
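The register and bit definitions above map directly onto regmap accesses from a port-controller driver. A small hypothetical helper that decodes both CC lines from TCPC_CC_STATUS and restarts connection detection when both are open (interpretation of the CC fields also depends on TCPC_CC_STATUS_TERM and the presented pull, which this sketch ignores):

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/usb/tcpci.h>

static int sample_tcpci_check_cc(struct regmap *regmap)
{
	unsigned int status, cc1, cc2;
	int ret;

	ret = regmap_read(regmap, TCPC_CC_STATUS, &status);
	if (ret < 0)
		return ret;

	if (status & TCPC_CC_STATUS_TOGGLING)
		return -EAGAIN;		/* still hunting for a connection */

	cc1 = (status >> TCPC_CC_STATUS_CC1_SHIFT) & TCPC_CC_STATUS_CC1_MASK;
	cc2 = (status >> TCPC_CC_STATUS_CC2_SHIFT) & TCPC_CC_STATUS_CC2_MASK;

	/* 0 means "open" when a pull is presented; see TCPC_CC_STATE_SRC_OPEN. */
	if (cc1 == TCPC_CC_STATE_SRC_OPEN && cc2 == TCPC_CC_STATE_SRC_OPEN)
		return regmap_write(regmap, TCPC_COMMAND,
				    TCPC_CMD_LOOK4CONNECTION);

	return 0;
}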
+#define TCPC_DEV_CAP_1 0x24 +#define TCPC_DEV_CAP_2 0x26 +#define TCPC_STD_INPUT_CAP 0x28 +#define TCPC_STD_OUTPUT_CAP 0x29 + +#define TCPC_MSG_HDR_INFO 0x2e +#define TCPC_MSG_HDR_INFO_DATA_ROLE BIT(3) +#define TCPC_MSG_HDR_INFO_PWR_ROLE BIT(0) +#define TCPC_MSG_HDR_INFO_REV_SHIFT 1 +#define TCPC_MSG_HDR_INFO_REV_MASK 0x3 + +#define TCPC_RX_DETECT 0x2f +#define TCPC_RX_DETECT_HARD_RESET BIT(5) +#define TCPC_RX_DETECT_SOP BIT(0) +#define TCPC_RX_DETECT_SOP1 BIT(1) +#define TCPC_RX_DETECT_SOP2 BIT(2) +#define TCPC_RX_DETECT_DBG1 BIT(3) +#define TCPC_RX_DETECT_DBG2 BIT(4) + +#define TCPC_RX_BYTE_CNT 0x30 +#define TCPC_RX_BUF_FRAME_TYPE 0x31 +#define TCPC_RX_BUF_FRAME_TYPE_SOP 0 +#define TCPC_RX_HDR 0x32 +#define TCPC_RX_DATA 0x34 /* through 0x4f */ + +#define TCPC_TRANSMIT 0x50 +#define TCPC_TRANSMIT_RETRY_SHIFT 4 +#define TCPC_TRANSMIT_RETRY_MASK 0x3 +#define TCPC_TRANSMIT_TYPE_SHIFT 0 +#define TCPC_TRANSMIT_TYPE_MASK 0x7 + +#define TCPC_TX_BYTE_CNT 0x51 +#define TCPC_TX_HDR 0x52 +#define TCPC_TX_DATA 0x54 /* through 0x6f */ + +#define TCPC_VBUS_VOLTAGE 0x70 +#define TCPC_VBUS_VOLTAGE_MASK 0x3ff +#define TCPC_VBUS_VOLTAGE_LSB_MV 25 +#define TCPC_VBUS_SINK_DISCONNECT_THRESH 0x72 +#define TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV 25 +#define TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX 0x3ff +#define TCPC_VBUS_STOP_DISCHARGE_THRESH 0x74 +#define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76 +#define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78 + +/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible I2C_WRITE_BYTE_COUNT */ +#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31 + +struct tcpci; + +/* + * @TX_BUF_BYTE_x_hidden: + * optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT. + * @frs_sourcing_vbus: + * Optional; Callback to perform chip specific operations when FRS + * is sourcing vbus. + * @auto_discharge_disconnect: + * Optional; Enables TCPC to autonously discharge vbus on disconnect. + * @vbus_vsafe0v: + * optional; Set when TCPC can detect whether vbus is at VSAFE0V. + * @set_partner_usb_comm_capable: + * Optional; The USB Communications Capable bit indicates if port + * partner is capable of communication over the USB data lines + * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit. + */ +struct tcpci_data { + struct regmap *regmap; + unsigned char TX_BUF_BYTE_x_hidden:1; + unsigned char auto_discharge_disconnect:1; + unsigned char vbus_vsafe0v:1; + + int (*init)(struct tcpci *tcpci, struct tcpci_data *data); + int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data, + bool enable); + int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data, + enum typec_cc_status cc); + int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink); + void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data); + void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data, + bool capable); +}; + +struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data); +void tcpci_unregister_port(struct tcpci *tcpci); +irqreturn_t tcpci_irq(struct tcpci *tcpci); + +struct tcpm_port; +struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci); +#endif /* __LINUX_USB_TCPCI_H */ diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h index d3e65eb9e16f..46e73584b6e6 100644 --- a/include/linux/usb/tegra_usb_phy.h +++ b/include/linux/usb/tegra_usb_phy.h @@ -1,16 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 Google, Inc. 
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef __TEGRA_USB_PHY_H diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index fdf737d48b3b..7751bedcae5d 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -22,6 +22,8 @@ struct typec_altmode_ops; struct fwnode_handle; struct device; +struct usb_power_delivery; + enum typec_port_type { TYPEC_PORT_SRC, TYPEC_PORT_SNK, @@ -52,6 +54,16 @@ enum typec_role { TYPEC_SOURCE, }; +static inline int is_sink(enum typec_role role) +{ + return role == TYPEC_SINK; +} + +static inline int is_source(enum typec_role role) +{ + return role == TYPEC_SOURCE; +} + enum typec_pwr_opmode { TYPEC_PWR_MODE_USB, TYPEC_PWR_MODE_1_5A, @@ -213,6 +225,8 @@ struct typec_partner_desc { * @pr_set: Set Power Role * @vconn_set: Source VCONN * @port_type_set: Set port type + * @pd_get: Get available USB Power Delivery Capabilities. + * @pd_set: Set USB Power Delivery Capabilities. */ struct typec_operations { int (*try_role)(struct typec_port *port, int role); @@ -221,6 +235,8 @@ struct typec_operations { int (*vconn_set)(struct typec_port *port, enum typec_role role); int (*port_type_set)(struct typec_port *port, enum typec_port_type type); + struct usb_power_delivery **(*pd_get)(struct typec_port *port); + int (*pd_set)(struct typec_port *port, struct usb_power_delivery *pd); }; enum usb_pd_svdm_ver { @@ -240,6 +256,7 @@ enum usb_pd_svdm_ver { * @accessory: Supported Accessory Modes * @fwnode: Optional fwnode of the port * @driver_data: Private pointer for driver specific info + * @pd: Optional USB Power Delivery Support * @ops: Port operations vector * * Static capabilities of a single USB Type-C port. 
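A hedged sketch of how a port driver could wire up the new @pd capability pointer and the @pd_get/@pd_set operations; the driver structure and the NULL-terminated one-element list are invented for illustration, and the exact semantics the Type-C class expects from pd_get()/pd_set() should be taken from the class code rather than from this snippet:

#include <linux/errno.h>
#include <linux/usb/typec.h>

struct sample_port {
	struct typec_port *port;
	struct usb_power_delivery *pd;
	struct usb_power_delivery *pds[2];	/* assumed NULL-terminated list */
};

static struct usb_power_delivery **sample_pd_get(struct typec_port *port)
{
	struct sample_port *sp = typec_get_drvdata(port);

	return sp->pds;
}

static int sample_pd_set(struct typec_port *port, struct usb_power_delivery *pd)
{
	struct sample_port *sp = typec_get_drvdata(port);

	/* This sketch only offers a single, fixed capability set. */
	return pd == sp->pd ? 0 : -EINVAL;
}

static const struct typec_operations sample_ops = {
	.pd_get = sample_pd_get,
	.pd_set = sample_pd_set,
};

Before typec_register_port(), the driver would point the typec_capability's .pd at the default usb_power_delivery object and .ops at sample_ops, alongside the usual .driver_data back-pointer.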
@@ -257,6 +274,8 @@ struct typec_capability { struct fwnode_handle *fwnode; void *driver_data; + struct usb_power_delivery *pd; + const struct typec_operations *ops; }; @@ -308,4 +327,8 @@ void typec_partner_set_svdm_version(struct typec_partner *partner, enum usb_pd_svdm_ver svdm_version); int typec_get_negotiated_svdm_version(struct typec_port *port); +int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_delivery *pd); +int typec_partner_set_usb_power_delivery(struct typec_partner *partner, + struct usb_power_delivery *pd); + #endif /* __LINUX_USB_TYPEC_H */ diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h index 65933cbe9129..350d49012659 100644 --- a/include/linux/usb/typec_altmode.h +++ b/include/linux/usb/typec_altmode.h @@ -124,7 +124,7 @@ struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes, /** * typec_altmode_get_orientation - Get cable plug orientation - * altmode: Handle to the alternate mode + * @altmode: Handle to the alternate mode */ static inline enum typec_orientation typec_altmode_get_orientation(struct typec_altmode *altmode) diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h index ee57781dcf28..9292f0e07846 100644 --- a/include/linux/usb/typec_mux.h +++ b/include/linux/usb/typec_mux.h @@ -58,17 +58,13 @@ struct typec_mux_desc { void *drvdata; }; +#if IS_ENABLED(CONFIG_TYPEC) + struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode, const struct typec_altmode_desc *desc); void typec_mux_put(struct typec_mux *mux); int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state); -static inline struct typec_mux * -typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc) -{ - return fwnode_typec_mux_get(dev_fwnode(dev), desc); -} - struct typec_mux_dev * typec_mux_register(struct device *parent, const struct typec_mux_desc *desc); void typec_mux_unregister(struct typec_mux_dev *mux); @@ -76,4 +72,40 @@ void typec_mux_unregister(struct typec_mux_dev *mux); void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data); void *typec_mux_get_drvdata(struct typec_mux_dev *mux); +#else + +static inline struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode, + const struct typec_altmode_desc *desc) +{ + return NULL; +} + +static inline void typec_mux_put(struct typec_mux *mux) {} + +static inline int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state) +{ + return 0; +} + +static inline struct typec_mux_dev * +typec_mux_register(struct device *parent, const struct typec_mux_desc *desc) +{ + return ERR_PTR(-EOPNOTSUPP); +} +static inline void typec_mux_unregister(struct typec_mux_dev *mux) {} + +static inline void typec_mux_set_drvdata(struct typec_mux_dev *mux, void *data) {} +static inline void *typec_mux_get_drvdata(struct typec_mux_dev *mux) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +#endif /* CONFIG_TYPEC */ + +static inline struct typec_mux * +typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc) +{ + return fwnode_typec_mux_get(dev_fwnode(dev), desc); +} + #endif /* __USB_TYPEC_MUX */ diff --git a/include/linux/usb/typec_retimer.h b/include/linux/usb/typec_retimer.h new file mode 100644 index 000000000000..5e036b3360e2 --- /dev/null +++ b/include/linux/usb/typec_retimer.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __USB_TYPEC_RETIMER +#define __USB_TYPEC_RETIMER + +#include <linux/property.h> +#include <linux/usb/typec.h> + +struct device; +struct typec_retimer; 
+struct typec_altmode; +struct fwnode_handle; + +struct typec_retimer_state { + struct typec_altmode *alt; + unsigned long mode; + void *data; +}; + +typedef int (*typec_retimer_set_fn_t)(struct typec_retimer *retimer, + struct typec_retimer_state *state); + +struct typec_retimer_desc { + struct fwnode_handle *fwnode; + typec_retimer_set_fn_t set; + const char *name; + void *drvdata; +}; + +struct typec_retimer *fwnode_typec_retimer_get(struct fwnode_handle *fwnode); +void typec_retimer_put(struct typec_retimer *retimer); +int typec_retimer_set(struct typec_retimer *retimer, struct typec_retimer_state *state); + +static inline struct typec_retimer *typec_retimer_get(struct device *dev) +{ + return fwnode_typec_retimer_get(dev_fwnode(dev)); +} + +struct typec_retimer * +typec_retimer_register(struct device *parent, const struct typec_retimer_desc *desc); +void typec_retimer_unregister(struct typec_retimer *retimer); + +void *typec_retimer_get_drvdata(struct typec_retimer *retimer); + +#endif /* __USB_TYPEC_RETIMER */ diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h index 36c2982780ad..5050f502c1ed 100644 --- a/include/linux/usb/ulpi.h +++ b/include/linux/usb/ulpi.h @@ -3,10 +3,6 @@ * ulpi.h -- ULPI defines and function prorotypes * * Copyright (C) 2010 Nokia Corporation - * - * This software is distributed under the terms of the GNU General - * Public License ("GPL") as published by the Free Software Foundation, - * version 2 of that License. */ #ifndef __LINUX_USB_ULPI_H diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h index 20020c1336d5..70a7e3cdb3c9 100644 --- a/include/linux/usb/usb338x.h +++ b/include/linux/usb/usb338x.h @@ -6,17 +6,6 @@ * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) * Copyright (C) 2003 David Brownell * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * */ #ifndef __LINUX_USB_USB338X_H diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 1b4d72d5e891..9f08a584d707 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -4,25 +4,17 @@ * * Copyright (C) 2000-2005 by David Brownell <dbrownell@users.sourceforge.net> * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __LINUX_USB_USBNET_H #define __LINUX_USB_USBNET_H +#include <linux/mii.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <linux/usb.h> + /* interface from usbnet core to each USB networking link we handle */ struct usbnet { /* housekeeping */ diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h index 01fe768873f9..171fd74b1cfc 100644 --- a/include/linux/usb/xhci-dbgp.h +++ b/include/linux/usb/xhci-dbgp.h @@ -5,10 +5,6 @@ * Copyright (C) 2016 Intel Corporation * * Author: Lu Baolu <baolu.lu@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __LINUX_XHCI_DBGP_H diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 7b4a13d3bd91..d282f464d2f1 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -218,6 +218,9 @@ struct vdpa_map_file { * @reset: Reset device * @vdev: vdpa device * Returns integer: success (0) or error (< 0) + * @suspend: Suspend or resume the device (optional) + * @vdev: vdpa device + * Returns integer: success (0) or error (< 0) * @get_config_size: Get the size of the configuration space includes * fields that are conditional on feature bits. * @vdev: vdpa device @@ -319,6 +322,7 @@ struct vdpa_config_ops { u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); int (*reset)(struct vdpa_device *vdev); + int (*suspend)(struct vdpa_device *vdev); size_t (*get_config_size)(struct vdpa_device *vdev); void (*get_config)(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len); diff --git a/include/linux/vfio.h b/include/linux/vfio.h index aa888cc51757..e05ddc6fe6a5 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -32,6 +32,11 @@ struct vfio_device_set { struct vfio_device { struct device *dev; const struct vfio_device_ops *ops; + /* + * mig_ops is a static property of the vfio_device which must be set + * prior to registering the vfio_device. + */ + const struct vfio_migration_ops *mig_ops; struct vfio_group *group; struct vfio_device_set *dev_set; struct list_head dev_set_list; @@ -44,6 +49,7 @@ struct vfio_device { unsigned int open_count; struct completion comp; struct list_head group_next; + struct list_head iommu_entry; }; /** @@ -60,17 +66,9 @@ struct vfio_device { * @match: Optional device name match callback (return: 0 for no-match, >0 for * match, -errno for abort (ex. match with insufficient or incorrect * additional args) + * @dma_unmap: Called when userspace unmaps IOVA from the container + * this device is attached to. * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl - * @migration_set_state: Optional callback to change the migration state for - * devices that support migration. It's mandatory for - * VFIO_DEVICE_FEATURE_MIGRATION migration support. - * The returned FD is used for data transfer according to the FSM - * definition. The driver is responsible to ensure that FD reaches end - * of stream or error whenever the migration FSM leaves a data transfer - * state or before close_device() returns. 
- * @migration_get_state: Optional callback to get the migration state for - * devices that support migration. It's mandatory for - * VFIO_DEVICE_FEATURE_MIGRATION migration support. */ struct vfio_device_ops { char *name; @@ -85,8 +83,24 @@ struct vfio_device_ops { int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma); void (*request)(struct vfio_device *vdev, unsigned int count); int (*match)(struct vfio_device *vdev, char *buf); + void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length); int (*device_feature)(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz); +}; + +/** + * @migration_set_state: Optional callback to change the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. + * The returned FD is used for data transfer according to the FSM + * definition. The driver is responsible to ensure that FD reaches end + * of stream or error whenever the migration FSM leaves a data transfer + * state or before close_device() returns. + * @migration_get_state: Optional callback to get the migration state for + * devices that support migration. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. + */ +struct vfio_migration_ops { struct file *(*migration_set_state)( struct vfio_device *device, enum vfio_device_mig_state new_state); @@ -140,36 +154,18 @@ int vfio_mig_get_next_state(struct vfio_device *device, /* * External user API */ -extern struct iommu_group *vfio_file_iommu_group(struct file *file); -extern bool vfio_file_enforced_coherent(struct file *file); -extern void vfio_file_set_kvm(struct file *file, struct kvm *kvm); -extern bool vfio_file_has_dev(struct file *file, struct vfio_device *device); +struct iommu_group *vfio_file_iommu_group(struct file *file); +bool vfio_file_enforced_coherent(struct file *file); +void vfio_file_set_kvm(struct file *file, struct kvm *kvm); +bool vfio_file_has_dev(struct file *file, struct vfio_device *device); #define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long)) -extern int vfio_pin_pages(struct vfio_device *device, unsigned long *user_pfn, - int npage, int prot, unsigned long *phys_pfn); -extern int vfio_unpin_pages(struct vfio_device *device, unsigned long *user_pfn, - int npage); -extern int vfio_dma_rw(struct vfio_device *device, dma_addr_t user_iova, - void *data, size_t len, bool write); - -/* each type has independent events */ -enum vfio_notify_type { - VFIO_IOMMU_NOTIFY = 0, -}; - -/* events for VFIO_IOMMU_NOTIFY */ -#define VFIO_IOMMU_NOTIFY_DMA_UNMAP BIT(0) - -extern int vfio_register_notifier(struct vfio_device *device, - enum vfio_notify_type type, - unsigned long *required_events, - struct notifier_block *nb); -extern int vfio_unregister_notifier(struct vfio_device *device, - enum vfio_notify_type type, - struct notifier_block *nb); - +int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, + int npage, int prot, struct page **pages); +void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage); +int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, + void *data, size_t len, bool write); /* * Sub-module helpers @@ -178,25 +174,24 @@ struct vfio_info_cap { struct vfio_info_cap_header *buf; size_t size; }; -extern struct vfio_info_cap_header *vfio_info_cap_add( - struct vfio_info_cap *caps, size_t size, u16 id, u16 version); -extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); +struct vfio_info_cap_header *vfio_info_cap_add(struct 
vfio_info_cap *caps, + size_t size, u16 id, + u16 version); +void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); -extern int vfio_info_add_capability(struct vfio_info_cap *caps, - struct vfio_info_cap_header *cap, - size_t size); +int vfio_info_add_capability(struct vfio_info_cap *caps, + struct vfio_info_cap_header *cap, size_t size); -extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, - int num_irqs, int max_irq_type, - size_t *data_size); +int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, + int num_irqs, int max_irq_type, + size_t *data_size); struct pci_dev; #if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH) -extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); -extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev); -extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, - unsigned int cmd, - unsigned long arg); +void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); +void vfio_spapr_pci_eeh_release(struct pci_dev *pdev); +long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, unsigned int cmd, + unsigned long arg); #else static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev) { @@ -230,10 +225,9 @@ struct virqfd { struct virqfd **pvirqfd; }; -extern int vfio_virqfd_enable(void *opaque, - int (*handler)(void *, void *), - void (*thread)(void *, void *), - void *data, struct virqfd **pvirqfd, int fd); -extern void vfio_virqfd_disable(struct virqfd **pvirqfd); +int vfio_virqfd_enable(void *opaque, int (*handler)(void *, void *), + void (*thread)(void *, void *), void *data, + struct virqfd **pvirqfd, int fd); +void vfio_virqfd_disable(struct virqfd **pvirqfd); #endif /* VFIO_H */ diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index 23c176d4b073..5579ece4347b 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -147,23 +147,23 @@ struct vfio_pci_core_device { #define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev))) #define irq_is(vdev, type) (vdev->irq_type == type) -extern void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev); -extern void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev); +void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev); +void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev); -extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, - uint32_t flags, unsigned index, - unsigned start, unsigned count, void *data); +int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, + uint32_t flags, unsigned index, + unsigned start, unsigned count, void *data); -extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, - char __user *buf, size_t count, - loff_t *ppos, bool iswrite); +ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite); -extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, - size_t count, loff_t *ppos, bool iswrite); +ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); #ifdef CONFIG_VFIO_PCI_VGA -extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, - size_t count, loff_t *ppos, bool iswrite); +ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); #else static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, size_t count, @@ -173,32 +173,31 @@ static inline 
ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, } #endif -extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, - uint64_t data, int count, int fd); +long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, + uint64_t data, int count, int fd); -extern int vfio_pci_init_perm_bits(void); -extern void vfio_pci_uninit_perm_bits(void); +int vfio_pci_init_perm_bits(void); +void vfio_pci_uninit_perm_bits(void); -extern int vfio_config_init(struct vfio_pci_core_device *vdev); -extern void vfio_config_free(struct vfio_pci_core_device *vdev); +int vfio_config_init(struct vfio_pci_core_device *vdev); +void vfio_config_free(struct vfio_pci_core_device *vdev); -extern int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev, - unsigned int type, unsigned int subtype, - const struct vfio_pci_regops *ops, - size_t size, u32 flags, void *data); +int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data); -extern int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, - pci_power_t state); +int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, + pci_power_t state); -extern bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); -extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device - *vdev); -extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev); -extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, - u16 cmd); +bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); +void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device *vdev); +u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev); +void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, + u16 cmd); #ifdef CONFIG_VFIO_PCI_IGD -extern int vfio_pci_igd_init(struct vfio_pci_core_device *vdev); +int vfio_pci_igd_init(struct vfio_pci_core_device *vdev); #else static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev) { @@ -206,15 +205,25 @@ static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev) } #endif -#ifdef CONFIG_S390 -extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, - struct vfio_info_cap *caps); +#ifdef CONFIG_VFIO_PCI_ZDEV_KVM +int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, + struct vfio_info_cap *caps); +int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev); +void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev); #else static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, struct vfio_info_cap *caps) { return -ENODEV; } + +static inline int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev) +{ + return 0; +} + +static inline void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev) +{} #endif /* Will be exported for vfio pci drivers usage */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index d8fdf170637c..a3f73bb6733e 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -19,6 +19,8 @@ * @priv: a pointer for the virtqueue implementation to use. * @index: the zero-based ordinal number for this queue. * @num_free: number of elements we expect to be able to fit. + * @num_max: the maximum number of elements supported by the device. + * @reset: vq is in reset state or not. 
* * A note on @num_free: with indirect buffers, each buffer needs one * element in the queue, otherwise a buffer will need one element per @@ -31,7 +33,9 @@ struct virtqueue { struct virtio_device *vdev; unsigned int index; unsigned int num_free; + unsigned int num_max; void *priv; + bool reset; }; int virtqueue_add_outbuf(struct virtqueue *vq, @@ -89,6 +93,9 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *vq); dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq); dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq); +int virtqueue_resize(struct virtqueue *vq, u32 num, + void (*recycle)(struct virtqueue *vq, void *buf)); + /** * virtio_device - representation of a device using virtio * @index: unique position on the virtio bus @@ -133,6 +140,9 @@ bool is_virtio_device(struct device *dev); void virtio_break_device(struct virtio_device *dev); void __virtio_unbreak_device(struct virtio_device *dev); +void __virtqueue_break(struct virtqueue *_vq); +void __virtqueue_unbreak(struct virtqueue *_vq); + void virtio_config_changed(struct virtio_device *dev); #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); diff --git a/include/linux/virtio_anchor.h b/include/linux/virtio_anchor.h new file mode 100644 index 000000000000..432e6c00b3ca --- /dev/null +++ b/include/linux/virtio_anchor.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VIRTIO_ANCHOR_H +#define _LINUX_VIRTIO_ANCHOR_H + +#ifdef CONFIG_VIRTIO_ANCHOR +struct virtio_device; + +bool virtio_require_restricted_mem_acc(struct virtio_device *dev); +extern bool (*virtio_check_mem_acc_cb)(struct virtio_device *dev); + +static inline void virtio_set_mem_acc_cb(bool (*func)(struct virtio_device *)) +{ + virtio_check_mem_acc_cb = func; +} +#else +#define virtio_set_mem_acc_cb(func) do { } while (0) +#endif + +#endif /* _LINUX_VIRTIO_ANCHOR_H */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index b47c2e7ed0ee..6adff09f7170 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -55,6 +55,7 @@ struct virtio_shm_region { * include a NULL entry for vqs that do not need a callback * names: array of virtqueue names (mainly for debugging) * include a NULL entry for vqs unused by driver + * sizes: array of virtqueue sizes * Returns 0 on success or error status * @del_vqs: free virtqueues found by find_vqs(). * @synchronize_cbs: synchronize with the virtqueue callbacks (optional) @@ -78,6 +79,18 @@ struct virtio_shm_region { * @set_vq_affinity: set the affinity for a virtqueue (optional). * @get_vq_affinity: get the affinity for a virtqueue (optional). * @get_shm_region: get a shared memory region based on the index. + * @disable_vq_and_reset: reset a queue individually (optional). + * vq: the virtqueue + * Returns 0 on success or error status + * disable_vq_and_reset will guarantee that the callbacks are disabled and + * synchronized. + * Except for the callback, the caller should guarantee that the vring is + * not accessed by any functions of virtqueue. + * @enable_vq_after_reset: enable a reset queue + * vq: the virtqueue + * Returns 0 on success or error status + * If disable_vq_and_reset is set, then enable_vq_after_reset must also be + * set. 
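To make the new per-queue reset/resize machinery more concrete, here is a hypothetical sketch (not taken from any driver in this series) of a virtio driver resizing one of its queues with virtqueue_resize(); the recycle callback is expected to release each buffer still held by the old ring, and the resize path is expected to go through the transport's disable_vq_and_reset()/enable_vq_after_reset() pair:

#include <linux/slab.h>
#include <linux/virtio.h>

/* Hypothetical recycle callback: drop a buffer left over in the old ring. */
static void example_recycle_buf(struct virtqueue *vq, void *buf)
{
	kfree(buf);
}

static int example_resize_vq(struct virtqueue *vq, u32 new_num)
{
	/* Returns 0 on success or a negative errno from the transport. */
	return virtqueue_resize(vq, new_num, example_recycle_buf);
}
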
*/ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { @@ -91,7 +104,9 @@ struct virtio_config_ops { void (*reset)(struct virtio_device *vdev); int (*find_vqs)(struct virtio_device *, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char * const names[], const bool *ctx, + const char * const names[], + u32 sizes[], + const bool *ctx, struct irq_affinity *desc); void (*del_vqs)(struct virtio_device *); void (*synchronize_cbs)(struct virtio_device *); @@ -104,6 +119,8 @@ struct virtio_config_ops { int index); bool (*get_shm_region)(struct virtio_device *vdev, struct virtio_shm_region *region, u8 id); + int (*disable_vq_and_reset)(struct virtqueue *vq); + int (*enable_vq_after_reset)(struct virtqueue *vq); }; /* If driver didn't advertise the feature, it will never appear. */ @@ -198,7 +215,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, const char *names[] = { n }; struct virtqueue *vq; int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL, - NULL); + NULL, NULL); if (err < 0) return ERR_PTR(err); return vq; @@ -210,7 +227,8 @@ int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, const char * const names[], struct irq_affinity *desc) { - return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc); + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, + NULL, desc); } static inline @@ -219,8 +237,20 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs, const char * const names[], const bool *ctx, struct irq_affinity *desc) { - return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, - desc); + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, + ctx, desc); +} + +static inline +int virtio_find_vqs_ctx_size(struct virtio_device *vdev, u32 nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + u32 sizes[], + const bool *ctx, struct irq_affinity *desc) +{ + return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, sizes, + ctx, desc); } /** diff --git a/include/linux/virtio_pci_modern.h b/include/linux/virtio_pci_modern.h index eb2bd9b4077d..c4eeb79b0139 100644 --- a/include/linux/virtio_pci_modern.h +++ b/include/linux/virtio_pci_modern.h @@ -5,6 +5,13 @@ #include <linux/pci.h> #include <linux/virtio_pci.h> +struct virtio_pci_modern_common_cfg { + struct virtio_pci_common_cfg cfg; + + __le16 queue_notify_data; /* read-write */ + __le16 queue_reset; /* read-write */ +}; + struct virtio_pci_modern_device { struct pci_dev *pci_dev; @@ -106,4 +113,6 @@ void __iomem * vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev, u16 index, resource_size_t *pa); int vp_modern_probe(struct virtio_pci_modern_device *mdev); void vp_modern_remove(struct virtio_pci_modern_device *mdev); +int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index); +void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index); #endif diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index b485b13fa50b..8b8af1a38991 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -76,16 +76,6 @@ struct virtqueue *vring_create_virtqueue(unsigned int index, void (*callback)(struct virtqueue *vq), const char *name); -/* Creates a virtqueue with a custom layout. 
*/ -struct virtqueue *__vring_new_virtqueue(unsigned int index, - struct vring vring, - struct virtio_device *vdev, - bool weak_barriers, - bool ctx, - bool (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name); - /* * Creates a virtqueue with a standard layout but a caller-allocated * ring. diff --git a/include/linux/vme.h b/include/linux/vme.h deleted file mode 100644 index b204a9b4be1b..000000000000 --- a/include/linux/vme.h +++ /dev/null @@ -1,190 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _VME_H_ -#define _VME_H_ - -/* Resource Type */ -enum vme_resource_type { - VME_MASTER, - VME_SLAVE, - VME_DMA, - VME_LM -}; - -/* VME Address Spaces */ -#define VME_A16 0x1 -#define VME_A24 0x2 -#define VME_A32 0x4 -#define VME_A64 0x8 -#define VME_CRCSR 0x10 -#define VME_USER1 0x20 -#define VME_USER2 0x40 -#define VME_USER3 0x80 -#define VME_USER4 0x100 - -#define VME_A16_MAX 0x10000ULL -#define VME_A24_MAX 0x1000000ULL -#define VME_A32_MAX 0x100000000ULL -#define VME_A64_MAX 0x10000000000000000ULL -#define VME_CRCSR_MAX 0x1000000ULL - - -/* VME Cycle Types */ -#define VME_SCT 0x1 -#define VME_BLT 0x2 -#define VME_MBLT 0x4 -#define VME_2eVME 0x8 -#define VME_2eSST 0x10 -#define VME_2eSSTB 0x20 - -#define VME_2eSST160 0x100 -#define VME_2eSST267 0x200 -#define VME_2eSST320 0x400 - -#define VME_SUPER 0x1000 -#define VME_USER 0x2000 -#define VME_PROG 0x4000 -#define VME_DATA 0x8000 - -/* VME Data Widths */ -#define VME_D8 0x1 -#define VME_D16 0x2 -#define VME_D32 0x4 -#define VME_D64 0x8 - -/* Arbitration Scheduling Modes */ -#define VME_R_ROBIN_MODE 0x1 -#define VME_PRIORITY_MODE 0x2 - -#define VME_DMA_PATTERN (1<<0) -#define VME_DMA_PCI (1<<1) -#define VME_DMA_VME (1<<2) - -#define VME_DMA_PATTERN_BYTE (1<<0) -#define VME_DMA_PATTERN_WORD (1<<1) -#define VME_DMA_PATTERN_INCREMENT (1<<2) - -#define VME_DMA_VME_TO_MEM (1<<0) -#define VME_DMA_MEM_TO_VME (1<<1) -#define VME_DMA_VME_TO_VME (1<<2) -#define VME_DMA_MEM_TO_MEM (1<<3) -#define VME_DMA_PATTERN_TO_VME (1<<4) -#define VME_DMA_PATTERN_TO_MEM (1<<5) - -struct vme_dma_attr { - u32 type; - void *private; -}; - -struct vme_resource { - enum vme_resource_type type; - struct list_head *entry; -}; - -extern struct bus_type vme_bus_type; - -/* Number of VME interrupt vectors */ -#define VME_NUM_STATUSID 256 - -/* VME_MAX_BRIDGES comes from the type of vme_bus_numbers */ -#define VME_MAX_BRIDGES (sizeof(unsigned int)*8) -#define VME_MAX_SLOTS 32 - -#define VME_SLOT_CURRENT -1 -#define VME_SLOT_ALL -2 - -/** - * struct vme_dev - Structure representing a VME device - * @num: The device number - * @bridge: Pointer to the bridge device this device is on - * @dev: Internal device structure - * @drv_list: List of devices (per driver) - * @bridge_list: List of devices (per bridge) - */ -struct vme_dev { - int num; - struct vme_bridge *bridge; - struct device dev; - struct list_head drv_list; - struct list_head bridge_list; -}; - -/** - * struct vme_driver - Structure representing a VME driver - * @name: Driver name, should be unique among VME drivers and usually the same - * as the module name. - * @match: Callback used to determine whether probe should be run. - * @probe: Callback for device binding, called when new device is detected. - * @remove: Callback, called on device removal. - * @driver: Underlying generic device driver structure. - * @devices: List of VME devices (struct vme_dev) associated with this driver. 
- */ -struct vme_driver { - const char *name; - int (*match)(struct vme_dev *); - int (*probe)(struct vme_dev *); - void (*remove)(struct vme_dev *); - struct device_driver driver; - struct list_head devices; -}; - -void *vme_alloc_consistent(struct vme_resource *, size_t, dma_addr_t *); -void vme_free_consistent(struct vme_resource *, size_t, void *, - dma_addr_t); - -size_t vme_get_size(struct vme_resource *); -int vme_check_window(u32 aspace, unsigned long long vme_base, - unsigned long long size); - -struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32); -int vme_slave_set(struct vme_resource *, int, unsigned long long, - unsigned long long, dma_addr_t, u32, u32); -int vme_slave_get(struct vme_resource *, int *, unsigned long long *, - unsigned long long *, dma_addr_t *, u32 *, u32 *); -void vme_slave_free(struct vme_resource *); - -struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32); -int vme_master_set(struct vme_resource *, int, unsigned long long, - unsigned long long, u32, u32, u32); -int vme_master_get(struct vme_resource *, int *, unsigned long long *, - unsigned long long *, u32 *, u32 *, u32 *); -ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t); -ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t); -unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int, - unsigned int, loff_t); -int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma); -void vme_master_free(struct vme_resource *); - -struct vme_resource *vme_dma_request(struct vme_dev *, u32); -struct vme_dma_list *vme_new_dma_list(struct vme_resource *); -struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32); -struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t); -struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32); -void vme_dma_free_attribute(struct vme_dma_attr *); -int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *, - struct vme_dma_attr *, size_t); -int vme_dma_list_exec(struct vme_dma_list *); -int vme_dma_list_free(struct vme_dma_list *); -int vme_dma_free(struct vme_resource *); - -int vme_irq_request(struct vme_dev *, int, int, - void (*callback)(int, int, void *), void *); -void vme_irq_free(struct vme_dev *, int, int); -int vme_irq_generate(struct vme_dev *, int, int); - -struct vme_resource *vme_lm_request(struct vme_dev *); -int vme_lm_count(struct vme_resource *); -int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32); -int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *); -int vme_lm_attach(struct vme_resource *, int, void (*callback)(void *), void *); -int vme_lm_detach(struct vme_resource *, int); -void vme_lm_free(struct vme_resource *); - -int vme_slot_num(struct vme_dev *); -int vme_bus_num(struct vme_dev *); - -int vme_register_driver(struct vme_driver *, unsigned int); -void vme_unregister_driver(struct vme_driver *); - - -#endif /* _VME_H_ */ - diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h index 26d1eb058fa3..5e1b26f988e2 100644 --- a/include/linux/wkup_m3_ipc.h +++ b/include/linux/wkup_m3_ipc.h @@ -1,17 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI Wakeup M3 for AMx3 SoCs Power Management Routines * * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Dave Gerlach <d-gerlach@ti.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the 
Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef _LINUX_WKUP_M3_IPC_H diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 62e75dd40d9a..a0143dd24430 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -453,6 +453,7 @@ extern int schedule_on_each_cpu(work_func_t func); int execute_in_process_context(work_func_t fn, struct execute_work *); extern bool flush_work(struct work_struct *work); +extern bool cancel_work(struct work_struct *work); extern bool cancel_work_sync(struct work_struct *work); extern bool flush_delayed_work(struct delayed_work *dwork); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index c29e11b2c073..44dd6d6e01bc 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -16,6 +16,7 @@ #include <linux/kconfig.h> #include <linux/kernel.h> #include <linux/rcupdate.h> +#include <linux/sched/mm.h> #include <linux/spinlock.h> #include <linux/types.h> @@ -586,6 +587,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_bh(xa); curr = __xa_store(xa, index, entry, gfp); xa_unlock_bh(xa); @@ -612,6 +614,7 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_irq(xa); curr = __xa_store(xa, index, entry, gfp); xa_unlock_irq(xa); @@ -687,6 +690,7 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock(xa); curr = __xa_cmpxchg(xa, index, old, entry, gfp); xa_unlock(xa); @@ -714,6 +718,7 @@ static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_bh(xa); curr = __xa_cmpxchg(xa, index, old, entry, gfp); xa_unlock_bh(xa); @@ -741,6 +746,7 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, { void *curr; + might_alloc(gfp); xa_lock_irq(xa); curr = __xa_cmpxchg(xa, index, old, entry, gfp); xa_unlock_irq(xa); @@ -770,6 +776,7 @@ static inline int __must_check xa_insert(struct xarray *xa, { int err; + might_alloc(gfp); xa_lock(xa); err = __xa_insert(xa, index, entry, gfp); xa_unlock(xa); @@ -799,6 +806,7 @@ static inline int __must_check xa_insert_bh(struct xarray *xa, { int err; + might_alloc(gfp); xa_lock_bh(xa); err = __xa_insert(xa, index, entry, gfp); xa_unlock_bh(xa); @@ -828,6 +836,7 @@ static inline int __must_check xa_insert_irq(struct xarray *xa, { int err; + might_alloc(gfp); xa_lock_irq(xa); err = __xa_insert(xa, index, entry, gfp); xa_unlock_irq(xa); @@ -857,6 +866,7 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, { int err; + might_alloc(gfp); xa_lock(xa); err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock(xa); @@ -886,6 +896,7 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, { int err; + might_alloc(gfp); xa_lock_bh(xa); err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_bh(xa); @@ -915,6 +926,7 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, { int err; + might_alloc(gfp); xa_lock_irq(xa); err = __xa_alloc(xa, id, entry, limit, gfp); xa_unlock_irq(xa); @@ -948,6 +960,7 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, { int err; + might_alloc(gfp); xa_lock(xa); err = 
__xa_alloc_cyclic(xa, id, entry, limit, next, gfp); xa_unlock(xa); @@ -981,6 +994,7 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, { int err; + might_alloc(gfp); xa_lock_bh(xa); err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); xa_unlock_bh(xa); @@ -1014,6 +1028,7 @@ static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, { int err; + might_alloc(gfp); xa_lock_irq(xa); err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); xa_unlock_irq(xa); diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h deleted file mode 100644 index 01ccda48d8c5..000000000000 --- a/include/media/hevc-ctrls.h +++ /dev/null @@ -1,250 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * These are the HEVC state controls for use with stateless HEVC - * codec drivers. - * - * It turns out that these structs are not stable yet and will undergo - * more changes. So keep them private until they are stable and ready to - * become part of the official public API. - */ - -#ifndef _HEVC_CTRLS_H_ -#define _HEVC_CTRLS_H_ - -#include <linux/videodev2.h> - -/* The pixel format isn't stable at the moment and will likely be renamed. */ -#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */ - -#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_CODEC_BASE + 1008) -#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_CODEC_BASE + 1009) -#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_BASE + 1010) -#define V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_BASE + 1011) -#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_BASE + 1012) -#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_CODEC_BASE + 1015) -#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_CODEC_BASE + 1016) - -/* enum v4l2_ctrl_type type values */ -#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120 -#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121 -#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122 -#define V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX 0x0123 -#define V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS 0x0124 - -enum v4l2_mpeg_video_hevc_decode_mode { - V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED, - V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED, -}; - -enum v4l2_mpeg_video_hevc_start_code { - V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE, - V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B, -}; - -#define V4L2_HEVC_SLICE_TYPE_B 0 -#define V4L2_HEVC_SLICE_TYPE_P 1 -#define V4L2_HEVC_SLICE_TYPE_I 2 - -#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0) -#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1) -#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2) -#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3) -#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4) -#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5) -#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6) -#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7) -#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8) - -/* The controls are not stable at the moment and will likely be reworked. */ -struct v4l2_ctrl_hevc_sps { - /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Sequence parameter set */ - __u16 pic_width_in_luma_samples; - __u16 pic_height_in_luma_samples; - __u8 bit_depth_luma_minus8; - __u8 bit_depth_chroma_minus8; - __u8 log2_max_pic_order_cnt_lsb_minus4; - __u8 sps_max_dec_pic_buffering_minus1; - __u8 sps_max_num_reorder_pics; - __u8 sps_max_latency_increase_plus1; - __u8 log2_min_luma_coding_block_size_minus3; - __u8 log2_diff_max_min_luma_coding_block_size; - __u8 log2_min_luma_transform_block_size_minus2; - __u8 log2_diff_max_min_luma_transform_block_size; - __u8 max_transform_hierarchy_depth_inter; - __u8 max_transform_hierarchy_depth_intra; - __u8 pcm_sample_bit_depth_luma_minus1; - __u8 pcm_sample_bit_depth_chroma_minus1; - __u8 log2_min_pcm_luma_coding_block_size_minus3; - __u8 log2_diff_max_min_pcm_luma_coding_block_size; - __u8 num_short_term_ref_pic_sets; - __u8 num_long_term_ref_pics_sps; - __u8 chroma_format_idc; - __u8 sps_max_sub_layers_minus1; - - __u64 flags; -}; - -#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0) -#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1) -#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2) -#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3) -#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4) -#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5) -#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6) -#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7) -#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8) -#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9) -#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10) -#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11) -#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12) -#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13) -#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14) -#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15) -#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16) -#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17) -#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18) -#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19) -#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20) - -struct v4l2_ctrl_hevc_pps { - /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Picture parameter set */ - __u8 num_extra_slice_header_bits; - __u8 num_ref_idx_l0_default_active_minus1; - __u8 num_ref_idx_l1_default_active_minus1; - __s8 init_qp_minus26; - __u8 diff_cu_qp_delta_depth; - __s8 pps_cb_qp_offset; - __s8 pps_cr_qp_offset; - __u8 num_tile_columns_minus1; - __u8 num_tile_rows_minus1; - __u8 column_width_minus1[20]; - __u8 row_height_minus1[22]; - __s8 pps_beta_offset_div2; - __s8 pps_tc_offset_div2; - __u8 log2_parallel_merge_level_minus2; - - __u8 padding[4]; - __u64 flags; -}; - -#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01 - -#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16 - -struct v4l2_hevc_dpb_entry { - __u64 timestamp; - __u8 flags; - __u8 field_pic; - __u16 pic_order_cnt[2]; - __u8 padding[2]; -}; - -struct v4l2_hevc_pred_weight_table { - __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; - __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; - - __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; - __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; - - __u8 padding[6]; - - __u8 luma_log2_weight_denom; - __s8 delta_chroma_log2_weight_denom; -}; - -#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8) -#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9) - -struct v4l2_ctrl_hevc_slice_params { - __u32 bit_size; - __u32 data_bit_offset; - - /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ - __u8 nal_unit_type; - __u8 nuh_temporal_id_plus1; - - /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ - __u8 slice_type; - __u8 colour_plane_id; - __u16 slice_pic_order_cnt; - __u8 num_ref_idx_l0_active_minus1; - __u8 num_ref_idx_l1_active_minus1; - __u8 collocated_ref_idx; - __u8 five_minus_max_num_merge_cand; - __s8 slice_qp_delta; - __s8 slice_cb_qp_offset; - __s8 slice_cr_qp_offset; - __s8 slice_act_y_qp_offset; - __s8 slice_act_cb_qp_offset; - __s8 slice_act_cr_qp_offset; - __s8 slice_beta_offset_div2; - __s8 slice_tc_offset_div2; - - /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */ - __u8 pic_struct; - - /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ - __u32 slice_segment_addr; - __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - - __u8 padding; - - /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Weighted prediction parameter */ - struct v4l2_hevc_pred_weight_table pred_weight_table; - - __u64 flags; -}; - -#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1 -#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2 -#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4 - -struct v4l2_ctrl_hevc_decode_params { - __s32 pic_order_cnt_val; - __u8 num_active_dpb_entries; - struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __u8 num_poc_st_curr_before; - __u8 num_poc_st_curr_after; - __u8 num_poc_lt_curr; - __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; - __u64 flags; -}; - -struct v4l2_ctrl_hevc_scaling_matrix { - __u8 scaling_list_4x4[6][16]; - __u8 scaling_list_8x8[6][64]; - __u8 scaling_list_16x16[6][64]; - __u8 scaling_list_32x32[2][64]; - __u8 scaling_list_dc_coef_16x16[6]; - __u8 scaling_list_dc_coef_32x32[2]; -}; - -/* MPEG-class control IDs specific to the Hantro driver as defined by V4L2 */ -#define V4L2_CID_CODEC_HANTRO_BASE (V4L2_CTRL_CLASS_CODEC | 0x1200) -/* - * V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP - - * the number of data (in bits) to skip in the - * slice segment header. - * If non-IDR, the bits to be skipped go from syntax element "pic_output_flag" - * to before syntax element "slice_temporal_mvp_enabled_flag". - * If IDR, the skipped bits are just "pic_output_flag" - * (separate_colour_plane_flag is not supported). - */ -#define V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP (V4L2_CID_CODEC_HANTRO_BASE + 0) - -#endif diff --git a/include/media/i2c/adv7343.h b/include/media/i2c/adv7343.h index b8937035c5d3..d35d3e925795 100644 --- a/include/media/i2c/adv7343.h +++ b/include/media/i2c/adv7343.h @@ -1,16 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * ADV7343 header file * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed .as is. WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #ifndef ADV7343_H diff --git a/include/media/i2c/adv7393.h b/include/media/i2c/adv7393.h index b28edf351842..c73b36321d06 100644 --- a/include/media/i2c/adv7393.h +++ b/include/media/i2c/adv7393.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * ADV7393 header file * @@ -7,15 +8,6 @@ * Based on ADV7343 driver, * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed .as is. WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
*/ #ifndef ADV7393_H diff --git a/include/media/i2c/ov2659.h b/include/media/i2c/ov2659.h index 4216adc1ede2..c9ea318a8fc3 100644 --- a/include/media/i2c/ov2659.h +++ b/include/media/i2c/ov2659.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ /* * Omnivision OV2659 CMOS Image Sensor driver * @@ -5,19 +6,6 @@ * * Benoit Parrot <bparrot@ti.com> * Lad, Prabhakar <prabhakar.csengg@gmail.com> - * - * This program is free software; you may redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. */ #ifndef OV2659_H diff --git a/include/media/media-entity.h b/include/media/media-entity.h index a9a1c0ec5d1c..f16ffe70f7a6 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -848,7 +848,7 @@ struct media_link *media_entity_find_link(struct media_pad *source, struct media_pad *sink); /** - * media_entity_remote_pad - Find the pad at the remote end of a link + * media_pad_remote_pad_first - Find the first pad at the remote end of a link * @pad: Pad at the local end of the link * * Search for a remote pad connected to the given pad by iterating over all @@ -857,7 +857,71 @@ struct media_link *media_entity_find_link(struct media_pad *source, * Return: returns a pointer to the pad at the remote end of the first found * enabled link, or %NULL if no enabled link has been found. */ -struct media_pad *media_entity_remote_pad(const struct media_pad *pad); +struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad); + +/** + * media_pad_remote_pad_unique - Find a remote pad connected to a pad + * @pad: The pad + * + * Search for and return a remote pad connected to @pad through an enabled + * link. If multiple (or no) remote pads are found, an error is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time on a given pad. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad); + +/** + * media_entity_remote_pad_unique - Find a remote pad connected to an entity + * @entity: The entity + * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE) + * + * Search for and return a remote pad of @type connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source or sink at a time. 
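A hypothetical consumer of the new single-link helper documented above (the example_* name is a placeholder) would handle the returned error pointers as follows:

#include <linux/err.h>
#include <media/media-entity.h>

static int example_get_remote_source(struct media_pad *sink_pad)
{
	struct media_pad *remote = media_pad_remote_pad_unique(sink_pad);

	if (IS_ERR(remote))
		return PTR_ERR(remote);	/* -ENOTUNIQ or -ENOLINK */

	/* ... use remote->entity and remote->index ... */
	return 0;
}
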
+ * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +struct media_pad * +media_entity_remote_pad_unique(const struct media_entity *entity, + unsigned int type); + +/** + * media_entity_remote_source_pad_unique - Find a remote source pad connected to + * an entity + * @entity: The entity + * + * Search for and return a remote source pad connected to @entity through an + * enabled link. If multiple (or no) remote pads match these criteria, an error + * is returned. + * + * The uniqueness constraint makes this helper function suitable for entities + * that support a single active source at a time. + * + * Return: A pointer to the remote pad, or one of the following error pointers + * if an error occurs: + * + * * -ENOTUNIQ - Multiple links are enabled + * * -ENOLINK - No connected pad found + */ +static inline struct media_pad * +media_entity_remote_source_pad_unique(const struct media_entity *entity) +{ + return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE); +} /** * media_entity_is_streaming - Test if an entity is part of a streaming pipeline @@ -1140,4 +1204,34 @@ struct media_link * media_create_ancillary_link(struct media_entity *primary, struct media_entity *ancillary); +/** + * __media_entity_next_link() - Iterate through a &media_entity's links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags + * + * Return the next link against an entity matching a specific link type. This + * allows iteration through an entity's links whilst guaranteeing all of the + * returned links are of the given type. + */ +struct media_link *__media_entity_next_link(struct media_entity *entity, + struct media_link *link, + unsigned long link_type); + +/** + * for_each_media_entity_data_link() - Iterate through an entity's data links + * + * @entity: pointer to the &media_entity + * @link: pointer to a &media_link to hold the iterated values + * + * Iterate over a &media_entity's data links + */ +#define for_each_media_entity_data_link(entity, link) \ + for (link = __media_entity_next_link(entity, NULL, \ + MEDIA_LNK_FL_DATA_LINK); \ + link; \ + link = __media_entity_next_link(entity, link, \ + MEDIA_LNK_FL_DATA_LINK)) + #endif diff --git a/include/media/tpg/v4l2-tpg.h b/include/media/tpg/v4l2-tpg.h index 181dcbe777f3..a55088921d1d 100644 --- a/include/media/tpg/v4l2-tpg.h +++ b/include/media/tpg/v4l2-tpg.h @@ -210,6 +210,7 @@ struct tpg_data { bool show_square; bool insert_sav; bool insert_eav; + bool insert_hdmi_video_guard_band; /* Test pattern movement */ enum tpg_move_mode mv_hor_mode; @@ -591,6 +592,21 @@ static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav) tpg->insert_eav = insert_eav; } +/* + * This inserts 4 pixels of the RGB color 0xab55ab at the left hand side of the + * image. This is only done for 3 or 4 byte RGB pixel formats. This pixel value + * equals the Video Guard Band value as defined by HDMI (see section 5.2.2.1 + * in the HDMI 1.3 Specification) that preceeds the first actual pixel. If the + * HDMI receiver doesn't handle this correctly, then it might keep skipping + * these Video Guard Band patterns and end up with a shorter video line. So this + * is a nice pattern to test with. 
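For reference, a hypothetical test driver that embeds a struct tpg_data (dev->tpg is a placeholder name) would enable this pattern with a single call to the helper defined below:

	/* Insert the 4-pixel HDMI video guard band at the left-hand side of the image. */
	tpg_s_insert_hdmi_video_guard_band(&dev->tpg, true);
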
+ */ +static inline void tpg_s_insert_hdmi_video_guard_band(struct tpg_data *tpg, + bool insert_hdmi_video_guard_band) +{ + tpg->insert_hdmi_video_guard_band = insert_hdmi_video_guard_band; +} + void tpg_update_mv_step(struct tpg_data *tpg); static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg, diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h index 13ff3ad948f4..25eb1d138c06 100644 --- a/include/media/v4l2-async.h +++ b/include/media/v4l2-async.h @@ -81,6 +81,7 @@ struct v4l2_async_subdev { * @complete: All subdevices have been probed successfully. The complete * callback is only executed for the root notifier. * @unbind: a subdevice is leaving + * @destroy: the asd is about to be freed */ struct v4l2_async_notifier_operations { int (*bound)(struct v4l2_async_notifier *notifier, @@ -90,6 +91,7 @@ struct v4l2_async_notifier_operations { void (*unbind)(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd); + void (*destroy)(struct v4l2_async_subdev *asd); }; /** diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h index 3eb202259e8c..b708d63995f4 100644 --- a/include/media/v4l2-common.h +++ b/include/media/v4l2-common.h @@ -563,19 +563,19 @@ static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf, static inline bool v4l2_is_colorspace_valid(__u32 colorspace) { return colorspace > V4L2_COLORSPACE_DEFAULT && - colorspace <= V4L2_COLORSPACE_DCI_P3; + colorspace < V4L2_COLORSPACE_LAST; } static inline bool v4l2_is_xfer_func_valid(__u32 xfer_func) { return xfer_func > V4L2_XFER_FUNC_DEFAULT && - xfer_func <= V4L2_XFER_FUNC_SMPTE2084; + xfer_func < V4L2_XFER_FUNC_LAST; } static inline bool v4l2_is_ycbcr_enc_valid(__u8 ycbcr_enc) { return ycbcr_enc > V4L2_YCBCR_ENC_DEFAULT && - ycbcr_enc <= V4L2_YCBCR_ENC_SMPTE240M; + ycbcr_enc < V4L2_YCBCR_ENC_LAST; } static inline bool v4l2_is_hsv_enc_valid(__u8 hsv_enc) diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index b3ce438f1329..00828a4f9404 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -13,12 +13,6 @@ #include <linux/videodev2.h> #include <media/media-request.h> -/* - * Include the stateless codec compound control definitions. - * This will move to the public headers once this API is fully stable. - */ -#include <media/hevc-ctrls.h> - /* forward references */ struct file; struct poll_table_struct; @@ -185,6 +179,8 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * and/or has type %V4L2_CTRL_TYPE_STRING. In other words, &struct * v4l2_ext_control uses field p to point to the data. * @is_array: If set, then this control contains an N-dimensional array. + * @is_dyn_array: If set, then this control contains a dynamically sized 1-dimensional array. + * If this is set, then @is_array is also set. * @has_volatiles: If set, then one or more members of the cluster are volatile. * Drivers should never touch this flag. * @call_notify: If set, then call the handler's notify function whenever the @@ -205,6 +201,9 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * @step: The control's step value for non-menu controls. * @elems: The number of elements in the N-dimensional array. * @elem_size: The size in bytes of the control. + * @new_elems: The number of elements in p_new. This is the same as @elems, + * except for dynamic arrays. In that case it is in the range of + * 1 to @p_dyn_alloc_elems. * @dims: The size of each dimension. 
* @nr_of_dims:The number of dimensions in @dims. * @menu_skip_mask: The control's skip mask for menu controls. This makes it @@ -223,15 +222,21 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv); * :math:`ceil(\frac{maximum - minimum}{step}) + 1`. * Used only if the @type is %V4L2_CTRL_TYPE_INTEGER_MENU. * @flags: The control's flags. - * @cur: Structure to store the current value. - * @cur.val: The control's current value, if the @type is represented via - * a u32 integer (see &enum v4l2_ctrl_type). - * @val: The control's new s32 value. * @priv: The control's private pointer. For use by the driver. It is * untouched by the control framework. Note that this pointer is * not freed when the control is deleted. Should this be needed * then a new internal bitfield can be added to tell the framework * to free this pointer. + * @p_dyn: Pointer to the dynamically allocated array. Only valid if + * @is_dyn_array is true. + * @p_dyn_alloc_elems: The number of elements in the dynamically allocated + * array for both the cur and new values. So @p_dyn is actually + * sized for 2 * @p_dyn_alloc_elems * @elem_size. Only valid if + * @is_dyn_array is true. + * @cur: Structure to store the current value. + * @cur.val: The control's current value, if the @type is represented via + * a u32 integer (see &enum v4l2_ctrl_type). + * @val: The control's new s32 value. * @p_def: The control's default value represented via a union which * provides a standard way of accessing control types * through a pointer (for compound controls only). @@ -260,6 +265,7 @@ struct v4l2_ctrl { unsigned int is_string:1; unsigned int is_ptr:1; unsigned int is_array:1; + unsigned int is_dyn_array:1; unsigned int has_volatiles:1; unsigned int call_notify:1; unsigned int manual_mode_value:8; @@ -272,6 +278,7 @@ struct v4l2_ctrl { s64 minimum, maximum, default_value; u32 elems; u32 elem_size; + u32 new_elems; u32 dims[V4L2_CTRL_MAX_DIMS]; u32 nr_of_dims; union { @@ -284,6 +291,8 @@ struct v4l2_ctrl { }; unsigned long flags; void *priv; + void *p_dyn; + u32 p_dyn_alloc_elems; s32 val; struct { s32 val; @@ -309,12 +318,22 @@ struct v4l2_ctrl { * the control has been applied. This prevents applying controls * from a cluster with multiple controls twice (when the first * control of a cluster is applied, they all are). - * @valid_p_req: If set, then p_req contains the control value for the request. + * @p_req_valid: If set, then p_req contains the control value for the request. + * @p_req_dyn_enomem: If set, then p_req is invalid since allocating space for + * a dynamic array failed. Attempting to read this value shall + * result in ENOMEM. Only valid if ctrl->is_dyn_array is true. + * @p_req_dyn_alloc_elems: The number of elements allocated for the dynamic + * array. Only valid if @p_req_valid and ctrl->is_dyn_array are + * true. + * @p_req_elems: The number of elements in @p_req. This is the same as + * ctrl->elems, except for dynamic arrays. In that case it is in + * the range of 1 to @p_req_dyn_alloc_elems. Only valid if + * @p_req_valid is true. * @p_req: If the control handler containing this control reference * is bound to a media request, then this points to the * value of the control that must be applied when the request * is executed, or to the value of the control at the time - * that the request was completed. If @valid_p_req is false, + * that the request was completed. 
If @p_req_valid is false, * then this control was never set for this request and the * control will not be updated when this request is applied. * @@ -329,7 +348,10 @@ struct v4l2_ctrl_ref { struct v4l2_ctrl_helper *helper; bool from_other_dev; bool req_done; - bool valid_p_req; + bool p_req_valid; + bool p_req_dyn_enomem; + u32 p_req_dyn_alloc_elems; + u32 p_req_elems; union v4l2_ctrl_ptr p_req; }; diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index b661e1817470..9689f38a0af1 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -1434,6 +1434,40 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers; }) /** + * v4l2_subdev_call_state_try - call an operation of a v4l2_subdev which + * takes state as a parameter, passing the + * subdev a newly allocated try state. + * + * @sd: pointer to the &struct v4l2_subdev + * @o: name of the element at &struct v4l2_subdev_ops that contains @f. + * Each element there groups a set of callbacks functions. + * @f: callback function to be called. + * The callback functions are defined in groups, according to + * each element at &struct v4l2_subdev_ops. + * @args: arguments for @f. + * + * This is similar to v4l2_subdev_call_state_active(), except that as this + * version allocates a new state, this is only usable for + * V4L2_SUBDEV_FORMAT_TRY use cases. + * + * Note: only legacy non-MC drivers may need this macro. + */ +#define v4l2_subdev_call_state_try(sd, o, f, args...) \ + ({ \ + int __result; \ + static struct lock_class_key __key; \ + const char *name = KBUILD_BASENAME \ + ":" __stringify(__LINE__) ":state->lock"; \ + struct v4l2_subdev_state *state = \ + __v4l2_subdev_state_alloc(sd, name, &__key); \ + v4l2_subdev_lock_state(state); \ + __result = v4l2_subdev_call(sd, o, f, state, ##args); \ + v4l2_subdev_unlock_state(state); \ + __v4l2_subdev_state_free(state); \ + __result; \ + }) + +/** * v4l2_subdev_has_op - Checks if a subdev defines a certain operation. * * @sd: pointer to the &struct v4l2_subdev diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h index d818d9707695..76e405c0b003 100644 --- a/include/media/videobuf2-v4l2.h +++ b/include/media/videobuf2-v4l2.h @@ -78,6 +78,16 @@ struct vb2_v4l2_buffer { int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp, unsigned int start_idx); +/** + * vb2_find_buffer() - Find a buffer with given timestamp + * + * @q: pointer to &struct vb2_queue with videobuf2 queue. + * @timestamp: the timestamp to find. + * + * Returns the buffer with the given @timestamp, or NULL if not found. 
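vb2_find_buffer() replaces the earlier two-step of vb2_find_timestamp() followed by vb2_get_buffer() with a single lookup. A rough sketch of how a stateless decoder might resolve a reference frame by its timestamp cookie (function and variable names are hypothetical):

	static struct vb2_buffer *get_ref_buf(struct vb2_queue *cap_q, u64 ref_ts)
	{
		/* The timestamp acts as an opaque cookie identifying the frame. */
		struct vb2_buffer *ref = vb2_find_buffer(cap_q, ref_ts);

		if (!ref)
			pr_debug("reference %llu no longer queued\n",
				 (unsigned long long)ref_ts);
		return ref;
	}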
+ */ +struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp); + int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); /** diff --git a/include/net/9p/client.h b/include/net/9p/client.h index ec1d1706f43c..78ebcf782ce5 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h @@ -11,6 +11,7 @@ #include <linux/utsname.h> #include <linux/idr.h> +#include <linux/tracepoint-defs.h> /* Number of requests per row */ #define P9_ROW_MAXTAG 255 @@ -76,7 +77,7 @@ enum p9_req_status_t { struct p9_req_t { int status; int t_err; - struct kref refcount; + refcount_t refcount; wait_queue_head_t wq; struct p9_fcall tc; struct p9_fcall rc; @@ -227,15 +228,55 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag); static inline void p9_req_get(struct p9_req_t *r) { - kref_get(&r->refcount); + refcount_inc(&r->refcount); } static inline int p9_req_try_get(struct p9_req_t *r) { - return kref_get_unless_zero(&r->refcount); + return refcount_inc_not_zero(&r->refcount); } -int p9_req_put(struct p9_req_t *r); +int p9_req_put(struct p9_client *c, struct p9_req_t *r); + +/* We cannot have the real tracepoints in header files, + * use a wrapper function */ +DECLARE_TRACEPOINT(9p_fid_ref); +void do_trace_9p_fid_get(struct p9_fid *fid); +void do_trace_9p_fid_put(struct p9_fid *fid); + +/* fid reference counting helpers: + * - fids used for any length of time should always be referenced through + * p9_fid_get(), and released with p9_fid_put() + * - v9fs_fid_lookup() or similar will automatically call get for you + * and also require a put + * - the *_fid_add() helpers will stash the fid in the inode, + * at which point it is the responsibility of evict_inode() + * to call the put + * - the last put will automatically send a clunk to the server + */ +static inline struct p9_fid *p9_fid_get(struct p9_fid *fid) +{ + if (tracepoint_enabled(9p_fid_ref)) + do_trace_9p_fid_get(fid); + + refcount_inc(&fid->count); + + return fid; +} + +static inline int p9_fid_put(struct p9_fid *fid) +{ + if (!fid || IS_ERR(fid)) + return 0; + + if (tracepoint_enabled(9p_fid_ref)) + do_trace_9p_fid_put(fid); + + if (!refcount_dec_and_test(&fid->count)) + return 0; + + return p9_client_clunk(fid); +} void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index a7ef624ed726..480fa579787e 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -16,12 +16,11 @@ void wait_for_unix_gc(void); struct sock *unix_get_socket(struct file *filp); struct sock *unix_peer_get(struct sock *sk); -#define UNIX_HASH_SIZE 256 +#define UNIX_HASH_MOD (256 - 1) +#define UNIX_HASH_SIZE (256 * 2) #define UNIX_HASH_BITS 8 extern unsigned int unix_tot_inflight; -extern spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE]; -extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE]; struct unix_address { refcount_t refcnt; diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index f742e50207fb..1c53c4c4d88f 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/workqueue.h> +#include <net/sock.h> #include <uapi/linux/vm_sockets.h> #include "vsock_addr.h" diff --git a/include/net/amt.h b/include/net/amt.h index 08fc30cf2f34..c881bc8b673b 100644 --- a/include/net/amt.h +++ b/include/net/amt.h @@ -7,6 +7,9 @@ #include <linux/siphash.h> #include <linux/jhash.h> +#include <linux/netdevice.h> +#include <net/gro_cells.h> +#include <net/rtnetlink.h> enum amt_msg_type { 
AMT_MSG_DISCOVERY = 1, diff --git a/include/net/ax25.h b/include/net/ax25.h index a427a05672e2..f8cf3629a419 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -236,6 +236,7 @@ typedef struct ax25_cb { ax25_address source_addr, dest_addr; ax25_digi *digipeat; ax25_dev *ax25_dev; + netdevice_tracker dev_tracker; unsigned char iamdigi; unsigned char state, modulus, pidincl; unsigned short vs, vr, va; diff --git a/include/net/ax88796.h b/include/net/ax88796.h index 2ed23a368602..303100f08ab8 100644 --- a/include/net/ax88796.h +++ b/include/net/ax88796.h @@ -8,6 +8,8 @@ #ifndef __NET_AX88796_PLAT_H #define __NET_AX88796_PLAT_H +#include <linux/types.h> + struct sk_buff; struct net_device; struct platform_device; @@ -32,8 +34,8 @@ struct ax_plat_data { const unsigned char *buf, int star_page); void (*block_input)(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); - /* returns nonzero if a pending interrupt request might by caused by - * the ax88786. Handles all interrupts if set to NULL + /* returns nonzero if a pending interrupt request might be caused by + * the ax88796. Handles all interrupts if set to NULL */ int (*check_irq)(struct platform_device *pdev); }; diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 6b48d9e2aab9..e72f3b247b5e 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -55,6 +55,8 @@ #define BTPROTO_CMTP 5 #define BTPROTO_HIDP 6 #define BTPROTO_AVDTP 7 +#define BTPROTO_ISO 8 +#define BTPROTO_LAST BTPROTO_ISO #define SOL_HCI 0 #define SOL_L2CAP 6 @@ -149,10 +151,51 @@ struct bt_voice { #define BT_MODE_LE_FLOWCTL 0x03 #define BT_MODE_EXT_FLOWCTL 0x04 -#define BT_PKT_STATUS 16 +#define BT_PKT_STATUS 16 #define BT_SCM_PKT_STATUS 0x03 +#define BT_ISO_QOS 17 + +#define BT_ISO_QOS_CIG_UNSET 0xff +#define BT_ISO_QOS_CIS_UNSET 0xff + +#define BT_ISO_QOS_BIG_UNSET 0xff +#define BT_ISO_QOS_BIS_UNSET 0xff + +struct bt_iso_io_qos { + __u32 interval; + __u16 latency; + __u16 sdu; + __u8 phy; + __u8 rtn; +}; + +struct bt_iso_qos { + union { + __u8 cig; + __u8 big; + }; + union { + __u8 cis; + __u8 bis; + }; + union { + __u8 sca; + __u8 sync_interval; + }; + __u8 packing; + __u8 framing; + struct bt_iso_io_qos in; + struct bt_iso_io_qos out; +}; + +#define BT_ISO_PHY_1M 0x01 +#define BT_ISO_PHY_2M 0x02 +#define BT_ISO_PHY_CODED 0x04 +#define BT_ISO_PHY_ANY (BT_ISO_PHY_1M | BT_ISO_PHY_2M | \ + BT_ISO_PHY_CODED) + #define BT_CODEC 19 struct bt_codec_caps { @@ -177,6 +220,8 @@ struct bt_codecs { #define BT_CODEC_TRANSPARENT 0x03 #define BT_CODEC_MSBC 0x05 +#define BT_ISO_BASE 20 + __printf(1, 2) void bt_info(const char *fmt, ...); __printf(1, 2) @@ -494,7 +539,7 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk, struct sk_buff *skb, **frag; skb = bt_skb_sendmsg(sk, msg, len, mtu, headroom, tailroom); - if (IS_ERR_OR_NULL(skb)) + if (IS_ERR(skb)) return skb; len -= skb->len; @@ -521,6 +566,7 @@ static inline struct sk_buff *bt_skb_sendmmsg(struct sock *sk, } int bt_to_errno(u16 code); +__u8 bt_status(int err); void hci_sock_set_flag(struct sock *sk, int nr); void hci_sock_clear_flag(struct sock *sk, int nr); @@ -558,6 +604,27 @@ static inline void sco_exit(void) } #endif +#if IS_ENABLED(CONFIG_BT_LE) +int iso_init(void); +int iso_exit(void); +bool iso_enabled(void); +#else +static inline int iso_init(void) +{ + return 0; +} + +static inline int iso_exit(void) +{ + return 0; +} + +static inline bool iso_enabled(void) +{ + return false; +} +#endif + int mgmt_init(void); 
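The new BTPROTO_ISO protocol and the BT_ISO_QOS socket option are driven from user space much like SCO sockets. A hedged sketch of a unicast (CIS) client, assuming the struct sockaddr_iso layout added by the new include/net/bluetooth/iso.h later in this diff; all numeric QoS values are illustrative, and the bind() to the local adapter plus error handling are omitted:

	static int iso_connect_example(const bdaddr_t *peer)
	{
		struct sockaddr_iso addr = { .iso_family = AF_BLUETOOTH };
		struct bt_iso_qos qos = {
			.cig = BT_ISO_QOS_CIG_UNSET,
			.cis = BT_ISO_QOS_CIS_UNSET,
			.out = {
				.interval = 10000,	/* SDU interval, us */
				.latency  = 10,		/* ms */
				.sdu      = 40,		/* octets */
				.phy      = BT_ISO_PHY_2M,
				.rtn      = 2,
			},
		};
		int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_ISO);

		if (sk < 0)
			return -1;

		setsockopt(sk, SOL_BLUETOOTH, BT_ISO_QOS, &qos, sizeof(qos));
		bacpy(&addr.iso_bdaddr, peer);
		addr.iso_bdaddr_type = BDADDR_LE_PUBLIC;

		return connect(sk, (struct sockaddr *)&addr, sizeof(addr));
	}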
void mgmt_exit(void); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index fe7935be7dc4..cf29511b25a8 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -228,17 +228,6 @@ enum { */ HCI_QUIRK_VALID_LE_STATES, - /* When this quirk is set, then erroneous data reporting - * is ignored. This is mainly due to the fact that the HCI - * Read Default Erroneous Data Reporting command is advertised, - * but not supported; these controllers often reply with unknown - * command and tend to lock up randomly. Needing a hard reset. - * - * This quirk can be set before hci_register_dev is called or - * during the hdev->setup vendor callback. - */ - HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, - /* * When this quirk is set, then the hci_suspend_notifier is not * registered. This is intended for devices which drop completely @@ -327,6 +316,7 @@ enum { HCI_USER_CHANNEL, HCI_EXT_CONFIGURED, HCI_LE_ADV, + HCI_LE_PER_ADV, HCI_LE_SCAN, HCI_SSP_ENABLED, HCI_SC_ENABLED, @@ -349,6 +339,7 @@ enum { HCI_LE_SCAN_INTERRUPTED, HCI_WIDEBAND_SPEECH_ENABLED, HCI_EVENT_FILTER_CONFIGURED, + HCI_PA_SYNC, HCI_DUT_MODE, HCI_VENDOR_DIAG, @@ -361,6 +352,7 @@ enum { HCI_QUALITY_REPORT, HCI_OFFLOAD_CODECS_ENABLED, HCI_LE_SIMULTANEOUS_ROLES, + HCI_CMD_DRAIN_WORKQUEUE, __HCI_NUM_FLAGS, }; @@ -496,6 +488,7 @@ enum { #define LMP_EXT_INQ 0x01 #define LMP_SIMUL_LE_BR 0x02 #define LMP_SIMPLE_PAIR 0x08 +#define LMP_ERR_DATA_REPORTING 0x20 #define LMP_NO_FLUSH 0x40 #define LMP_LSTO 0x01 @@ -528,9 +521,11 @@ enum { #define HCI_LE_PHY_2M 0x01 #define HCI_LE_PHY_CODED 0x08 #define HCI_LE_EXT_ADV 0x10 +#define HCI_LE_PERIODIC_ADV 0x20 #define HCI_LE_CHAN_SEL_ALG2 0x40 #define HCI_LE_CIS_CENTRAL 0x10 #define HCI_LE_CIS_PERIPHERAL 0x20 +#define HCI_LE_ISO_BROADCASTER 0x40 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 @@ -1874,6 +1869,22 @@ struct hci_cp_le_ext_conn_param { __le16 max_ce_len; } __packed; +#define HCI_OP_LE_PA_CREATE_SYNC 0x2044 +struct hci_cp_le_pa_create_sync { + __u8 options; + __u8 sid; + __u8 addr_type; + bdaddr_t addr; + __le16 skip; + __le16 sync_timeout; + __u8 sync_cte_type; +} __packed; + +#define HCI_OP_LE_PA_TERM_SYNC 0x2046 +struct hci_cp_le_pa_term_sync { + __le16 handle; +} __packed; + #define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b struct hci_rp_le_read_num_supported_adv_sets { __u8 status; @@ -1908,13 +1919,6 @@ struct hci_rp_le_set_ext_adv_params { __u8 tx_power; } __packed; -#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039 -struct hci_cp_le_set_ext_adv_enable { - __u8 enable; - __u8 num_of_sets; - __u8 data[]; -} __packed; - struct hci_cp_ext_adv_set { __u8 handle; __le16 duration; @@ -1941,6 +1945,37 @@ struct hci_cp_le_set_ext_scan_rsp_data { __u8 data[]; } __packed; +#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039 +struct hci_cp_le_set_ext_adv_enable { + __u8 enable; + __u8 num_of_sets; + __u8 data[]; +} __packed; + +#define HCI_OP_LE_SET_PER_ADV_PARAMS 0x203e +struct hci_cp_le_set_per_adv_params { + __u8 handle; + __le16 min_interval; + __le16 max_interval; + __le16 periodic_properties; +} __packed; + +#define HCI_MAX_PER_AD_LENGTH 252 + +#define HCI_OP_LE_SET_PER_ADV_DATA 0x203f +struct hci_cp_le_set_per_adv_data { + __u8 handle; + __u8 operation; + __u8 length; + __u8 data[]; +} __packed; + +#define HCI_OP_LE_SET_PER_ADV_ENABLE 0x2040 +struct hci_cp_le_set_per_adv_enable { + __u8 enable; + __u8 handle; +} __packed; + #define LE_SET_ADV_DATA_OP_COMPLETE 0x03 #define LE_SET_ADV_DATA_NO_FRAG 0x01 @@ -1998,7 +2033,7 @@ struct hci_rp_le_read_iso_tx_sync { struct 
hci_cis_params { __u8 cis_id; __le16 c_sdu; - __le16 p_pdu; + __le16 p_sdu; __u8 c_phy; __u8 p_phy; __u8 c_rtn; @@ -2009,7 +2044,7 @@ struct hci_cp_le_set_cig_params { __u8 cig_id; __u8 c_interval[3]; __u8 p_interval[3]; - __u8 wc_sca; + __u8 sca; __u8 packing; __u8 framing; __le16 c_latency; @@ -2052,6 +2087,73 @@ struct hci_cp_le_reject_cis { __u8 reason; } __packed; +#define HCI_OP_LE_CREATE_BIG 0x2068 +struct hci_bis { + __u8 sdu_interval[3]; + __le16 sdu; + __le16 latency; + __u8 rtn; + __u8 phy; + __u8 packing; + __u8 framing; + __u8 encryption; + __u8 bcode[16]; +} __packed; + +struct hci_cp_le_create_big { + __u8 handle; + __u8 adv_handle; + __u8 num_bis; + struct hci_bis bis; +} __packed; + +#define HCI_OP_LE_TERM_BIG 0x206a +struct hci_cp_le_term_big { + __u8 handle; + __u8 reason; +} __packed; + +#define HCI_OP_LE_BIG_CREATE_SYNC 0x206b +struct hci_cp_le_big_create_sync { + __u8 handle; + __le16 sync_handle; + __u8 encryption; + __u8 bcode[16]; + __u8 mse; + __le16 timeout; + __u8 num_bis; + __u8 bis[0]; +} __packed; + +#define HCI_OP_LE_BIG_TERM_SYNC 0x206c +struct hci_cp_le_big_term_sync { + __u8 handle; +} __packed; + +#define HCI_OP_LE_SETUP_ISO_PATH 0x206e +struct hci_cp_le_setup_iso_path { + __le16 handle; + __u8 direction; + __u8 path; + __u8 codec; + __le16 codec_cid; + __le16 codec_vid; + __u8 delay[3]; + __u8 codec_cfg_len; + __u8 codec_cfg[0]; +} __packed; + +struct hci_rp_le_setup_iso_path { + __u8 status; + __le16 handle; +} __packed; + +#define HCI_OP_LE_SET_HOST_FEATURE 0x2074 +struct hci_cp_le_set_host_feature { + __u8 bit_number; + __u8 bit_value; +} __packed; + /* ---- HCI Events ---- */ struct hci_ev_status { __u8 status; @@ -2580,6 +2682,18 @@ struct hci_ev_le_ext_adv_report { struct hci_ev_le_ext_adv_info info[]; } __packed; +#define HCI_EV_LE_PA_SYNC_ESTABLISHED 0x0e +struct hci_ev_le_pa_sync_established { + __u8 status; + __le16 handle; + __u8 sid; + __u8 bdaddr_type; + bdaddr_t bdaddr; + __u8 phy; + __le16 interval; + __u8 clock_accuracy; +} __packed; + #define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a struct hci_ev_le_enh_conn_complete { __u8 status; @@ -2631,6 +2745,55 @@ struct hci_evt_le_cis_req { __u8 cis_id; } __packed; +#define HCI_EVT_LE_CREATE_BIG_COMPLETE 0x1b +struct hci_evt_le_create_big_complete { + __u8 status; + __u8 handle; + __u8 sync_delay[3]; + __u8 transport_delay[3]; + __u8 phy; + __u8 nse; + __u8 bn; + __u8 pto; + __u8 irc; + __le16 max_pdu; + __le16 interval; + __u8 num_bis; + __le16 bis_handle[]; +} __packed; + +#define HCI_EVT_LE_BIG_SYNC_ESTABILISHED 0x1d +struct hci_evt_le_big_sync_estabilished { + __u8 status; + __u8 handle; + __u8 latency[3]; + __u8 nse; + __u8 bn; + __u8 pto; + __u8 irc; + __le16 max_pdu; + __le16 interval; + __u8 num_bis; + __le16 bis[]; +} __packed; + +#define HCI_EVT_LE_BIG_INFO_ADV_REPORT 0x22 +struct hci_evt_le_big_info_adv_report { + __le16 sync_handle; + __u8 num_bis; + __u8 nse; + __le16 iso_interval; + __u8 bn; + __u8 pto; + __u8 irc; + __le16 max_pdu; + __u8 sdu_interval[3]; + __le16 max_sdu; + __u8 phy; + __u8 framing; + __u8 encryption; +} __packed; + #define HCI_EV_VENDOR 0xff /* Internal events generated by Bluetooth stack */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c0ea2a4892b1..e7862903187d 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -126,6 +126,7 @@ struct hci_conn_hash { unsigned int acl_num; unsigned int amp_num; unsigned int sco_num; + unsigned int iso_num; unsigned int le_num; unsigned int 
le_num_peripheral; }; @@ -234,8 +235,9 @@ struct oob_data { struct adv_info { struct list_head list; - bool enabled; - bool pending; + bool enabled; + bool pending; + bool periodic; __u8 instance; __u32 flags; __u16 timeout; @@ -243,8 +245,12 @@ struct adv_info { __u16 duration; __u16 adv_data_len; __u8 adv_data[HCI_MAX_EXT_AD_LENGTH]; + bool adv_data_changed; __u16 scan_rsp_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; + bool scan_rsp_changed; + __u16 per_adv_data_len; + __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; __s8 tx_power; __u32 min_interval; __u32 max_interval; @@ -258,6 +264,15 @@ struct adv_info { #define HCI_ADV_TX_POWER_NO_PREFERENCE 0x7F +#define DATA_CMP(_d1, _l1, _d2, _l2) \ + (_l1 == _l2 ? memcmp(_d1, _d2, _l1) : _l1 - _l2) + +#define ADV_DATA_CMP(_adv, _data, _len) \ + DATA_CMP((_adv)->adv_data, (_adv)->adv_data_len, _data, _len) + +#define SCAN_RSP_CMP(_adv, _data, _len) \ + DATA_CMP((_adv)->scan_rsp_data, (_adv)->scan_rsp_len, _data, _len) + struct monitored_device { struct list_head list; @@ -463,13 +478,16 @@ struct hci_dev { unsigned int acl_cnt; unsigned int sco_cnt; unsigned int le_cnt; + unsigned int iso_cnt; unsigned int acl_mtu; unsigned int sco_mtu; unsigned int le_mtu; + unsigned int iso_mtu; unsigned int acl_pkts; unsigned int sco_pkts; unsigned int le_pkts; + unsigned int iso_pkts; __u16 block_len; __u16 block_mtu; @@ -506,8 +524,6 @@ struct hci_dev { struct work_struct cmd_work; struct work_struct tx_work; - struct work_struct discov_update; - struct work_struct scan_update; struct delayed_work le_scan_disable; struct delayed_work le_scan_restart; @@ -516,6 +532,7 @@ struct hci_dev { struct sk_buff_head cmd_q; struct sk_buff *sent_cmd; + struct sk_buff *recv_event; struct mutex req_lock; wait_queue_head_t req_wait_q; @@ -580,6 +597,8 @@ struct hci_dev { __u8 adv_data_len; __u8 scan_rsp_data[HCI_MAX_EXT_AD_LENGTH]; __u8 scan_rsp_data_len; + __u8 per_adv_data[HCI_MAX_PER_AD_LENGTH]; + __u8 per_adv_data_len; struct list_head adv_instances; unsigned int adv_instance_cnt; @@ -647,6 +666,7 @@ enum conn_reasons { CONN_REASON_PAIR_DEVICE, CONN_REASON_L2CAP_CHAN, CONN_REASON_SCO_CONNECT, + CONN_REASON_ISO_CONNECT, }; struct hci_conn { @@ -664,6 +684,7 @@ struct hci_conn { __u8 resp_addr_type; __u8 adv_instance; __u16 handle; + __u16 sync_handle; __u16 state; __u8 mode; __u8 type; @@ -694,11 +715,14 @@ struct hci_conn { __u16 le_supv_timeout; __u8 le_adv_data[HCI_MAX_AD_LENGTH]; __u8 le_adv_data_len; + __u8 le_per_adv_data[HCI_MAX_PER_AD_LENGTH]; + __u8 le_per_adv_data_len; __u8 le_tx_phy; __u8 le_rx_phy; __s8 rssi; __s8 tx_power; __s8 max_tx_power; + struct bt_iso_qos iso_qos; unsigned long flags; enum conn_reasons conn_reason; @@ -729,6 +753,7 @@ struct hci_conn { struct hci_dev *hdev; void *l2cap_data; void *sco_data; + void *iso_data; struct amp_mgr *amp_mgr; struct hci_conn *link; @@ -737,6 +762,8 @@ struct hci_conn { void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); void (*security_cfm_cb) (struct hci_conn *conn, u8 status); void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); + + void (*cleanup)(struct hci_conn *conn); }; struct hci_chan { @@ -824,6 +851,21 @@ static inline void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) } #endif +#if IS_ENABLED(CONFIG_BT_LE) +int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags); +void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); +#else +static inline int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 *flags) +{ + return 0; +} +static 
inline void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, + u16 flags) +{ +} +#endif + /* ----- Inquiry cache ----- */ #define INQUIRY_CACHE_AGE_MAX (HZ*30) /* 30 seconds */ #define INQUIRY_ENTRY_AGE_MAX (HZ*60) /* 60 seconds */ @@ -908,6 +950,7 @@ enum { HCI_CONN_NEW_LINK_KEY, HCI_CONN_SCANNING, HCI_CONN_AUTH_FAILURE, + HCI_CONN_PER_ADV, }; static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) @@ -944,6 +987,9 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) case ESCO_LINK: h->sco_num++; break; + case ISO_LINK: + h->iso_num++; + break; } } @@ -970,6 +1016,9 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c) case ESCO_LINK: h->sco_num--; break; + case ISO_LINK: + h->iso_num--; + break; } } @@ -986,6 +1035,8 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type) case SCO_LINK: case ESCO_LINK: return h->sco_num; + case ISO_LINK: + return h->iso_num; default: return 0; } @@ -995,7 +1046,7 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev) { struct hci_conn_hash *c = &hdev->conn_hash; - return c->acl_num + c->amp_num + c->sco_num + c->le_num; + return c->acl_num + c->amp_num + c->sco_num + c->le_num + c->iso_num; } static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) @@ -1018,6 +1069,29 @@ static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle) return type; } +static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, + bdaddr_t *ba, + __u8 big, __u8 bis) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (bacmp(&c->dst, ba) || c->type != ISO_LINK) + continue; + + if (c->iso_qos.big == big && c->iso_qos.bis == bis) { + rcu_read_unlock(); + return c; + } + } + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev, __u16 handle) { @@ -1081,6 +1155,76 @@ static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, return NULL; } +static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, + bdaddr_t *ba, + __u8 ba_type) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK) + continue; + + if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + +static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, + __u8 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK) + continue; + + if (handle == c->iso_qos.cig) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + +static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, + __u8 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK) + continue; + + if (handle == c->iso_qos.big) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev, __u8 type, __u16 state) { @@ -1101,6 +1245,27 @@ static inline struct hci_conn 
*hci_conn_hash_lookup_state(struct hci_dev *hdev, return NULL; } +typedef void (*hci_conn_func_t)(struct hci_conn *conn, void *data); +static inline void hci_conn_hash_list_state(struct hci_dev *hdev, + hci_conn_func_t func, __u8 type, + __u16 state, void *data) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + if (!func) + return; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && c->state == state) + func(c, data); + } + + rcu_read_unlock(); +} + static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) { struct hci_conn_hash *h = &hdev->conn_hash; @@ -1124,6 +1289,8 @@ static inline struct hci_conn *hci_lookup_le_connect(struct hci_dev *hdev) int hci_disconnect(struct hci_conn *conn, __u8 reason); bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); +bool hci_iso_setup_path(struct hci_conn *conn); +int hci_le_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, u8 role); @@ -1148,6 +1315,17 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, enum conn_reasons conn_reason); struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting, struct bt_codec *codec); +struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos); +struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos); +struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos, + __u8 data_len, __u8 *data); +int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, + __u8 sid); +int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, + __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level); int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, @@ -1286,6 +1464,8 @@ void hci_free_dev(struct hci_dev *hdev); int hci_register_dev(struct hci_dev *hdev); void hci_unregister_dev(struct hci_dev *hdev); void hci_release_dev(struct hci_dev *hdev); +int hci_register_suspend_notifier(struct hci_dev *hdev); +int hci_unregister_suspend_notifier(struct hci_dev *hdev); int hci_suspend_dev(struct hci_dev *hdev); int hci_resume_dev(struct hci_dev *hdev); int hci_reset_dev(struct hci_dev *hdev); @@ -1392,11 +1572,14 @@ int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, void hci_adv_instances_clear(struct hci_dev *hdev); struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance); struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance); -int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, - u16 adv_data_len, u8 *adv_data, - u16 scan_rsp_len, u8 *scan_rsp_data, - u16 timeout, u16 duration, s8 tx_power, - u32 min_interval, u32 max_interval); +struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data, + u16 timeout, u16 duration, s8 tx_power, + u32 min_interval, u32 max_interval); +struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u8 data_len, u8 *data, + u32 min_interval, u32 max_interval); int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, u16 adv_data_len, u8 
*adv_data, u16 scan_rsp_len, u8 *scan_rsp_data); @@ -1407,12 +1590,9 @@ bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance); void hci_adv_monitors_clear(struct hci_dev *hdev); void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); -int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status); -int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status); -bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, - int *err); -bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err); -bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err); +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); +int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle); +int hci_remove_all_adv_monitor(struct hci_dev *hdev); bool hci_is_adv_monitoring(struct hci_dev *hdev); int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev); @@ -1516,6 +1696,19 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \ ext_adv_capable(dev)) +/* Periodic advertising support */ +#define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV)) + +/* CIS Master/Slave and BIS support */ +#define iso_capable(dev) (cis_capable(dev) || bis_capable(dev)) +#define cis_capable(dev) \ + (cis_central_capable(dev) || cis_peripheral_capable(dev)) +#define cis_central_capable(dev) \ + ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL) +#define cis_peripheral_capable(dev) \ + ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL) +#define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER) + /* ----- HCI protocols ----- */ #define HCI_PROTO_DEFER 0x01 @@ -1530,6 +1723,9 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, case ESCO_LINK: return sco_connect_ind(hdev, bdaddr, flags); + case ISO_LINK: + return iso_connect_ind(hdev, bdaddr, flags); + default: BT_ERR("unknown link type %d", type); return -EINVAL; @@ -1737,8 +1933,10 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, const void *param); void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags); void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); +void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb); void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); +void *hci_recv_event_data(struct hci_dev *hdev, __u8 event); u32 hci_conn_get_phy(struct hci_conn *conn); @@ -1797,6 +1995,8 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c); #define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */ #define DISCOV_LE_FAST_ADV_INT_MIN 0x00A0 /* 100 msec */ #define DISCOV_LE_FAST_ADV_INT_MAX 0x00F0 /* 150 msec */ +#define DISCOV_LE_PER_ADV_INT_MIN 0x00A0 /* 200 msec */ +#define DISCOV_LE_PER_ADV_INT_MAX 0x00A0 /* 200 msec */ #define NAME_RESOLVE_DURATION msecs_to_jiffies(10240) /* 10.24 sec */ @@ -1872,8 +2072,6 @@ void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, u8 instance); void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle); int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip); -int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status); -int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status); void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, bdaddr_t *bdaddr, u8 addr_type); diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h index 
9949870f7d78..0520e21ab698 100644 --- a/include/net/bluetooth/hci_sock.h +++ b/include/net/bluetooth/hci_sock.h @@ -124,6 +124,8 @@ struct hci_dev_info { __u16 acl_pkts; __u16 sco_mtu; __u16 sco_pkts; + __u16 iso_mtu; + __u16 iso_pkts; struct hci_dev_stats stat; }; diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 2492e3b46a8f..3843f5060c73 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -65,6 +65,10 @@ int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance); int hci_enable_advertising_sync(struct hci_dev *hdev); int hci_enable_advertising(struct hci_dev *hdev); +int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, + u8 *data, u32 flags, u16 min_interval, + u16 max_interval, u16 sync_interval); + int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, u8 instance, bool force); int hci_disable_advertising_sync(struct hci_dev *hdev); @@ -78,10 +82,12 @@ int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp); int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable); int hci_update_scan_sync(struct hci_dev *hdev); +int hci_update_scan(struct hci_dev *hdev); int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul); int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, struct sock *sk); +int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance); struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev, bool ext, struct sock *sk); @@ -105,4 +111,14 @@ int hci_resume_sync(struct hci_dev *hdev); struct hci_conn; +int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason); + int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn); + +int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle); + +int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason); + +int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle); + +int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle); diff --git a/include/net/bluetooth/iso.h b/include/net/bluetooth/iso.h new file mode 100644 index 000000000000..3f4fe8b78e1b --- /dev/null +++ b/include/net/bluetooth/iso.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2022 Intel Corporation + */ + +#ifndef __ISO_H +#define __ISO_H + +/* ISO defaults */ +#define ISO_DEFAULT_MTU 251 +#define ISO_MAX_NUM_BIS 0x1f + +/* ISO socket broadcast address */ +struct sockaddr_iso_bc { + bdaddr_t bc_bdaddr; + __u8 bc_bdaddr_type; + __u8 bc_sid; + __u8 bc_num_bis; + __u8 bc_bis[ISO_MAX_NUM_BIS]; +}; + +/* ISO socket address */ +struct sockaddr_iso { + sa_family_t iso_family; + bdaddr_t iso_bdaddr; + __u8 iso_bdaddr_type; + struct sockaddr_iso_bc iso_bc[]; +}; + +#endif /* __ISO_H */ diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 61b49063791c..69292ecc0325 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -7,6 +7,14 @@ #ifndef _NET_BOND_OPTIONS_H #define _NET_BOND_OPTIONS_H +#include <linux/bits.h> +#include <linux/limits.h> +#include <linux/types.h> +#include <linux/string.h> + +struct netlink_ext_ack; +struct nlattr; + #define BOND_OPT_MAX_NAMELEN 32 #define BOND_OPT_VALID(opt) ((opt) < BOND_OPT_LAST) #define BOND_MODE_ALL_EX(x) (~(x)) @@ -67,6 +75,7 @@ enum { BOND_OPT_LACP_ACTIVE, BOND_OPT_MISSED_MAX, BOND_OPT_NS_TARGETS, + BOND_OPT_PRIO, BOND_OPT_LAST }; @@ -83,7 +92,10 @@ 
struct bond_opt_value { char *string; u64 value; u32 flags; - char extra[BOND_OPT_EXTRA_MAXLEN]; + union { + char extra[BOND_OPT_EXTRA_MAXLEN]; + struct net_device *slave_dev; + }; }; struct bonding; @@ -107,7 +119,8 @@ struct bond_option { }; int __bond_opt_set(struct bonding *bond, unsigned int option, - struct bond_opt_value *val); + struct bond_opt_value *val, + struct nlattr *bad_attr, struct netlink_ext_ack *extack); int __bond_opt_set_notify(struct bonding *bond, unsigned int option, struct bond_opt_value *val); int bond_opt_tryset_rtnl(struct bonding *bond, unsigned int option, char *buf); @@ -132,13 +145,16 @@ static inline void __bond_opt_init(struct bond_opt_value *optval, optval->value = value; else if (string) optval->string = string; - else if (extra_len <= BOND_OPT_EXTRA_MAXLEN) + + if (extra && extra_len <= BOND_OPT_EXTRA_MAXLEN) memcpy(optval->extra, extra, extra_len); } #define bond_opt_initval(optval, value) __bond_opt_init(optval, NULL, value, NULL, 0) #define bond_opt_initstr(optval, str) __bond_opt_init(optval, str, ULLONG_MAX, NULL, 0) #define bond_opt_initextra(optval, extra, extra_len) \ __bond_opt_init(optval, NULL, ULLONG_MAX, extra, extra_len) +#define bond_opt_slave_initval(optval, slave_dev, value) \ + __bond_opt_init(optval, NULL, value, slave_dev, sizeof(struct net_device *)) void bond_option_arp_ip_targets_clear(struct bonding *bond); #if IS_ENABLED(CONFIG_IPV6) diff --git a/include/net/bonding.h b/include/net/bonding.h index cb904d356e31..afd606df149a 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -161,8 +161,9 @@ struct slave { struct net_device *dev; /* first - useful for panic debug */ struct bonding *bond; /* our master */ int delay; - /* all three in jiffies */ + /* all 4 in jiffies */ unsigned long last_link_up; + unsigned long last_tx; unsigned long last_rx; unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; s8 link; /* one of BOND_LINK_XXXX */ @@ -178,6 +179,7 @@ struct slave { u32 speed; u16 queue_id; u8 perm_hwaddr[MAX_ADDR_LEN]; + int prio; struct ad_slave_info *ad_info; struct tlb_slave_info tlb_info; #ifdef CONFIG_NET_POLL_CONTROLLER @@ -539,6 +541,16 @@ static inline unsigned long slave_last_rx(struct bonding *bond, return slave->last_rx; } +static inline void slave_update_last_tx(struct slave *slave) +{ + WRITE_ONCE(slave->last_tx, jiffies); +} + +static inline unsigned long slave_last_tx(struct slave *slave) +{ + return READ_ONCE(slave->last_tx); +} + #ifdef CONFIG_NET_POLL_CONTROLLER static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave, struct sk_buff *skb) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 80f41446b1f0..908d58393484 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -374,6 +374,7 @@ struct ieee80211_sta_he_cap { * and NSS Set field" * * @only_20mhz: MCS/NSS support for 20 MHz-only STA. + * @bw: MCS/NSS support for 80, 160 and 320 MHz * @bw._80: MCS/NSS support for BW <= 80 MHz * @bw._160: MCS/NSS support for BW = 160 MHz * @bw._320: MCS/NSS support for BW = 320 MHz @@ -420,6 +421,7 @@ struct ieee80211_sta_eht_cap { * @he_cap: holds the HE capabilities * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a * 6 GHz band channel (and 0 may be valid value). 
+ * @eht_cap: STA's EHT capabilities * @vendor_elems: vendor element(s) to advertise * @vendor_elems.data: vendor element(s) data * @vendor_elems.len: vendor element(s) length @@ -495,7 +497,7 @@ struct ieee80211_edmg { * This structure describes most essential parameters needed * to describe 802.11ah S1G capabilities for a STA. * - * @s1g_supported: is STA an S1G STA + * @s1g: is STA an S1G STA * @cap: S1G capabilities information * @nss_mcs: Supported NSS MCS set */ @@ -937,19 +939,18 @@ int cfg80211_chandef_dfs_required(struct wiphy *wiphy, enum nl80211_iftype iftype); /** - * ieee80211_chandef_rate_flags - returns rate flags for a channel + * ieee80211_chanwidth_rate_flags - return rate flags for channel width + * @width: the channel width of the channel * * In some channel types, not all rates may be used - for example CCK * rates may not be used in 5/10 MHz channels. * - * @chandef: channel definition for the channel - * - * Returns: rate flags which apply for this channel + * Returns: rate flags which apply for this channel width */ static inline enum ieee80211_rate_flags -ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) +ieee80211_chanwidth_rate_flags(enum nl80211_chan_width width) { - switch (chandef->width) { + switch (width) { case NL80211_CHAN_WIDTH_5: return IEEE80211_RATE_SUPPORTS_5MHZ; case NL80211_CHAN_WIDTH_10: @@ -961,6 +962,20 @@ ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) } /** + * ieee80211_chandef_rate_flags - returns rate flags for a channel + * @chandef: channel definition for the channel + * + * See ieee80211_chanwidth_rate_flags(). + * + * Returns: rate flags which apply for this channel + */ +static inline enum ieee80211_rate_flags +ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef) +{ + return ieee80211_chanwidth_rate_flags(chandef->width); +} + +/** * ieee80211_chandef_max_power - maximum transmission power for the chandef * * In some regulations, the transmit power may depend on the configured channel @@ -1061,6 +1076,7 @@ struct survey_info { }; #define CFG80211_MAX_WEP_KEYS 4 +#define CFG80211_MAX_NUM_AKM_SUITES 10 /** * struct cfg80211_crypto_settings - Crypto settings @@ -1112,7 +1128,7 @@ struct cfg80211_crypto_settings { int n_ciphers_pairwise; u32 ciphers_pairwise[NL80211_MAX_NR_CIPHER_SUITES]; int n_akm_suites; - u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; + u32 akm_suites[CFG80211_MAX_NUM_AKM_SUITES]; bool control_port; __be16 control_port_ethertype; bool control_port_no_encrypt; @@ -1158,6 +1174,7 @@ struct cfg80211_mbssid_elems { /** * struct cfg80211_beacon_data - beacon data + * @link_id: the link ID for the AP MLD link sending this beacon * @head: head portion of beacon (before TIM IE) * or %NULL if not changed * @tail: tail portion of beacon (after TIM IE) @@ -1188,6 +1205,8 @@ struct cfg80211_mbssid_elems { * attribute is present in beacon data or not. 
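struct cfg80211_beacon_data now carries the @link_id of the AP MLD link it targets, so beacon updates can be routed per link. A hypothetical driver-side sketch (drv_vif and drv_update_beacon are stand-ins; the head/tail length fields follow the usual cfg80211_beacon_data layout):

	static int drv_change_beacon(struct wiphy *wiphy, struct net_device *dev,
				     struct cfg80211_beacon_data *info)
	{
		struct drv_vif *vif = netdev_priv(dev);

		/* Route the new head/tail to the link selected by cfg80211. */
		return drv_update_beacon(vif, info->link_id,
					 info->head, info->head_len,
					 info->tail, info->tail_len);
	}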
*/ struct cfg80211_beacon_data { + unsigned int link_id; + const u8 *head, *tail; const u8 *beacon_ies; const u8 *proberesp_ies; @@ -1290,6 +1309,8 @@ struct cfg80211_unsol_bcast_probe_resp { * @ht_cap: HT capabilities (or %NULL if HT isn't enabled) * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled) * @he_cap: HE capabilities (or %NULL if HE isn't enabled) + * @eht_cap: EHT capabilities (or %NULL if EHT isn't enabled) + * @eht_oper: EHT operation IE (or %NULL if EHT isn't enabled) * @ht_required: stations must support HT * @vht_required: stations must support VHT * @twt_responder: Enable Target Wait Time @@ -1326,6 +1347,8 @@ struct cfg80211_ap_settings { const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; const struct ieee80211_he_operation *he_oper; + const struct ieee80211_eht_cap_elem *eht_cap; + const struct ieee80211_eht_operation *eht_oper; bool ht_required, vht_required, he_required, sae_h2e_required; bool twt_responder; u32 flags; @@ -1370,8 +1393,8 @@ struct cfg80211_csa_settings { * Used for bss color change * * @beacon_color_change: beacon data while performing the color countdown - * @counter_offsets_beacon: offsets of the counters within the beacon (tail) - * @counter_offsets_presp: offsets of the counters within the probe response + * @counter_offset_beacon: offsets of the counters within the beacon (tail) + * @counter_offset_presp: offsets of the counters within the probe response * @beacon_next: beacon data to be used after the color change * @count: number of beacons until the color change * @color: the color used after the change @@ -1414,6 +1437,7 @@ struct iface_combination_params { * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) * @STATION_PARAM_APPLY_CAPABILITY: apply new capability * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state + * @STATION_PARAM_APPLY_STA_TXPOWER: apply tx power for STA * * Not all station parameters have in-band "no change" signalling, * for those that don't these flags will are used. @@ -1422,7 +1446,6 @@ enum station_parameters_apply_mask { STATION_PARAM_APPLY_UAPSD = BIT(0), STATION_PARAM_APPLY_CAPABILITY = BIT(1), STATION_PARAM_APPLY_PLINK_STATE = BIT(2), - STATION_PARAM_APPLY_STA_TXPOWER = BIT(3), }; /** @@ -1446,14 +1469,66 @@ struct sta_txpwr { }; /** - * struct station_parameters - station parameters + * struct link_station_parameters - link station parameters * - * Used to change and create a new station. + * Used to change and create a new link station. 
* - * @vlan: vlan interface station should belong to + * @mld_mac: MAC address of the station + * @link_id: the link id (-1 for non-MLD station) + * @link_mac: MAC address of the link * @supported_rates: supported rates in IEEE 802.11 format * (or NULL for no change) * @supported_rates_len: number of supported rates + * @ht_capa: HT capabilities of station + * @vht_capa: VHT capabilities of station + * @opmode_notif: operating mode field from Operating Mode Notification + * @opmode_notif_used: information if operating mode field is used + * @he_capa: HE capabilities of station + * @he_capa_len: the length of the HE capabilities + * @txpwr: transmit power for an associated station + * @txpwr_set: txpwr field is set + * @he_6ghz_capa: HE 6 GHz Band capabilities of station + * @eht_capa: EHT capabilities of station + * @eht_capa_len: the length of the EHT capabilities + */ +struct link_station_parameters { + const u8 *mld_mac; + int link_id; + const u8 *link_mac; + const u8 *supported_rates; + u8 supported_rates_len; + const struct ieee80211_ht_cap *ht_capa; + const struct ieee80211_vht_cap *vht_capa; + u8 opmode_notif; + bool opmode_notif_used; + const struct ieee80211_he_cap_elem *he_capa; + u8 he_capa_len; + struct sta_txpwr txpwr; + bool txpwr_set; + const struct ieee80211_he_6ghz_capa *he_6ghz_capa; + const struct ieee80211_eht_cap_elem *eht_capa; + u8 eht_capa_len; +}; + +/** + * struct link_station_del_parameters - link station deletion parameters + * + * Used to delete a link station entry (or all stations). + * + * @mld_mac: MAC address of the station + * @link_id: the link id + */ +struct link_station_del_parameters { + const u8 *mld_mac; + u32 link_id; +}; + +/** + * struct station_parameters - station parameters + * + * Used to change and create a new station. + * + * @vlan: vlan interface station should belong to * @sta_flags_mask: station flags that changed * (bitmask of BIT(%NL80211_STA_FLAG_...)) * @sta_flags_set: station flags values @@ -1464,8 +1539,6 @@ struct sta_txpwr { * @peer_aid: mesh peer AID or zero for no change * @plink_action: plink action to take * @plink_state: set the peer link state for a station - * @ht_capa: HT capabilities of station - * @vht_capa: VHT capabilities of station * @uapsd_queues: bitmap of queues configured for uapsd. same format * as the AC bitmap in the QoS info field * @max_sp: max Service Period. same format as the MAX_SP in the @@ -1482,19 +1555,11 @@ struct sta_txpwr { * @supported_channels_len: number of supported channels * @supported_oper_classes: supported oper classes in IEEE 802.11 format * @supported_oper_classes_len: number of supported operating classes - * @opmode_notif: operating mode field from Operating Mode Notification - * @opmode_notif_used: information if operating mode field is used * @support_p2p_ps: information if station supports P2P PS mechanism - * @he_capa: HE capabilities of station - * @he_capa_len: the length of the HE capabilities * @airtime_weight: airtime scheduler weight for this station - * @txpwr: transmit power for an associated station - * @he_6ghz_capa: HE 6 GHz Band capabilities of station - * @eht_capa: EHT capabilities of station - * @eht_capa_len: the length of the EHT capabilities + * @link_sta_params: link related params. 
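With the HT/VHT/HE/EHT capabilities, supported rates, opmode notification and per-STA txpwr moved from struct station_parameters into the embedded struct link_station_parameters, existing drivers mostly gain one level of indirection. A sketch, with drv_sta and the drv_set_*() helpers assumed:

	static void drv_apply_link_params(struct drv_sta *sta,
					  const struct station_parameters *params)
	{
		const struct link_station_parameters *link = &params->link_sta_params;

		/* These fields previously lived directly in station_parameters. */
		if (link->ht_capa)
			drv_set_ht_cap(sta, link->ht_capa);
		if (link->supported_rates_len)
			drv_set_rates(sta, link->supported_rates,
				      link->supported_rates_len);
	}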
*/ struct station_parameters { - const u8 *supported_rates; struct net_device *vlan; u32 sta_flags_mask, sta_flags_set; u32 sta_modify_mask; @@ -1502,11 +1567,8 @@ struct station_parameters { u16 aid; u16 vlan_id; u16 peer_aid; - u8 supported_rates_len; u8 plink_action; u8 plink_state; - const struct ieee80211_ht_cap *ht_capa; - const struct ieee80211_vht_cap *vht_capa; u8 uapsd_queues; u8 max_sp; enum nl80211_mesh_power_mode local_pm; @@ -1517,16 +1579,9 @@ struct station_parameters { u8 supported_channels_len; const u8 *supported_oper_classes; u8 supported_oper_classes_len; - u8 opmode_notif; - bool opmode_notif_used; int support_p2p_ps; - const struct ieee80211_he_cap_elem *he_capa; - u8 he_capa_len; u16 airtime_weight; - struct sta_txpwr txpwr; - const struct ieee80211_he_6ghz_capa *he_6ghz_capa; - const struct ieee80211_eht_cap_elem *eht_capa; - u8 eht_capa_len; + struct link_station_parameters link_sta_params; }; /** @@ -2146,6 +2201,9 @@ struct bss_parameters { * @plink_timeout: If no tx activity is seen from a STA we've established * peering with for longer than this time (in seconds), then remove it * from the STA's list of peers. Default is 30 minutes. + * @dot11MeshConnectedToAuthServer: if set to true then this mesh STA + * will advertise that it is connected to a authentication server + * in the mesh formation field. * @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is * connected to a mesh gate in mesh formation info. If false, the * value in mesh formation is determined by the presence of root paths @@ -2318,12 +2376,12 @@ struct cfg80211_scan_info { /** * struct cfg80211_scan_6ghz_params - relevant for 6 GHz only * - * @short_bssid: short ssid to scan for + * @short_ssid: short ssid to scan for * @bssid: bssid to scan for * @channel_idx: idx of the channel in the channel array in the scan request * which the above info relvant to * @unsolicited_probe: the AP transmits unsolicited probe response every 20 TU - * @short_ssid_valid: short_ssid is valid and can be used + * @short_ssid_valid: @short_ssid is valid and can be used * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait * 20 TUs before starting to send probe requests. */ @@ -2715,6 +2773,12 @@ static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id) * Authentication algorithm number, i.e., starting at the Authentication * transaction sequence number field. 
* @auth_data_len: Length of auth_data buffer in octets + * @link_id: if >= 0, indicates authentication should be done as an MLD, + * the interface address is included as the MLD address and the + * necessary link (with the given link_id) will be created (and + * given an MLD address) by the driver + * @ap_mld_addr: AP MLD address in case of authentication request with + * an AP MLD, valid iff @link_id >= 0 */ struct cfg80211_auth_request { struct cfg80211_bss *bss; @@ -2722,9 +2786,25 @@ struct cfg80211_auth_request { size_t ie_len; enum nl80211_auth_type auth_type; const u8 *key; - u8 key_len, key_idx; + u8 key_len; + s8 key_idx; const u8 *auth_data; size_t auth_data_len; + s8 link_id; + const u8 *ap_mld_addr; +}; + +/** + * struct cfg80211_assoc_link - per-link information for MLO association + * @bss: the BSS pointer, see also &struct cfg80211_assoc_request::bss; + * if this is %NULL for a link, that link is not requested + * @elems: extra elements for the per-STA profile for this link + * @elems_len: length of the elements + */ +struct cfg80211_assoc_link { + struct cfg80211_bss *bss; + const u8 *elems; + size_t elems_len; }; /** @@ -2739,6 +2819,9 @@ struct cfg80211_auth_request { * request (connect callback). * @ASSOC_REQ_DISABLE_HE: Disable HE * @ASSOC_REQ_DISABLE_EHT: Disable EHT + * @CONNECT_REQ_MLO_SUPPORT: Userspace indicates support for handling MLD links. + * Drivers shall disable MLO features for the current association if this + * flag is not set. */ enum cfg80211_assoc_req_flags { ASSOC_REQ_DISABLE_HT = BIT(0), @@ -2747,6 +2830,7 @@ enum cfg80211_assoc_req_flags { CONNECT_REQ_EXTERNAL_AUTH_SUPPORT = BIT(3), ASSOC_REQ_DISABLE_HE = BIT(4), ASSOC_REQ_DISABLE_EHT = BIT(5), + CONNECT_REQ_MLO_SUPPORT = BIT(6), }; /** @@ -2758,6 +2842,8 @@ enum cfg80211_assoc_req_flags { * given a reference that it must give back to cfg80211_send_rx_assoc() * or to cfg80211_assoc_timeout(). To ensure proper refcounting, new * association requests while already associating must be rejected. + * This also applies to the @links.bss parameter, which is used instead + * of this one (it is %NULL) for MLO associations. * @ie: Extra IEs to add to (Re)Association Request frame or %NULL * @ie_len: Length of ie buffer in octets * @use_mfp: Use management frame protection (IEEE 802.11w) in this association @@ -2782,6 +2868,11 @@ enum cfg80211_assoc_req_flags { * with 16 octets of STA Nonce followed by 16 octets of AP Nonce. * @s1g_capa: S1G capability override * @s1g_capa_mask: S1G capability override mask + * @links: per-link information for MLO connections + * @link_id: >= 0 for MLO connections, where links are given, and indicates + * the link on which the association request should be sent + * @ap_mld_addr: AP MLD address in case of MLO association request, + * valid iff @link_id >= 0 */ struct cfg80211_assoc_request { struct cfg80211_bss *bss; @@ -2797,6 +2888,9 @@ struct cfg80211_assoc_request { size_t fils_kek_len; const u8 *fils_nonces; struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask; + struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS]; + const u8 *ap_mld_addr; + s8 link_id; }; /** @@ -2805,7 +2899,7 @@ struct cfg80211_assoc_request { * This structure provides information needed to complete IEEE 802.11 * deauthentication. 
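For MLO associations, struct cfg80211_assoc_request now carries @ap_mld_addr, the @link_id the Association Request is transmitted on, and a per-link @links array in which unrequested links have a NULL bss. A sketch of how a driver's association path might walk it (drv_vif and drv_setup_link are assumed names):

	static void drv_assoc_mlo_links(struct drv_vif *vif,
					const struct cfg80211_assoc_request *req)
	{
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(req->links); i++) {
			const struct cfg80211_assoc_link *link = &req->links[i];

			if (!link->bss)
				continue;	/* this link was not requested */

			drv_setup_link(vif, i, link->bss,
				       link->elems, link->elems_len);
		}
	}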
* - * @bssid: the BSSID of the BSS to deauthenticate from + * @bssid: the BSSID or AP MLD address to deauthenticate from * @ie: Extra IEs to add to Deauthentication frame or %NULL * @ie_len: Length of ie buffer in octets * @reason_code: The reason code for the deauthentication @@ -2826,7 +2920,7 @@ struct cfg80211_deauth_request { * This structure provides information needed to complete IEEE 802.11 * disassociation. * - * @bss: the BSS to disassociate from + * @ap_addr: the BSSID or AP MLD address to disassociate from * @ie: Extra IEs to add to Disassociation frame or %NULL * @ie_len: Length of ie buffer in octets * @reason_code: The reason code for the disassociation @@ -2834,7 +2928,7 @@ struct cfg80211_deauth_request { * Disassociation frame is to be transmitted. */ struct cfg80211_disassoc_request { - struct cfg80211_bss *bss; + const u8 *ap_addr; const u8 *ie; size_t ie_len; u16 reason_code; @@ -3279,7 +3373,7 @@ struct cfg80211_wowlan_wakeup { * @kck: key confirmation key (@kck_len bytes) * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) * @kek_len: length of kek - * @kck_len length of kck + * @kck_len: length of kck * @akm: akm (oui, id) */ struct cfg80211_gtk_rekey_data { @@ -3317,6 +3411,9 @@ struct cfg80211_update_ft_ies_params { * @dont_wait_for_ack: tells the low level not to wait for an ack * @n_csa_offsets: length of csa_offsets array * @csa_offsets: array of all the csa offsets in the frame + * @link_id: for MLO, the link ID to transmit on, -1 if not given; note + * that the link ID isn't validated (much), it's in range but the + * link might not exist (or be used by the receiver STA) */ struct cfg80211_mgmt_tx_params { struct ieee80211_channel *chan; @@ -3328,6 +3425,7 @@ struct cfg80211_mgmt_tx_params { bool dont_wait_for_ack; int n_csa_offsets; const u16 *csa_offsets; + int link_id; }; /** @@ -3641,6 +3739,7 @@ struct cfg80211_pmsr_ftm_result { * @type: type of the measurement reported, note that we only support reporting * one type at a time, but you can report multiple results separately and * they're all aggregated for userspace. + * @ftm: FTM result */ struct cfg80211_pmsr_result { u64 host_time, ap_tsf; @@ -3779,7 +3878,7 @@ struct cfg80211_update_owe_info { * for the entire device * @interface_stypes: bitmap of management frame subtypes registered * for the given interface - * @global_mcast_rx: mcast RX is needed globally for these subtypes + * @global_mcast_stypes: mcast RX is needed globally for these subtypes * @interface_mcast_stypes: mcast RX is needed on this interface * for these subtypes */ @@ -3824,6 +3923,11 @@ struct mgmt_frame_regs { * keep the struct wireless_dev's iftype updated. * This additionally holds the RTNL to be able to do netdev changes. * + * @add_intf_link: Add a new MLO link to the given interface. Note that + * the wdev->link[] data structure has been updated, so the new link + * address is available. + * @del_intf_link: Remove an MLO link from the given interface. + * * @add_key: add a key with the given parameters. @mac_addr will be %NULL * when adding a group key. * @@ -4160,6 +4264,9 @@ struct mgmt_frame_regs { * radar channel. * The caller is expected to set chandef pointer to NULL in order to * disable background CAC/radar detection. + * @add_link_station: Add a link to a station. + * @mod_link_station: Modify a link of a station. + * @del_link_station: Remove a link of a station. 
*/ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); @@ -4178,6 +4285,13 @@ struct cfg80211_ops { enum nl80211_iftype type, struct vif_params *params); + int (*add_intf_link)(struct wiphy *wiphy, + struct wireless_dev *wdev, + unsigned int link_id); + void (*del_intf_link)(struct wiphy *wiphy, + struct wireless_dev *wdev, + unsigned int link_id); + int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params); @@ -4201,7 +4315,8 @@ struct cfg80211_ops { struct cfg80211_ap_settings *settings); int (*change_beacon)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_beacon_data *info); - int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev); + int (*stop_ap)(struct wiphy *wiphy, struct net_device *dev, + unsigned int link_id); int (*add_station)(struct wiphy *wiphy, struct net_device *dev, @@ -4309,6 +4424,7 @@ struct cfg80211_ops { int (*set_bitrate_mask)(struct wiphy *wiphy, struct net_device *dev, + unsigned int link_id, const u8 *peer, const struct cfg80211_bitrate_mask *mask); @@ -4384,6 +4500,7 @@ struct cfg80211_ops { int (*get_channel)(struct wiphy *wiphy, struct wireless_dev *wdev, + unsigned int link_id, struct cfg80211_chan_def *chandef); int (*start_p2p_device)(struct wiphy *wiphy, @@ -4420,6 +4537,7 @@ struct cfg80211_ops { struct cfg80211_qos_map *qos_map); int (*set_ap_chanwidth)(struct wiphy *wiphy, struct net_device *dev, + unsigned int link_id, struct cfg80211_chan_def *chandef); int (*add_tx_ts)(struct wiphy *wiphy, struct net_device *dev, @@ -4466,7 +4584,7 @@ struct cfg80211_ops { struct net_device *dev, const u8 *buf, size_t len, const u8 *dest, const __be16 proto, - const bool noencrypt, + const bool noencrypt, int link_id, u64 *cookie); int (*get_ftm_responder_stats)(struct wiphy *wiphy, @@ -4494,6 +4612,12 @@ struct cfg80211_ops { struct cfg80211_fils_aad *fils_aad); int (*set_radar_background)(struct wiphy *wiphy, struct cfg80211_chan_def *chandef); + int (*add_link_station)(struct wiphy *wiphy, struct net_device *dev, + struct link_station_parameters *params); + int (*mod_link_station)(struct wiphy *wiphy, struct net_device *dev, + struct link_station_parameters *params); + int (*del_link_station)(struct wiphy *wiphy, struct net_device *dev, + struct link_station_del_parameters *params); }; /* @@ -4545,10 +4669,14 @@ struct cfg80211_ops { * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation * before connection. * @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger kek and kck keys + * @WIPHY_FLAG_SUPPORTS_MLO: This is a temporary flag gating the MLO APIs, + * in order to not have them reachable in normal drivers, until we have + * complete feature/interface combinations/etc. advertisement. No driver + * should set this flag for now. */ enum wiphy_flags { WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0), - /* use hole at 1 */ + WIPHY_FLAG_SUPPORTS_MLO = BIT(1), WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2), WIPHY_FLAG_NETNS_OK = BIT(3), WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), @@ -4869,19 +4997,32 @@ struct wiphy_vendor_command { * 802.11-2012 8.4.2.29 for the defined fields. 
* @extended_capabilities_mask: mask of the valid values * @extended_capabilities_len: length of the extended capabilities + * @eml_capabilities: EML capabilities (for MLO) + * @mld_capa_and_ops: MLD capabilities and operations (for MLO) */ struct wiphy_iftype_ext_capab { enum nl80211_iftype iftype; const u8 *extended_capabilities; const u8 *extended_capabilities_mask; u8 extended_capabilities_len; + u16 eml_capabilities; + u16 mld_capa_and_ops; }; /** + * cfg80211_get_iftype_ext_capa - lookup interface type extended capability + * @wiphy: the wiphy to look up from + * @type: the interface type to look up + */ +const struct wiphy_iftype_ext_capab * +cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type); + +/** * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities * @max_peers: maximum number of peers in a single measurement * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement * @randomize_mac_addr: can randomize MAC address for measurement + * @ftm: FTM measurement data * @ftm.supported: FTM measurement is supported * @ftm.asap: ASAP-mode is supported * @ftm.non_asap: non-ASAP-mode is supported @@ -5134,6 +5275,13 @@ struct wiphy_iftype_akm_suites { * @ema_max_profile_periodicity: maximum profile periodicity supported by * the driver. Setting this field to a non-zero value indicates that the * driver supports enhanced multi-BSSID advertisements (EMA AP). + * @max_num_akm_suites: maximum number of AKM suites allowed for + * configuration through %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and + * %NL80211_CMD_START_AP. Set to NL80211_MAX_NR_AKM_SUITES if not set by + * driver. If set by driver minimum allowed value is + * NL80211_MAX_NR_AKM_SUITES in order to avoid compatibility issues with + * legacy userspace and maximum allowed value is + * CFG80211_MAX_NUM_AKM_SUITES. */ struct wiphy { struct mutex mtx; @@ -5280,6 +5428,7 @@ struct wiphy { u8 mbssid_max_interfaces; u8 ema_max_profile_periodicity; + u16 max_num_akm_suites; char priv[] __aligned(NETDEV_ALIGN); }; @@ -5505,16 +5654,9 @@ static inline void wiphy_unlock(struct wiphy *wiphy) * @netdev: (private) Used to reference back to the netdev, may be %NULL * @identifier: (private) Identifier used in nl80211 to identify this * wireless device if it has no netdev - * @current_bss: (private) Used by the internal configuration code - * @chandef: (private) Used by the internal configuration code to track - * the user-set channel definition. 
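/*
 * Usage sketch for the lookup helper declared just above: fetch the EML/MLD
 * capabilities a wiphy advertises for station interfaces.  The wrapper
 * function itself is illustrative only.
 */
static u16 drv_sta_eml_capa(struct wiphy *wiphy)
{
	const struct wiphy_iftype_ext_capab *capa;

	capa = cfg80211_get_iftype_ext_capa(wiphy, NL80211_IFTYPE_STATION);

	return capa ? capa->eml_capabilities : 0;
}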
- * @preset_chandef: (private) Used by the internal configuration code to - * track the channel to be used for AP later + * @u: union containing data specific to @iftype + * @connected: indicates if connected or not (STA mode) * @bssid: (private) Used by the internal configuration code - * @ssid: (private) Used by the internal configuration code - * @ssid_len: (private) Used by the internal configuration code - * @mesh_id_len: (private) Used by the internal configuration code - * @mesh_id_up_len: (private) Used by the internal configuration code * @wext: (private) Used by the internal wireless extensions compat code * @wext.ibss: (private) IBSS data part of wext handling * @wext.connect: (private) connection handling data @@ -5564,6 +5706,9 @@ static inline void wiphy_unlock(struct wiphy *wiphy) * @pmsr_free_wk: (private) peer measurements cleanup work * @unprot_beacon_reported: (private) timestamp of last * unprotected beacon report + * @links: array of %IEEE80211_MLD_MAX_NUM_LINKS elements containing @addr + * @ap and @client for each link + * @valid_links: bitmap describing what elements of @links are valid */ struct wireless_dev { struct wiphy *wiphy; @@ -5585,8 +5730,6 @@ struct wireless_dev { u8 address[ETH_ALEN] __aligned(sizeof(u16)); /* currently used for IBSS and SME - might be rearranged later */ - u8 ssid[IEEE80211_MAX_SSID_LEN]; - u8 ssid_len, mesh_id_len, mesh_id_up_len; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; enum ieee80211_bss_type conn_bss_type; @@ -5598,20 +5741,17 @@ struct wireless_dev { struct list_head event_list; spinlock_t event_lock; - struct cfg80211_internal_bss *current_bss; /* associated / joined */ - struct cfg80211_chan_def preset_chandef; - struct cfg80211_chan_def chandef; + u8 connected:1; bool ps; int ps_timeout; - int beacon_interval; - u32 ap_unexpected_nlportid; u32 owner_nlportid; bool nl_owner_dead; + /* FIXME: need to rework radar detection for MLO */ bool cac_started; unsigned long cac_start_time; unsigned int cac_time_ms; @@ -5639,6 +5779,50 @@ struct wireless_dev { struct work_struct pmsr_free_wk; unsigned long unprot_beacon_reported; + + union { + struct { + u8 connected_addr[ETH_ALEN] __aligned(2); + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + } client; + struct { + int beacon_interval; + struct cfg80211_chan_def preset_chandef; + struct cfg80211_chan_def chandef; + u8 id[IEEE80211_MAX_SSID_LEN]; + u8 id_len, id_up_len; + } mesh; + struct { + struct cfg80211_chan_def preset_chandef; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + } ap; + struct { + struct cfg80211_internal_bss *current_bss; + struct cfg80211_chan_def chandef; + int beacon_interval; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + u8 ssid_len; + } ibss; + struct { + struct cfg80211_chan_def chandef; + } ocb; + } u; + + struct { + u8 addr[ETH_ALEN] __aligned(2); + union { + struct { + unsigned int beacon_interval; + struct cfg80211_chan_def chandef; + } ap; + struct { + struct cfg80211_internal_bss *current_bss; + } client; + }; + } links[IEEE80211_MLD_MAX_NUM_LINKS]; + u16 valid_links; }; static inline const u8 *wdev_address(struct wireless_dev *wdev) @@ -5668,6 +5852,32 @@ static inline void *wdev_priv(struct wireless_dev *wdev) } /** + * wdev_chandef - return chandef pointer from wireless_dev + * @wdev: the wdev + * @link_id: the link ID for MLO + * + * Return: The chandef depending on the mode, or %NULL. 
+ */ +struct cfg80211_chan_def *wdev_chandef(struct wireless_dev *wdev, + unsigned int link_id); + +static inline void WARN_INVALID_LINK_ID(struct wireless_dev *wdev, + unsigned int link_id) +{ + WARN_ON(link_id && !wdev->valid_links); + WARN_ON(wdev->valid_links && + !(wdev->valid_links & BIT(link_id))); +} + +#define for_each_valid_link(link_info, link_id) \ + for (link_id = 0; \ + link_id < ((link_info)->valid_links ? \ + ARRAY_SIZE((link_info)->links) : 1); \ + link_id++) \ + if (!(link_info)->valid_links || \ + ((link_info)->valid_links & BIT(link_id))) + +/** * DOC: Utility functions * * cfg80211 offers a number of utility functions that can be useful. @@ -5943,6 +6153,7 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr); * @addr: the device MAC address * @iftype: the virtual interface type * @data_offset: offset of payload after the 802.11 header + * @is_amsdu: true if the 802.11 header is A-MSDU * Return: 0 on success. Non-zero on error. */ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, @@ -6695,16 +6906,36 @@ void cfg80211_rx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); /** - * cfg80211_rx_assoc_resp - notification of processed association response - * @dev: network device + * struct cfg80211_rx_assoc_resp - association response data * @bss: the BSS that association was requested with, ownership of the pointer - * moves to cfg80211 in this call + * moves to cfg80211 in the call to cfg80211_rx_assoc_resp() * @buf: (Re)Association Response frame (header + body) * @len: length of the frame data * @uapsd_queues: bitmap of queues configured for uapsd. Same format * as the AC bitmap in the QoS info field * @req_ies: information elements from the (Re)Association Request frame * @req_ies_len: length of req_ies data + * @ap_mld_addr: AP MLD address (in case of MLO) + * @links: per-link information indexed by link ID, use links[0] for + * non-MLO connections + */ +struct cfg80211_rx_assoc_resp { + const u8 *buf; + size_t len; + const u8 *req_ies; + size_t req_ies_len; + int uapsd_queues; + const u8 *ap_mld_addr; + struct { + const u8 *addr; + struct cfg80211_bss *bss; + } links[IEEE80211_MLD_MAX_NUM_LINKS]; +}; + +/** + * cfg80211_rx_assoc_resp - notification of processed association response + * @dev: network device + * @data: association response data, &struct cfg80211_rx_assoc_resp * * After being asked to associate via cfg80211_ops::assoc() the driver must * call either this function or cfg80211_auth_timeout(). @@ -6712,30 +6943,32 @@ void cfg80211_auth_timeout(struct net_device *dev, const u8 *addr); * This function may sleep. The caller must hold the corresponding wdev's mutex. */ void cfg80211_rx_assoc_resp(struct net_device *dev, - struct cfg80211_bss *bss, - const u8 *buf, size_t len, - int uapsd_queues, - const u8 *req_ies, size_t req_ies_len); + struct cfg80211_rx_assoc_resp *data); /** - * cfg80211_assoc_timeout - notification of timed out association - * @dev: network device - * @bss: The BSS entry with which association timed out. - * - * This function may sleep. The caller must hold the corresponding wdev's mutex. 
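/*
 * Illustration of the for_each_valid_link() iterator and wdev_chandef()
 * helper introduced above: walk every valid link of a wdev (or just link 0
 * for a non-MLO interface) and inspect its channel definition.  The loop
 * body is purely illustrative.
 */
static void drv_dump_link_channels(struct wireless_dev *wdev)
{
	struct cfg80211_chan_def *chandef;
	unsigned int link_id;

	for_each_valid_link(wdev, link_id) {
		chandef = wdev_chandef(wdev, link_id);
		if (chandef && chandef->chan)
			pr_debug("link %u: %u MHz\n", link_id,
				 chandef->chan->center_freq);
	}
}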
+ * struct cfg80211_assoc_failure - association failure data + * @ap_mld_addr: AP MLD address, or %NULL + * @bss: list of BSSes, must use entry 0 for non-MLO connections + * (@ap_mld_addr is %NULL) + * @timeout: indicates the association failed due to timeout, otherwise + * the association was abandoned for a reason reported through some + * other API (e.g. deauth RX) */ -void cfg80211_assoc_timeout(struct net_device *dev, struct cfg80211_bss *bss); +struct cfg80211_assoc_failure { + const u8 *ap_mld_addr; + struct cfg80211_bss *bss[IEEE80211_MLD_MAX_NUM_LINKS]; + bool timeout; +}; /** - * cfg80211_abandon_assoc - notify cfg80211 of abandoned association attempt + * cfg80211_assoc_failure - notification of association failure * @dev: network device - * @bss: The BSS entry with which association was abandoned. + * @data: data describing the association failure * - * Call this whenever - for reasons reported through other API, like deauth RX, - * an association attempt was abandoned. * This function may sleep. The caller must hold the corresponding wdev's mutex. */ -void cfg80211_abandon_assoc(struct net_device *dev, struct cfg80211_bss *bss); +void cfg80211_assoc_failure(struct net_device *dev, + struct cfg80211_assoc_failure *data); /** * cfg80211_tx_mlme_mgmt - notification of transmitted deauth/disassoc frame @@ -6812,6 +7045,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, * @macaddr: the MAC address of the new candidate * @ie: information elements advertised by the peer candidate * @ie_len: length of the information elements buffer + * @sig_dbm: signal level in dBm * @gfp: allocation flags * * This function notifies cfg80211 that the mesh peer candidate has been @@ -7176,13 +7410,6 @@ struct cfg80211_fils_resp_params { * indicate that this is a failure, but without a status code. * @timeout_reason is used to report the reason for the timeout in that * case. - * @bssid: The BSSID of the AP (may be %NULL) - * @bss: Entry of bss to which STA got connected to, can be obtained through - * cfg80211_get_bss() (may be %NULL). But it is recommended to store the - * bss from the connect_request and hold a reference to it and return - * through this param to avoid a warning if the bss is expired during the - * connection, esp. for those drivers implementing connect op. - * Only one parameter among @bssid and @bss needs to be specified. * @req_ie: Association request IEs (may be %NULL) * @req_ie_len: Association request IEs length * @resp_ie: Association response IEs (may be %NULL) @@ -7194,17 +7421,41 @@ struct cfg80211_fils_resp_params { * not known. This value is used only if @status < 0 to indicate that the * failure is due to a timeout and not due to explicit rejection by the AP. * This value is ignored in other cases (@status >= 0). + * @valid_links: For MLO connection, BIT mask of the valid link ids. Otherwise + * zero. + * @ap_mld_addr: For MLO connection, MLD address of the AP. Otherwise %NULL. + * @links : For MLO connection, contains link info for the valid links indicated + * using @valid_links. For non-MLO connection, links[0] contains the + * connected AP info. + * @links.addr: For MLO connection, MAC address of the STA link. Otherwise + * %NULL. + * @links.bssid: For MLO connection, MAC address of the AP link. For non-MLO + * connection, links[0].bssid points to the BSSID of the AP (may be %NULL). + * @links.bss: For MLO connection, entry of bss to which STA link is connected. 
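/*
 * Sketch of reporting a plain (non-MLO) association timeout through the new
 * combined notifier; this replaces what used to be a cfg80211_assoc_timeout()
 * call.  Calling context (wdev mutex held, may sleep) is as documented in
 * this hunk; the wrapper is illustrative.
 */
static void drv_report_assoc_timeout(struct net_device *dev,
				     struct cfg80211_bss *bss)
{
	struct cfg80211_assoc_failure data = {
		.bss[0] = bss,		/* entry 0 for non-MLO connections */
		.timeout = true,	/* timed out, not abandoned */
	};

	cfg80211_assoc_failure(dev, &data);
}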
+ * For non-MLO connection, links[0].bss points to entry of bss to which STA + * is connected. It can be obtained through cfg80211_get_bss() (may be + * %NULL). It is recommended to store the bss from the connect_request and + * hold a reference to it and return through this param to avoid a warning + * if the bss is expired during the connection, esp. for those drivers + * implementing connect op. Only one parameter among @bssid and @bss needs + * to be specified. */ struct cfg80211_connect_resp_params { int status; - const u8 *bssid; - struct cfg80211_bss *bss; const u8 *req_ie; size_t req_ie_len; const u8 *resp_ie; size_t resp_ie_len; struct cfg80211_fils_resp_params fils; enum nl80211_timeout_reason timeout_reason; + + const u8 *ap_mld_addr; + u16 valid_links; + struct { + const u8 *addr; + const u8 *bssid; + struct cfg80211_bss *bss; + } links[IEEE80211_MLD_MAX_NUM_LINKS]; }; /** @@ -7274,8 +7525,8 @@ cfg80211_connect_bss(struct net_device *dev, const u8 *bssid, memset(¶ms, 0, sizeof(params)); params.status = status; - params.bssid = bssid; - params.bss = bss; + params.links[0].bssid = bssid; + params.links[0].bss = bss; params.req_ie = req_ie; params.req_ie_len = req_ie_len; params.resp_ie = resp_ie; @@ -7346,24 +7597,40 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid, /** * struct cfg80211_roam_info - driver initiated roaming information * - * @channel: the channel of the new AP - * @bss: entry of bss to which STA got roamed (may be %NULL if %bssid is set) - * @bssid: the BSSID of the new AP (may be %NULL if %bss is set) * @req_ie: association request IEs (maybe be %NULL) * @req_ie_len: association request IEs length * @resp_ie: association response IEs (may be %NULL) * @resp_ie_len: assoc response IEs length * @fils: FILS related roaming information. + * @valid_links: For MLO roaming, BIT mask of the new valid links is set. + * Otherwise zero. + * @ap_mld_addr: For MLO roaming, MLD address of the new AP. Otherwise %NULL. + * @links : For MLO roaming, contains new link info for the valid links set in + * @valid_links. For non-MLO roaming, links[0] contains the new AP info. + * @links.addr: For MLO roaming, MAC address of the STA link. Otherwise %NULL. + * @links.bssid: For MLO roaming, MAC address of the new AP link. For non-MLO + * roaming, links[0].bssid points to the BSSID of the new AP. May be + * %NULL if %links.bss is set. + * @links.channel: the channel of the new AP. + * @links.bss: For MLO roaming, entry of new bss to which STA link got + * roamed. For non-MLO roaming, links[0].bss points to entry of bss to + * which STA got roamed (may be %NULL if %links.bssid is set) */ struct cfg80211_roam_info { - struct ieee80211_channel *channel; - struct cfg80211_bss *bss; - const u8 *bssid; const u8 *req_ie; size_t req_ie_len; const u8 *resp_ie; size_t resp_ie_len; struct cfg80211_fils_resp_params fils; + + const u8 *ap_mld_addr; + u16 valid_links; + struct { + const u8 *addr; + const u8 *bssid; + struct ieee80211_channel *channel; + struct cfg80211_bss *bss; + } links[IEEE80211_MLD_MAX_NUM_LINKS]; }; /** @@ -7530,6 +7797,48 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, gfp_t gfp); /** + * struct cfg80211_rx_info - received management frame info + * + * @freq: Frequency on which the frame was received in kHz + * @sig_dbm: signal strength in dBm, or 0 if unknown + * @have_link_id: indicates the frame was received on a link of + * an MLD, i.e. 
the @link_id field is valid + * @link_id: the ID of the link the frame was received on + * @buf: Management frame (header + body) + * @len: length of the frame data + * @flags: flags, as defined in enum nl80211_rxmgmt_flags + * @rx_tstamp: Hardware timestamp of frame RX in nanoseconds + * @ack_tstamp: Hardware timestamp of ack TX in nanoseconds + */ +struct cfg80211_rx_info { + int freq; + int sig_dbm; + bool have_link_id; + u8 link_id; + const u8 *buf; + size_t len; + u32 flags; + u64 rx_tstamp; + u64 ack_tstamp; +}; + +/** + * cfg80211_rx_mgmt_ext - management frame notification with extended info + * @wdev: wireless device receiving the frame + * @info: RX info as defined in struct cfg80211_rx_info + * + * This function is called whenever an Action frame is received for a station + * mode interface, but is not processed in kernel. + * + * Return: %true if a user space application has registered for this frame. + * For action frames, that makes it responsible for rejecting unrecognized + * action frames; %false otherwise, in which case for action frames the + * driver is responsible for rejecting the frame. + */ +bool cfg80211_rx_mgmt_ext(struct wireless_dev *wdev, + struct cfg80211_rx_info *info); + +/** * cfg80211_rx_mgmt_khz - notification of received, unprocessed management frame * @wdev: wireless device receiving the frame * @freq: Frequency on which the frame was received in KHz @@ -7546,8 +7855,20 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * action frames; %false otherwise, in which case for action frames the * driver is responsible for rejecting the frame. */ -bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm, - const u8 *buf, size_t len, u32 flags); +static inline bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, + int sig_dbm, const u8 *buf, size_t len, + u32 flags) +{ + struct cfg80211_rx_info info = { + .freq = freq, + .sig_dbm = sig_dbm, + .buf = buf, + .len = len, + .flags = flags + }; + + return cfg80211_rx_mgmt_ext(wdev, &info); +} /** * cfg80211_rx_mgmt - notification of received, unprocessed management frame @@ -7570,11 +7891,50 @@ static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, const u8 *buf, size_t len, u32 flags) { - return cfg80211_rx_mgmt_khz(wdev, MHZ_TO_KHZ(freq), sig_dbm, buf, len, - flags); + struct cfg80211_rx_info info = { + .freq = MHZ_TO_KHZ(freq), + .sig_dbm = sig_dbm, + .buf = buf, + .len = len, + .flags = flags + }; + + return cfg80211_rx_mgmt_ext(wdev, &info); } /** + * struct cfg80211_tx_status - TX status for management frame information + * + * @cookie: Cookie returned by cfg80211_ops::mgmt_tx() + * @tx_tstamp: hardware TX timestamp in nanoseconds + * @ack_tstamp: hardware ack RX timestamp in nanoseconds + * @buf: Management frame (header + body) + * @len: length of the frame data + * @ack: Whether frame was acknowledged + */ +struct cfg80211_tx_status { + u64 cookie; + u64 tx_tstamp; + u64 ack_tstamp; + const u8 *buf; + size_t len; + bool ack; +}; + +/** + * cfg80211_mgmt_tx_status_ext - TX status notification with extended info + * @wdev: wireless device receiving the frame + * @status: TX status data + * @gfp: context flags + * + * This function is called whenever a management frame was requested to be + * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the + * transmission attempt with extended info. 
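/*
 * Sketch of a driver feeding a received action frame, together with its
 * hardware RX and ack-TX timestamps, into the extended notifier added above.
 * The timestamp and frequency sources are hypothetical.
 */
static bool drv_report_action_frame(struct wireless_dev *wdev, int freq_khz,
				    int sig_dbm, const u8 *buf, size_t len,
				    u64 rx_ns, u64 ack_ns)
{
	struct cfg80211_rx_info info = {
		.freq = freq_khz,	/* kHz, as with cfg80211_rx_mgmt_khz() */
		.sig_dbm = sig_dbm,
		.buf = buf,
		.len = len,
		.rx_tstamp = rx_ns,
		.ack_tstamp = ack_ns,
	};

	return cfg80211_rx_mgmt_ext(wdev, &info);
}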
+ */ +void cfg80211_mgmt_tx_status_ext(struct wireless_dev *wdev, + struct cfg80211_tx_status *status, gfp_t gfp); + +/** * cfg80211_mgmt_tx_status - notification of TX status for management frame * @wdev: wireless device receiving the frame * @cookie: Cookie returned by cfg80211_ops::mgmt_tx() @@ -7587,8 +7947,19 @@ static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the * transmission attempt. */ -void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, - const u8 *buf, size_t len, bool ack, gfp_t gfp); +static inline void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, + u64 cookie, const u8 *buf, + size_t len, bool ack, gfp_t gfp) +{ + struct cfg80211_tx_status status = { + .cookie = cookie, + .buf = buf, + .len = len, + .ack = ack + }; + + cfg80211_mgmt_tx_status_ext(wdev, &status, gfp); +} /** * cfg80211_control_port_tx_status - notification of TX status for control @@ -7882,12 +8253,14 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, * cfg80211_ch_switch_notify - update wdev channel and notify userspace * @dev: the device which switched channels * @chandef: the new channel definition + * @link_id: the link ID for MLO, must be 0 for non-MLO * * Caller must acquire wdev_lock, therefore must only be called from sleepable * driver context! */ void cfg80211_ch_switch_notify(struct net_device *dev, - struct cfg80211_chan_def *chandef); + struct cfg80211_chan_def *chandef, + unsigned int link_id); /* * cfg80211_ch_switch_started_notify - notify channel switch start @@ -8369,13 +8742,13 @@ bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype, * cfg80211_assoc_comeback - notification of association that was * temporarly rejected with a comeback * @netdev: network device - * @bss: the bss entry with which association is in progress. + * @ap_addr: AP (MLD) address that rejected the assocation * @timeout: timeout interval value TUs. * * this function may sleep. the caller must hold the corresponding wdev's mutex. */ void cfg80211_assoc_comeback(struct net_device *netdev, - struct cfg80211_bss *bss, u32 timeout); + const u8 *ap_addr, u32 timeout); /* Logging, debugging and troubleshooting/diagnostic helpers. 
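/*
 * TX-side counterpart to the RX sketch above: report the fate of a frame sent
 * via ->mgmt_tx(), including hardware TX/ack timestamps, through
 * cfg80211_mgmt_tx_status_ext().  The wrapper parameters are illustrative;
 * use GFP_ATOMIC instead of GFP_KERNEL when calling from atomic context.
 */
static void drv_report_mgmt_tx_done(struct wireless_dev *wdev, u64 cookie,
				    const u8 *buf, size_t len, bool acked,
				    u64 tx_ns, u64 ack_ns)
{
	struct cfg80211_tx_status status = {
		.cookie = cookie,
		.buf = buf,
		.len = len,
		.ack = acked,
		.tx_tstamp = tx_ns,
		.ack_tstamp = ack_ns,
	};

	cfg80211_mgmt_tx_status_ext(wdev, &status, GFP_KERNEL);
}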
*/ diff --git a/include/net/codel_qdisc.h b/include/net/codel_qdisc.h index 58b6d0ebea10..7d3d9219f4fe 100644 --- a/include/net/codel_qdisc.h +++ b/include/net/codel_qdisc.h @@ -49,6 +49,7 @@ * Implemented on linux by Dave Taht and Eric Dumazet */ +#include <net/codel.h> #include <net/pkt_sched.h> /* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ diff --git a/include/net/datalink.h b/include/net/datalink.h index d9b7faaa539f..c837ffc7ebf8 100644 --- a/include/net/datalink.h +++ b/include/net/datalink.h @@ -2,6 +2,13 @@ #ifndef _NET_INET_DATALINK_H_ #define _NET_INET_DATALINK_H_ +#include <linux/list.h> + +struct llc_sap; +struct net_device; +struct packet_type; +struct sk_buff; + struct datalink_proto { unsigned char type[8]; diff --git a/include/net/dcbevent.h b/include/net/dcbevent.h index 43e34131a53f..02700262f71a 100644 --- a/include/net/dcbevent.h +++ b/include/net/dcbevent.h @@ -8,6 +8,8 @@ #ifndef _DCB_EVENT_H #define _DCB_EVENT_H +struct notifier_block; + enum dcbevent_notif_type { DCB_APP_EVENT = 1, }; diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index e4ad58c4062c..2b2d86fb3131 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -10,6 +10,8 @@ #include <linux/dcbnl.h> +struct net_device; + struct dcb_app_type { int ifindex; struct dcb_app app; diff --git a/include/net/devlink.h b/include/net/devlink.h index 2a2a2a0c93f7..119ed1ffb988 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1509,6 +1509,27 @@ struct devlink_ops { struct devlink_rate *parent, void *priv_child, void *priv_parent, struct netlink_ext_ack *extack); + /** + * selftests_check() - queries if selftest is supported + * @devlink: devlink instance + * @id: test index + * @extack: extack for reporting error messages + * + * Return: true if test is supported by the driver + */ + bool (*selftest_check)(struct devlink *devlink, unsigned int id, + struct netlink_ext_ack *extack); + /** + * selftest_run() - Runs a selftest + * @devlink: devlink instance + * @id: test index + * @extack: extack for reporting error messages + * + * Return: status of the test + */ + enum devlink_selftest_status + (*selftest_run)(struct devlink *devlink, unsigned int id, + struct netlink_ext_ack *extack); }; void *devlink_priv(struct devlink *devlink); @@ -1517,19 +1538,11 @@ struct device *devlink_to_dev(const struct devlink *devlink); /* Devlink instance explicit locking */ void devl_lock(struct devlink *devlink); +int devl_trylock(struct devlink *devlink); void devl_unlock(struct devlink *devlink); void devl_assert_locked(struct devlink *devlink); bool devl_lock_is_held(struct devlink *devlink); -int devl_port_register(struct devlink *devlink, - struct devlink_port *devlink_port, - unsigned int port_index); -void devl_port_unregister(struct devlink_port *devlink_port); - -int devl_rate_leaf_create(struct devlink_port *port, void *priv); -void devl_rate_leaf_destroy(struct devlink_port *devlink_port); -void devl_rate_nodes_destroy(struct devlink *devlink); - struct ib_device; struct net *devlink_net(const struct devlink *devlink); @@ -1551,9 +1564,13 @@ void devlink_set_features(struct devlink *devlink, u64 features); void devlink_register(struct devlink *devlink); void devlink_unregister(struct devlink *devlink); void devlink_free(struct devlink *devlink); +int devl_port_register(struct devlink *devlink, + struct devlink_port *devlink_port, + unsigned int port_index); int devlink_port_register(struct devlink *devlink, struct devlink_port *devlink_port, unsigned int port_index); 
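/*
 * Sketch of a driver implementing the new devlink selftest callbacks.  Only
 * the callback signatures come from this patch; DEVLINK_ATTR_SELFTEST_ID_FLASH,
 * the DEVLINK_SELFTEST_STATUS_* values and drv_flash_selftest() are
 * assumptions made for illustration.
 */
static bool drv_selftest_check(struct devlink *devlink, unsigned int id,
			       struct netlink_ext_ack *extack)
{
	return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}

static enum devlink_selftest_status
drv_selftest_run(struct devlink *devlink, unsigned int id,
		 struct netlink_ext_ack *extack)
{
	if (id != DEVLINK_ATTR_SELFTEST_ID_FLASH)
		return DEVLINK_SELFTEST_STATUS_SKIP;

	return drv_flash_selftest(devlink_priv(devlink)) ?
	       DEVLINK_SELFTEST_STATUS_FAIL : DEVLINK_SELFTEST_STATUS_PASS;
}

static const struct devlink_ops drv_devlink_ops = {
	.selftest_check	= drv_selftest_check,
	.selftest_run	= drv_selftest_run,
};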
+void devl_port_unregister(struct devlink_port *devlink_port); void devlink_port_unregister(struct devlink_port *devlink_port); void devlink_port_type_eth_set(struct devlink_port *devlink_port, struct net_device *netdev); @@ -1569,9 +1586,9 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 contro void devlink_port_attrs_pci_sf_set(struct devlink_port *devlink_port, u32 controller, u16 pf, u32 sf, bool external); -int devlink_rate_leaf_create(struct devlink_port *port, void *priv); -void devlink_rate_leaf_destroy(struct devlink_port *devlink_port); -void devlink_rate_nodes_destroy(struct devlink *devlink); +int devl_rate_leaf_create(struct devlink_port *port, void *priv); +void devl_rate_leaf_destroy(struct devlink_port *devlink_port); +void devl_rate_nodes_destroy(struct devlink *devlink); void devlink_port_linecard_set(struct devlink_port *devlink_port, struct devlink_linecard *linecard); struct devlink_linecard * @@ -1584,20 +1601,27 @@ void devlink_linecard_provision_clear(struct devlink_linecard *linecard); void devlink_linecard_provision_fail(struct devlink_linecard *linecard); void devlink_linecard_activate(struct devlink_linecard *linecard); void devlink_linecard_deactivate(struct devlink_linecard *linecard); +void devlink_linecard_nested_dl_set(struct devlink_linecard *linecard, + struct devlink *nested_devlink); +int devl_sb_register(struct devlink *devlink, unsigned int sb_index, + u32 size, u16 ingress_pools_count, + u16 egress_pools_count, u16 ingress_tc_count, + u16 egress_tc_count); int devlink_sb_register(struct devlink *devlink, unsigned int sb_index, u32 size, u16 ingress_pools_count, u16 egress_pools_count, u16 ingress_tc_count, u16 egress_tc_count); +void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index); void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index); -int devlink_dpipe_table_register(struct devlink *devlink, - const char *table_name, - struct devlink_dpipe_table_ops *table_ops, - void *priv, bool counter_control_extern); -void devlink_dpipe_table_unregister(struct devlink *devlink, - const char *table_name); -int devlink_dpipe_headers_register(struct devlink *devlink, - struct devlink_dpipe_headers *dpipe_headers); -void devlink_dpipe_headers_unregister(struct devlink *devlink); +int devl_dpipe_table_register(struct devlink *devlink, + const char *table_name, + struct devlink_dpipe_table_ops *table_ops, + void *priv, bool counter_control_extern); +void devl_dpipe_table_unregister(struct devlink *devlink, + const char *table_name); +void devl_dpipe_headers_register(struct devlink *devlink, + struct devlink_dpipe_headers *dpipe_headers); +void devl_dpipe_headers_unregister(struct devlink *devlink); bool devlink_dpipe_table_counter_enabled(struct devlink *devlink, const char *table_name); int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx); @@ -1613,23 +1637,40 @@ extern struct devlink_dpipe_header devlink_dpipe_header_ethernet; extern struct devlink_dpipe_header devlink_dpipe_header_ipv4; extern struct devlink_dpipe_header devlink_dpipe_header_ipv6; +int devl_resource_register(struct devlink *devlink, + const char *resource_name, + u64 resource_size, + u64 resource_id, + u64 parent_resource_id, + const struct devlink_resource_size_params *size_params); int devlink_resource_register(struct devlink *devlink, const char *resource_name, u64 resource_size, u64 resource_id, u64 parent_resource_id, const struct devlink_resource_size_params *size_params); +void 
devl_resources_unregister(struct devlink *devlink); void devlink_resources_unregister(struct devlink *devlink); -int devlink_resource_size_get(struct devlink *devlink, - u64 resource_id, - u64 *p_resource_size); +int devl_resource_size_get(struct devlink *devlink, + u64 resource_id, + u64 *p_resource_size); +int devl_dpipe_table_resource_set(struct devlink *devlink, + const char *table_name, u64 resource_id, + u64 resource_units); int devlink_dpipe_table_resource_set(struct devlink *devlink, const char *table_name, u64 resource_id, u64 resource_units); +void devl_resource_occ_get_register(struct devlink *devlink, + u64 resource_id, + devlink_resource_occ_get_t *occ_get, + void *occ_get_priv); void devlink_resource_occ_get_register(struct devlink *devlink, u64 resource_id, devlink_resource_occ_get_t *occ_get, void *occ_get_priv); +void devl_resource_occ_get_unregister(struct devlink *devlink, + u64 resource_id); + void devlink_resource_occ_get_unregister(struct devlink *devlink, u64 resource_id); int devlink_params_register(struct devlink *devlink, @@ -1647,6 +1688,10 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id, int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id, union devlink_param_value init_val); void devlink_param_value_changed(struct devlink *devlink, u32 param_id); +struct devlink_region *devl_region_create(struct devlink *devlink, + const struct devlink_region_ops *ops, + u32 region_max_snapshots, + u64 region_size); struct devlink_region * devlink_region_create(struct devlink *devlink, const struct devlink_region_ops *ops, @@ -1655,6 +1700,7 @@ struct devlink_region * devlink_port_region_create(struct devlink_port *port, const struct devlink_port_region_ops *ops, u32 region_max_snapshots, u64 region_size); +void devl_region_destroy(struct devlink_region *region); void devlink_region_destroy(struct devlink_region *region); void devlink_port_region_destroy(struct devlink_region *region); @@ -1750,9 +1796,15 @@ void devlink_flash_update_timeout_notify(struct devlink *devlink, const char *component, unsigned long timeout); +int devl_traps_register(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count, void *priv); int devlink_traps_register(struct devlink *devlink, const struct devlink_trap *traps, size_t traps_count, void *priv); +void devl_traps_unregister(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count); void devlink_traps_unregister(struct devlink *devlink, const struct devlink_trap *traps, size_t traps_count); @@ -1760,20 +1812,26 @@ void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb, void *trap_ctx, struct devlink_port *in_devlink_port, const struct flow_action_cookie *fa_cookie); void *devlink_trap_ctx_priv(void *trap_ctx); +int devl_trap_groups_register(struct devlink *devlink, + const struct devlink_trap_group *groups, + size_t groups_count); int devlink_trap_groups_register(struct devlink *devlink, const struct devlink_trap_group *groups, size_t groups_count); +void devl_trap_groups_unregister(struct devlink *devlink, + const struct devlink_trap_group *groups, + size_t groups_count); void devlink_trap_groups_unregister(struct devlink *devlink, const struct devlink_trap_group *groups, size_t groups_count); int -devlink_trap_policers_register(struct devlink *devlink, - const struct devlink_trap_policer *policers, - size_t policers_count); +devl_trap_policers_register(struct devlink *devlink, + const struct devlink_trap_policer *policers, 
+ size_t policers_count); void -devlink_trap_policers_unregister(struct devlink *devlink, - const struct devlink_trap_policer *policers, - size_t policers_count); +devl_trap_policers_unregister(struct devlink *devlink, + const struct devlink_trap_policer *policers, + size_t policers_count); #if IS_ENABLED(CONFIG_NET_DEVLINK) diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h index 595b4f6c1eb1..bec303ea8367 100644 --- a/include/net/dn_dev.h +++ b/include/net/dn_dev.h @@ -2,6 +2,7 @@ #ifndef _NET_DN_DEV_H #define _NET_DN_DEV_H +#include <linux/netdevice.h> struct dn_dev; diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h index ddd6565957b3..1929a3cd5ebe 100644 --- a/include/net/dn_fib.h +++ b/include/net/dn_fib.h @@ -4,6 +4,8 @@ #include <linux/netlink.h> #include <linux/refcount.h> +#include <linux/rtnetlink.h> +#include <net/fib_rules.h> extern const struct nla_policy rtm_dn_policy[]; diff --git a/include/net/dn_neigh.h b/include/net/dn_neigh.h index 2e3e7793973a..1f7df98bfc33 100644 --- a/include/net/dn_neigh.h +++ b/include/net/dn_neigh.h @@ -2,6 +2,8 @@ #ifndef _NET_DN_NEIGH_H #define _NET_DN_NEIGH_H +#include <net/neighbour.h> + /* * The position of the first two fields of * this structure are critical - SJW diff --git a/include/net/dn_nsp.h b/include/net/dn_nsp.h index f83932b864a9..a4a18fee0b7c 100644 --- a/include/net/dn_nsp.h +++ b/include/net/dn_nsp.h @@ -6,6 +6,12 @@ *******************************************************************************/ /* dn_nsp.c functions prototyping */ +#include <linux/atomic.h> +#include <linux/types.h> +#include <net/sock.h> + +struct sk_buff; +struct sk_buff_head; void dn_nsp_send_data_ack(struct sock *sk); void dn_nsp_send_oth_ack(struct sock *sk); diff --git a/include/net/dn_route.h b/include/net/dn_route.h index 6f1e94ac0bdf..88c0300236cc 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -7,6 +7,9 @@ *******************************************************************************/ +#include <linux/types.h> +#include <net/dst.h> + struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri); int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *, struct sock *sk, int flags); diff --git a/include/net/dropreason.h b/include/net/dropreason.h new file mode 100644 index 000000000000..fae9b40e54fa --- /dev/null +++ b/include/net/dropreason.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _LINUX_DROPREASON_H +#define _LINUX_DROPREASON_H + +/** + * enum skb_drop_reason - the reasons of skb drops + * + * The reason of skb drop, which is used in kfree_skb_reason(). 
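/*
 * Minimal usage sketch for the enum that follows, assuming the
 * kfree_skb_reason(skb, reason) helper from <linux/skbuff.h>: drop a packet
 * while recording why, so the reason shows up in the kfree_skb tracepoint
 * and drop monitor.
 */
static void drv_drop_unmatched(struct sk_buff *skb)
{
	kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
}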
+ */ +enum skb_drop_reason { + /** + * @SKB_NOT_DROPPED_YET: skb is not dropped yet (used for no-drop case) + */ + SKB_NOT_DROPPED_YET = 0, + /** @SKB_DROP_REASON_NOT_SPECIFIED: drop reason is not specified */ + SKB_DROP_REASON_NOT_SPECIFIED, + /** @SKB_DROP_REASON_NO_SOCKET: socket not found */ + SKB_DROP_REASON_NO_SOCKET, + /** @SKB_DROP_REASON_PKT_TOO_SMALL: packet size is too small */ + SKB_DROP_REASON_PKT_TOO_SMALL, + /** @SKB_DROP_REASON_TCP_CSUM: TCP checksum error */ + SKB_DROP_REASON_TCP_CSUM, + /** @SKB_DROP_REASON_SOCKET_FILTER: dropped by socket filter */ + SKB_DROP_REASON_SOCKET_FILTER, + /** @SKB_DROP_REASON_UDP_CSUM: UDP checksum error */ + SKB_DROP_REASON_UDP_CSUM, + /** @SKB_DROP_REASON_NETFILTER_DROP: dropped by netfilter */ + SKB_DROP_REASON_NETFILTER_DROP, + /** + * @SKB_DROP_REASON_OTHERHOST: packet doesn't belong to the current host + * (interface is in promisc mode) + */ + SKB_DROP_REASON_OTHERHOST, + /** @SKB_DROP_REASON_IP_CSUM: IP checksum error */ + SKB_DROP_REASON_IP_CSUM, + /** + * @SKB_DROP_REASON_IP_INHDR: there is something wrong with the IP header (see + * IPSTATS_MIB_INHDRERRORS) + */ + SKB_DROP_REASON_IP_INHDR, + /** + * @SKB_DROP_REASON_IP_RPFILTER: IP rpfilter validation failed. See the + * documentation for rp_filter in ip-sysctl.rst for more information + */ + SKB_DROP_REASON_IP_RPFILTER, + /** + * @SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST: destination address of L2 is + * multicast, but L3 is unicast. + */ + SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, + /** @SKB_DROP_REASON_XFRM_POLICY: xfrm policy check failed */ + SKB_DROP_REASON_XFRM_POLICY, + /** @SKB_DROP_REASON_IP_NOPROTO: no support for IP protocol */ + SKB_DROP_REASON_IP_NOPROTO, + /** @SKB_DROP_REASON_SOCKET_RCVBUFF: socket receive buff is full */ + SKB_DROP_REASON_SOCKET_RCVBUFF, + /** + * @SKB_DROP_REASON_PROTO_MEM: proto memory limitation, such as udp packet + * drop out of udp_memory_allocated. + */ + SKB_DROP_REASON_PROTO_MEM, + /** + * @SKB_DROP_REASON_TCP_MD5NOTFOUND: no MD5 hash and one expected, + * corresponding to LINUX_MIB_TCPMD5NOTFOUND + */ + SKB_DROP_REASON_TCP_MD5NOTFOUND, + /** + * @SKB_DROP_REASON_TCP_MD5UNEXPECTED: MD5 hash and we're not expecting + * one, corresponding to LINUX_MIB_TCPMD5UNEXPECTED + */ + SKB_DROP_REASON_TCP_MD5UNEXPECTED, + /** + * @SKB_DROP_REASON_TCP_MD5FAILURE: MD5 hash and it's wrong, corresponding + * to LINUX_MIB_TCPMD5FAILURE + */ + SKB_DROP_REASON_TCP_MD5FAILURE, + /** + * @SKB_DROP_REASON_SOCKET_BACKLOG: failed to add skb to socket backlog ( + * see LINUX_MIB_TCPBACKLOGDROP) + */ + SKB_DROP_REASON_SOCKET_BACKLOG, + /** @SKB_DROP_REASON_TCP_FLAGS: TCP flags invalid */ + SKB_DROP_REASON_TCP_FLAGS, + /** + * @SKB_DROP_REASON_TCP_ZEROWINDOW: TCP receive window size is zero, + * see LINUX_MIB_TCPZEROWINDOWDROP + */ + SKB_DROP_REASON_TCP_ZEROWINDOW, + /** + * @SKB_DROP_REASON_TCP_OLD_DATA: the TCP data received was already + * received before (a spurious retransmission may have happened), see + * LINUX_MIB_DELAYEDACKLOST + */ + SKB_DROP_REASON_TCP_OLD_DATA, + /** + * @SKB_DROP_REASON_TCP_OVERWINDOW: the TCP data is out of window, + * the seq of the first byte exceeds the right edge of the receive + * window + */ + SKB_DROP_REASON_TCP_OVERWINDOW, + /** + * @SKB_DROP_REASON_TCP_OFOMERGE: the data of the skb is already in the ofo + * queue, corresponding to LINUX_MIB_TCPOFOMERGE + */ + SKB_DROP_REASON_TCP_OFOMERGE, + /** + * @SKB_DROP_REASON_TCP_RFC7323_PAWS: PAWS check, corresponding to + * LINUX_MIB_PAWSESTABREJECTED + */ + SKB_DROP_REASON_TCP_RFC7323_PAWS, + /** @SKB_DROP_REASON_TCP_INVALID_SEQUENCE: Not acceptable SEQ field */ + SKB_DROP_REASON_TCP_INVALID_SEQUENCE, + /** @SKB_DROP_REASON_TCP_RESET: Invalid RST packet */ + SKB_DROP_REASON_TCP_RESET, + /** + * @SKB_DROP_REASON_TCP_INVALID_SYN: Incoming packet has unexpected + * SYN flag + */ + SKB_DROP_REASON_TCP_INVALID_SYN, + /** @SKB_DROP_REASON_TCP_CLOSE: TCP socket in CLOSE state */ + SKB_DROP_REASON_TCP_CLOSE, + /** @SKB_DROP_REASON_TCP_FASTOPEN: dropped by FASTOPEN request socket */ + SKB_DROP_REASON_TCP_FASTOPEN, + /** @SKB_DROP_REASON_TCP_OLD_ACK: TCP ACK is old, but in window */ + SKB_DROP_REASON_TCP_OLD_ACK, + /** @SKB_DROP_REASON_TCP_TOO_OLD_ACK: TCP ACK is too old */ + SKB_DROP_REASON_TCP_TOO_OLD_ACK, + /** + * @SKB_DROP_REASON_TCP_ACK_UNSENT_DATA: TCP ACK for data we haven't + * sent yet + */ + SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, + /** @SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE: pruned from TCP OFO queue */ + SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE, + /** @SKB_DROP_REASON_TCP_OFO_DROP: data already in receive queue */ + SKB_DROP_REASON_TCP_OFO_DROP, + /** @SKB_DROP_REASON_IP_OUTNOROUTES: route lookup failed */ + SKB_DROP_REASON_IP_OUTNOROUTES, + /** + * @SKB_DROP_REASON_BPF_CGROUP_EGRESS: dropped by BPF_PROG_TYPE_CGROUP_SKB + * eBPF program + */ + SKB_DROP_REASON_BPF_CGROUP_EGRESS, + /** @SKB_DROP_REASON_IPV6DISABLED: IPv6 is disabled on the device */ + SKB_DROP_REASON_IPV6DISABLED, + /** @SKB_DROP_REASON_NEIGH_CREATEFAIL: failed to create neigh entry */ + SKB_DROP_REASON_NEIGH_CREATEFAIL, + /** @SKB_DROP_REASON_NEIGH_FAILED: neigh entry in failed state */ + SKB_DROP_REASON_NEIGH_FAILED, + /** @SKB_DROP_REASON_NEIGH_QUEUEFULL: arp_queue for neigh entry is full */ + SKB_DROP_REASON_NEIGH_QUEUEFULL, + /** @SKB_DROP_REASON_NEIGH_DEAD: neigh entry is dead */ + SKB_DROP_REASON_NEIGH_DEAD, + /** @SKB_DROP_REASON_TC_EGRESS: dropped in TC egress HOOK */ + SKB_DROP_REASON_TC_EGRESS, + /** + * @SKB_DROP_REASON_QDISC_DROP: 
dropped by qdisc when packet outputting ( + * failed to enqueue to current qdisc) + */ + SKB_DROP_REASON_QDISC_DROP, + /** + * @SKB_DROP_REASON_CPU_BACKLOG: failed to enqueue the skb to the per CPU + * backlog queue. This can be caused by backlog queue full (see + * netdev_max_backlog in net.rst) or RPS flow limit + */ + SKB_DROP_REASON_CPU_BACKLOG, + /** @SKB_DROP_REASON_XDP: dropped by XDP in input path */ + SKB_DROP_REASON_XDP, + /** @SKB_DROP_REASON_TC_INGRESS: dropped in TC ingress HOOK */ + SKB_DROP_REASON_TC_INGRESS, + /** @SKB_DROP_REASON_UNHANDLED_PROTO: protocol not implemented or not supported */ + SKB_DROP_REASON_UNHANDLED_PROTO, + /** @SKB_DROP_REASON_SKB_CSUM: sk_buff checksum computation error */ + SKB_DROP_REASON_SKB_CSUM, + /** @SKB_DROP_REASON_SKB_GSO_SEG: gso segmentation error */ + SKB_DROP_REASON_SKB_GSO_SEG, + /** + * @SKB_DROP_REASON_SKB_UCOPY_FAULT: failed to copy data from user space, + * e.g., via zerocopy_sg_from_iter() or skb_orphan_frags_rx() + */ + SKB_DROP_REASON_SKB_UCOPY_FAULT, + /** @SKB_DROP_REASON_DEV_HDR: device driver specific header/metadata is invalid */ + SKB_DROP_REASON_DEV_HDR, + /** + * @SKB_DROP_REASON_DEV_READY: the device is not ready to xmit/recv due to + * any of its data structure that is not up/ready/initialized, + * e.g., the IFF_UP is not set, or driver specific tun->tfiles[txq] + * is not initialized + */ + SKB_DROP_REASON_DEV_READY, + /** @SKB_DROP_REASON_FULL_RING: ring buffer is full */ + SKB_DROP_REASON_FULL_RING, + /** @SKB_DROP_REASON_NOMEM: error due to OOM */ + SKB_DROP_REASON_NOMEM, + /** + * @SKB_DROP_REASON_HDR_TRUNC: failed to trunc/extract the header from + * networking data, e.g., failed to pull the protocol header from + * frags via pskb_may_pull() + */ + SKB_DROP_REASON_HDR_TRUNC, + /** + * @SKB_DROP_REASON_TAP_FILTER: dropped by (ebpf) filter directly attached + * to tun/tap, e.g., via TUNSETFILTEREBPF + */ + SKB_DROP_REASON_TAP_FILTER, + /** + * @SKB_DROP_REASON_TAP_TXFILTER: dropped by tx filter implemented at + * tun/tap, e.g., check_filter() + */ + SKB_DROP_REASON_TAP_TXFILTER, + /** @SKB_DROP_REASON_ICMP_CSUM: ICMP checksum error */ + SKB_DROP_REASON_ICMP_CSUM, + /** + * @SKB_DROP_REASON_INVALID_PROTO: the packet doesn't follow RFC 2211, + * such as a broadcasts ICMP_TIMESTAMP + */ + SKB_DROP_REASON_INVALID_PROTO, + /** + * @SKB_DROP_REASON_IP_INADDRERRORS: host unreachable, corresponding to + * IPSTATS_MIB_INADDRERRORS + */ + SKB_DROP_REASON_IP_INADDRERRORS, + /** + * @SKB_DROP_REASON_IP_INNOROUTES: network unreachable, corresponding to + * IPSTATS_MIB_INADDRERRORS + */ + SKB_DROP_REASON_IP_INNOROUTES, + /** + * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the + * MTU) + */ + SKB_DROP_REASON_PKT_TOO_BIG, + /** + * @SKB_DROP_REASON_MAX: the maximum of drop reason, which shouldn't be + * used as a real 'reason' + */ + SKB_DROP_REASON_MAX, +}; + +#define SKB_DR_INIT(name, reason) \ + enum skb_drop_reason name = SKB_DROP_REASON_##reason +#define SKB_DR(name) \ + SKB_DR_INIT(name, NOT_SPECIFIED) +#define SKB_DR_SET(name, reason) \ + (name = SKB_DROP_REASON_##reason) +#define SKB_DR_OR(name, reason) \ + do { \ + if (name == SKB_DROP_REASON_NOT_SPECIFIED || \ + name == SKB_NOT_DROPPED_YET) \ + SKB_DR_SET(name, reason); \ + } while (0) + +extern const char * const drop_reasons[]; + +#endif diff --git a/include/net/dsa.h b/include/net/dsa.h index 14f07275852b..b902b31bebce 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -53,6 +53,8 @@ struct phylink_link_state; #define 
DSA_TAG_PROTO_SJA1110_VALUE 23 #define DSA_TAG_PROTO_RTL8_4_VALUE 24 #define DSA_TAG_PROTO_RTL8_4T_VALUE 25 +#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26 +#define DSA_TAG_PROTO_LAN937X_VALUE 27 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -81,6 +83,8 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_SJA1110 = DSA_TAG_PROTO_SJA1110_VALUE, DSA_TAG_PROTO_RTL8_4 = DSA_TAG_PROTO_RTL8_4_VALUE, DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE, + DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE, + DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE, }; struct dsa_switch; @@ -888,8 +892,13 @@ struct dsa_switch_ops { struct ethtool_eth_mac_stats *mac_stats); void (*get_eth_ctrl_stats)(struct dsa_switch *ds, int port, struct ethtool_eth_ctrl_stats *ctrl_stats); + void (*get_rmon_stats)(struct dsa_switch *ds, int port, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges); void (*get_stats64)(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *s); + void (*get_pause_stats)(struct dsa_switch *ds, int port, + struct ethtool_pause_stats *pause_stats); void (*self_test)(struct dsa_switch *ds, int port, struct ethtool_test *etest, u64 *data); diff --git a/include/net/erspan.h b/include/net/erspan.h index 0d9e86bd9893..6cb4cbd6a48f 100644 --- a/include/net/erspan.h +++ b/include/net/erspan.h @@ -58,6 +58,9 @@ * GRE proto ERSPAN type I/II = 0x88BE, type III = 0x22EB */ +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/skbuff.h> #include <uapi/linux/erspan.h> #define ERSPAN_VERSION 0x1 /* ERSPAN type II */ diff --git a/include/net/esp.h b/include/net/esp.h index 9c5637d41d95..322950727dd0 100644 --- a/include/net/esp.h +++ b/include/net/esp.h @@ -5,6 +5,7 @@ #include <linux/skbuff.h> struct ip_esp_hdr; +struct xfrm_state; static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb) { diff --git a/include/net/ethoc.h b/include/net/ethoc.h index 78519ed42ab4..73810f3ca492 100644 --- a/include/net/ethoc.h +++ b/include/net/ethoc.h @@ -10,6 +10,9 @@ #ifndef LINUX_NET_ETHOC_H #define LINUX_NET_ETHOC_H 1 +#include <linux/if.h> +#include <linux/types.h> + struct ethoc_platform_data { u8 hwaddr[IFHWADDRLEN]; s8 phy_id; diff --git a/include/net/firewire.h b/include/net/firewire.h index 299e5df38552..8fbff8d77865 100644 --- a/include/net/firewire.h +++ b/include/net/firewire.h @@ -2,6 +2,8 @@ #ifndef _NET_FIREWIRE_H #define _NET_FIREWIRE_H +#include <linux/types.h> + /* Pseudo L2 address */ #define FWNET_ALEN 16 union fwnet_hwaddr { @@ -11,8 +13,7 @@ union fwnet_hwaddr { __be64 uniq_id; /* EUI-64 */ u8 max_rec; /* max packet size */ u8 sspd; /* max speed */ - __be16 fifo_hi; /* hi 16bits of FIFO addr */ - __be32 fifo_lo; /* lo 32bits of FIFO addr */ + u8 fifo[6]; /* FIFO addr */ } __packed uc; }; diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index a4c6057c7097..6c74812d64b2 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -179,6 +179,22 @@ struct flow_dissector_key_ports { }; /** + * struct flow_dissector_key_ports_range + * @tp: port number from packet + * @tp_min: min port number in range + * @tp_max: max port number in range + */ +struct flow_dissector_key_ports_range { + union { + struct flow_dissector_key_ports tp; + struct { + struct flow_dissector_key_ports tp_min; + struct flow_dissector_key_ports tp_max; + }; + }; +}; + +/** * flow_dissector_key_icmp: * type: ICMP type * code: ICMP code @@ -261,6 +277,18 @@ struct flow_dissector_key_num_of_vlans { u8 
num_of_vlans; }; +/** + * struct flow_dissector_key_pppoe: + * @session_id: pppoe session id + * @ppp_proto: ppp protocol + * @type: pppoe eth type + */ +struct flow_dissector_key_pppoe { + __be16 session_id; + __be16 ppp_proto; + __be16 type; +}; + enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ @@ -291,6 +319,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */ FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */ FLOW_DISSECTOR_KEY_NUM_OF_VLANS, /* struct flow_dissector_key_num_of_vlans */ + FLOW_DISSECTOR_KEY_PPPOE, /* struct flow_dissector_key_pppoe */ FLOW_DISSECTOR_KEY_MAX, }; diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 7ac313858037..2a9a9e42e7fd 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -48,6 +48,10 @@ struct flow_match_ports { struct flow_dissector_key_ports *key, *mask; }; +struct flow_match_ports_range { + struct flow_dissector_key_ports_range *key, *mask; +}; + struct flow_match_icmp { struct flow_dissector_key_icmp *key, *mask; }; @@ -72,6 +76,10 @@ struct flow_match_ct { struct flow_dissector_key_ct *key, *mask; }; +struct flow_match_pppoe { + struct flow_dissector_key_pppoe *key, *mask; +}; + struct flow_rule; void flow_rule_match_meta(const struct flow_rule *rule, @@ -94,6 +102,8 @@ void flow_rule_match_ip(const struct flow_rule *rule, struct flow_match_ip *out); void flow_rule_match_ports(const struct flow_rule *rule, struct flow_match_ports *out); +void flow_rule_match_ports_range(const struct flow_rule *rule, + struct flow_match_ports_range *out); void flow_rule_match_tcp(const struct flow_rule *rule, struct flow_match_tcp *out); void flow_rule_match_icmp(const struct flow_rule *rule, @@ -116,6 +126,8 @@ void flow_rule_match_enc_opts(const struct flow_rule *rule, struct flow_match_enc_opts *out); void flow_rule_match_ct(const struct flow_rule *rule, struct flow_match_ct *out); +void flow_rule_match_pppoe(const struct flow_rule *rule, + struct flow_match_pppoe *out); enum flow_action_id { FLOW_ACTION_ACCEPT = 0, diff --git a/include/net/fq.h b/include/net/fq.h index 2eccbbd2b559..07b5aff6ec58 100644 --- a/include/net/fq.h +++ b/include/net/fq.h @@ -7,6 +7,10 @@ #ifndef __NET_SCHED_FQ_H #define __NET_SCHED_FQ_H +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/types.h> + struct fq_tin; /** diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index a5f67a2c0c73..524b510f1c68 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h @@ -358,8 +358,7 @@ static int fq_init(struct fq *fq, int flows_cnt) if (!fq->flows) return -ENOMEM; - fq->flows_bitmap = kcalloc(BITS_TO_LONGS(fq->flows_cnt), sizeof(long), - GFP_KERNEL); + fq->flows_bitmap = bitmap_zalloc(fq->flows_cnt, GFP_KERNEL); if (!fq->flows_bitmap) { kvfree(fq->flows); fq->flows = NULL; @@ -383,7 +382,7 @@ static void fq_reset(struct fq *fq, kvfree(fq->flows); fq->flows = NULL; - kfree(fq->flows_bitmap); + bitmap_free(fq->flows_bitmap); fq->flows_bitmap = NULL; } diff --git a/include/net/garp.h b/include/net/garp.h index 4d9a0c6a2e5f..59a07b171def 100644 --- a/include/net/garp.h +++ b/include/net/garp.h @@ -2,6 +2,8 @@ #ifndef _NET_GARP_H #define _NET_GARP_H +#include <linux/if_ether.h> +#include <linux/types.h> #include <net/stp.h> #define GARP_PROTOCOL_ID 0x1 diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 7cb3fa8310ed..56a50e1c51b9 100644 --- 
a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -11,6 +11,7 @@ /** * struct genl_multicast_group - generic netlink multicast group * @name: name of the multicast group, names are per-family + * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM) */ struct genl_multicast_group { char name[GENL_NAMSIZ]; @@ -116,7 +117,7 @@ enum genl_validate_flags { * struct genl_small_ops - generic netlink operations (small version) * @cmd: command identifier * @internal_flags: flags used by the family - * @flags: flags + * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM) * @validate: validation flags from enum genl_validate_flags * @doit: standard command callback * @dumpit: callback for dumpers @@ -137,7 +138,7 @@ struct genl_small_ops { * struct genl_ops - generic netlink operations * @cmd: command identifier * @internal_flags: flags used by the family - * @flags: flags + * @flags: GENL_* flags (%GENL_ADMIN_PERM or %GENL_UNS_ADMIN_PERM) * @maxattr: maximum number of attributes supported * @policy: netlink policy (takes precedence over family policy) * @validate: validation flags from enum genl_validate_flags diff --git a/include/net/gtp.h b/include/net/gtp.h index c1d6169df331..2a503f035d18 100644 --- a/include/net/gtp.h +++ b/include/net/gtp.h @@ -2,6 +2,10 @@ #ifndef _GTP_H_ #define _GTP_H_ +#include <linux/netdevice.h> +#include <linux/types.h> +#include <net/rtnetlink.h> + /* General GTP protocol related definitions. */ #define GTP0_PORT 3386 diff --git a/include/net/gue.h b/include/net/gue.h index e42402f180b7..dfca298bec9c 100644 --- a/include/net/gue.h +++ b/include/net/gue.h @@ -30,6 +30,9 @@ * may refer to options placed after this field. */ +#include <asm/byteorder.h> +#include <linux/types.h> + struct guehdr { union { struct { diff --git a/include/net/hwbm.h b/include/net/hwbm.h index c81444611a22..aa495decec35 100644 --- a/include/net/hwbm.h +++ b/include/net/hwbm.h @@ -2,6 +2,8 @@ #ifndef _HWBM_H #define _HWBM_H +#include <linux/mutex.h> + struct hwbm_pool { /* Capacity of the pool */ int size; diff --git a/include/net/ila.h b/include/net/ila.h index f98dcd5791b0..73ebe5eab272 100644 --- a/include/net/ila.h +++ b/include/net/ila.h @@ -8,6 +8,8 @@ #ifndef _NET_ILA_H #define _NET_ILA_H +struct sk_buff; + int ila_xlat_outgoing(struct sk_buff *skb); int ila_xlat_incoming(struct sk_buff *skb); diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 7392f959a405..025bd8d3c769 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -11,6 +11,8 @@ #include <linux/types.h> +struct flowi; +struct flowi6; struct request_sock; struct sk_buff; struct sock; diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index f259e1ae14ba..56f1286583d3 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -110,8 +110,6 @@ static inline bool inet6_match(struct net *net, const struct sock *sk, const __portpair ports, const int dif, const int sdif) { - int bound_dev_if; - if (!net_eq(sock_net(sk), net) || sk->sk_family != AF_INET6 || sk->sk_portpair != ports || @@ -119,8 +117,9 @@ static inline bool inet6_match(struct net *net, const struct sock *sk, !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr)) return false; - bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); - return bound_dev_if == dif || bound_dev_if == sdif; + /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */ + return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, 
+ sdif); } #endif /* IS_ENABLED(CONFIG_IPV6) */ diff --git a/include/net/inet_common.h b/include/net/inet_common.h index cad2a611efde..cec453c18f1d 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -3,6 +3,10 @@ #define _INET_COMMON_H #include <linux/indirect_call_wrapper.h> +#include <linux/net.h> +#include <linux/netdev_features.h> +#include <linux/types.h> +#include <net/sock.h> extern const struct proto_ops inet_stream_ops; extern const struct proto_ops inet_dgram_ops; @@ -12,6 +16,8 @@ extern const struct proto_ops inet_dgram_ops; */ struct msghdr; +struct net; +struct page; struct sock; struct sockaddr; struct socket; diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 911ad930867d..0b0876610553 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -4,6 +4,9 @@ #include <linux/rhashtable-types.h> #include <linux/completion.h> +#include <linux/in6.h> +#include <linux/rbtree_types.h> +#include <linux/refcount.h> /* Per netns frag queues directory */ struct fqdir { diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index fd6b510d114b..e9cf2157ed8a 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -175,17 +175,6 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) hashinfo->ehash_locks = NULL; } -static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, - int dif, int sdif) -{ -#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), - bound_dev_if, dif, sdif); -#else - return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); -#endif -} - struct inet_bind_bucket * inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net, struct inet_bind_hashbucket *head, @@ -271,16 +260,14 @@ static inline bool inet_match(struct net *net, const struct sock *sk, const __addrpair cookie, const __portpair ports, int dif, int sdif) { - int bound_dev_if; - if (!net_eq(sock_net(sk), net) || sk->sk_portpair != ports || sk->sk_addrpair != cookie) return false; - /* Paired with WRITE_ONCE() from sock_bindtoindex_locked() */ - bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); - return bound_dev_if == dif || bound_dev_if == sdif; + /* READ_ONCE() paired with WRITE_ONCE() in sock_bindtoindex_locked() */ + return inet_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, + sdif); } /* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 6395f6b9a5d2..bf5654ce711e 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -149,6 +149,17 @@ static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if, return bound_dev_if == dif || bound_dev_if == sdif; } +static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, + int dif, int sdif) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), + bound_dev_if, dif, sdif); +#else + return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); +#endif +} + struct inet_cork { unsigned int flags; __be32 addr; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index ca2d6b60e1ec..035d61d50a98 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -2,6 +2,16 @@ #ifndef _NET_IP6_ROUTE_H #define _NET_IP6_ROUTE_H +#include <net/addrconf.h> +#include <net/flow.h> +#include <net/ip6_fib.h> +#include <net/sock.h> +#include <net/lwtunnel.h> +#include 
<linux/ip.h> +#include <linux/ipv6.h> +#include <linux/route.h> +#include <net/nexthop.h> + struct route_info { __u8 type; __u8 length; @@ -19,16 +29,6 @@ struct route_info { __u8 prefix[]; /* 0,8 or 16 */ }; -#include <net/addrconf.h> -#include <net/flow.h> -#include <net/ip6_fib.h> -#include <net/sock.h> -#include <net/lwtunnel.h> -#include <linux/ip.h> -#include <linux/ipv6.h> -#include <linux/route.h> -#include <net/nexthop.h> - #define RT6_LOOKUP_F_IFACE 0x00000001 #define RT6_LOOKUP_F_REACHABLE 0x00000002 #define RT6_LOOKUP_F_HAS_SADDR 0x00000004 diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index c24fa934221d..63fac94f9ace 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -54,6 +54,7 @@ struct ip_tunnel_key { __be32 label; /* Flow Label for IPv6 */ __be16 tp_src; __be16 tp_dst; + __u8 flow_flags; }; /* Flags for ip_tunnel_info mode. */ @@ -387,9 +388,11 @@ static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, const struct sk_buff *skb) { - if (skb->protocol == htons(ETH_P_IP)) + __be16 payload_protocol = skb_protocol(skb, true); + + if (payload_protocol == htons(ETH_P_IP)) return iph->tos; - else if (skb->protocol == htons(ETH_P_IPV6)) + else if (payload_protocol == htons(ETH_P_IPV6)) return ipv6_get_dsfield((const struct ipv6hdr *)iph); else return 0; @@ -398,9 +401,11 @@ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, static inline u8 ip_tunnel_get_ttl(const struct iphdr *iph, const struct sk_buff *skb) { - if (skb->protocol == htons(ETH_P_IP)) + __be16 payload_protocol = skb_protocol(skb, true); + + if (payload_protocol == htons(ETH_P_IP)) return iph->ttl; - else if (skb->protocol == htons(ETH_P_IPV6)) + else if (payload_protocol == htons(ETH_P_IPV6)) return ((const struct ipv6hdr *)iph)->hop_limit; else return 0; @@ -456,8 +461,8 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); u64_stats_update_begin(&tstats->syncp); - tstats->tx_bytes += pkt_len; - tstats->tx_packets++; + u64_stats_add(&tstats->tx_bytes, pkt_len); + u64_stats_inc(&tstats->tx_packets); u64_stats_update_end(&tstats->syncp); put_cpu_ptr(tstats); } else { diff --git a/include/net/ipcomp.h b/include/net/ipcomp.h index fee6fc451597..c31108295079 100644 --- a/include/net/ipcomp.h +++ b/include/net/ipcomp.h @@ -2,11 +2,13 @@ #ifndef _NET_IPCOMP_H #define _NET_IPCOMP_H +#include <linux/skbuff.h> #include <linux/types.h> #define IPCOMP_SCRATCH_SIZE 65400 struct crypto_comp; +struct ip_comp_hdr; struct ipcomp_data { u16 threshold; diff --git a/include/net/ipconfig.h b/include/net/ipconfig.h index e3534299bd2a..8276897d0c2e 100644 --- a/include/net/ipconfig.h +++ b/include/net/ipconfig.h @@ -7,6 +7,8 @@ /* The following are initdata: */ +#include <linux/types.h> + extern int ic_proto_enabled; /* Protocols enabled (see IC_xxx) */ extern int ic_set_manually; /* IPconfig parameters set manually */ diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h index e766300b3e99..3e1f76786d7b 100644 --- a/include/net/llc_c_ac.h +++ b/include/net/llc_c_ac.h @@ -16,6 +16,13 @@ * Connection state transition actions * (Fb = F bit; Pb = P bit; Xb = X bit) */ + +#include <linux/types.h> + +struct sk_buff; +struct sock; +struct timer_list; + #define LLC_CONN_AC_CLR_REMOTE_BUSY 1 #define LLC_CONN_AC_CONN_IND 2 #define LLC_CONN_AC_CONN_CONFIRM 3 diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h 
index 48f3f891b2f9..53823d61d8b6 100644 --- a/include/net/llc_c_st.h +++ b/include/net/llc_c_st.h @@ -11,6 +11,10 @@ * * See the GNU General Public License for more details. */ + +#include <net/llc_c_ac.h> +#include <net/llc_c_ev.h> + /* Connection component state management */ /* connection states */ #define LLC_CONN_OUT_OF_SVC 0 /* prior to allocation */ diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h index a61b98c108ee..f71790305bc9 100644 --- a/include/net/llc_s_ac.h +++ b/include/net/llc_s_ac.h @@ -11,6 +11,10 @@ * * See the GNU General Public License for more details. */ + +struct llc_sap; +struct sk_buff; + /* SAP component actions */ #define SAP_ACT_UNITDATA_IND 1 #define SAP_ACT_SEND_UI 2 diff --git a/include/net/llc_s_ev.h b/include/net/llc_s_ev.h index 84db3a59ed28..fb7df1d70af3 100644 --- a/include/net/llc_s_ev.h +++ b/include/net/llc_s_ev.h @@ -13,6 +13,7 @@ */ #include <linux/skbuff.h> +#include <net/llc.h> /* Defines SAP component events */ /* Types of events (possible values in 'ev->type') */ diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h index c4359e203013..ed5b2fa40d32 100644 --- a/include/net/llc_s_st.h +++ b/include/net/llc_s_st.h @@ -12,6 +12,12 @@ * See the GNU General Public License for more details. */ +#include <linux/types.h> +#include <net/llc_s_ac.h> +#include <net/llc_s_ev.h> + +struct llc_sap_state_trans; + #define LLC_NR_SAP_STATES 2 /* size of state table */ /* structures and types */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 47642b020706..f198af600b5e 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -125,6 +125,22 @@ * via the usual ieee80211_tx_dequeue). */ +/** + * DOC: HW timestamping + * + * Timing Measurement and Fine Timing Measurement require accurate timestamps + * of the action frames TX/RX and their respective acks. + * + * To report hardware timestamps for Timing Measurement or Fine Timing + * Measurement frame RX, the low level driver should set the SKB's hwtstamp + * field to the frame RX timestamp and report the ack TX timestamp in the + * ieee80211_rx_status struct. + * + * Similarly, To report hardware timestamps for Timing Measurement or Fine + * Timing Measurement frame TX, the driver should set the SKB's hwtstamp field + * to the frame TX timestamp and report the ack RX timestamp in the + * ieee80211_tx_status struct. + */ struct device; /** @@ -261,11 +277,13 @@ enum ieee80211_chanctx_switch_mode { * done. * * @vif: the vif that should be switched from old_ctx to new_ctx + * @link_conf: the link conf that's switching * @old_ctx: the old context to which the vif was assigned * @new_ctx: the new context to which the vif must be assigned */ struct ieee80211_vif_chanctx_switch { struct ieee80211_vif *vif; + struct ieee80211_bss_conf *link_conf; struct ieee80211_chanctx_conf *old_ctx; struct ieee80211_chanctx_conf *new_ctx; }; @@ -273,8 +291,8 @@ struct ieee80211_vif_chanctx_switch { /** * enum ieee80211_bss_change - BSS change notification flags * - * These flags are used with the bss_info_changed() callback - * to indicate which BSS parameter changed. + * These flags are used with the bss_info_changed(), link_info_changed() + * and vif_cfg_changed() callbacks to indicate which parameter(s) changed. * * @BSS_CHANGED_ASSOC: association status changed (associated/disassociated), * also implies a change in the AID. 
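The HW timestamping contract documented above amounts to two assignments on the driver's RX path. Below is a minimal sketch, assuming a hypothetical driver whose RX descriptor exposes the frame and ack timestamps in nanoseconds (rx_tstamp_ns and ack_tx_tstamp_ns are illustrative parameters, not fields of any real driver); only IEEE80211_SKB_RXCB(), skb_hwtstamps() and the new ack_tx_hwtstamp member come from mac80211 itself.

#include <net/mac80211.h>

/* Hypothetical RX completion helper: report hardware timestamps for a
 * received Timing Measurement / Fine Timing Measurement action frame,
 * following the "HW timestamping" DOC section above.
 */
static void drv_rx_report_hwtstamps(struct ieee80211_hw *hw,
				    struct sk_buff *skb,
				    u64 rx_tstamp_ns,      /* assumed: from RX descriptor */
				    u64 ack_tx_tstamp_ns)  /* assumed: from RX descriptor */
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/* Frame RX timestamp goes into the skb's hardware timestamp. */
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(rx_tstamp_ns);

	/* The ack TX timestamp is reported in ieee80211_rx_status; note it
	 * shares a union with boottime_ns, which is only needed for beacons
	 * and probe responses, so the two uses do not collide.
	 */
	status->ack_tx_hwtstamp = ns_to_ktime(ack_tx_tstamp_ns);

	ieee80211_rx_irqsafe(hw, skb);
}

The TX direction is symmetric: the driver would set skb_hwtstamps(skb)->hwtstamp to the frame TX timestamp and fill ieee80211_tx_status.ack_hwtstamp before handing the status to ieee80211_tx_status_ext().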
@@ -513,6 +531,8 @@ struct ieee80211_fils_discovery { * This structure keeps information about a BSS (and an association * to that BSS) that can change during the lifetime of the BSS. * + * @addr: (link) address used locally + * @link_id: link ID, or 0 for non-MLO * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE * @uora_exists: is the UORA element advertised by AP * @ack_enabled: indicates support to receive a multi-TID that solicits either @@ -526,11 +546,6 @@ struct ieee80211_fils_discovery { * mode only, set if the AP advertises TWT responder role) * @twt_protected: does this BSS support protected TWT frames * @twt_broadcast: does this BSS support broadcast TWT - * @assoc: association status - * @ibss_joined: indicates whether this station is part of an IBSS - * or not - * @ibss_creator: indicates if a new IBSS network is being created - * @aid: association ID number, valid only when @assoc is true * @use_cts_prot: use CTS protection * @use_short_preamble: use 802.11b short preamble * @use_short_slot: use short slot time (only relevant for ERP) @@ -551,6 +566,8 @@ struct ieee80211_fils_discovery { * IMPORTANT: These three sync_* parameters would possibly be out of sync * by the time the driver will use them. The synchronized view is currently * guaranteed only in certain callbacks. + * Note also that this is not used with MLD associations, mac80211 doesn't + * know how to track beacons for all of the links for this. * @beacon_int: beacon interval * @assoc_capability: capabilities taken from assoc resp * @basic_rates: bitmap of basic rates, each bit stands for an @@ -576,21 +593,7 @@ struct ieee80211_fils_discovery { * threshold event and can't be enabled simultaneously with it. * @cqm_rssi_high: Connection quality monitor RSSI upper threshold. * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis - * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The - * may filter ARP queries targeted for other addresses than listed here. - * The driver must allow ARP queries targeted for all address listed here - * to pass through. An empty list implies no ARP queries need to pass. - * @arp_addr_cnt: Number of addresses currently on the list. Note that this - * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list - * array size), it's up to the driver what to do in that case. * @qos: This is a QoS-enabled BSS. - * @idle: This interface is idle. There's also a global idle flag in the - * hardware config which may be more appropriate depending on what - * your driver/device needs to do. - * @ps: power-save mode (STA only). This flag is NOT affected by - * offchannel/dynamic_ps operations. - * @ssid: The SSID of the current vif. Valid in AP and IBSS mode. - * @ssid_len: Length of SSID given in @ssid. * @hidden_ssid: The SSID of the current vif is hidden. Only valid in AP-mode. * @txpower: TX power in dBm. INT_MIN means not configured. * @txpower_type: TX power adjustment used to control per packet Transmit @@ -628,7 +631,6 @@ struct ieee80211_fils_discovery { * @fils_discovery: FILS discovery configuration * @unsol_bcast_probe_resp_interval: Unsolicited broadcast probe response * interval. - * @s1g: BSS is S1G BSS (affects Association Request format). * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed * to driver when rate control is offloaded to firmware. * @power_type: power type of BSS for 6 GHz @@ -636,9 +638,24 @@ struct ieee80211_fils_discovery { * @tx_pwr_env_num: number of @tx_pwr_env. 
* @pwr_reduction: power constraint of BSS. * @eht_support: does this BSS support EHT + * @csa_active: marks whether a channel switch is going on. Internally it is + * write-protected by sdata_lock and local->mtx so holding either is fine + * for read access. + * @mu_mimo_owner: indicates interface owns MU-MIMO capability + * @chanctx_conf: The channel context this interface is assigned to, or %NULL + * when it is not assigned. This pointer is RCU-protected due to the TX + * path needing to access it; even though the netdev carrier will always + * be off when it is %NULL there can still be races and packets could be + * processed after it switches back to %NULL. + * @color_change_active: marks whether a color change is ongoing. Internally it is + * write-protected by sdata_lock and local->mtx so holding either is fine + * for read access. + * @color_change_color: the bss color that will be used after the change. */ struct ieee80211_bss_conf { const u8 *bssid; + unsigned int link_id; + u8 addr[ETH_ALEN] __aligned(2); u8 htc_trig_based_pkt_ext; bool uora_exists; u8 uora_ocw_range; @@ -648,10 +665,6 @@ struct ieee80211_bss_conf { bool twt_responder; bool twt_protected; bool twt_broadcast; - /* association related data */ - bool assoc, ibss_joined; - bool ibss_creator; - u16 aid; /* erp related data */ bool use_cts_prot; bool use_short_preamble; @@ -673,13 +686,7 @@ struct ieee80211_bss_conf { s32 cqm_rssi_high; struct cfg80211_chan_def chandef; struct ieee80211_mu_group_data mu_group; - __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; - int arp_addr_cnt; bool qos; - bool idle; - bool ps; - u8 ssid[IEEE80211_MAX_SSID_LEN]; - size_t ssid_len; bool hidden_ssid; int txpower; enum nl80211_tx_power_setting txpower_type; @@ -704,13 +711,19 @@ struct ieee80211_bss_conf { struct cfg80211_he_bss_color he_bss_color; struct ieee80211_fils_discovery fils_discovery; u32 unsol_bcast_probe_resp_interval; - bool s1g; struct cfg80211_bitrate_mask beacon_tx_rate; enum ieee80211_ap_reg_power power_type; struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT]; u8 tx_pwr_env_num; u8 pwr_reduction; bool eht_support; + + bool csa_active; + bool mu_mimo_owner; + struct ieee80211_chanctx_conf __rcu *chanctx_conf; + + bool color_change_active; + u8 color_change_color; }; /** @@ -869,6 +882,14 @@ enum mac80211_tx_info_flags { * @IEEE80211_TX_CTRL_DONT_REORDER: This frame should not be reordered * relative to other frames that have this flag set, independent * of their QoS TID or other priority field values. + * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally + * for sequence number assignment + * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this + * frame should be transmitted on the specific link. This really is + * only relevant for frames that do not have data present, and is + * also not used for 802.3 format frames. Note that even if the frame + * is on a specific link, address translation might still apply if + * it's intended for an MLD. * * These flags are used in tx_info->control.flags. 
*/ @@ -882,8 +903,15 @@ enum mac80211_tx_control_flags { IEEE80211_TX_INTCFL_NEED_TXPROCESSING = BIT(6), IEEE80211_TX_CTRL_NO_SEQNO = BIT(7), IEEE80211_TX_CTRL_DONT_REORDER = BIT(8), + IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX = BIT(9), + IEEE80211_TX_CTRL_MLO_LINK = 0xf0000000, }; +#define IEEE80211_LINK_UNSPECIFIED 0xf +#define IEEE80211_TX_CTRL_MLO_LINK_UNSPEC \ + u32_encode_bits(IEEE80211_LINK_UNSPECIFIED, \ + IEEE80211_TX_CTRL_MLO_LINK) + /** * enum mac80211_tx_status_flags - flags to describe transmit status * @@ -1031,7 +1059,9 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate) * (3) TX status information - driver tells mac80211 what happened * * @flags: transmit info flags, defined above - * @band: the band to transmit on (use for checking for races) + * @band: the band to transmit on (use e.g. for checking for races), + * not valid if the interface is an MLD since we won't know which + * link the frame will be transmitted on * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC * @ack_frame_id: internal frame ID for TX status, used internally * @tx_time_est: TX time estimate in units of 4us, used internally @@ -1170,12 +1200,16 @@ struct ieee80211_rate_status { * @rates: Mrr stages that were used when sending the packet * @n_rates: Number of mrr stages (count of instances for @rates) * @free_list: list where processed skbs are stored to be free'd by the driver + * @ack_hwtstamp: Hardware timestamp of the received ack in nanoseconds + * Only needed for Timing measurement and Fine timing measurement action + * frames. Only reported by devices that have timestamping enabled. */ struct ieee80211_tx_status { struct ieee80211_sta *sta; struct ieee80211_tx_info *info; struct sk_buff *skb; struct ieee80211_rate_status *rates; + ktime_t ack_hwtstamp; u8 n_rates; struct list_head *free_list; @@ -1413,6 +1447,9 @@ enum mac80211_rx_encoding { * (TSF) timer when the first data symbol (MPDU) arrived at the hardware. * @boottime_ns: CLOCK_BOOTTIME timestamp the frame was received at, this is * needed only for beacons and probe responses that update the scan cache. + * @ack_tx_hwtstamp: Hardware timestamp for the ack TX in nanoseconds. Only + * needed for Timing measurement and Fine timing measurement action frames. + * Only reported by devices that have timestamping enabled. * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use * it but can store it and pass it back to the driver for synchronisation * @band: the active band when this frame was received @@ -1446,7 +1483,10 @@ enum mac80211_rx_encoding { */ struct ieee80211_rx_status { u64 mactime; - u64 boottime_ns; + union { + u64 boottime_ns; + ktime_t ack_tx_hwtstamp; + }; u32 device_timestamp; u32 ampdu_reference; u32 flag; @@ -1702,21 +1742,61 @@ enum ieee80211_offload_flags { }; /** + * struct ieee80211_vif_cfg - interface configuration + * @assoc: association status + * @ibss_joined: indicates whether this station is part of an IBSS or not + * @ibss_creator: indicates if a new IBSS network is being created + * @ps: power-save mode (STA only). This flag is NOT affected by + * offchannel/dynamic_ps operations. + * @aid: association ID number, valid only when @assoc is true + * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The + * may filter ARP queries targeted for other addresses than listed here. + * The driver must allow ARP queries targeted for all address listed here + * to pass through. An empty list implies no ARP queries need to pass. 
+ * @arp_addr_cnt: Number of addresses currently on the list. Note that this + * may be larger than %IEEE80211_BSS_ARP_ADDR_LIST_LEN (the arp_addr_list + * array size), it's up to the driver what to do in that case. + * @ssid: The SSID of the current vif. Valid in AP and IBSS mode. + * @ssid_len: Length of SSID given in @ssid. + * @s1g: BSS is S1G BSS (affects Association Request format). + * @idle: This interface is idle. There's also a global idle flag in the + * hardware config which may be more appropriate depending on what + * your driver/device needs to do. + * @ap_addr: AP MLD address, or BSSID for non-MLO connections + * (station mode only) + */ +struct ieee80211_vif_cfg { + /* association related data */ + bool assoc, ibss_joined; + bool ibss_creator; + bool ps; + u16 aid; + + __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; + int arp_addr_cnt; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + size_t ssid_len; + bool s1g; + bool idle; + u8 ap_addr[ETH_ALEN] __aligned(2); +}; + +/** * struct ieee80211_vif - per-interface data * * Data in this structure is continually present for driver * use during the life of a virtual interface. * * @type: type of this virtual interface + * @cfg: vif configuration, see &struct ieee80211_vif_cfg * @bss_conf: BSS configuration for this interface, either our own * or the BSS we're associated to + * @link_conf: in case of MLD, the per-link BSS configuration, + * indexed by link ID + * @valid_links: bitmap of valid links, or 0 for non-MLO. * @addr: address of this interface * @p2p: indicates whether this AP or STA interface is a p2p * interface, i.e. a GO or p2p-sta respectively - * @csa_active: marks whether a channel switch is going on. Internally it is - * write-protected by sdata_lock and local->mtx so holding either is fine - * for read access. - * @mu_mimo_owner: indicates interface owns MU-MIMO capability * @driver_flags: flags/capabilities the driver has for this interface, * these need to be set (or cleared) when the interface is added * or, if supported by the driver, the interface type is changed @@ -1728,11 +1808,6 @@ enum ieee80211_offload_flags { * restrictions. * @hw_queue: hardware queue for each AC * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only - * @chanctx_conf: The channel context this interface is assigned to, or %NULL - * when it is not assigned. This pointer is RCU-protected due to the TX - * path needing to access it; even though the netdev carrier will always - * be off when it is %NULL there can still be races and packets could be - * processed after it switches back to %NULL. * @debugfs_dir: debugfs dentry, can be used by drivers to create own per * interface debug files. Note that it will be NULL for the virtual * monitor interface (if that is requested.) @@ -1747,27 +1822,22 @@ enum ieee80211_offload_flags { * protected by fq->lock. * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see * &enum ieee80211_offload_flags. - * @color_change_active: marks whether a color change is ongoing. Internally it is - * write-protected by sdata_lock and local->mtx so holding either is fine - * for read access. - * @color_change_color: the bss color that will be used after the change. * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled. 
*/ struct ieee80211_vif { enum nl80211_iftype type; + struct ieee80211_vif_cfg cfg; struct ieee80211_bss_conf bss_conf; + struct ieee80211_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS]; + u16 valid_links; u8 addr[ETH_ALEN] __aligned(2); bool p2p; - bool csa_active; - bool mu_mimo_owner; u8 cab_queue; u8 hw_queue[IEEE80211_NUM_ACS]; struct ieee80211_txq *txq; - struct ieee80211_chanctx_conf __rcu *chanctx_conf; - u32 driver_flags; u32 offload_flags; @@ -1780,15 +1850,19 @@ struct ieee80211_vif { bool txqs_stopped[IEEE80211_NUM_ACS]; - bool color_change_active; - u8 color_change_color; - struct ieee80211_vif *mbssid_tx_vif; /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; +/* FIXME: for now loop over all the available links; later will be changed + * to loop only over the active links. + */ +#define for_each_vif_active_link(vif, link, link_id) \ + for (link_id = 0; link_id < ARRAY_SIZE((vif)->link_conf); link_id++) \ + if ((link = rcu_dereference((vif)->link_conf[link_id]))) + static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) { #ifdef CONFIG_MAC80211_MESH @@ -1959,36 +2033,6 @@ struct ieee80211_key_seq { }; /** - * struct ieee80211_cipher_scheme - cipher scheme - * - * This structure contains a cipher scheme information defining - * the secure packet crypto handling. - * - * @cipher: a cipher suite selector - * @iftype: a cipher iftype bit mask indicating an allowed cipher usage - * @hdr_len: a length of a security header used the cipher - * @pn_len: a length of a packet number in the security header - * @pn_off: an offset of pn from the beginning of the security header - * @key_idx_off: an offset of key index byte in the security header - * @key_idx_mask: a bit mask of key_idx bits - * @key_idx_shift: a bit shift needed to get key_idx - * key_idx value calculation: - * (sec_header_base[key_idx_off] & key_idx_mask) >> key_idx_shift - * @mic_len: a mic length in bytes - */ -struct ieee80211_cipher_scheme { - u32 cipher; - u16 iftype; - u8 hdr_len; - u8 pn_len; - u8 pn_off; - u8 key_idx_off; - u8 key_idx_mask; - u8 key_idx_shift; - u8 mic_len; -}; - -/** * enum set_key_cmd - key command * * Used with the set_key() callback in &struct ieee80211_ops, this @@ -2076,8 +2120,6 @@ struct ieee80211_sta_txpwr { enum nl80211_tx_power_setting type; }; -#define MAX_STA_LINKS 15 - /** * struct ieee80211_link_sta - station Link specific info * All link specific info for a STA link for a non MLD STA(single) @@ -2146,6 +2188,7 @@ struct ieee80211_link_sta { * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only * valid if the STA is a TDLS peer in the first place. * @mfp: indicates whether the STA uses management frame protection or not. + * @mlo: indicates whether the STA is MLO station. * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single * A-MSDU. Taken from the Extended Capabilities element. 0 means * unlimited. @@ -2154,7 +2197,6 @@ struct ieee80211_link_sta { * @max_tid_amsdu_len: Maximum A-MSDU size in bytes for this TID * @txq: per-TID data TX queues (if driver uses the TXQ abstraction); note that * the last entry (%IEEE80211_NUM_TIDS) is used for non-data frames - * @multi_link_sta: Identifies if this sta is a MLD STA * @deflink: This holds the default link STA information, for non MLO STA all link * specific STA information is accessed through @deflink or through * link[0] which points to address of @deflink. 
For MLO Link STA @@ -2166,6 +2208,7 @@ struct ieee80211_link_sta { * @deflink address and remaining would be allocated and the address * would be assigned to link[link_id] where link_id is the id assigned * by the AP. + * @valid_links: bitmap of valid links, or 0 for non-MLO */ struct ieee80211_sta { u8 addr[ETH_ALEN]; @@ -2179,6 +2222,7 @@ struct ieee80211_sta { bool tdls; bool tdls_initiator; bool mfp; + bool mlo; u8 max_amsdu_subframes; /** @@ -2203,14 +2247,22 @@ struct ieee80211_sta { struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1]; - bool multi_link_sta; + u16 valid_links; struct ieee80211_link_sta deflink; - struct ieee80211_link_sta *link[MAX_STA_LINKS]; + struct ieee80211_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; +/* FIXME: need to loop only over links which are active and check the actual + * lock + */ +#define for_each_sta_active_link(sta, link_sta, link_id) \ + for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) \ + if (((link_sta) = rcu_dereference_protected((sta)->link[link_id],\ + 1))) \ + /** * enum sta_notify_cmd - sta notify command * @@ -2493,6 +2545,9 @@ struct ieee80211_txq { * @IEEE80211_HW_DETECTS_COLOR_COLLISION: HW/driver has support for BSS color * collision detection and doesn't need it in software. * + * @IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX: Hardware/driver handles transmitting + * multicast frames on all links, mac80211 should not do that. + * * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays */ enum ieee80211_hw_flags { @@ -2549,6 +2604,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_RX_DECAP_OFFLOAD, IEEE80211_HW_SUPPORTS_CONC_MON_RX_DECAP, IEEE80211_HW_DETECTS_COLOR_COLLISION, + IEEE80211_HW_MLO_MCAST_MULTI_LINK_TX, /* keep last, obviously */ NUM_IEEE80211_HW_FLAGS @@ -2664,9 +2720,6 @@ enum ieee80211_hw_flags { * deliver to a WMM STA during any Service Period triggered by the WMM STA. * Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct values. * - * @n_cipher_schemes: a size of an array of cipher schemes definitions. - * @cipher_schemes: a pointer to an array of cipher scheme definitions - * supported by HW. * @max_nan_de_entries: maximum number of NAN DE functions supported by the * device. * @@ -2716,8 +2769,6 @@ struct ieee80211_hw { netdev_features_t netdev_features; u8 uapsd_queues; u8 uapsd_max_sp_len; - u8 n_cipher_schemes; - const struct ieee80211_cipher_scheme *cipher_schemes; u8 max_nan_de_entries; u8 tx_sk_pacing_shift; u8 weight_multiplier; @@ -3549,6 +3600,22 @@ struct ieee80211_prep_tx_info { * for association indication. The @changed parameter indicates which * of the bss parameters has changed when a call is made. The callback * can sleep. + * Note: this callback is called if @vif_cfg_changed or @link_info_changed + * are not implemented. + * + * @vif_cfg_changed: Handler for configuration requests related to interface + * (MLD) parameters from &struct ieee80211_vif_cfg that vary during the + * lifetime of the interface (e.g. assoc status, IP addresses, etc.) + * The @changed parameter indicates which value changed. + * The callback can sleep. + * + * @link_info_changed: Handler for configuration requests related to link + * parameters from &struct ieee80211_bss_conf that are related to an + * individual link. e.g. legacy/HT/VHT/... rate information. + * The @changed parameter indicates which value changed, and the @link_id + * parameter indicates the link ID. Note that the @link_id will be 0 for + * non-MLO connections. 
+ * The callback can sleep. * * @prepare_multicast: Prepare for multicast filter configuration. * This callback is optional, and its return value is passed @@ -4034,6 +4101,18 @@ struct ieee80211_prep_tx_info { * disable background CAC/radar detection. * @net_fill_forward_path: Called from .ndo_fill_forward_path in order to * resolve a path for hardware flow offloading + * @change_vif_links: Change the valid links on an interface, note that while + * removing the old link information is still valid (link_conf pointer), + * but may immediately disappear after the function returns. The old or + * new links bitmaps may be 0 if going from/to a non-MLO situation. + * The @old array contains pointers to the old bss_conf structures + * that were already removed, in case they're needed. + * This callback can sleep. + * @change_sta_links: Change the valid links of a station, similar to + * @change_vif_links. This callback can sleep. + * Note that a sta can also be inserted or removed with valid links, + * i.e. passed to @sta_add/@sta_state with sta->valid_links not zero. + * In fact, cannot change from having valid_links and not having them. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -4057,10 +4136,19 @@ struct ieee80211_ops { void (*bss_info_changed)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, - u32 changed); + u64 changed); + void (*vif_cfg_changed)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changed); + void (*link_info_changed)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed); - int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); - void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); + int (*start_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); + void (*stop_ap)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf); u64 (*prepare_multicast)(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list); @@ -4143,7 +4231,8 @@ struct ieee80211_ops { struct ieee80211_sta *sta, struct station_info *sinfo); int (*conf_tx)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 ac, + struct ieee80211_vif *vif, + unsigned int link_id, u16 ac, const struct ieee80211_tx_queue_params *params); u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -4262,9 +4351,11 @@ struct ieee80211_ops { u32 changed); int (*assign_vif_chanctx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx); void (*unassign_vif_chanctx)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx); int (*switch_vif_chanctx)(struct ieee80211_hw *hw, struct ieee80211_vif_chanctx_switch *vifs, @@ -4369,6 +4460,14 @@ struct ieee80211_ops { struct ieee80211_sta *sta, struct net_device_path_ctx *ctx, struct net_device_path *path); + int (*change_vif_links)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]); + int (*change_sta_links)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u16 old_links, u16 new_links); }; /** @@ -5032,6 +5131,7 @@ struct ieee80211_mutable_offsets { * @vif: &struct ieee80211_vif pointer from the add_interface 
callback. * @offs: &struct ieee80211_mutable_offsets pointer to struct that will * receive the offsets that may be updated by the driver. + * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP) * * If the driver implements beaconing modes, it must use this function to * obtain the beacon template. @@ -5048,7 +5148,8 @@ struct ieee80211_mutable_offsets { struct sk_buff * ieee80211_beacon_get_template(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_mutable_offsets *offs); + struct ieee80211_mutable_offsets *offs, + unsigned int link_id); /** * ieee80211_beacon_get_tim - beacon generation function @@ -5059,6 +5160,7 @@ ieee80211_beacon_get_template(struct ieee80211_hw *hw, * @tim_length: pointer to variable that will receive the TIM IE length, * (including the ID and length bytes!). * Set to 0 if invalid (in non-AP modes). + * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP) * * If the driver implements beaconing modes, it must use this function to * obtain the beacon frame. @@ -5074,21 +5176,24 @@ ieee80211_beacon_get_template(struct ieee80211_hw *hw, */ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - u16 *tim_offset, u16 *tim_length); + u16 *tim_offset, u16 *tim_length, + unsigned int link_id); /** * ieee80211_beacon_get - beacon generation function * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @link_id: the link id to which the beacon belongs (or 0 for a non-MLD AP) * * See ieee80211_beacon_get_tim(). * * Return: See ieee80211_beacon_get_tim(). */ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) + struct ieee80211_vif *vif, + unsigned int link_id) { - return ieee80211_beacon_get_tim(hw, vif, NULL, NULL); + return ieee80211_beacon_get_tim(hw, vif, NULL, NULL, link_id); } /** @@ -6195,7 +6300,7 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); /** * ieee80211_channel_switch_disconnect - disconnect due to channel switch error - * @vif &struct ieee80211_vif pointer from the add_interface callback. + * @vif: &struct ieee80211_vif pointer from the add_interface callback. * @block_tx: if %true, do not send deauth frame. * * Instruct mac80211 to disconnect due to a channel switch error. The channel @@ -6208,13 +6313,14 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, /** * ieee80211_request_smps - request SM PS transition * @vif: &struct ieee80211_vif pointer from the add_interface callback. + * @link_id: link ID for MLO, or 0 * @smps_mode: new SM PS mode * * This allows the driver to request an SM PS transition in managed * mode. This is useful when the driver has more information than * the stack about possible interference, for example by bluetooth. */ -void ieee80211_request_smps(struct ieee80211_vif *vif, +void ieee80211_request_smps(struct ieee80211_vif *vif, unsigned int link_id, enum ieee80211_smps_mode smps_mode); /** @@ -6546,6 +6652,7 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif) * ieee80211_update_mu_groups - set the VHT MU-MIMO groud data * * @vif: the specified virtual interface + * @link_id: the link ID for MLO, otherwise 0 * @membership: 64 bits array - a bit is set if station is member of the group * @position: 2 bits per group id indicating the position in the group * @@ -6554,7 +6661,7 @@ ieee80211_vif_type_p2p(struct ieee80211_vif *vif) * matching GroupId management frame. 
* Calls to this function need to be serialized with RX path. */ -void ieee80211_update_mu_groups(struct ieee80211_vif *vif, +void ieee80211_update_mu_groups(struct ieee80211_vif *vif, unsigned int link_id, const u8 *membership, const u8 *position); void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, @@ -6787,6 +6894,9 @@ static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) { } +void __ieee80211_schedule_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq, bool force); + /** * ieee80211_schedule_txq - schedule a TXQ for transmission * @@ -6799,7 +6909,11 @@ static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac) * The driver may call this function if it has buffered packets for * this TXQ internally. */ -void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq); +static inline void +ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) +{ + __ieee80211_schedule_txq(hw, txq, true); +} /** * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() @@ -6811,8 +6925,12 @@ void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq); * The driver may set force=true if it has buffered packets for this TXQ * internally. */ -void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, - bool force); +static inline void +ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, + bool force) +{ + __ieee80211_schedule_txq(hw, txq, force); +} /** * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h index 9deb3a3735da..0c71c27979fb 100644 --- a/include/net/mpls_iptunnel.h +++ b/include/net/mpls_iptunnel.h @@ -6,6 +6,9 @@ #ifndef _NET_MPLS_IPTUNNEL_H #define _NET_MPLS_IPTUNNEL_H 1 +#include <linux/types.h> +#include <net/lwtunnel.h> + struct mpls_iptunnel_encap { u8 labels; u8 ttl_propagate; diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 4d761ad530c9..412479ebf5ad 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -39,6 +39,7 @@ struct mptcp_ext { infinite_map:1; }; +#define MPTCPOPT_HMAC_LEN 20 #define MPTCP_RM_IDS_MAX 8 struct mptcp_rm_list { @@ -89,7 +90,7 @@ struct mptcp_out_options { u32 nonce; u32 token; u64 thmac; - u8 hmac[20]; + u8 hmac[MPTCPOPT_HMAC_LEN]; }; }; #endif @@ -290,4 +291,8 @@ struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk); static inline struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk) { return NULL; } #endif +#if !IS_ENABLED(CONFIG_MPTCP) +struct mptcp_sock { }; +#endif + #endif /* __NET_MPTCP_H */ diff --git a/include/net/mrp.h b/include/net/mrp.h index 1c308c034e1a..92cd3fb6cf9d 100644 --- a/include/net/mrp.h +++ b/include/net/mrp.h @@ -2,6 +2,10 @@ #ifndef _NET_MRP_H #define _NET_MRP_H +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/types.h> + #define MRP_END_MARK 0x0 struct mrp_pdu_hdr { diff --git a/include/net/ncsi.h b/include/net/ncsi.h index fbefe80361ee..08a50d9acb0a 100644 --- a/include/net/ncsi.h +++ b/include/net/ncsi.h @@ -2,6 +2,8 @@ #ifndef __NET_NCSI_H #define __NET_NCSI_H +#include <linux/types.h> + /* * The NCSI device states seen from external. More NCSI device states are * only visible internally (in net/ncsi/internal.h). 
When the NCSI device diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 87419f7f5421..9f0bab0589d9 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -48,6 +48,7 @@ enum { NEIGH_VAR_RETRANS_TIME, NEIGH_VAR_BASE_REACHABLE_TIME, NEIGH_VAR_DELAY_PROBE_TIME, + NEIGH_VAR_INTERVAL_PROBE_TIME_MS, NEIGH_VAR_GC_STALETIME, NEIGH_VAR_QUEUE_LEN_BYTES, NEIGH_VAR_PROXY_QLEN, diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index c4f5601f6e32..8c3587d5c308 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -26,6 +26,9 @@ #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) #include <net/netns/conntrack.h> #endif +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) +#include <net/netns/flow_table.h> +#endif #include <net/netns/nftables.h> #include <net/netns/xfrm.h> #include <net/netns/mpls.h> @@ -120,7 +123,9 @@ struct net { struct netns_core core; struct netns_mib mib; struct netns_packet packet; +#if IS_ENABLED(CONFIG_UNIX) struct netns_unix unx; +#endif struct netns_nexthop nexthop; struct netns_ipv4 ipv4; #if IS_ENABLED(CONFIG_IPV6) @@ -140,6 +145,9 @@ struct net { #if defined(CONFIG_NF_TABLES) || defined(CONFIG_NF_TABLES_MODULE) struct netns_nftables nft; #endif +#if IS_ENABLED(CONFIG_NF_FLOW_TABLE) + struct netns_ft ft; +#endif #endif #ifdef CONFIG_WEXT_CORE struct sk_buff_head wext_nlevents; diff --git a/include/net/netevent.h b/include/net/netevent.h index 4107016c3bb4..1be3757a8b7f 100644 --- a/include/net/netevent.h +++ b/include/net/netevent.h @@ -14,6 +14,7 @@ struct dst_entry; struct neighbour; +struct notifier_block ; struct netevent_redirect { struct dst_entry *old; diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 37866c8386e2..3cd3a6e631aa 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -84,4 +84,23 @@ void nf_conntrack_lock(spinlock_t *lock); extern spinlock_t nf_conntrack_expect_lock; +/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */ + +#if (IS_BUILTIN(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \ + (IS_MODULE(CONFIG_NF_CONNTRACK) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES) || \ + IS_ENABLED(CONFIG_NF_CT_NETLINK)) + +static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout) +{ + if (timeout > INT_MAX) + timeout = INT_MAX; + WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout); +} + +int __nf_ct_change_timeout(struct nf_conn *ct, u64 cta_timeout); +void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off); +int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status); + +#endif + #endif /* _NF_CONNTRACK_CORE_H */ diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index fea258983d23..9fdaba911de6 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -105,7 +105,7 @@ struct nf_ct_timeout_hooks { void (*timeout_put)(struct nf_ct_timeout *timeout); }; -extern const struct nf_ct_timeout_hooks *nf_ct_timeout_hook; +extern const struct nf_ct_timeout_hooks __rcu *nf_ct_timeout_hook; #endif #endif /* _NF_CONNTRACK_TIMEOUT_H */ diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 64daafd1fc41..d5326c44b453 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -335,4 +335,25 @@ static inline __be16 
nf_flow_pppoe_proto(const struct sk_buff *skb) return 0; } +#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count) +#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count) +#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count) \ + this_cpu_inc((net)->ft.stat->count) +#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count) \ + this_cpu_dec((net)->ft.stat->count) + +#ifdef CONFIG_NF_FLOW_TABLE_PROCFS +int nf_flow_table_init_proc(struct net *net); +void nf_flow_table_fini_proc(struct net *net); +#else +static inline int nf_flow_table_init_proc(struct net *net) +{ + return 0; +} + +static inline void nf_flow_table_fini_proc(struct net *net) +{ +} +#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */ + #endif /* _NF_FLOW_TABLE_H */ diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h index 987111ae5240..e9eb01e99d2f 100644 --- a/include/net/netfilter/nf_nat.h +++ b/include/net/netfilter/nf_nat.h @@ -104,7 +104,7 @@ unsigned int nf_nat_inet_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); -static inline int nf_nat_initialized(struct nf_conn *ct, +static inline int nf_nat_initialized(const struct nf_conn *ct, enum nf_nat_manip_type manip) { if (manip == NF_NAT_MANIP_SRC) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 64cf655c818c..99aae36c04b9 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -157,11 +157,26 @@ static inline void nft_reg_store16(u32 *dreg, u16 val) *(u16 *)dreg = val; } +static inline void nft_reg_store_be16(u32 *dreg, __be16 val) +{ + nft_reg_store16(dreg, (__force __u16)val); +} + static inline u16 nft_reg_load16(const u32 *sreg) { return *(u16 *)sreg; } +static inline __be16 nft_reg_load_be16(const u32 *sreg) +{ + return (__force __be16)nft_reg_load16(sreg); +} + +static inline __be32 nft_reg_load_be32(const u32 *sreg) +{ + return *(__force __be32 *)sreg; +} + static inline void nft_reg_store64(u32 *dreg, u64 val) { put_unaligned(val, (u64 *)dreg); @@ -206,13 +221,18 @@ struct nft_ctx { bool report; }; +enum nft_data_desc_flags { + NFT_DATA_DESC_SETELEM = (1 << 0), +}; + struct nft_data_desc { enum nft_data_types type; + unsigned int size; unsigned int len; + unsigned int flags; }; -int nft_data_init(const struct nft_ctx *ctx, - struct nft_data *data, unsigned int size, +int nft_data_init(const struct nft_ctx *ctx, struct nft_data *data, struct nft_data_desc *desc, const struct nlattr *nla); void nft_data_hold(const struct nft_data *data, enum nft_data_types type); void nft_data_release(const struct nft_data *data, enum nft_data_types type); @@ -636,6 +656,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[]; struct nft_set_ext_tmpl { u16 len; u8 offset[NFT_SET_EXT_NUM]; + u8 ext_len[NFT_SET_EXT_NUM]; }; /** @@ -665,7 +686,8 @@ static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, return -EINVAL; tmpl->offset[id] = tmpl->len; - tmpl->len += nft_set_ext_types[id].len + len; + tmpl->ext_len[id] = nft_set_ext_types[id].len + len; + tmpl->len += tmpl->ext_len[id]; return 0; } diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index 0ea7c55cea4d..1223af68cd9a 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -56,16 +56,6 @@ struct nft_immediate_expr { u8 dlen; }; -/* Calculate the mask for the nft_cmp_fast expression. 
On big endian the - * mask needs to include the *upper* bytes when interpreting that data as - * something smaller than the full u32, therefore a cpu_to_le32 is done. - */ -static inline u32 nft_cmp_fast_mask(unsigned int len) -{ - return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr, - data) * BITS_PER_BYTE - len)); -} - extern const struct nft_expr_ops nft_cmp_fast_ops; extern const struct nft_expr_ops nft_cmp16_fast_ops; diff --git a/include/net/netns/can.h b/include/net/netns/can.h index 52fbd8291a96..48b79f7e6236 100644 --- a/include/net/netns/can.h +++ b/include/net/netns/can.h @@ -7,6 +7,7 @@ #define __NETNS_CAN_H__ #include <linux/spinlock.h> +#include <linux/timer.h> struct can_dev_rcv_lists; struct can_pkg_stats; diff --git a/include/net/netns/core.h b/include/net/netns/core.h index 388244e315e7..8249060cf5d0 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ -2,6 +2,8 @@ #ifndef __NETNS_CORE_H__ #define __NETNS_CORE_H__ +#include <linux/types.h> + struct ctl_table_header; struct prot_inuse; diff --git a/include/net/netns/flow_table.h b/include/net/netns/flow_table.h new file mode 100644 index 000000000000..1c5fc657e267 --- /dev/null +++ b/include/net/netns/flow_table.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NETNS_FLOW_TABLE_H +#define __NETNS_FLOW_TABLE_H + +struct nf_flow_table_stat { + unsigned int count_wq_add; + unsigned int count_wq_del; + unsigned int count_wq_stats; +}; + +struct netns_ft { + struct nf_flow_table_stat __percpu *stat; +}; +#endif diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h index 8a1ab47c3fb3..7ce68183f6e1 100644 --- a/include/net/netns/generic.h +++ b/include/net/netns/generic.h @@ -8,6 +8,7 @@ #include <linux/bug.h> #include <linux/rcupdate.h> +#include <net/net_namespace.h> /* * Generic net pointers are to be used by modules to put some private diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index ce0cc4e8d8c7..c7320ef356d9 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -9,6 +9,7 @@ #include <linux/uidgid.h> #include <net/inet_frag.h> #include <linux/rcupdate.h> +#include <linux/seqlock.h> #include <linux/siphash.h> struct ctl_table_header; diff --git a/include/net/netns/mctp.h b/include/net/netns/mctp.h index acedef12a35e..1db8f9aaddb4 100644 --- a/include/net/netns/mctp.h +++ b/include/net/netns/mctp.h @@ -6,6 +6,7 @@ #ifndef __NETNS_MCTP_H__ #define __NETNS_MCTP_H__ +#include <linux/mutex.h> #include <linux/types.h> struct netns_mctp { diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h index a7bdcfbb0b28..19ad2574b267 100644 --- a/include/net/netns/mpls.h +++ b/include/net/netns/mpls.h @@ -6,6 +6,8 @@ #ifndef __NETNS_MPLS_H__ #define __NETNS_MPLS_H__ +#include <linux/types.h> + struct mpls_route; struct ctl_table_header; diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h index 1849e77eb68a..434239b37014 100644 --- a/include/net/netns/nexthop.h +++ b/include/net/netns/nexthop.h @@ -6,6 +6,7 @@ #ifndef __NETNS_NEXTHOP_H__ #define __NETNS_NEXTHOP_H__ +#include <linux/notifier.h> #include <linux/rbtree.h> struct netns_nexthop { diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h index 40240722cdca..a681147aecd8 100644 --- a/include/net/netns/sctp.h +++ b/include/net/netns/sctp.h @@ -2,6 +2,9 @@ #ifndef __NETNS_SCTP_H__ #define __NETNS_SCTP_H__ +#include <linux/timer.h> +#include <net/snmp.h> + struct sock; struct proc_dir_entry; struct sctp_mib; diff --git 
a/include/net/netns/smc.h b/include/net/netns/smc.h index e5389eeaf8bd..2adbe2b245df 100644 --- a/include/net/netns/smc.h +++ b/include/net/netns/smc.h @@ -18,5 +18,6 @@ struct netns_smc { struct ctl_table_header *smc_hdr; #endif unsigned int sysctl_autocorking_size; + unsigned int sysctl_smcr_buf_type; }; #endif diff --git a/include/net/netns/unix.h b/include/net/netns/unix.h index 91a3d7e39198..9859d134d5a8 100644 --- a/include/net/netns/unix.h +++ b/include/net/netns/unix.h @@ -5,8 +5,16 @@ #ifndef __NETNS_UNIX_H__ #define __NETNS_UNIX_H__ +#include <linux/spinlock.h> + +struct unix_table { + spinlock_t *locks; + struct hlist_head *buckets; +}; + struct ctl_table_header; struct netns_unix { + struct unix_table table; int sysctl_max_dgram_qlen; struct ctl_table_header *ctl; }; diff --git a/include/net/netrom.h b/include/net/netrom.h index 80f15b1c1a48..f0565a5987d1 100644 --- a/include/net/netrom.h +++ b/include/net/netrom.h @@ -14,6 +14,7 @@ #include <net/sock.h> #include <linux/refcount.h> #include <linux/seq_file.h> +#include <net/ax25.h> #define NR_NETWORK_LEN 15 #define NR_TRANSPORT_LEN 5 diff --git a/include/net/p8022.h b/include/net/p8022.h index c2bacc66bfbc..b690ffcad66b 100644 --- a/include/net/p8022.h +++ b/include/net/p8022.h @@ -1,6 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NET_P8022_H #define _NET_P8022_H + +struct net_device; +struct packet_type; +struct sk_buff; + struct datalink_proto * register_8022_client(unsigned char type, int (*func)(struct sk_buff *skb, diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h index 27b1ab5e4e6d..645dddf5ce77 100644 --- a/include/net/phonet/pep.h +++ b/include/net/phonet/pep.h @@ -10,6 +10,9 @@ #ifndef NET_PHONET_PEP_H #define NET_PHONET_PEP_H +#include <linux/skbuff.h> +#include <net/phonet/phonet.h> + struct pep_sock { struct pn_sock pn_sk; diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index a27bdc6cfeab..862f1719b523 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h @@ -10,6 +10,10 @@ #ifndef AF_PHONET_H #define AF_PHONET_H +#include <linux/phonet.h> +#include <linux/skbuff.h> +#include <net/sock.h> + /* * The lower layers may not require more space, ever. Make sure it's * enough. 
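The netns_unix change above replaces the flat per-netns state with a unix_table whose locks[] array parallels buckets[], so each hash bucket can be locked independently. A minimal sketch of walking one bucket under its own lock follows, assuming CONFIG_UNIX is enabled (the unx member of struct net is now conditional); the bucket index and the element type holding the hlist_node are assumptions for illustration, only the table.locks/table.buckets layout comes from the header.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>

/* Hypothetical walk of a single bucket of the per-netns unix_table. */
static void example_walk_unix_bucket(struct net *net, unsigned int hash)
{
	struct hlist_head *head = &net->unx.table.buckets[hash];
	spinlock_t *lock = &net->unx.table.locks[hash];
	struct hlist_node *pos;

	spin_lock(lock);		/* per-bucket lock, not a table-wide one */
	hlist_for_each(pos, head) {
		/* container_of(pos, ..., ...) would recover the actual
		 * socket entry; the element type is not defined in this
		 * header, so the body is left empty here.
		 */
	}
	spin_unlock(lock);
}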
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index 05b49d4d2b11..e9dc8dca5817 100644 --- a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h @@ -10,6 +10,11 @@ #ifndef PN_DEV_H #define PN_DEV_H +#include <linux/list.h> +#include <linux/mutex.h> + +struct net; + struct phonet_device_list { struct list_head list; struct mutex lock; diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 8cf001aed858..d9d90e6925e1 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -23,7 +23,7 @@ struct tcf_walker { }; int register_tcf_proto_ops(struct tcf_proto_ops *ops); -int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); +void unregister_tcf_proto_ops(struct tcf_proto_ops *ops); struct tcf_block_ext_info { enum flow_block_binder_type binder_type; diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 44a35531952e..3372a1f67cf4 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -173,11 +173,28 @@ struct tc_taprio_qopt_offload { struct tc_taprio_sched_entry entries[]; }; +#if IS_ENABLED(CONFIG_NET_SCH_TAPRIO) + /* Reference counting */ struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload *offload); void taprio_offload_free(struct tc_taprio_qopt_offload *offload); +#else + +/* Reference counting */ +static inline struct tc_taprio_qopt_offload * +taprio_offload_get(struct tc_taprio_qopt_offload *offload) +{ + return NULL; +} + +static inline void taprio_offload_free(struct tc_taprio_qopt_offload *offload) +{ +} + +#endif + /* Ensure skb_mstamp_ns, which might have been populated with the txtime, is * not mistaken for a software timestamp, because this will otherwise prevent * the dispatch of hardware timestamps to the socket. diff --git a/include/net/pptp.h b/include/net/pptp.h index 383e25ca53a7..e63176bdd4c8 100644 --- a/include/net/pptp.h +++ b/include/net/pptp.h @@ -2,6 +2,9 @@ #ifndef _NET_PPTP_H #define _NET_PPTP_H +#include <linux/types.h> +#include <net/gre.h> + #define PPP_LCP_ECHOREQ 0x09 #define PPP_LCP_ECHOREP 0x0A #define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP) diff --git a/include/net/psnap.h b/include/net/psnap.h index 7cb0c8ab4171..88802b0754ad 100644 --- a/include/net/psnap.h +++ b/include/net/psnap.h @@ -2,6 +2,11 @@ #ifndef _NET_PSNAP_H #define _NET_PSNAP_H +struct datalink_proto; +struct sk_buff; +struct packet_type; +struct net_device; + struct datalink_proto * register_snap_client(const unsigned char *desc, int (*rcvfunc)(struct sk_buff *, struct net_device *, diff --git a/include/net/raw.h b/include/net/raw.h index c51a635671a7..5e665934ebc7 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -20,9 +20,8 @@ extern struct proto raw_prot; extern struct raw_hashinfo raw_v4_hashinfo; -struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, - unsigned short num, __be32 raddr, - __be32 laddr, int dif, int sdif); +bool raw_v4_match(struct net *net, struct sock *sk, unsigned short num, + __be32 raddr, __be32 laddr, int dif, int sdif); int raw_abort(struct sock *sk, int err); void raw_icmp_error(struct sk_buff *, int, u32); @@ -33,10 +32,19 @@ int raw_rcv(struct sock *, struct sk_buff *); #define RAW_HTABLE_SIZE MAX_INET_PROTOS struct raw_hashinfo { - rwlock_t lock; - struct hlist_head ht[RAW_HTABLE_SIZE]; + spinlock_t lock; + struct hlist_nulls_head ht[RAW_HTABLE_SIZE]; }; +static inline void raw_hashinfo_init(struct raw_hashinfo *hashinfo) +{ + int i; + + spin_lock_init(&hashinfo->lock); + for (i = 0; i < RAW_HTABLE_SIZE; i++) + 
INIT_HLIST_NULLS_HEAD(&hashinfo->ht[i], i); +} + #ifdef CONFIG_PROC_FS int raw_proc_init(void); void raw_proc_exit(void); diff --git a/include/net/rawv6.h b/include/net/rawv6.h index 53d86b6055e8..bc70909625f6 100644 --- a/include/net/rawv6.h +++ b/include/net/rawv6.h @@ -3,11 +3,12 @@ #define _NET_RAWV6_H #include <net/protocol.h> +#include <net/raw.h> extern struct raw_hashinfo raw_v6_hashinfo; -struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, - unsigned short num, const struct in6_addr *loc_addr, - const struct in6_addr *rmt_addr, int dif, int sdif); +bool raw_v6_match(struct net *net, struct sock *sk, unsigned short num, + const struct in6_addr *loc_addr, + const struct in6_addr *rmt_addr, int dif, int sdif); int raw_abort(struct sock *sk, int err); diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 47f06f6f5a67..896191f420d5 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -1,3 +1,4 @@ + #ifndef __NET_REGULATORY_H #define __NET_REGULATORY_H /* @@ -19,6 +20,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/ieee80211.h> +#include <linux/nl80211.h> #include <linux/rcupdate.h> /** diff --git a/include/net/rose.h b/include/net/rose.h index 0f0a4ce0fee7..23267b4efcfa 100644 --- a/include/net/rose.h +++ b/include/net/rose.h @@ -9,6 +9,7 @@ #define _ROSE_H #include <linux/rose.h> +#include <net/ax25.h> #include <net/sock.h> #define ROSE_ADDR_LEN 5 @@ -131,7 +132,8 @@ struct rose_sock { ax25_address source_digis[ROSE_MAX_DIGIS]; ax25_address dest_digis[ROSE_MAX_DIGIS]; struct rose_neigh *neighbour; - struct net_device *device; + struct net_device *device; + netdevice_tracker dev_tracker; unsigned int lci, rand; unsigned char state, condition, qbitincl, defer; unsigned char cause, diagnostic; diff --git a/include/net/route.h b/include/net/route.h index bbcf2aba149f..6e92dd5bcd61 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -201,10 +201,6 @@ int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, struct in_device *in_dev, u32 *itag); int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin); -int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin, - struct fib_result *res); - int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src, u8 tos, struct net_device *devin, const struct sk_buff *hint); @@ -244,8 +240,7 @@ void ip_rt_multicast_event(struct in_device *); int ip_rt_ioctl(struct net *, unsigned int cmd, struct rtentry *rt); void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); struct rtable *rt_dst_alloc(struct net_device *dev, - unsigned int flags, u16 type, - bool nopolicy, bool noxfrm); + unsigned int flags, u16 type, bool noxfrm); struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt); struct in_ifaddr; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d6cf5116b5f9..ec693fe7c553 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -551,25 +551,6 @@ static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc) return qdisc->dev_queue->qdisc_sleeping; } -/* The qdisc root lock is a mechanism by which to top level - * of a qdisc tree can be locked from any qdisc node in the - * forest. This allows changing the configuration of some - * aspect of the qdisc tree while blocking out asynchronous - * qdisc access in the packet processing paths. 
- * - * It is only legal to do this when the root will not change - * on us. Otherwise we'll potentially lock the wrong qdisc - * root. This is enforced by holding the RTNL semaphore, which - * all users of this lock accessor must do. - */ -static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc) -{ - struct Qdisc *root = qdisc_root(qdisc); - - ASSERT_RTNL(); - return qdisc_lock(root); -} - static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) { struct Qdisc *root = qdisc_root_sleeping(qdisc); diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h index dac91aa38c5a..21e7fa2a1813 100644 --- a/include/net/secure_seq.h +++ b/include/net/secure_seq.h @@ -4,6 +4,8 @@ #include <linux/types.h> +struct net; + u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport); u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport); diff --git a/include/net/smc.h b/include/net/smc.h index e441aa97ad61..c926d3313e05 100644 --- a/include/net/smc.h +++ b/include/net/smc.h @@ -11,6 +11,13 @@ #ifndef _SMC_H #define _SMC_H +#include <linux/device.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/wait.h> + +struct sock; + #define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */ struct smc_hashinfo { @@ -65,7 +72,7 @@ struct smcd_ops { int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx, bool sf, unsigned int offset, void *data, unsigned int size); - void (*get_system_eid)(struct smcd_dev *dev, u8 **eid); + u8* (*get_system_eid)(void); u16 (*get_chid)(struct smcd_dev *dev); }; @@ -94,5 +101,5 @@ int smcd_register_dev(struct smcd_dev *smcd); void smcd_unregister_dev(struct smcd_dev *smcd); void smcd_free_dev(struct smcd_dev *smcd); void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event); -void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit); +void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit, u16 dmbemask); #endif /* _SMC_H */ diff --git a/include/net/sock.h b/include/net/sock.h index 7a48991cdb19..05a1bbdf5805 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -545,14 +545,26 @@ enum sk_pacing { SK_PACING_FQ = 2, }; -/* Pointer stored in sk_user_data might not be suitable for copying - * when cloning the socket. For instance, it can point to a reference - * counted object. sk_user_data bottom bit is set if pointer must not - * be copied. +/* flag bits in sk_user_data + * + * - SK_USER_DATA_NOCOPY: Pointer stored in sk_user_data might + * not be suitable for copying when cloning the socket. For instance, + * it can point to a reference counted object. sk_user_data bottom + * bit is set if pointer must not be copied. + * + * - SK_USER_DATA_BPF: Mark whether sk_user_data field is + * managed/owned by a BPF reuseport array. This bit should be set + * when sk_user_data's sk is added to the bpf's reuseport_array. + * + * - SK_USER_DATA_PSOCK: Mark whether pointer stored in + * sk_user_data points to psock type. This bit should be set + * when sk_user_data is assigned to a psock object. 
*/ #define SK_USER_DATA_NOCOPY 1UL -#define SK_USER_DATA_BPF 2UL /* Managed by BPF */ -#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF) +#define SK_USER_DATA_BPF 2UL +#define SK_USER_DATA_PSOCK 4UL +#define SK_USER_DATA_PTRMASK ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF |\ + SK_USER_DATA_PSOCK) /** * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied @@ -565,24 +577,40 @@ static inline bool sk_user_data_is_nocopy(const struct sock *sk) #define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data))) +/** + * __rcu_dereference_sk_user_data_with_flags - return the pointer + * only if argument flags all has been set in sk_user_data. Otherwise + * return NULL + * + * @sk: socket + * @flags: flag bits + */ +static inline void * +__rcu_dereference_sk_user_data_with_flags(const struct sock *sk, + uintptr_t flags) +{ + uintptr_t sk_user_data = (uintptr_t)rcu_dereference(__sk_user_data(sk)); + + WARN_ON_ONCE(flags & SK_USER_DATA_PTRMASK); + + if ((sk_user_data & flags) == flags) + return (void *)(sk_user_data & SK_USER_DATA_PTRMASK); + return NULL; +} + #define rcu_dereference_sk_user_data(sk) \ + __rcu_dereference_sk_user_data_with_flags(sk, 0) +#define __rcu_assign_sk_user_data_with_flags(sk, ptr, flags) \ ({ \ - void *__tmp = rcu_dereference(__sk_user_data((sk))); \ - (void *)((uintptr_t)__tmp & SK_USER_DATA_PTRMASK); \ -}) -#define rcu_assign_sk_user_data(sk, ptr) \ -({ \ - uintptr_t __tmp = (uintptr_t)(ptr); \ - WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ - rcu_assign_pointer(__sk_user_data((sk)), __tmp); \ -}) -#define rcu_assign_sk_user_data_nocopy(sk, ptr) \ -({ \ - uintptr_t __tmp = (uintptr_t)(ptr); \ - WARN_ON_ONCE(__tmp & ~SK_USER_DATA_PTRMASK); \ + uintptr_t __tmp1 = (uintptr_t)(ptr), \ + __tmp2 = (uintptr_t)(flags); \ + WARN_ON_ONCE(__tmp1 & ~SK_USER_DATA_PTRMASK); \ + WARN_ON_ONCE(__tmp2 & SK_USER_DATA_PTRMASK); \ rcu_assign_pointer(__sk_user_data((sk)), \ - __tmp | SK_USER_DATA_NOCOPY); \ + __tmp1 | __tmp2); \ }) +#define rcu_assign_sk_user_data(sk, ptr) \ + __rcu_assign_sk_user_data_with_flags(sk, ptr, 0) static inline struct net *sock_net(const struct sock *sk) @@ -609,7 +637,7 @@ void sock_net_set(struct sock *sk, struct net *net) int sk_set_peek_off(struct sock *sk, int val); -static inline int sk_peek_offset(struct sock *sk, int flags) +static inline int sk_peek_offset(const struct sock *sk, int flags) { if (unlikely(flags & MSG_PEEK)) { return READ_ONCE(sk->sk_peek_off); @@ -849,7 +877,7 @@ static inline void sk_add_bind_node(struct sock *sk, ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;}); \ pos = rcu_dereference(hlist_next_rcu(pos))) -static inline struct user_namespace *sk_user_ns(struct sock *sk) +static inline struct user_namespace *sk_user_ns(const struct sock *sk) { /* Careful only use this in a context where these parameters * can not change and must all be valid, such as recvmsg from @@ -895,7 +923,7 @@ enum sock_flags { #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) -static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) +static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk) { nsk->sk_flags = osk->sk_flags; } @@ -1240,6 +1268,7 @@ struct proto { void (*enter_memory_pressure)(struct sock *sk); void (*leave_memory_pressure)(struct sock *sk); atomic_long_t *memory_allocated; /* Current allocated memory. */ + int __percpu *per_cpu_fw_alloc; struct percpu_counter *sockets_allocated; /* Current number of sockets. 
*/ /* @@ -1383,21 +1412,46 @@ static inline bool sk_under_memory_pressure(const struct sock *sk) } static inline long -sk_memory_allocated(const struct sock *sk) +proto_memory_allocated(const struct proto *prot) { - return atomic_long_read(sk->sk_prot->memory_allocated); + return max(0L, atomic_long_read(prot->memory_allocated)); } static inline long +sk_memory_allocated(const struct sock *sk) +{ + return proto_memory_allocated(sk->sk_prot); +} + +/* 1 MB per cpu, in page units */ +#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT)) + +static inline void sk_memory_allocated_add(struct sock *sk, int amt) { - return atomic_long_add_return(amt, sk->sk_prot->memory_allocated); + int local_reserve; + + preempt_disable(); + local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt); + if (local_reserve >= SK_MEMORY_PCPU_RESERVE) { + __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve); + atomic_long_add(local_reserve, sk->sk_prot->memory_allocated); + } + preempt_enable(); } static inline void sk_memory_allocated_sub(struct sock *sk, int amt) { - atomic_long_sub(amt, sk->sk_prot->memory_allocated); + int local_reserve; + + preempt_disable(); + local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt); + if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) { + __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve); + atomic_long_add(local_reserve, sk->sk_prot->memory_allocated); + } + preempt_enable(); } #define SK_ALLOC_PERCPU_COUNTER_BATCH 16 @@ -1426,12 +1480,6 @@ proto_sockets_allocated_sum_positive(struct proto *prot) return percpu_counter_sum_positive(prot->sockets_allocated); } -static inline long -proto_memory_allocated(struct proto *prot) -{ - return atomic_long_read(prot->memory_allocated); -} - static inline bool proto_memory_pressure(struct proto *prot) { @@ -1518,30 +1566,18 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind); void __sk_mem_reduce_allocated(struct sock *sk, int amount); void __sk_mem_reclaim(struct sock *sk, int amount); -/* We used to have PAGE_SIZE here, but systems with 64KB pages - * do not necessarily have 16x time more memory than 4KB ones. 
- */ -#define SK_MEM_QUANTUM 4096 -#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) #define SK_MEM_SEND 0 #define SK_MEM_RECV 1 -/* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */ +/* sysctl_mem values are in pages */ static inline long sk_prot_mem_limits(const struct sock *sk, int index) { - long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]); - -#if PAGE_SIZE > SK_MEM_QUANTUM - val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT; -#elif PAGE_SIZE < SK_MEM_QUANTUM - val >>= SK_MEM_QUANTUM_SHIFT - PAGE_SHIFT; -#endif - return val; + return READ_ONCE(sk->sk_prot->sysctl_mem[index]); } static inline int sk_mem_pages(int amt) { - return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; + return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT; } static inline bool sk_has_account(struct sock *sk) @@ -1552,19 +1588,23 @@ static inline bool sk_has_account(struct sock *sk) static inline bool sk_wmem_schedule(struct sock *sk, int size) { + int delta; + if (!sk_has_account(sk)) return true; - return size <= sk->sk_forward_alloc || - __sk_mem_schedule(sk, size, SK_MEM_SEND); + delta = size - sk->sk_forward_alloc; + return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_SEND); } static inline bool sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) { + int delta; + if (!sk_has_account(sk)) return true; - return size <= sk->sk_forward_alloc || - __sk_mem_schedule(sk, size, SK_MEM_RECV) || + delta = size - sk->sk_forward_alloc; + return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || skb_pfmemalloc(skb); } @@ -1590,7 +1630,7 @@ static inline void sk_mem_reclaim(struct sock *sk) reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); - if (reclaimable >= SK_MEM_QUANTUM) + if (reclaimable >= (int)PAGE_SIZE) __sk_mem_reclaim(sk, reclaimable); } @@ -1600,19 +1640,6 @@ static inline void sk_mem_reclaim_final(struct sock *sk) sk_mem_reclaim(sk); } -static inline void sk_mem_reclaim_partial(struct sock *sk) -{ - int reclaimable; - - if (!sk_has_account(sk)) - return; - - reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); - - if (reclaimable > SK_MEM_QUANTUM) - __sk_mem_reclaim(sk, reclaimable - 1); -} - static inline void sk_mem_charge(struct sock *sk, int size) { if (!sk_has_account(sk)) @@ -1620,29 +1647,12 @@ static inline void sk_mem_charge(struct sock *sk, int size) sk->sk_forward_alloc -= size; } -/* the following macros control memory reclaiming in sk_mem_uncharge() - */ -#define SK_RECLAIM_THRESHOLD (1 << 21) -#define SK_RECLAIM_CHUNK (1 << 20) - static inline void sk_mem_uncharge(struct sock *sk, int size) { - int reclaimable; - if (!sk_has_account(sk)) return; sk->sk_forward_alloc += size; - reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk); - - /* Avoid a possible overflow. - * TCP send queues can make this happen, if sk_mem_reclaim() - * is not called and more than 2 GBytes are released at once. - * - * If we reach 2 MBytes, reclaim 1 MBytes right now, there is - * no need to hold that much forward allocation anyway. 
- */ - if (unlikely(reclaimable >= SK_RECLAIM_THRESHOLD)) - __sk_mem_reclaim(sk, SK_RECLAIM_CHUNK); + sk_mem_reclaim(sk); } /* @@ -2232,9 +2242,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro if (err) return err; - skb->len += copy; - skb->data_len += copy; - skb->truesize += copy; + skb_len_add(skb, copy); sk_wmem_queued_add(sk, copy); sk_mem_charge(sk, copy); return 0; diff --git a/include/net/stp.h b/include/net/stp.h index 2914e6d53490..528103fce2c0 100644 --- a/include/net/stp.h +++ b/include/net/stp.h @@ -2,6 +2,8 @@ #ifndef _NET_STP_H #define _NET_STP_H +#include <linux/if_ether.h> + struct stp_proto { unsigned char group_address[ETH_ALEN]; void (*rcv)(const struct stp_proto *, struct sk_buff *, diff --git a/include/net/strparser.h b/include/net/strparser.h index a191486eb1e4..41e2ce9e9e10 100644 --- a/include/net/strparser.h +++ b/include/net/strparser.h @@ -65,15 +65,18 @@ struct _strp_msg { struct sk_skb_cb { #define SK_SKB_CB_PRIV_LEN 20 unsigned char data[SK_SKB_CB_PRIV_LEN]; + /* align strp on cache line boundary within skb->cb[] */ + unsigned char pad[4]; struct _strp_msg strp; + + /* strp users' data follows */ + struct tls_msg { + u8 control; + } tls; /* temp_reg is a temporary register used for bpf_convert_data_end_access * when dst_reg == src_reg. */ u64 temp_reg; - struct tls_msg { - u8 control; - u8 decrypted; - } tls; }; static inline struct strp_msg *strp_msg(struct sk_buff *skb) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index aa0171d5786d..7dcdc97c0bc3 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -239,6 +239,9 @@ struct switchdev_notifier_info { const void *ctx; }; +/* Remember to update br_switchdev_fdb_populate() when adding + * new members to this structure + */ struct switchdev_notifier_fdb_info { struct switchdev_notifier_info info; /* must be first */ const unsigned char *addr; diff --git a/include/net/tcp.h b/include/net/tcp.h index 78a64e1b33a7..d10962b9f0d0 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -253,6 +253,8 @@ extern long sysctl_tcp_mem[3]; #define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */ extern atomic_long_t tcp_memory_allocated; +DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc); + extern struct percpu_counter tcp_sockets_allocated; extern unsigned long tcp_memory_pressure; @@ -432,6 +434,7 @@ u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph, struct tcphdr *th, u32 *cookie); u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph, struct tcphdr *th, u32 *cookie); +u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss); u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, const struct tcp_request_sock_ops *af_ops, struct sock *sk, struct tcphdr *th); @@ -669,6 +672,9 @@ void tcp_get_info(struct sock *, struct tcp_info *); /* Read 'sendfile()'-style from a TCP socket */ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); +int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor); +struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off); +void tcp_read_done(struct sock *sk, size_t len); void tcp_initialize_rcv_mss(struct sock *sk); diff --git a/include/net/tls.h b/include/net/tls.h index 8bd938f98bdd..cb205f9d9473 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -39,7 +39,6 @@ #include <linux/crypto.h> #include <linux/socket.h> #include <linux/tcp.h> -#include <linux/skmsg.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include 
<linux/rcupdate.h> @@ -50,6 +49,7 @@ #include <crypto/aead.h> #include <uapi/linux/tls.h> +struct tls_rec; /* Maximum data size carried in a TLS record */ #define TLS_MAX_PAYLOAD_SIZE ((size_t)1 << 14) @@ -66,6 +66,7 @@ #define MAX_IV_SIZE 16 #define TLS_TAG_SIZE 16 #define TLS_MAX_REC_SEQ_SIZE 8 +#define TLS_MAX_AAD_SIZE TLS_AAD_SPACE_SIZE /* For CCM mode, the full 16-bytes of IV is made of '4' fields of given sizes. * @@ -77,13 +78,6 @@ #define TLS_AES_CCM_IV_B0_BYTE 2 #define TLS_SM4_CCM_IV_B0_BYTE 2 -#define __TLS_INC_STATS(net, field) \ - __SNMP_INC_STATS((net)->mib.tls_statistics, field) -#define TLS_INC_STATS(net, field) \ - SNMP_INC_STATS((net)->mib.tls_statistics, field) -#define TLS_DEC_STATS(net, field) \ - SNMP_DEC_STATS((net)->mib.tls_statistics, field) - enum { TLS_BASE, TLS_SW, @@ -92,32 +86,6 @@ enum { TLS_NUM_CONFIG, }; -/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages - * allocated or mapped for each TLS record. After encryption, the records are - * stores in a linked list. - */ -struct tls_rec { - struct list_head list; - int tx_ready; - int tx_flags; - - struct sk_msg msg_plaintext; - struct sk_msg msg_encrypted; - - /* AAD | msg_plaintext.sg.data | sg_tag */ - struct scatterlist sg_aead_in[2]; - /* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */ - struct scatterlist sg_aead_out[2]; - - char content_type; - struct scatterlist sg_content_type; - - char aad_space[TLS_AAD_SPACE_SIZE]; - u8 iv_data[MAX_IV_SIZE]; - struct aead_request aead_req; - u8 aead_req_ctx[]; -}; - struct tx_work { struct delayed_work work; struct sock *sk; @@ -140,18 +108,38 @@ struct tls_sw_context_tx { unsigned long tx_bitmask; }; +struct tls_strparser { + struct sock *sk; + + u32 mark : 8; + u32 stopped : 1; + u32 copy_mode : 1; + u32 msg_ready : 1; + + struct strp_msg stm; + + struct sk_buff *anchor; + struct work_struct work; +}; + struct tls_sw_context_rx { struct crypto_aead *aead_recv; struct crypto_wait async_wait; - struct strparser strp; struct sk_buff_head rx_list; /* list of decrypted 'data' records */ void (*saved_data_ready)(struct sock *sk); - struct sk_buff *recv_pkt; + u8 reader_present; u8 async_capable:1; + u8 zc_capable:1; + u8 reader_contended:1; + + struct tls_strparser strp; + atomic_t decrypt_pending; /* protect crypto_wait with decrypt_pending*/ spinlock_t decrypt_compl_lock; + struct sk_buff_head async_hold; + struct wait_queue_head wq; }; struct tls_record_info { @@ -173,6 +161,8 @@ struct tls_offload_context_tx { struct scatterlist sg_tx_data[MAX_SKB_FRAGS]; void (*sk_destruct)(struct sock *sk); + struct work_struct destruct_work; + struct tls_context *ctx; u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough @@ -239,6 +229,7 @@ struct tls_context { u8 tx_conf:3; u8 rx_conf:3; u8 zerocopy_sendfile:1; + u8 rx_no_pad:1; int (*push_pending_record)(struct sock *sk, int flags); void (*sk_write_space)(struct sock *sk); @@ -246,7 +237,7 @@ struct tls_context { void *priv_ctx_tx; void *priv_ctx_rx; - struct net_device *netdev; + struct net_device __rcu *netdev; /* rw cache line */ struct cipher_context tx; @@ -346,43 +337,6 @@ struct tls_offload_context_rx { #define TLS_OFFLOAD_CONTEXT_SIZE_RX \ (sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX) -struct tls_context *tls_ctx_create(struct sock *sk); -void tls_ctx_free(struct sock *sk, struct tls_context *ctx); -void update_sk_prot(struct sock *sk, struct tls_context *ctx); - 
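
One user-visible consequence of the tls_context changes above is that the netdev back-pointer is now __rcu-annotated, so readers outside the TLS core are expected to go through an RCU accessor rather than a plain load. A minimal sketch of that read pattern, assuming a hypothetical caller; only tls_get_ctx() and the __rcu field come from this header, the wrapper name is illustrative:

    /*
     * Illustrative only, not part of the patch: fetch the offload netdev
     * from an RCU-managed tls_context field.
     */
    static struct net_device *example_tls_netdev_get(const struct sock *sk)
    {
    	struct tls_context *ctx = tls_get_ctx(sk);
    	struct net_device *dev;

    	rcu_read_lock();
    	dev = rcu_dereference(ctx->netdev);	/* may be NULL once offload is torn down */
    	if (dev)
    		dev_hold(dev);			/* pin it beyond the read-side section */
    	rcu_read_unlock();

    	return dev;				/* caller releases with dev_put() */
    }
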
-int wait_on_pending_writer(struct sock *sk, long *timeo); -int tls_sk_query(struct sock *sk, int optname, char __user *optval, - int __user *optlen); -int tls_sk_attach(struct sock *sk, int optname, char __user *optval, - unsigned int optlen); -void tls_err_abort(struct sock *sk, int err); - -int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx); -void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); -void tls_sw_strparser_done(struct tls_context *tls_ctx); -int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); -int tls_sw_sendpage_locked(struct sock *sk, struct page *page, - int offset, size_t size, int flags); -int tls_sw_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags); -void tls_sw_cancel_work_tx(struct tls_context *tls_ctx); -void tls_sw_release_resources_tx(struct sock *sk); -void tls_sw_free_ctx_tx(struct tls_context *tls_ctx); -void tls_sw_free_resources_rx(struct sock *sk); -void tls_sw_release_resources_rx(struct sock *sk); -void tls_sw_free_ctx_rx(struct tls_context *tls_ctx); -int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, - int flags, int *addr_len); -bool tls_sw_sock_is_readable(struct sock *sk); -ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos, - struct pipe_inode_info *pipe, - size_t len, unsigned int flags); - -int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); -int tls_device_sendpage(struct sock *sk, struct page *page, - int offset, size_t size, int flags); -int tls_tx_records(struct sock *sk, int flags); - struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context, u32 seq, u64 *p_record_sn); @@ -396,58 +350,6 @@ static inline u32 tls_record_start_seq(struct tls_record_info *rec) return rec->end_seq - rec->len; } -int tls_push_sg(struct sock *sk, struct tls_context *ctx, - struct scatterlist *sg, u16 first_offset, - int flags); -int tls_push_partial_record(struct sock *sk, struct tls_context *ctx, - int flags); -void tls_free_partial_record(struct sock *sk, struct tls_context *ctx); - -static inline struct tls_msg *tls_msg(struct sk_buff *skb) -{ - struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb; - - return &scb->tls; -} - -static inline bool tls_is_partially_sent_record(struct tls_context *ctx) -{ - return !!ctx->partially_sent_record; -} - -static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx) -{ - return tls_ctx->pending_open_record_frags; -} - -static inline bool is_tx_ready(struct tls_sw_context_tx *ctx) -{ - struct tls_rec *rec; - - rec = list_first_entry(&ctx->tx_list, struct tls_rec, list); - if (!rec) - return false; - - return READ_ONCE(rec->tx_ready); -} - -static inline u16 tls_user_config(struct tls_context *ctx, bool tx) -{ - u16 config = tx ? 
ctx->tx_conf : ctx->rx_conf; - - switch (config) { - case TLS_BASE: - return TLS_CONF_BASE; - case TLS_SW: - return TLS_CONF_SW; - case TLS_HW: - return TLS_CONF_HW; - case TLS_HW_RECORD: - return TLS_CONF_HW_RECORD; - } - return 0; -} - struct sk_buff * tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, struct sk_buff *skb); @@ -466,31 +368,6 @@ static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) #endif } -static inline bool tls_bigint_increment(unsigned char *seq, int len) -{ - int i; - - for (i = len - 1; i >= 0; i--) { - ++seq[i]; - if (seq[i] != 0) - break; - } - - return (i == -1); -} - -static inline void tls_bigint_subtract(unsigned char *seq, int n) -{ - u64 rcd_sn; - __be64 *p; - - BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8); - - p = (__be64 *)seq; - rcd_sn = be64_to_cpu(*p); - *p = cpu_to_be64(rcd_sn - n); -} - static inline struct tls_context *tls_get_ctx(const struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -501,82 +378,6 @@ static inline struct tls_context *tls_get_ctx(const struct sock *sk) return (__force void *)icsk->icsk_ulp_data; } -static inline void tls_advance_record_sn(struct sock *sk, - struct tls_prot_info *prot, - struct cipher_context *ctx) -{ - if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size)) - tls_err_abort(sk, -EBADMSG); - - if (prot->version != TLS_1_3_VERSION && - prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) - tls_bigint_increment(ctx->iv + prot->salt_size, - prot->iv_size); -} - -static inline void tls_fill_prepend(struct tls_context *ctx, - char *buf, - size_t plaintext_len, - unsigned char record_type) -{ - struct tls_prot_info *prot = &ctx->prot_info; - size_t pkt_len, iv_size = prot->iv_size; - - pkt_len = plaintext_len + prot->tag_size; - if (prot->version != TLS_1_3_VERSION && - prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) { - pkt_len += iv_size; - - memcpy(buf + TLS_NONCE_OFFSET, - ctx->tx.iv + prot->salt_size, iv_size); - } - - /* we cover nonce explicit here as well, so buf should be of - * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE - */ - buf[0] = prot->version == TLS_1_3_VERSION ? - TLS_RECORD_TYPE_DATA : record_type; - /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */ - buf[1] = TLS_1_2_VERSION_MINOR; - buf[2] = TLS_1_2_VERSION_MAJOR; - /* we can use IV for nonce explicit according to spec */ - buf[3] = pkt_len >> 8; - buf[4] = pkt_len & 0xFF; -} - -static inline void tls_make_aad(char *buf, - size_t size, - char *record_sequence, - unsigned char record_type, - struct tls_prot_info *prot) -{ - if (prot->version != TLS_1_3_VERSION) { - memcpy(buf, record_sequence, prot->rec_seq_size); - buf += 8; - } else { - size += prot->tag_size; - } - - buf[0] = prot->version == TLS_1_3_VERSION ? 
- TLS_RECORD_TYPE_DATA : record_type; - buf[1] = TLS_1_2_VERSION_MAJOR; - buf[2] = TLS_1_2_VERSION_MINOR; - buf[3] = size >> 8; - buf[4] = size & 0xFF; -} - -static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq) -{ - int i; - - if (prot->version == TLS_1_3_VERSION || - prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) { - for (i = 0; i < 8; i++) - iv[i + 4] ^= seq[i]; - } -} - - static inline struct tls_sw_context_rx *tls_sw_ctx_rx( const struct tls_context *tls_ctx) { @@ -613,9 +414,6 @@ static inline bool tls_sw_has_ctx_rx(const struct sock *sk) return !!tls_sw_ctx_rx(ctx); } -void tls_sw_write_space(struct sock *sk, struct tls_context *ctx); -void tls_device_write_space(struct sock *sk, struct tls_context *ctx); - static inline struct tls_offload_context_rx * tls_offload_ctx_rx(const struct tls_context *tls_ctx) { @@ -690,31 +488,11 @@ static inline bool tls_offload_tx_resync_pending(struct sock *sk) return ret; } -int __net_init tls_proc_init(struct net *net); -void __net_exit tls_proc_fini(struct net *net); - -int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg, - unsigned char *record_type); -int decrypt_skb(struct sock *sk, struct sk_buff *skb, - struct scatterlist *sgout); struct sk_buff *tls_encrypt_skb(struct sk_buff *skb); -int tls_sw_fallback_init(struct sock *sk, - struct tls_offload_context_tx *offload_ctx, - struct tls_crypto_info *crypto_info); - #ifdef CONFIG_TLS_DEVICE -int tls_device_init(void); -void tls_device_cleanup(void); void tls_device_sk_destruct(struct sock *sk); -int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); -void tls_device_free_resources_tx(struct sock *sk); -int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx); -void tls_device_offload_cleanup_rx(struct sock *sk); -void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq); void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq); -int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx, - struct sk_buff *skb, struct strp_msg *rxm); static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk) { @@ -723,33 +501,5 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk) return false; return tls_get_ctx(sk)->rx_conf == TLS_HW; } -#else -static inline int tls_device_init(void) { return 0; } -static inline void tls_device_cleanup(void) {} - -static inline int -tls_set_device_offload(struct sock *sk, struct tls_context *ctx) -{ - return -EOPNOTSUPP; -} - -static inline void tls_device_free_resources_tx(struct sock *sk) {} - -static inline int -tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) -{ - return -EOPNOTSUPP; -} - -static inline void tls_device_offload_cleanup_rx(struct sock *sk) {} -static inline void -tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {} - -static inline int -tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx, - struct sk_buff *skb, struct strp_msg *rxm) -{ - return 0; -} #endif #endif /* _TLS_OFFLOAD_H */ diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index da06613c9603..b830463e3dff 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -3,6 +3,7 @@ #define _TRANSP_V6_H #include <net/checksum.h> +#include <net/sock.h> /* IPv6 transport protocols */ extern struct proto rawv6_prot; @@ -12,6 +13,7 @@ extern struct proto tcpv6_prot; extern struct proto pingv6_prot; struct flowi6; +struct ipcm6_cookie; /* extension headers */ int ipv6_exthdrs_init(void); diff --git 
a/include/net/tun_proto.h b/include/net/tun_proto.h index 2ea3deba4c99..7b0de7852908 100644 --- a/include/net/tun_proto.h +++ b/include/net/tun_proto.h @@ -1,7 +1,8 @@ #ifndef __NET_TUN_PROTO_H #define __NET_TUN_PROTO_H -#include <linux/kernel.h> +#include <linux/if_ether.h> +#include <linux/types.h> /* One byte protocol values as defined by VXLAN-GPE and NSH. These will * hopefully get a shared IANA registry. diff --git a/include/net/udp.h b/include/net/udp.h index 8dd4aa1485a6..5ee88ddf79c3 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -95,6 +95,7 @@ static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, extern struct proto udp_prot; extern atomic_long_t udp_memory_allocated; +DECLARE_PER_CPU(int, udp_memory_per_cpu_fw_alloc); /* sysctl variables for udp */ extern long sysctl_udp_mem[3]; @@ -305,8 +306,7 @@ struct sock *__udp6_lib_lookup(struct net *net, struct sk_buff *skb); struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb, __be16 sport, __be16 dport); -int udp_read_sock(struct sock *sk, read_descriptor_t *desc, - sk_read_actor_t recv_actor); +int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor); /* UDP uses skb->dev_scratch to cache as much information as possible and avoid * possibly multiple cache miss on dequeue() diff --git a/include/net/udplite.h b/include/net/udplite.h index a3c53110d30b..0143b373602e 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h @@ -6,6 +6,7 @@ #define _UDPLITE_H #include <net/ip6_checksum.h> +#include <net/udp.h> /* UDP-Lite socket options */ #define UDPLITE_SEND_CSCOV 10 /* sender partial coverage (as sent) */ diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h index a2d58b1a12e1..c9df68d5f258 100644 --- a/include/net/xdp_priv.h +++ b/include/net/xdp_priv.h @@ -3,6 +3,7 @@ #define __LINUX_NET_XDP_PRIV_H__ #include <linux/rhashtable.h> +#include <net/xdp.h> /* Private to net/core/xdp.c, but used by trace/events/xdp.h */ struct xdp_mem_allocator { diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 4aa031849668..0e58c38ce0c1 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -44,6 +44,15 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool, xp_set_rxq_info(pool, rxq); } +static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool) +{ +#ifdef CONFIG_NET_RX_BUSY_POLL + return pool->heads[0].xdp.rxq->napi_id; +#else + return 0; +#endif +} + static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) { @@ -95,6 +104,13 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) xp_free(xskb); } +static inline void xsk_buff_discard(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); + + xp_release(xskb); +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM; @@ -198,6 +214,11 @@ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool, { } +static inline unsigned int xsk_pool_get_napi_id(struct xsk_buff_pool *pool) +{ + return 0; +} + static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) { @@ -238,6 +259,10 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) { } +static inline void xsk_buff_discard(struct xdp_buff *xdp) +{ +} + static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size) { } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c39d910d4b45..6e8fa98f786f 100644 --- a/include/net/xfrm.h +++ 
b/include/net/xfrm.h @@ -583,8 +583,8 @@ struct xfrm_mgr { bool (*is_alive)(const struct km_event *c); }; -int xfrm_register_km(struct xfrm_mgr *km); -int xfrm_unregister_km(struct xfrm_mgr *km); +void xfrm_register_km(struct xfrm_mgr *km); +void xfrm_unregister_km(struct xfrm_mgr *km); struct xfrm_tunnel_skb_cb { union { @@ -1195,6 +1195,8 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk); static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { + if (!sk_fullsock(osk)) + return 0; sk->sk_policy[0] = NULL; sk->sk_policy[1] = NULL; if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) @@ -1923,7 +1925,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x) if (dev->xfrmdev_ops->xdo_dev_state_free) dev->xfrmdev_ops->xdo_dev_state_free(x); xso->dev = NULL; - dev_put_track(dev, &xso->dev_tracker); + netdev_put(dev, &xso->dev_tracker); } } #else diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h index d0337a41141c..cbd3ddd7c33d 100644 --- a/include/ras/ras_event.h +++ b/include/ras/ras_event.h @@ -360,7 +360,6 @@ TRACE_EVENT(aer_event, EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \ EM ( MF_MSG_HUGE, "huge page" ) \ EM ( MF_MSG_FREE_HUGE, "free huge page" ) \ - EM ( MF_MSG_NON_PMD_HUGE, "non-pmd-sized huge page" ) \ EM ( MF_MSG_UNMAP_FAILED, "unmapping failed page" ) \ EM ( MF_MSG_DIRTY_SWAPCACHE, "dirty swapcache page" ) \ EM ( MF_MSG_CLEAN_SWAPCACHE, "clean swapcache page" ) \ diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 9c6317cf80d5..975d6e9efbcb 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -4013,6 +4013,17 @@ static inline bool ib_uses_virt_dma(struct ib_device *dev) return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device; } +/* + * Check if a IB device's underlying DMA mapping supports P2PDMA transfers. + */ +static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev) +{ + if (ib_uses_virt_dma(dev)) + return false; + + return dma_pci_p2pdma_supported(dev->dma_device); +} + /** * ib_dma_mapping_error - check a DMA addr for error * @dev: The device for which the dma_addr was created @@ -4603,7 +4614,7 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev, /** * ib_lid_cpu16 - Return lid in 16bit CPU encoding. - * In the current implementation the only way to get + * In the current implementation the only way to * get the 32bit lid is from other sources for OPA. * For IB, lids will always be 16bits so cast the * value accordingly. diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index d989f030fae0..5b18e2e36ee6 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -108,6 +108,7 @@ struct rdma_cm_id { enum rdma_ucm_port_space ps; enum ib_qp_type qp_type; u32 port_num; + struct work_struct net_work; }; struct rdma_cm_id * diff --git a/include/rv/automata.h b/include/rv/automata.h new file mode 100644 index 000000000000..eb9e636809a0 --- /dev/null +++ b/include/rv/automata.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org> + * + * Deterministic automata helper functions, to be used with the automata + * models in C generated by the dot2k tool. + */ + +/* + * DECLARE_AUTOMATA_HELPERS - define a set of helper functions for automata + * + * Define a set of helper functions for automata. The 'name' argument is used + * as suffix for the functions and data. 
These functions will handle automaton + * with data type 'type'. + */ +#define DECLARE_AUTOMATA_HELPERS(name, type) \ + \ +/* \ + * model_get_state_name_##name - return the (string) name of the given state \ + */ \ +static char *model_get_state_name_##name(enum states_##name state) \ +{ \ + if ((state < 0) || (state >= state_max_##name)) \ + return "INVALID"; \ + \ + return automaton_##name.state_names[state]; \ +} \ + \ +/* \ + * model_get_event_name_##name - return the (string) name of the given event \ + */ \ +static char *model_get_event_name_##name(enum events_##name event) \ +{ \ + if ((event < 0) || (event >= event_max_##name)) \ + return "INVALID"; \ + \ + return automaton_##name.event_names[event]; \ +} \ + \ +/* \ + * model_get_initial_state_##name - return the automaton's initial state \ + */ \ +static inline type model_get_initial_state_##name(void) \ +{ \ + return automaton_##name.initial_state; \ +} \ + \ +/* \ + * model_get_next_state_##name - process an automaton event occurrence \ + * \ + * Given the current state (curr_state) and the event (event), returns \ + * the next state, or INVALID_STATE in case of error. \ + */ \ +static inline type model_get_next_state_##name(enum states_##name curr_state, \ + enum events_##name event) \ +{ \ + if ((curr_state < 0) || (curr_state >= state_max_##name)) \ + return INVALID_STATE; \ + \ + if ((event < 0) || (event >= event_max_##name)) \ + return INVALID_STATE; \ + \ + return automaton_##name.function[curr_state][event]; \ +} \ + \ +/* \ + * model_is_final_state_##name - check if the given state is a final state \ + */ \ +static inline bool model_is_final_state_##name(enum states_##name state) \ +{ \ + if ((state < 0) || (state >= state_max_##name)) \ + return 0; \ + \ + return automaton_##name.final_states[state]; \ +} diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h new file mode 100644 index 000000000000..9eb75683e012 --- /dev/null +++ b/include/rv/da_monitor.h @@ -0,0 +1,544 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org> + * + * Deterministic automata (DA) monitor functions, to be used together + * with automata models in C generated by the dot2k tool. + * + * The dot2k tool is available at tools/verification/dot2k/ + * + * For further information, see: + * Documentation/trace/rv/da_monitor_synthesis.rst + */ + +#include <rv/automata.h> +#include <linux/rv.h> +#include <linux/bug.h> + +#ifdef CONFIG_RV_REACTORS + +#define DECLARE_RV_REACTING_HELPERS(name, type) \ +static char REACT_MSG_##name[1024]; \ + \ +static inline char *format_react_msg_##name(type curr_state, type event) \ +{ \ + snprintf(REACT_MSG_##name, 1024, \ + "rv: monitor %s does not allow event %s on state %s\n", \ + #name, \ + model_get_event_name_##name(event), \ + model_get_state_name_##name(curr_state)); \ + return REACT_MSG_##name; \ +} \ + \ +static void cond_react_##name(char *msg) \ +{ \ + if (rv_##name.react) \ + rv_##name.react(msg); \ +} \ + \ +static bool rv_reacting_on_##name(void) \ +{ \ + return rv_reacting_on(); \ +} + +#else /* CONFIG_RV_REACTOR */ + +#define DECLARE_RV_REACTING_HELPERS(name, type) \ +static inline char *format_react_msg_##name(type curr_state, type event) \ +{ \ + return NULL; \ +} \ + \ +static void cond_react_##name(char *msg) \ +{ \ + return; \ +} \ + \ +static bool rv_reacting_on_##name(void) \ +{ \ + return 0; \ +} +#endif + +/* + * Generic helpers for all types of deterministic automata monitors. 
+ */ +#define DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ + \ +DECLARE_RV_REACTING_HELPERS(name, type) \ + \ +/* \ + * da_monitor_reset_##name - reset a monitor and setting it to init state \ + */ \ +static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \ +{ \ + da_mon->monitoring = 0; \ + da_mon->curr_state = model_get_initial_state_##name(); \ +} \ + \ +/* \ + * da_monitor_curr_state_##name - return the current state \ + */ \ +static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \ +{ \ + return da_mon->curr_state; \ +} \ + \ +/* \ + * da_monitor_set_state_##name - set the new current state \ + */ \ +static inline void \ +da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \ +{ \ + da_mon->curr_state = state; \ +} \ + \ +/* \ + * da_monitor_start_##name - start monitoring \ + * \ + * The monitor will ignore all events until monitoring is set to true. This \ + * function needs to be called to tell the monitor to start monitoring. \ + */ \ +static inline void da_monitor_start_##name(struct da_monitor *da_mon) \ +{ \ + da_mon->curr_state = model_get_initial_state_##name(); \ + da_mon->monitoring = 1; \ +} \ + \ +/* \ + * da_monitoring_##name - returns true if the monitor is processing events \ + */ \ +static inline bool da_monitoring_##name(struct da_monitor *da_mon) \ +{ \ + return da_mon->monitoring; \ +} \ + \ +/* \ + * da_monitor_enabled_##name - checks if the monitor is enabled \ + */ \ +static inline bool da_monitor_enabled_##name(void) \ +{ \ + /* global switch */ \ + if (unlikely(!rv_monitoring_on())) \ + return 0; \ + \ + /* monitor enabled */ \ + if (unlikely(!rv_##name.enabled)) \ + return 0; \ + \ + return 1; \ +} \ + \ +/* \ + * da_monitor_handling_event_##name - checks if the monitor is ready to handle events \ + */ \ +static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon) \ +{ \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + /* monitor is actually monitoring */ \ + if (unlikely(!da_monitoring_##name(da_mon))) \ + return 0; \ + \ + return 1; \ +} + +/* + * Event handler for implicit monitors. Implicit monitor is the one which the + * handler does not need to specify which da_monitor to manipulate. Examples + * of implicit monitor are the per_cpu or the global ones. + */ +#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ + \ +static inline bool \ +da_event_##name(struct da_monitor *da_mon, enum events_##name event) \ +{ \ + type curr_state = da_monitor_curr_state_##name(da_mon); \ + type next_state = model_get_next_state_##name(curr_state, event); \ + \ + if (next_state != INVALID_STATE) { \ + da_monitor_set_state_##name(da_mon, next_state); \ + \ + trace_event_##name(model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event), \ + model_get_state_name_##name(next_state), \ + model_is_final_state_##name(next_state)); \ + \ + return true; \ + } \ + \ + if (rv_reacting_on_##name()) \ + cond_react_##name(format_react_msg_##name(curr_state, event)); \ + \ + trace_error_##name(model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event)); \ + \ + return false; \ +} \ + +/* + * Event handler for per_task monitors. 
+ */ +#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \ + \ +static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \ + enum events_##name event) \ +{ \ + type curr_state = da_monitor_curr_state_##name(da_mon); \ + type next_state = model_get_next_state_##name(curr_state, event); \ + \ + if (next_state != INVALID_STATE) { \ + da_monitor_set_state_##name(da_mon, next_state); \ + \ + trace_event_##name(tsk->pid, \ + model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event), \ + model_get_state_name_##name(next_state), \ + model_is_final_state_##name(next_state)); \ + \ + return true; \ + } \ + \ + if (rv_reacting_on_##name()) \ + cond_react_##name(format_react_msg_##name(curr_state, event)); \ + \ + trace_error_##name(tsk->pid, \ + model_get_state_name_##name(curr_state), \ + model_get_event_name_##name(event)); \ + \ + return false; \ +} + +/* + * Functions to define, init and get a global monitor. + */ +#define DECLARE_DA_MON_INIT_GLOBAL(name, type) \ + \ +/* \ + * global monitor (a single variable) \ + */ \ +static struct da_monitor da_mon_##name; \ + \ +/* \ + * da_get_monitor_##name - return the global monitor address \ + */ \ +static struct da_monitor *da_get_monitor_##name(void) \ +{ \ + return &da_mon_##name; \ +} \ + \ +/* \ + * da_monitor_reset_all_##name - reset the single monitor \ + */ \ +static void da_monitor_reset_all_##name(void) \ +{ \ + da_monitor_reset_##name(da_get_monitor_##name()); \ +} \ + \ +/* \ + * da_monitor_init_##name - initialize a monitor \ + */ \ +static inline int da_monitor_init_##name(void) \ +{ \ + da_monitor_reset_all_##name(); \ + return 0; \ +} \ + \ +/* \ + * da_monitor_destroy_##name - destroy the monitor \ + */ \ +static inline void da_monitor_destroy_##name(void) \ +{ \ + return; \ +} + +/* + * Functions to define, init and get a per-cpu monitor. + */ +#define DECLARE_DA_MON_INIT_PER_CPU(name, type) \ + \ +/* \ + * per-cpu monitor variables \ + */ \ +DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \ + \ +/* \ + * da_get_monitor_##name - return current CPU monitor address \ + */ \ +static struct da_monitor *da_get_monitor_##name(void) \ +{ \ + return this_cpu_ptr(&da_mon_##name); \ +} \ + \ +/* \ + * da_monitor_reset_all_##name - reset all CPUs' monitor \ + */ \ +static void da_monitor_reset_all_##name(void) \ +{ \ + struct da_monitor *da_mon; \ + int cpu; \ + for_each_cpu(cpu, cpu_online_mask) { \ + da_mon = per_cpu_ptr(&da_mon_##name, cpu); \ + da_monitor_reset_##name(da_mon); \ + } \ +} \ + \ +/* \ + * da_monitor_init_##name - initialize all CPUs' monitor \ + */ \ +static inline int da_monitor_init_##name(void) \ +{ \ + da_monitor_reset_all_##name(); \ + return 0; \ +} \ + \ +/* \ + * da_monitor_destroy_##name - destroy the monitor \ + */ \ +static inline void da_monitor_destroy_##name(void) \ +{ \ + return; \ +} + +/* + * Functions to define, init and get a per-task monitor. + */ +#define DECLARE_DA_MON_INIT_PER_TASK(name, type) \ + \ +/* \ + * The per-task monitor is stored a vector in the task struct. This variable \ + * stores the position on the vector reserved for this monitor. 
\ + */ \ +static int task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \ + \ +/* \ + * da_get_monitor_##name - return the monitor in the allocated slot for tsk \ + */ \ +static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) \ +{ \ + return &tsk->rv[task_mon_slot_##name].da_mon; \ +} \ + \ +static void da_monitor_reset_all_##name(void) \ +{ \ + struct task_struct *g, *p; \ + \ + read_lock(&tasklist_lock); \ + for_each_process_thread(g, p) \ + da_monitor_reset_##name(da_get_monitor_##name(p)); \ + read_unlock(&tasklist_lock); \ +} \ + \ +/* \ + * da_monitor_init_##name - initialize the per-task monitor \ + * \ + * Try to allocate a slot in the task's vector of monitors. If there \ + * is an available slot, use it and reset all task's monitor. \ + */ \ +static int da_monitor_init_##name(void) \ +{ \ + int slot; \ + \ + slot = rv_get_task_monitor_slot(); \ + if (slot < 0 || slot >= RV_PER_TASK_MONITOR_INIT) \ + return slot; \ + \ + task_mon_slot_##name = slot; \ + \ + da_monitor_reset_all_##name(); \ + return 0; \ +} \ + \ +/* \ + * da_monitor_destroy_##name - return the allocated slot \ + */ \ +static inline void da_monitor_destroy_##name(void) \ +{ \ + if (task_mon_slot_##name == RV_PER_TASK_MONITOR_INIT) { \ + WARN_ONCE(1, "Disabling a disabled monitor: " #name); \ + return; \ + } \ + rv_put_task_monitor_slot(task_mon_slot_##name); \ + task_mon_slot_##name = RV_PER_TASK_MONITOR_INIT; \ + return; \ +} + +/* + * Handle event for implicit monitor: da_get_monitor_##name() will figure out + * the monitor. + */ +#define DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) \ + \ +static inline void __da_handle_event_##name(struct da_monitor *da_mon, \ + enum events_##name event) \ +{ \ + bool retval; \ + \ + retval = da_event_##name(da_mon, event); \ + if (!retval) \ + da_monitor_reset_##name(da_mon); \ +} \ + \ +/* \ + * da_handle_event_##name - handle an event \ + */ \ +static inline void da_handle_event_##name(enum events_##name event) \ +{ \ + struct da_monitor *da_mon = da_get_monitor_##name(); \ + bool retval; \ + \ + retval = da_monitor_handling_event_##name(da_mon); \ + if (!retval) \ + return; \ + \ + __da_handle_event_##name(da_mon, event); \ +} \ + \ +/* \ + * da_handle_start_event_##name - start monitoring or handle event \ + * \ + * This function is used to notify the monitor that the system is returning \ + * to the initial state, so the monitor can start monitoring in the next event. \ + * Thus: \ + * \ + * If the monitor already started, handle the event. \ + * If the monitor did not start yet, start the monitor but skip the event. \ + */ \ +static inline bool da_handle_start_event_##name(enum events_##name event) \ +{ \ + struct da_monitor *da_mon; \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + da_mon = da_get_monitor_##name(); \ + \ + if (unlikely(!da_monitoring_##name(da_mon))) { \ + da_monitor_start_##name(da_mon); \ + return 0; \ + } \ + \ + __da_handle_event_##name(da_mon, event); \ + \ + return 1; \ +} \ + \ +/* \ + * da_handle_start_run_event_##name - start monitoring and handle event \ + * \ + * This function is used to notify the monitor that the system is in the \ + * initial state, so the monitor can start monitoring and handling event. 
\ + */ \ +static inline bool da_handle_start_run_event_##name(enum events_##name event) \ +{ \ + struct da_monitor *da_mon; \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + da_mon = da_get_monitor_##name(); \ + \ + if (unlikely(!da_monitoring_##name(da_mon))) \ + da_monitor_start_##name(da_mon); \ + \ + __da_handle_event_##name(da_mon, event); \ + \ + return 1; \ +} + +/* + * Handle event for per task. + */ +#define DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) \ + \ +static inline void \ +__da_handle_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \ + enum events_##name event) \ +{ \ + bool retval; \ + \ + retval = da_event_##name(da_mon, tsk, event); \ + if (!retval) \ + da_monitor_reset_##name(da_mon); \ +} \ + \ +/* \ + * da_handle_event_##name - handle an event \ + */ \ +static inline void \ +da_handle_event_##name(struct task_struct *tsk, enum events_##name event) \ +{ \ + struct da_monitor *da_mon = da_get_monitor_##name(tsk); \ + bool retval; \ + \ + retval = da_monitor_handling_event_##name(da_mon); \ + if (!retval) \ + return; \ + \ + __da_handle_event_##name(da_mon, tsk, event); \ +} \ + \ +/* \ + * da_handle_start_event_##name - start monitoring or handle event \ + * \ + * This function is used to notify the monitor that the system is returning \ + * to the initial state, so the monitor can start monitoring in the next event. \ + * Thus: \ + * \ + * If the monitor already started, handle the event. \ + * If the monitor did not start yet, start the monitor but skip the event. \ + */ \ +static inline bool \ +da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event) \ +{ \ + struct da_monitor *da_mon; \ + \ + if (!da_monitor_enabled_##name()) \ + return 0; \ + \ + da_mon = da_get_monitor_##name(tsk); \ + \ + if (unlikely(!da_monitoring_##name(da_mon))) { \ + da_monitor_start_##name(da_mon); \ + return 0; \ + } \ + \ + __da_handle_event_##name(da_mon, tsk, event); \ + \ + return 1; \ +} + +/* + * Entry point for the global monitor. + */ +#define DECLARE_DA_MON_GLOBAL(name, type) \ + \ +DECLARE_AUTOMATA_HELPERS(name, type) \ +DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ +DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ +DECLARE_DA_MON_INIT_GLOBAL(name, type) \ +DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) + +/* + * Entry point for the per-cpu monitor. + */ +#define DECLARE_DA_MON_PER_CPU(name, type) \ + \ +DECLARE_AUTOMATA_HELPERS(name, type) \ +DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ +DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \ +DECLARE_DA_MON_INIT_PER_CPU(name, type) \ +DECLARE_DA_MON_MONITOR_HANDLER_IMPLICIT(name, type) + +/* + * Entry point for the per-task monitor. + */ +#define DECLARE_DA_MON_PER_TASK(name, type) \ + \ +DECLARE_AUTOMATA_HELPERS(name, type) \ +DECLARE_DA_MON_GENERIC_HELPERS(name, type) \ +DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \ +DECLARE_DA_MON_INIT_PER_TASK(name, type) \ +DECLARE_DA_MON_MONITOR_HANDLER_PER_TASK(name, type) diff --git a/include/rv/instrumentation.h b/include/rv/instrumentation.h new file mode 100644 index 000000000000..d4e7a02ede1a --- /dev/null +++ b/include/rv/instrumentation.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019-2022 Red Hat, Inc. Daniel Bristot de Oliveira <bristot@kernel.org> + * + * Helper functions to facilitate the instrumentation of auto-generated + * RV monitors create by dot2k. 
+ * + * The dot2k tool is available at tools/verification/dot2/ + */ + +#include <linux/ftrace.h> + +/* + * rv_attach_trace_probe - check and attach a handler function to a tracepoint + */ +#define rv_attach_trace_probe(monitor, tp, rv_handler) \ + do { \ + check_trace_callback_type_##tp(rv_handler); \ + WARN_ONCE(register_trace_##tp(rv_handler, NULL), \ + "fail attaching " #monitor " " #tp "handler"); \ + } while (0) + +/* + * rv_detach_trace_probe - detach a handler function to a tracepoint + */ +#define rv_detach_trace_probe(monitor, tp, rv_handler) \ + do { \ + unregister_trace_##tp(rv_handler, NULL); \ + } while (0) diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index c0703cd20a99..654cc3918c94 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -135,9 +135,6 @@ struct iscsi_task { void *dd_data; /* driver/transport data */ }; -/* invalid scsi_task pointer */ -#define INVALID_SCSI_TASK (struct iscsi_task *)-1l - static inline int iscsi_task_has_unsol_data(struct iscsi_task *task) { return task->unsol_r2t.data_length > task->unsol_r2t.sent; @@ -213,6 +210,8 @@ struct iscsi_conn { struct list_head cmdqueue; /* data-path cmd queue */ struct list_head requeue; /* tasks needing another run */ struct work_struct xmitwork; /* per-conn. xmit workqueue */ + /* recv */ + struct work_struct recvwork; unsigned long flags; /* ISCSI_CONN_FLAGs */ /* negotiated params */ @@ -411,7 +410,7 @@ extern int iscsi_host_add(struct Scsi_Host *shost, struct device *pdev); extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht, int dd_data_size, bool xmit_can_sleep); -extern void iscsi_host_remove(struct Scsi_Host *shost); +extern void iscsi_host_remove(struct Scsi_Host *shost, bool is_shutdown); extern void iscsi_host_free(struct Scsi_Host *shost); extern int iscsi_target_alloc(struct scsi_target *starget); extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, @@ -452,8 +451,10 @@ extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn, extern int iscsi_conn_get_addr_param(struct sockaddr_storage *addr, enum iscsi_param param, char *buf); extern void iscsi_suspend_tx(struct iscsi_conn *conn); +extern void iscsi_suspend_rx(struct iscsi_conn *conn); extern void iscsi_suspend_queue(struct iscsi_conn *conn); -extern void iscsi_conn_queue_work(struct iscsi_conn *conn); +extern void iscsi_conn_queue_xmit(struct iscsi_conn *conn); +extern void iscsi_conn_queue_recv(struct iscsi_conn *conn); #define iscsi_conn_printk(prefix, _c, fmt, a...) 
\ iscsi_cls_conn_printk(prefix, ((struct iscsi_conn *)_c)->cls_conn, \ @@ -478,7 +479,7 @@ extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t); extern void iscsi_requeue_task(struct iscsi_task *task); extern void iscsi_put_task(struct iscsi_task *task); extern void __iscsi_put_task(struct iscsi_task *task); -extern void __iscsi_get_task(struct iscsi_task *task); +extern bool iscsi_get_task(struct iscsi_task *task); extern void iscsi_complete_scsi_task(struct iscsi_task *task, uint32_t exp_cmdsn, uint32_t max_cmdsn); diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index ff04eb6d250b..2dbead74a2af 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -145,7 +145,7 @@ struct sata_device { struct ata_port *ap; struct ata_host *ata_host; - struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */ + struct smp_rps_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */ u8 fis[ATA_RESP_FIS_SIZE]; }; diff --git a/include/scsi/sas.h b/include/scsi/sas.h index acfc69fd72d0..71b749bed3b0 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -471,18 +471,6 @@ struct report_phy_sata_resp { __be32 crc; } __attribute__ ((packed)); -struct smp_resp { - u8 frame_type; - u8 function; - u8 result; - u8 reserved; - union { - struct report_general_resp rg; - struct discover_resp disc; - struct report_phy_sata_resp rps; - }; -} __attribute__ ((packed)); - #elif defined(__BIG_ENDIAN_BITFIELD) struct sas_identify_frame { /* Byte 0 */ @@ -704,20 +692,32 @@ struct report_phy_sata_resp { __be32 crc; } __attribute__ ((packed)); -struct smp_resp { +#else +#error "Bitfield order not defined!" +#endif + +struct smp_rg_resp { u8 frame_type; u8 function; u8 result; u8 reserved; - union { - struct report_general_resp rg; - struct discover_resp disc; - struct report_phy_sata_resp rps; - }; + struct report_general_resp rg; } __attribute__ ((packed)); -#else -#error "Bitfield order not defined!" 
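
Since the unioned struct smp_resp is retired in favour of per-function reply types (smp_rg_resp above, smp_disc_resp and smp_rps_resp just below), the 4-byte SMP header plus payload layout is unchanged and consumers only switch the declared type. A hedged sketch of the consumer-side change; the helper name and its void * argument are illustrative, not taken from libsas:

    /* Illustrative fragment only: how a DISCOVER reply consumer adapts. */
    static struct discover_resp *example_get_discover_payload(void *reply)
    {
    	/* Before the split, the payload came from the old union:       */
    	/*   struct smp_resp *resp = reply;  return &resp->disc;        */

    	/* After the split, the dedicated response struct is used: */
    	struct smp_disc_resp *disc_resp = reply;

    	return &disc_resp->disc;	/* same offset, same payload layout */
    }
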
-#endif +struct smp_disc_resp { + u8 frame_type; + u8 function; + u8 result; + u8 reserved; + struct discover_resp disc; +} __attribute__ ((packed)); + +struct smp_rps_resp { + u8 frame_type; + u8 function; + u8 result; + u8 reserved; + struct report_phy_sata_resp rps; +} __attribute__ ((packed)); #endif /* _SAS_H_ */ diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 65082ecdd557..b6e41ee3d566 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -607,6 +607,7 @@ struct Scsi_Host { short unsigned int sg_tablesize; short unsigned int sg_prot_tablesize; unsigned int max_sectors; + unsigned int opt_sectors; unsigned int max_segment_size; unsigned long dma_boundary; unsigned long virt_boundary_mask; diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 9acb8422f680..cab52b0f11d0 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h @@ -162,7 +162,7 @@ struct iscsi_transport { * transport registration upcalls */ extern struct scsi_transport_template *iscsi_register_transport(struct iscsi_transport *tt); -extern int iscsi_unregister_transport(struct iscsi_transport *tt); +extern void iscsi_unregister_transport(struct iscsi_transport *tt); /* * control plane upcalls @@ -442,6 +442,7 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost, struct iscsi_transport *t, int dd_size, unsigned int target_id); +extern void iscsi_force_destroy_session(struct iscsi_cls_session *session); extern void iscsi_remove_session(struct iscsi_cls_session *session); extern void iscsi_free_session(struct iscsi_cls_session *session); extern struct iscsi_cls_conn *iscsi_alloc_conn(struct iscsi_cls_session *sess, diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 5f88385a7748..ac151ecc7f19 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -575,6 +575,7 @@ struct ocelot_ops { int (*psfp_stats_get)(struct ocelot *ocelot, struct flow_cls_offload *f, struct flow_stats *stats); void (*cut_through_fwd)(struct ocelot *ocelot); + void (*tas_clock_adjust)(struct ocelot *ocelot); }; struct ocelot_vcap_policer { @@ -669,6 +670,8 @@ struct ocelot_port { /* VLAN that untagged frames are classified to, on ingress */ const struct ocelot_bridge_vlan *pvid_vlan; + struct tc_taprio_qopt_offload *taprio; + phy_interface_t phy_mode; unsigned int ptp_skbs_in_flight; @@ -757,6 +760,9 @@ struct ocelot { /* Lock for serializing forwarding domain changes */ struct mutex fwd_domain_lock; + /* Lock for serializing Time-Aware Shaper changes */ + struct mutex tas_lock; + struct workqueue_struct *owq; u8 ptp:1; diff --git a/include/soc/qcom/qcom-spmi-pmic.h b/include/soc/qcom/qcom-spmi-pmic.h new file mode 100644 index 000000000000..72398ff44719 --- /dev/null +++ b/include/soc/qcom/qcom-spmi-pmic.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2022 Linaro. All rights reserved. 
+ * Author: Caleb Connolly <caleb.connolly@linaro.org> + */ + +#ifndef __QCOM_SPMI_PMIC_H__ +#define __QCOM_SPMI_PMIC_H__ + +#include <linux/device.h> + +#define COMMON_SUBTYPE 0x00 +#define PM8941_SUBTYPE 0x01 +#define PM8841_SUBTYPE 0x02 +#define PM8019_SUBTYPE 0x03 +#define PM8226_SUBTYPE 0x04 +#define PM8110_SUBTYPE 0x05 +#define PMA8084_SUBTYPE 0x06 +#define PMI8962_SUBTYPE 0x07 +#define PMD9635_SUBTYPE 0x08 +#define PM8994_SUBTYPE 0x09 +#define PMI8994_SUBTYPE 0x0a +#define PM8916_SUBTYPE 0x0b +#define PM8004_SUBTYPE 0x0c +#define PM8909_SUBTYPE 0x0d +#define PM8028_SUBTYPE 0x0e +#define PM8901_SUBTYPE 0x0f +#define PM8950_SUBTYPE 0x10 +#define PMI8950_SUBTYPE 0x11 +#define PM8998_SUBTYPE 0x14 +#define PMI8998_SUBTYPE 0x15 +#define PM8005_SUBTYPE 0x18 +#define PM660L_SUBTYPE 0x1A +#define PM660_SUBTYPE 0x1B +#define PM8150_SUBTYPE 0x1E +#define PM8150L_SUBTYPE 0x1f +#define PM8150B_SUBTYPE 0x20 +#define PMK8002_SUBTYPE 0x21 +#define PM8009_SUBTYPE 0x24 +#define PM8150C_SUBTYPE 0x26 +#define SMB2351_SUBTYPE 0x29 + +#define PMI8998_FAB_ID_SMIC 0x11 +#define PMI8998_FAB_ID_GF 0x30 + +#define PM660_FAB_ID_GF 0x0 +#define PM660_FAB_ID_TSMC 0x2 +#define PM660_FAB_ID_MX 0x3 + +struct qcom_spmi_pmic { + unsigned int type; + unsigned int subtype; + unsigned int major; + unsigned int minor; + unsigned int rev2; + unsigned int fab_id; + const char *name; +}; + +const struct qcom_spmi_pmic *qcom_pmic_get(struct device *dev); + +#endif /* __QCOM_SPMI_PMIC_H__ */ diff --git a/include/sound/control.h b/include/sound/control.h index 985c51a8fb74..eae443ba79ba 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -23,7 +23,7 @@ typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol, unsigned int __user *tlv); /* internal flag for skipping validations */ -#ifdef CONFIG_SND_CTL_VALIDATION +#ifdef CONFIG_SND_CTL_DEBUG #define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK (1 << 24) #define snd_ctl_skip_validation(info) \ ((info)->access & SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK) @@ -109,7 +109,7 @@ struct snd_ctl_file { int preferred_subdevice[SND_CTL_SUBDEV_ITEMS]; wait_queue_head_t change_sleep; spinlock_t read_lock; - struct fasync_struct *fasync; + struct snd_fasync *fasync; int subscribed; /* read interface is activated */ struct list_head events; /* waiting events for read */ }; diff --git a/include/sound/core.h b/include/sound/core.h index 6d4cc49584c6..4365c35d038b 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -14,6 +14,7 @@ #include <linux/pm.h> /* pm_message_t */ #include <linux/stringify.h> #include <linux/printk.h> +#include <linux/xarray.h> /* number of supported soundcards */ #ifdef CONFIG_SND_DYNAMIC_MINORS @@ -103,6 +104,11 @@ struct snd_card { size_t user_ctl_alloc_size; // current memory allocation by user controls. struct list_head controls; /* all controls for this card */ struct list_head ctl_files; /* active control files */ +#ifdef CONFIG_SND_CTL_FAST_LOOKUP + struct xarray ctl_numids; /* hash table for numids */ + struct xarray ctl_hash; /* hash table for ctl id matching */ + bool ctl_hash_collision; /* ctl_hash collision seen? 
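The new soc/qcom/qcom-spmi-pmic.h header exposes PMIC revision data through struct qcom_spmi_pmic and a qcom_pmic_get() lookup. A hedged usage sketch follows; the choice of pdev->dev.parent as the lookup device and the ERR_PTR error convention are assumptions, since the header only declares the function:

static int foo_probe(struct platform_device *pdev)
{
	const struct qcom_spmi_pmic *pmic;

	pmic = qcom_pmic_get(pdev->dev.parent);	/* assumed to return ERR_PTR on failure */
	if (IS_ERR(pmic))
		return PTR_ERR(pmic);

	if (pmic->subtype == PMI8998_SUBTYPE && pmic->fab_id == PMI8998_FAB_ID_GF)
		dev_info(&pdev->dev, "%s rev %u.%u (GF fab)\n",
			 pmic->name, pmic->major, pmic->minor);

	return 0;
}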
*/ +#endif struct snd_info_entry *proc_root; /* root for soundcard specific files */ struct proc_dir_entry *proc_root_link; /* number link to real id */ @@ -501,4 +507,12 @@ snd_pci_quirk_lookup_id(u16 vendor, u16 device, } #endif +/* async signal helpers */ +struct snd_fasync; + +int snd_fasync_helper(int fd, struct file *file, int on, + struct snd_fasync **fasyncp); +void snd_kill_fasync(struct snd_fasync *fasync, int signal, int poll); +void snd_fasync_free(struct snd_fasync *fasync); + #endif /* __SOUND_CORE_H */ diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h index 8972fa697622..9ac5918269a5 100644 --- a/include/sound/cs35l41.h +++ b/include/sound/cs35l41.h @@ -665,6 +665,10 @@ #define CS35L41_BST_EN_DEFAULT 0x2 #define CS35L41_AMP_EN_SHIFT 0 #define CS35L41_AMP_EN_MASK 1 +#define CS35L41_VMON_EN_MASK 0x1000 +#define CS35L41_VMON_EN_SHIFT 12 +#define CS35L41_IMON_EN_MASK 0x2000 +#define CS35L41_IMON_EN_SHIFT 13 #define CS35L41_PDN_DONE_MASK 0x00800000 #define CS35L41_PDN_DONE_SHIFT 23 @@ -881,6 +885,9 @@ void cs35l41_configure_cs_dsp(struct device *dev, struct regmap *reg, struct cs_ int cs35l41_set_cspl_mbox_cmd(struct device *dev, struct regmap *regmap, enum cs35l41_cspl_mbox_cmd cmd); int cs35l41_write_fs_errata(struct device *dev, struct regmap *regmap); +int cs35l41_enter_hibernate(struct device *dev, struct regmap *regmap, + enum cs35l41_boost_type b_type); +int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap); int cs35l41_init_boost(struct device *dev, struct regmap *regmap, struct cs35l41_hw_cfg *hw_cfg); bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type); diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h index 38ea046e653c..2df54cf02cb3 100644 --- a/include/sound/dmaengine_pcm.h +++ b/include/sound/dmaengine_pcm.h @@ -15,6 +15,8 @@ * snd_pcm_substream_to_dma_direction - Get dma_transfer_direction for a PCM * substream * @substream: PCM substream + * + * Return: DMA transfer direction */ static inline enum dma_transfer_direction snd_pcm_substream_to_dma_direction(const struct snd_pcm_substream *substream) diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index b7be300b6b18..6d3c82c4b6ac 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -231,7 +231,6 @@ struct hda_codec { /* misc flags */ unsigned int configured:1; /* codec was configured */ unsigned int in_freeing:1; /* being released */ - unsigned int registered:1; /* codec was registered */ unsigned int display_power_control:1; /* needs display power */ unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each * status change diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 15f15075238d..797bf67a164d 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -93,6 +93,7 @@ struct hdac_device { bool lazy_cache:1; /* don't wake up for writes */ bool caps_overwriting:1; /* caps overwrite being in process */ bool cache_coef:1; /* cache COEF read/write too */ + unsigned int registered:1; /* codec was registered */ }; /* device/driver type used for matching */ diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h index 4fc733c8c570..48ad33aba393 100644 --- a/include/sound/hdmi-codec.h +++ b/include/sound/hdmi-codec.h @@ -32,8 +32,8 @@ struct hdmi_codec_daifmt { } fmt; unsigned int bit_clk_inv:1; unsigned int frame_clk_inv:1; - unsigned int bit_clk_master:1; - unsigned int frame_clk_master:1; + unsigned int bit_clk_provider:1; + unsigned int 
frame_clk_provider:1; /* bit_fmt could be standard PCM format or * IEC958 encoded format. ALSA IEC958 plugin will pass * IEC958_SUBFRAME format to the underneath driver. diff --git a/include/sound/madera-pdata.h b/include/sound/madera-pdata.h index e3060f48f108..58398d80c3de 100644 --- a/include/sound/madera-pdata.h +++ b/include/sound/madera-pdata.h @@ -9,7 +9,7 @@ #ifndef MADERA_CODEC_PDATA_H #define MADERA_CODEC_PDATA_H -#include <linux/kernel.h> +#include <linux/types.h> #define MADERA_MAX_INPUT 6 #define MADERA_MAX_MUXED_CHANNELS 4 diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 6b99310b5b88..8c48a5bce88c 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -399,7 +399,7 @@ struct snd_pcm_runtime { snd_pcm_uframes_t twake; /* do transfer (!poll) wakeup if non-zero */ wait_queue_head_t sleep; /* poll sleep */ wait_queue_head_t tsleep; /* transfer sleep */ - struct fasync_struct *fasync; + struct snd_fasync *fasync; bool stop_operating; /* sync_stop will be called */ struct mutex buffer_mutex; /* protect for buffer changes */ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */ @@ -607,7 +607,7 @@ snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size) * snd_pcm_stream_linked - Check whether the substream is linked with others * @substream: substream to check * - * Returns true if the given substream is being linked with others. + * Return: true if the given substream is being linked with others */ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream) { @@ -673,7 +673,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, * snd_pcm_running - Check whether the substream is in a running state * @substream: substream to check * - * Returns true if the given substream is in the state RUNNING, or in the + * Return: true if the given substream is in the state RUNNING, or in the * state DRAINING for playback. 
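sound/core.h now declares opaque async-signal helpers (snd_fasync_helper(), snd_kill_fasync(), snd_fasync_free()), and control.h/pcm.h switch their fasync members to struct snd_fasync accordingly. A sketch of the expected driver-side pattern, using a hypothetical foo_chip; that the core defers the actual signal delivery behind the opaque type is an assumption, not spelled out in this hunk:

struct foo_chip {
	struct snd_fasync *fasync;
};

static int foo_fasync(int fd, struct file *file, int on)
{
	struct foo_chip *chip = file->private_data;

	return snd_fasync_helper(fd, file, on, &chip->fasync);
}

static void foo_notify(struct foo_chip *chip)
{
	/* signal readers that new data/events are available */
	snd_kill_fasync(chip->fasync, SIGIO, POLL_IN);
}

static void foo_release(struct foo_chip *chip)
{
	snd_fasync_free(chip->fasync);
}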
*/ static inline int snd_pcm_running(struct snd_pcm_substream *substream) @@ -687,6 +687,8 @@ static inline int snd_pcm_running(struct snd_pcm_substream *substream) * bytes_to_samples - Unit conversion of the size from bytes to samples * @runtime: PCM runtime instance * @size: size in bytes + * + * Return: the size in samples */ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size) { @@ -697,6 +699,8 @@ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t * bytes_to_frames - Unit conversion of the size from bytes to frames * @runtime: PCM runtime instance * @size: size in bytes + * + * Return: the size in frames */ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size) { @@ -707,6 +711,8 @@ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, * samples_to_bytes - Unit conversion of the size from samples to bytes * @runtime: PCM runtime instance * @size: size in samples + * + * Return: the byte size */ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size) { @@ -717,6 +723,8 @@ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t * frames_to_bytes - Unit conversion of the size from frames to bytes * @runtime: PCM runtime instance * @size: size in frames + * + * Return: the byte size */ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size) { @@ -727,6 +735,8 @@ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_s * frame_aligned - Check whether the byte size is aligned to frames * @runtime: PCM runtime instance * @bytes: size in bytes + * + * Return: true if aligned, or false if not */ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes) { @@ -736,6 +746,8 @@ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes) /** * snd_pcm_lib_buffer_bytes - Get the buffer size of the current PCM in bytes * @substream: PCM substream + * + * Return: buffer byte size */ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream) { @@ -746,6 +758,8 @@ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substrea /** * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes * @substream: PCM substream + * + * Return: period byte size */ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream) { @@ -758,6 +772,8 @@ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substrea * @runtime: PCM runtime instance * * Result is between 0 ... (boundary - 1) + * + * Return: available frame size */ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime) { @@ -774,6 +790,8 @@ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *r * @runtime: PCM runtime instance * * Result is between 0 ... 
(boundary - 1) + * + * Return: available frame size */ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime) { @@ -786,6 +804,8 @@ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *ru /** * snd_pcm_playback_hw_avail - Get the queued space for playback * @runtime: PCM runtime instance + * + * Return: available frame size */ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime) { @@ -795,6 +815,8 @@ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime /** * snd_pcm_capture_hw_avail - Get the free space for capture * @runtime: PCM runtime instance + * + * Return: available frame size */ static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime) { @@ -934,6 +956,8 @@ static inline const struct snd_interval *hw_param_interval_c(const struct snd_pc /** * params_channels - Get the number of channels from the hw params * @p: hw params + * + * Return: the number of channels */ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p) { @@ -943,6 +967,8 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p) /** * params_rate - Get the sample rate from the hw params * @p: hw params + * + * Return: the sample rate */ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) { @@ -952,6 +978,8 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) /** * params_period_size - Get the period size (in frames) from the hw params * @p: hw params + * + * Return: the period size in frames */ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) { @@ -961,6 +989,8 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) /** * params_periods - Get the number of periods from the hw params * @p: hw params + * + * Return: the number of periods */ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) { @@ -970,6 +1000,8 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) /** * params_buffer_size - Get the buffer size (in frames) from the hw params * @p: hw params + * + * Return: the buffer size in frames */ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) { @@ -979,6 +1011,8 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) /** * params_buffer_bytes - Get the buffer size (in bytes) from the hw params * @p: hw params + * + * Return: the buffer size in bytes */ static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p) { @@ -1241,6 +1275,8 @@ int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type, * only the given sized buffer and doesn't allow re-allocation nor dynamic * allocation of a larger buffer unlike the standard one. * The function may return -ENOMEM error, hence the caller must check it. + * + * Return: zero if successful, or a negative error code */ static inline int __must_check snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type, @@ -1259,6 +1295,8 @@ snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type, * Apply the set up of the fixed buffer via snd_pcm_set_fixed_buffer() for * all substream. If any of allocation fails, it returns -ENOMEM, hence the * caller must check the return value. 
+ * + * Return: zero if successful, or a negative error code */ static inline int __must_check snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type, @@ -1315,6 +1353,8 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset * @substream: PCM substream * @ofs: byte offset + * + * Return: DMA address */ static inline dma_addr_t snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) @@ -1328,6 +1368,8 @@ snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) * @substream: PCM substream * @ofs: byte offset * @size: byte size to examine + * + * Return: chunk size */ static inline unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, @@ -1393,6 +1435,20 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) const char *snd_pcm_format_name(snd_pcm_format_t format); /** + * snd_pcm_direction_name - Get a string naming the direction of a stream + * @direction: Stream's direction, one of SNDRV_PCM_STREAM_XXX + * + * Returns a string naming the direction of the stream. + */ +static inline const char *snd_pcm_direction_name(int direction) +{ + if (direction == SNDRV_PCM_STREAM_PLAYBACK) + return "Playback"; + else + return "Capture"; +} + +/** * snd_pcm_stream_str - Get a string naming the direction of a stream * @substream: the pcm substream instance * @@ -1400,10 +1456,7 @@ const char *snd_pcm_format_name(snd_pcm_format_t format); */ static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream) { - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - return "Playback"; - else - return "Capture"; + return snd_pcm_direction_name(substream->stream); } /* @@ -1430,6 +1483,8 @@ struct snd_pcm_chmap { * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info * @info: chmap information * @idx: the substream number index + * + * Return: the matched PCM substream, or NULL if not found */ static inline struct snd_pcm_substream * snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx) @@ -1460,6 +1515,8 @@ int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, /** * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise * @pcm_format: PCM format + * + * Return: 64bit mask corresponding to the given PCM format */ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format) { diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index 7a08ed2acd60..e1f59b2940af 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -63,7 +63,6 @@ struct snd_rawmidi_runtime { size_t xruns; /* over/underruns counter */ int buffer_ref; /* buffer reference count */ /* misc */ - spinlock_t lock; wait_queue_head_t sleep; /* event handler (new bytes, input only) */ void (*event)(struct snd_rawmidi_substream *substream); @@ -85,6 +84,7 @@ struct snd_rawmidi_substream { unsigned int clock_type; /* clock source to use for input framing */ int use_count; /* use counter (for output) */ size_t bytes; + spinlock_t lock; struct snd_rawmidi *rmidi; struct snd_rawmidi_str *pstr; char name[32]; @@ -156,10 +156,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count); int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream, unsigned char *buffer, int count); -int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, - unsigned char *buffer, int count); 
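pcm.h gains snd_pcm_direction_name() alongside the newly documented byte/frame conversion helpers. A small debug sketch combining them (the foo_ name is a placeholder):

static void foo_dump_substream(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	pr_debug("%s: buffer %ld frames, period %ld frames\n",
		 snd_pcm_direction_name(substream->stream),
		 (long)bytes_to_frames(runtime, snd_pcm_lib_buffer_bytes(substream)),
		 (long)bytes_to_frames(runtime, snd_pcm_lib_period_bytes(substream)));
}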
-int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, - int count); int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream); /* main midi functions */ diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h index 8faa649f712b..ab55f40896e0 100644 --- a/include/sound/simple_card_utils.h +++ b/include/sound/simple_card_utils.h @@ -51,7 +51,6 @@ struct prop_nums { int cpus; int codecs; int platforms; - int c2c; }; struct asoc_simple_priv { @@ -64,7 +63,6 @@ struct asoc_simple_priv { struct snd_soc_dai_link_component *platforms; struct asoc_simple_data adata; struct snd_soc_codec_conf *codec_conf; - struct snd_soc_pcm_stream *c2c_conf; struct prop_nums num; unsigned int mclk_fs; } *dai_props; @@ -75,7 +73,6 @@ struct asoc_simple_priv { struct snd_soc_dai_link_component *dlcs; struct snd_soc_dai_link_component dummy; struct snd_soc_codec_conf *codec_conf; - struct snd_soc_pcm_stream *c2c_conf; struct gpio_desc *pa_gpio; const struct snd_soc_ops *ops; unsigned int dpcm_selectable:1; @@ -173,7 +170,7 @@ void asoc_simple_canonicalize_platform(struct snd_soc_dai_link_component *platfo void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link_component *cpus, int is_single_links); -int asoc_simple_clean_reference(struct snd_soc_card *card); +void asoc_simple_clean_reference(struct snd_soc_card *card); void asoc_simple_convert_fixup(struct asoc_simple_data *data, struct snd_pcm_hw_params *params); diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h index 59551b1f22f3..bc7fd46ec2bc 100644 --- a/include/sound/soc-acpi-intel-match.h +++ b/include/sound/soc-acpi-intel-match.h @@ -30,6 +30,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[]; @@ -37,6 +38,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[]; extern struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[]; +extern struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_sdw_machines[]; /* * generic table used for HDA codec-based platforms, possibly with diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h index df08573bd80c..9d31a5c0db33 100644 --- a/include/sound/soc-card.h +++ b/include/sound/soc-card.h @@ -29,6 +29,7 @@ int snd_soc_card_resume_post(struct snd_soc_card *card); int snd_soc_card_probe(struct snd_soc_card *card); int snd_soc_card_late_probe(struct snd_soc_card *card); +void snd_soc_card_fixup_controls(struct snd_soc_card *card); int snd_soc_card_remove(struct snd_soc_card *card); int snd_soc_card_set_bias_level(struct snd_soc_card *card, diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 5a764c3099d3..c26ffb033777 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -179,7 +179,7 @@ struct snd_soc_component_driver { * analogue). 
*/ unsigned int endianness:1; - unsigned int non_legacy_dai_naming:1; + unsigned int legacy_dai_naming:1; /* this component uses topology and ignore machine driver FEs */ const char *ignore_machine; @@ -348,11 +348,6 @@ static inline int snd_soc_component_cache_sync( return regcache_sync(component->regmap); } -static inline int snd_soc_component_is_codec(struct snd_soc_component *component) -{ - return component->driver->non_legacy_dai_naming; -} - void snd_soc_component_set_aux(struct snd_soc_component *component, struct snd_soc_aux_dev *aux); int snd_soc_component_init(struct snd_soc_component *component); diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index bbd821d2df9c..ea7509672086 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -124,6 +124,12 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_CBM_CFS SND_SOC_DAIFMT_CBP_CFC #define SND_SOC_DAIFMT_CBS_CFS SND_SOC_DAIFMT_CBC_CFC +/* when passed to set_fmt directly indicate if the device is provider or consumer */ +#define SND_SOC_DAIFMT_BP_FP SND_SOC_DAIFMT_CBP_CFP +#define SND_SOC_DAIFMT_BC_FP SND_SOC_DAIFMT_CBC_CFP +#define SND_SOC_DAIFMT_BP_FC SND_SOC_DAIFMT_CBP_CFC +#define SND_SOC_DAIFMT_BC_FC SND_SOC_DAIFMT_CBC_CFC + /* Describes the possible PCM format */ #define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT 48 #define SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_MASK (0xFFFFULL << SND_SOC_POSSIBLE_DAIFMT_CLOCK_PROVIDER_SHIFT) diff --git a/include/sound/soc.h b/include/sound/soc.h index b276dcb5d4e8..aad24a1d3276 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -136,6 +136,18 @@ .put = snd_soc_put_volsw, \ .private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \ max, invert, 0) } +#define SOC_DOUBLE_SX_TLV(xname, xreg, shift_left, shift_right, xmin, xmax, tlv_array) \ +{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ + .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ + SNDRV_CTL_ELEM_ACCESS_READWRITE, \ + .tlv.p = (tlv_array), \ + .info = snd_soc_info_volsw_sx, \ + .get = snd_soc_get_volsw_sx, \ + .put = snd_soc_put_volsw_sx, \ + .private_value = (unsigned long)&(struct soc_mixer_control) \ + {.reg = xreg, .rreg = xreg, \ + .shift = shift_left, .rshift = shift_right, \ + .max = xmax, .min = xmin} } #define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ @@ -414,7 +426,7 @@ enum snd_soc_pcm_subclass { }; int snd_soc_register_card(struct snd_soc_card *card); -int snd_soc_unregister_card(struct snd_soc_card *card); +void snd_soc_unregister_card(struct snd_soc_card *card); int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card); #ifdef CONFIG_PM_SLEEP int snd_soc_suspend(struct device *dev); @@ -914,6 +926,7 @@ struct snd_soc_card { int (*probe)(struct snd_soc_card *card); int (*late_probe)(struct snd_soc_card *card); + void (*fixup_controls)(struct snd_soc_card *card); int (*remove)(struct snd_soc_card *card); /* the pre and post PM functions are used to do any PM work before and diff --git a/include/sound/sof.h b/include/sound/sof.h index 1a82a0db5e7f..367dccfea7ad 100644 --- a/include/sound/sof.h +++ b/include/sound/sof.h @@ -138,6 +138,7 @@ struct sof_dev_desc { struct snd_sof_dsp_ops *ops; int (*ops_init)(struct snd_sof_dev *sdev); + void (*ops_free)(struct snd_sof_dev *sdev); }; int sof_dai_get_mclk(struct snd_soc_pcm_runtime *rtd); diff --git a/include/sound/sof/dai-amd.h b/include/sound/sof/dai-amd.h index 
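soc.h adds SOC_DOUBLE_SX_TLV() for sign-extended stereo mixer controls. A sketch of the call shape only; the register name, shifts and min/max codes below are placeholders chosen to show the (name, reg, shift_left, shift_right, min, max, tlv) argument order, not values from any real codec:

static const DECLARE_TLV_DB_SCALE(foo_vol_tlv, -10200, 50, 0);

static const struct snd_kcontrol_new foo_snd_controls[] = {
	/* FOO_VOL_CTRL and the ranges are hypothetical */
	SOC_DOUBLE_SX_TLV("Digital Playback Volume", FOO_VOL_CTRL,
			  0, 8, 0x19, 0xC8, foo_vol_tlv),
};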
90d09dbdd709..92f45c180b7c 100644 --- a/include/sound/sof/dai-amd.h +++ b/include/sound/sof/dai-amd.h @@ -18,4 +18,11 @@ struct sof_ipc_dai_acp_params { uint32_t fsync_rate; /* FSYNC frequency in Hz */ uint32_t tdm_slots; } __packed; + +/* ACPDMIC Configuration Request - SOF_IPC_DAI_AMD_CONFIG */ +struct sof_ipc_dai_acpdmic_params { + uint32_t pdm_rate; + uint32_t pdm_ch; +} __packed; + #endif diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index 7a266f41983c..5b93b7292f5e 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -52,6 +52,8 @@ #define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_ES BIT(6) /* bclk early start */ #define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_ES BIT(7) +/* mclk always on */ +#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_AON BIT(8) /* DMIC max. four controllers for eight microphone channels */ #define SOF_DAI_INTEL_DMIC_NUM_CTRL 4 diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index a818a0f0a226..21d98f31a9ca 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -111,7 +111,7 @@ struct sof_ipc_dai_config { struct sof_ipc_dai_sai_params sai; struct sof_ipc_dai_acp_params acpbt; struct sof_ipc_dai_acp_params acpsp; - struct sof_ipc_dai_acp_params acpdmic; + struct sof_ipc_dai_acpdmic_params acpdmic; struct sof_ipc_dai_mtk_afe_params afe; }; } __packed; diff --git a/include/sound/sof/ipc4/header.h b/include/sound/sof/ipc4/header.h index b8b8e5b5e3e1..a795deacc2ea 100644 --- a/include/sound/sof/ipc4/header.h +++ b/include/sound/sof/ipc4/header.h @@ -385,6 +385,14 @@ struct sof_ipc4_fw_version { uint16_t build; } __packed; +/* Payload data for SOF_IPC4_MOD_SET_DX */ +struct sof_ipc4_dx_state_info { + /* core(s) to apply the change */ + uint32_t core_mask; + /* core state: 0: put core_id to D3; 1: put core_id to D0 */ + uint32_t dx_mask; +} __packed __aligned(4); + /* Reply messages */ /* diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h index 1db3bbc3e65d..9377113f13e4 100644 --- a/include/sound/sof/stream.h +++ b/include/sound/sof/stream.h @@ -86,9 +86,11 @@ struct sof_ipc_stream_params { uint32_t host_period_bytes; uint16_t no_stream_position; /**< 1 means don't send stream position */ uint8_t cont_update_posn; /**< 1 means continuous update stream position */ - - uint8_t reserved[5]; + uint8_t reserved0; + int16_t ext_data_length; /**< 0, means no extended data */ + uint8_t reserved[2]; uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */ + uint8_t ext_data[]; /**< extended data */ } __packed; /* PCM params info - SOF_IPC_STREAM_PCM_PARAMS */ diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 8e68ace428d9..94d06ddfd80a 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -26,6 +26,7 @@ struct sock; #define ISCSI_RX_THREAD_NAME "iscsi_trx" #define ISCSI_TX_THREAD_NAME "iscsi_ttx" #define ISCSI_IQN_LEN 224 +#define NA_AUTHENTICATION_INHERITED -1 /* struct iscsi_node_attrib sanity values */ #define NA_DATAOUT_TIMEOUT 3 @@ -715,6 +716,7 @@ struct iscsi_login { } ____cacheline_aligned; struct iscsi_node_attrib { + s32 authentication; u32 dataout_timeout; u32 dataout_timeout_retries; u32 default_erl; @@ -758,6 +760,12 @@ struct iscsi_node_acl { struct iscsi_node_stat_grps node_stat_grps; }; +static inline struct iscsi_node_acl * +to_iscsi_nacl(struct se_node_acl *se_nacl) +{ + return container_of(se_nacl, struct iscsi_node_acl, se_node_acl); +} + struct iscsi_tpg_attrib { u32 
authentication; u32 login_timeout; @@ -839,6 +847,12 @@ struct iscsi_portal_group { struct list_head tpg_list; } ____cacheline_aligned; +static inline struct iscsi_portal_group * +to_iscsi_tpg(struct se_portal_group *se_tpg) +{ + return container_of(se_tpg, struct iscsi_portal_group, tpg_se_tpg); +} + struct iscsi_wwn_stat_grps { struct config_group iscsi_stat_group; struct config_group iscsi_instance_group; diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 773963a1e0b5..a3c193df25b3 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -37,6 +37,7 @@ struct target_backend_ops { struct se_dev_plug *(*plug_device)(struct se_device *se_dev); void (*unplug_device)(struct se_dev_plug *se_plug); + bool (*configure_unmap)(struct se_device *se_dev); ssize_t (*set_configfs_dev_params)(struct se_device *, const char *, ssize_t); ssize_t (*show_configfs_dev_params)(struct se_device *, char *); diff --git a/include/trace/events/9p.h b/include/trace/events/9p.h index 78c5608a1648..4dfa6d7f83ba 100644 --- a/include/trace/events/9p.h +++ b/include/trace/events/9p.h @@ -77,6 +77,13 @@ EM( P9_TWSTAT, "P9_TWSTAT" ) \ EMe(P9_RWSTAT, "P9_RWSTAT" ) + +#define P9_FID_REFTYPE \ + EM( P9_FID_REF_CREATE, "create " ) \ + EM( P9_FID_REF_GET, "get " ) \ + EM( P9_FID_REF_PUT, "put " ) \ + EMe(P9_FID_REF_DESTROY, "destroy" ) + /* Define EM() to export the enums to userspace via TRACE_DEFINE_ENUM() */ #undef EM #undef EMe @@ -84,6 +91,21 @@ #define EMe(a, b) TRACE_DEFINE_ENUM(a); P9_MSG_T +P9_FID_REFTYPE + +/* And also use EM/EMe to define helper enums -- once */ +#ifndef __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE +#define __9P_DECLARE_TRACE_ENUMS_ONLY_ONCE +#undef EM +#undef EMe +#define EM(a, b) a, +#define EMe(a, b) a + +enum p9_fid_reftype { + P9_FID_REFTYPE +} __mode(byte); + +#endif /* * Now redefine the EM() and EMe() macros to map the enums to the strings @@ -96,6 +118,8 @@ P9_MSG_T #define show_9p_op(type) \ __print_symbolic(type, P9_MSG_T) +#define show_9p_fid_reftype(type) \ + __print_symbolic(type, P9_FID_REFTYPE) TRACE_EVENT(9p_client_req, TP_PROTO(struct p9_client *clnt, int8_t type, int tag), @@ -168,6 +192,30 @@ TRACE_EVENT(9p_protocol_dump, __entry->tag, 0, __entry->line, 16, __entry->line + 16) ); + +TRACE_EVENT(9p_fid_ref, + TP_PROTO(struct p9_fid *fid, __u8 type), + + TP_ARGS(fid, type), + + TP_STRUCT__entry( + __field( int, fid ) + __field( int, refcount ) + __field( __u8, type ) + ), + + TP_fast_assign( + __entry->fid = fid->fid; + __entry->refcount = refcount_read(&fid->count); + __entry->type = type; + ), + + TP_printk("%s fid %d, refcount %d", + show_9p_fid_reftype(__entry->type), + __entry->fid, __entry->refcount) +); + + #endif /* _TRACE_9P_H */ /* This part must be outside protection */ diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 499f5fabd20f..e9d412d19dbb 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -727,31 +727,31 @@ TRACE_EVENT(afs_cb_call, ); TRACE_EVENT(afs_call, - TP_PROTO(struct afs_call *call, enum afs_call_trace op, - int usage, int outstanding, const void *where), + TP_PROTO(unsigned int call_debug_id, enum afs_call_trace op, + int ref, int outstanding, const void *where), - TP_ARGS(call, op, usage, outstanding, where), + TP_ARGS(call_debug_id, op, ref, outstanding, where), TP_STRUCT__entry( __field(unsigned int, call ) __field(int, op ) - __field(int, usage ) + __field(int, ref ) __field(int, outstanding ) __field(const void *, where ) ), 
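iscsi_target_core.h adds to_iscsi_nacl()/to_iscsi_tpg() container_of() helpers plus a per-ACL s32 authentication attribute that can be set to NA_AUTHENTICATION_INHERITED. A sketch of how a caller might combine them; the node_attrib member name comes from the rest of the header rather than this hunk, and foo_* is hypothetical:

static bool foo_use_tpg_authentication(struct se_node_acl *se_nacl)
{
	struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);

	/* negative value means "fall back to the TPG-level setting" */
	return nacl->node_attrib.authentication == NA_AUTHENTICATION_INHERITED;
}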
TP_fast_assign( - __entry->call = call->debug_id; + __entry->call = call_debug_id; __entry->op = op; - __entry->usage = usage; + __entry->ref = ref; __entry->outstanding = outstanding; __entry->where = where; ), - TP_printk("c=%08x %s u=%d o=%d sp=%pSR", + TP_printk("c=%08x %s r=%d o=%d sp=%pSR", __entry->call, __print_symbolic(__entry->op, afs_call_traces), - __entry->usage, + __entry->ref, __entry->outstanding, __entry->where) ); @@ -1433,10 +1433,10 @@ TRACE_EVENT(afs_cb_miss, ); TRACE_EVENT(afs_server, - TP_PROTO(struct afs_server *server, int ref, int active, + TP_PROTO(unsigned int server_debug_id, int ref, int active, enum afs_server_trace reason), - TP_ARGS(server, ref, active, reason), + TP_ARGS(server_debug_id, ref, active, reason), TP_STRUCT__entry( __field(unsigned int, server ) @@ -1446,7 +1446,7 @@ TRACE_EVENT(afs_server, ), TP_fast_assign( - __entry->server = server->debug_id; + __entry->server = server_debug_id; __entry->ref = ref; __entry->active = active; __entry->reason = reason; @@ -1476,36 +1476,36 @@ TRACE_EVENT(afs_volume, __entry->reason = reason; ), - TP_printk("V=%llx %s u=%d", + TP_printk("V=%llx %s ur=%d", __entry->vid, __print_symbolic(__entry->reason, afs_volume_traces), __entry->ref) ); TRACE_EVENT(afs_cell, - TP_PROTO(unsigned int cell_debug_id, int usage, int active, + TP_PROTO(unsigned int cell_debug_id, int ref, int active, enum afs_cell_trace reason), - TP_ARGS(cell_debug_id, usage, active, reason), + TP_ARGS(cell_debug_id, ref, active, reason), TP_STRUCT__entry( __field(unsigned int, cell ) - __field(int, usage ) + __field(int, ref ) __field(int, active ) __field(int, reason ) ), TP_fast_assign( __entry->cell = cell_debug_id; - __entry->usage = usage; + __entry->ref = ref; __entry->active = active; __entry->reason = reason; ), - TP_printk("L=%08x %s u=%d a=%d", + TP_printk("L=%08x %s r=%d a=%d", __entry->cell, __print_symbolic(__entry->reason, afs_cell_traces), - __entry->usage, + __entry->ref, __entry->active) ); diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 9ae94ef3e270..73df80d462dc 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -30,6 +30,8 @@ struct btrfs_qgroup; struct extent_io_tree; struct prelim_ref; struct btrfs_space_info; +struct btrfs_raid_bio; +struct raid56_bio_trace_info; #define show_ref_type(type) \ __print_symbolic(type, \ @@ -596,6 +598,70 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put, TP_ARGS(inode, ordered) ); +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_range, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first_range, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_for_logging, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, 
btrfs_ordered_extent_split, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_dec_test_pending, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + +DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished, + + TP_PROTO(const struct btrfs_inode *inode, + const struct btrfs_ordered_extent *ordered), + + TP_ARGS(inode, ordered) +); + DECLARE_EVENT_CLASS(btrfs__writepage, TP_PROTO(const struct page *page, const struct inode *inode, @@ -2258,6 +2324,98 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned, TP_ARGS(fs_info, sinfo, old, diff) ); +DECLARE_EVENT_CLASS(btrfs_raid56_bio, + + TP_PROTO(const struct btrfs_raid_bio *rbio, + const struct bio *bio, + const struct raid56_bio_trace_info *trace_info), + + TP_ARGS(rbio, bio, trace_info), + + TP_STRUCT__entry_btrfs( + __field( u64, full_stripe ) + __field( u64, physical ) + __field( u64, devid ) + __field( u32, offset ) + __field( u32, len ) + __field( u8, opf ) + __field( u8, total_stripes ) + __field( u8, real_stripes ) + __field( u8, nr_data ) + __field( u8, stripe_nr ) + ), + + TP_fast_assign_btrfs(rbio->bioc->fs_info, + __entry->full_stripe = rbio->bioc->raid_map[0]; + __entry->physical = bio->bi_iter.bi_sector << SECTOR_SHIFT; + __entry->len = bio->bi_iter.bi_size; + __entry->opf = bio_op(bio); + __entry->devid = trace_info->devid; + __entry->offset = trace_info->offset; + __entry->stripe_nr = trace_info->stripe_nr; + __entry->total_stripes = rbio->bioc->num_stripes; + __entry->real_stripes = rbio->real_stripes; + __entry->nr_data = rbio->nr_data; + ), + /* + * For type output, we need to output things like "DATA1" + * (the first data stripe), "DATA2" (the second data stripe), + * "PQ1" (P stripe),"PQ2" (Q stripe), "REPLACE0" (replace target device). + */ + TP_printk_btrfs( +"full_stripe=%llu devid=%lld type=%s%d offset=%d opf=0x%x physical=%llu len=%u", + __entry->full_stripe, __entry->devid, + (__entry->stripe_nr < __entry->nr_data) ? "DATA" : + ((__entry->stripe_nr < __entry->real_stripes) ? "PQ" : + "REPLACE"), + (__entry->stripe_nr < __entry->nr_data) ? + (__entry->stripe_nr + 1) : + ((__entry->stripe_nr < __entry->real_stripes) ? 
+ (__entry->stripe_nr - __entry->nr_data + 1) : 0), + __entry->offset, __entry->opf, __entry->physical, __entry->len) +); + +DEFINE_EVENT(btrfs_raid56_bio, raid56_read_partial, + TP_PROTO(const struct btrfs_raid_bio *rbio, + const struct bio *bio, + const struct raid56_bio_trace_info *trace_info), + + TP_ARGS(rbio, bio, trace_info) +); + +DEFINE_EVENT(btrfs_raid56_bio, raid56_write_stripe, + TP_PROTO(const struct btrfs_raid_bio *rbio, + const struct bio *bio, + const struct raid56_bio_trace_info *trace_info), + + TP_ARGS(rbio, bio, trace_info) +); + + +DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_write_stripe, + TP_PROTO(const struct btrfs_raid_bio *rbio, + const struct bio *bio, + const struct raid56_bio_trace_info *trace_info), + + TP_ARGS(rbio, bio, trace_info) +); + +DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read, + TP_PROTO(const struct btrfs_raid_bio *rbio, + const struct bio *bio, + const struct raid56_bio_trace_info *trace_info), + + TP_ARGS(rbio, bio, trace_info) +); + +DEFINE_EVENT(btrfs_raid56_bio, raid56_scrub_read_recover, + TP_PROTO(const struct btrfs_raid_bio *rbio, + const struct bio *bio, + const struct raid56_bio_trace_info *trace_info), + + TP_ARGS(rbio, bio, trace_info) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h index 2814f188d98c..24969184c534 100644 --- a/include/trace/events/devlink.h +++ b/include/trace/events/devlink.h @@ -186,7 +186,7 @@ TRACE_EVENT(devlink_trap_report, __string(driver_name, devlink_to_dev(devlink)->driver->name) __string(trap_name, metadata->trap_name) __string(trap_group_name, metadata->trap_group_name) - __dynamic_array(char, input_dev_name, IFNAMSIZ) + __array(char, input_dev_name, IFNAMSIZ) ), TP_fast_assign( @@ -197,15 +197,14 @@ TRACE_EVENT(devlink_trap_report, __assign_str(driver_name, devlink_to_dev(devlink)->driver->name); __assign_str(trap_name, metadata->trap_name); __assign_str(trap_group_name, metadata->trap_group_name); - __assign_str(input_dev_name, - (input_dev ? input_dev->name : "NULL")); + strscpy(__entry->input_dev_name, input_dev ? input_dev->name : "NULL", IFNAMSIZ); ), TP_printk("bus_name=%s dev_name=%s driver_name=%s trap_name=%s " "trap_group_name=%s input_dev_name=%s", __get_str(bus_name), __get_str(dev_name), __get_str(driver_name), __get_str(trap_name), __get_str(trap_group_name), - __get_str(input_dev_name)) + __entry->input_dev_name) ); #endif /* _TRACE_DEVLINK_H */ diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h index 6f2a4dc35e37..c2300c407f58 100644 --- a/include/trace/events/fib.h +++ b/include/trace/events/fib.h @@ -32,7 +32,7 @@ TRACE_EVENT(fib_table_lookup, __array( __u8, gw6, 16 ) __field( u16, sport ) __field( u16, dport ) - __dynamic_array(char, name, IFNAMSIZ ) + __array(char, name, IFNAMSIZ ) ), TP_fast_assign( @@ -66,7 +66,7 @@ TRACE_EVENT(fib_table_lookup, } dev = nhc ? nhc->nhc_dev : NULL; - __assign_str(name, dev ? dev->name : "-"); + strlcpy(__entry->name, dev ? 
dev->name : "-", IFNAMSIZ); if (nhc) { if (nhc->nhc_gw_family == AF_INET) { @@ -95,7 +95,7 @@ TRACE_EVENT(fib_table_lookup, __entry->tb_id, __entry->oif, __entry->iif, __entry->proto, __entry->src, __entry->sport, __entry->dst, __entry->dport, __entry->tos, __entry->scope, __entry->flags, - __get_str(name), __entry->gw4, __entry->gw6, __entry->err) + __entry->name, __entry->gw4, __entry->gw6, __entry->err) ); #endif /* _TRACE_FIB_H */ diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h index c6abdcc77c12..6e821eb79450 100644 --- a/include/trace/events/fib6.h +++ b/include/trace/events/fib6.h @@ -31,7 +31,7 @@ TRACE_EVENT(fib6_table_lookup, __field( u16, dport ) __field( u8, proto ) __field( u8, rt_type ) - __dynamic_array( char, name, IFNAMSIZ ) + __array( char, name, IFNAMSIZ ) __array( __u8, gw, 16 ) ), @@ -63,9 +63,9 @@ TRACE_EVENT(fib6_table_lookup, } if (res->nh && res->nh->fib_nh_dev) { - __assign_str(name, res->nh->fib_nh_dev); + strlcpy(__entry->name, res->nh->fib_nh_dev->name, IFNAMSIZ); } else { - __assign_str(name, "-"); + strcpy(__entry->name, "-"); } if (res->f6i == net->ipv6.fib6_null_entry) { struct in6_addr in6_zero = {}; @@ -83,7 +83,7 @@ TRACE_EVENT(fib6_table_lookup, __entry->tb_id, __entry->oif, __entry->iif, __entry->proto, __entry->src, __entry->sport, __entry->dst, __entry->dport, __entry->tos, __entry->scope, __entry->flags, - __get_str(name), __entry->gw, __entry->err) + __entry->name, __entry->gw, __entry->err) ); #endif /* _TRACE_FIB6_H */ diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h index cb3fb337e880..c078c48a8e6d 100644 --- a/include/trace/events/fscache.h +++ b/include/trace/events/fscache.h @@ -49,6 +49,7 @@ enum fscache_volume_trace { enum fscache_cookie_trace { fscache_cookie_collision, fscache_cookie_discard, + fscache_cookie_failed, fscache_cookie_get_attach_object, fscache_cookie_get_end_access, fscache_cookie_get_hash_collision, @@ -131,6 +132,7 @@ enum fscache_access_trace { #define fscache_cookie_traces \ EM(fscache_cookie_collision, "*COLLIDE*") \ EM(fscache_cookie_discard, "DISCARD ") \ + EM(fscache_cookie_failed, "FAILED ") \ EM(fscache_cookie_get_attach_object, "GET attch") \ EM(fscache_cookie_get_hash_collision, "GET hcoll") \ EM(fscache_cookie_get_end_access, "GQ endac") \ diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h deleted file mode 100644 index e5c1ca6d16ee..000000000000 --- a/include/trace/events/intel_iommu.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Intel IOMMU trace support - * - * Copyright (C) 2019 Intel Corporation - * - * Author: Lu Baolu <baolu.lu@linux.intel.com> - */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM intel_iommu - -#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_INTEL_IOMMU_H - -#include <linux/tracepoint.h> -#include <linux/intel-iommu.h> - -#define MSG_MAX 256 - -TRACE_EVENT(qi_submit, - TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3), - - TP_ARGS(iommu, qw0, qw1, qw2, qw3), - - TP_STRUCT__entry( - __field(u64, qw0) - __field(u64, qw1) - __field(u64, qw2) - __field(u64, qw3) - __string(iommu, iommu->name) - ), - - TP_fast_assign( - __assign_str(iommu, iommu->name); - __entry->qw0 = qw0; - __entry->qw1 = qw1; - __entry->qw2 = qw2; - __entry->qw3 = qw3; - ), - - TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx", - __print_symbolic(__entry->qw0 & 0xf, - { QI_CC_TYPE, "cc_inv" }, - { QI_IOTLB_TYPE, "iotlb_inv" }, - { QI_DIOTLB_TYPE, 
"dev_tlb_inv" }, - { QI_IEC_TYPE, "iec_inv" }, - { QI_IWD_TYPE, "inv_wait" }, - { QI_EIOTLB_TYPE, "p_iotlb_inv" }, - { QI_PC_TYPE, "pc_inv" }, - { QI_DEIOTLB_TYPE, "p_dev_tlb_inv" }, - { QI_PGRP_RESP_TYPE, "page_grp_resp" }), - __get_str(iommu), - __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3 - ) -); - -TRACE_EVENT(prq_report, - TP_PROTO(struct intel_iommu *iommu, struct device *dev, - u64 dw0, u64 dw1, u64 dw2, u64 dw3, - unsigned long seq), - - TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq), - - TP_STRUCT__entry( - __field(u64, dw0) - __field(u64, dw1) - __field(u64, dw2) - __field(u64, dw3) - __field(unsigned long, seq) - __string(iommu, iommu->name) - __string(dev, dev_name(dev)) - __dynamic_array(char, buff, MSG_MAX) - ), - - TP_fast_assign( - __entry->dw0 = dw0; - __entry->dw1 = dw1; - __entry->dw2 = dw2; - __entry->dw3 = dw3; - __entry->seq = seq; - __assign_str(iommu, iommu->name); - __assign_str(dev, dev_name(dev)); - ), - - TP_printk("%s/%s seq# %ld: %s", - __get_str(iommu), __get_str(dev), __entry->seq, - decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0, - __entry->dw1, __entry->dw2, __entry->dw3) - ) -); -#endif /* _TRACE_INTEL_IOMMU_H */ - -/* This part must be outside protection */ -#include <trace/define_trace.h> diff --git a/include/trace/events/iscsi.h b/include/trace/events/iscsi.h index 87408faf6e4e..8ff2a3ca5d75 100644 --- a/include/trace/events/iscsi.h +++ b/include/trace/events/iscsi.h @@ -26,12 +26,12 @@ DECLARE_EVENT_CLASS(iscsi_log_msg, TP_STRUCT__entry( __string(dname, dev_name(dev) ) - __dynamic_array(char, msg, ISCSI_MSG_MAX ) + __vstring(msg, vaf->fmt, vaf->va) ), TP_fast_assign( __assign_str(dname, dev_name(dev)); - vsnprintf(__get_str(msg), ISCSI_MSG_MAX, vaf->fmt, *vaf->va); + __assign_vstr(msg, vaf->fmt, vaf->va); ), TP_printk("%s: %s",__get_str(dname), __get_str(msg) diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 37e1e1a2d67d..3bd31ea23fee 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -282,7 +282,7 @@ DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page, TP_ARGS(gva, gfn) ); -DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault, +DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault, TP_PROTO(u64 gva, u64 gfn), diff --git a/include/trace/events/neigh.h b/include/trace/events/neigh.h index 62bb17516713..5eaa1fa99171 100644 --- a/include/trace/events/neigh.h +++ b/include/trace/events/neigh.h @@ -30,7 +30,7 @@ TRACE_EVENT(neigh_create, TP_STRUCT__entry( __field(u32, family) - __dynamic_array(char, dev, IFNAMSIZ ) + __string(dev, dev ? 
dev->name : "NULL") __field(int, entries) __field(u8, created) __field(u8, gc_exempt) diff --git a/include/trace/events/net.h b/include/trace/events/net.h index 032b431b987b..da611a7aaf97 100644 --- a/include/trace/events/net.h +++ b/include/trace/events/net.h @@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(net_dev_template, __assign_str(name, skb->dev->name); ), - TP_printk("dev=%s skbaddr=%px len=%u", + TP_printk("dev=%s skbaddr=%p len=%u", __get_str(name), __entry->skbaddr, __entry->len) ) diff --git a/include/trace/events/power.h b/include/trace/events/power.h index c708521e4ed5..77f14f7a11d4 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h @@ -40,6 +40,28 @@ DEFINE_EVENT(cpu, cpu_idle, TP_ARGS(state, cpu_id) ); +TRACE_EVENT(cpu_idle_miss, + + TP_PROTO(unsigned int cpu_id, unsigned int state, bool below), + + TP_ARGS(cpu_id, state, below), + + TP_STRUCT__entry( + __field(u32, cpu_id) + __field(u32, state) + __field(bool, below) + ), + + TP_fast_assign( + __entry->cpu_id = cpu_id; + __entry->state = state; + __entry->below = below; + ), + + TP_printk("cpu_id=%lu state=%lu type=%s", (unsigned long)__entry->cpu_id, + (unsigned long)__entry->state, (__entry->below)?"below":"above") +); + TRACE_EVENT(powernv_throttle, TP_PROTO(int chip_id, const char *reason, int pmax), diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 59c945b66f9c..a3995925cb05 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -41,7 +41,7 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state = txq->state; ), - TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%px", + TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p", __entry->ifindex, __entry->handle, __entry->parent, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); @@ -70,7 +70,7 @@ TRACE_EVENT(qdisc_enqueue, __entry->parent = qdisc->parent; ), - TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%px", + TP_printk("enqueue ifindex=%d qdisc handle=0x%X parent=0x%X skbaddr=%p", __entry->ifindex, __entry->handle, __entry->parent, __entry->skbaddr) ); diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h index 5857cf682ee7..e7fd55e7dc3d 100644 --- a/include/trace/events/qla.h +++ b/include/trace/events/qla.h @@ -22,11 +22,11 @@ DECLARE_EVENT_CLASS(qla_log_event, TP_STRUCT__entry( __string(buf, buf) - __dynamic_array(char, msg, QLA_MSG_MAX) + __vstring(msg, vaf->fmt, vaf->va) ), TP_fast_assign( __assign_str(buf, buf); - vsnprintf(__get_str(msg), QLA_MSG_MAX, vaf->fmt, *vaf->va); + __assign_vstr(msg, vaf->fmt, vaf->va); ), TP_printk("%s %s", __get_str(buf), __get_str(msg)) diff --git a/include/trace/events/rv.h b/include/trace/events/rv.h new file mode 100644 index 000000000000..56592da9301c --- /dev/null +++ b/include/trace/events/rv.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rv + +#if !defined(_TRACE_RV_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RV_H + +#include <linux/rv.h> +#include <linux/tracepoint.h> + +#ifdef CONFIG_DA_MON_EVENTS_IMPLICIT +DECLARE_EVENT_CLASS(event_da_monitor, + + TP_PROTO(char *state, char *event, char *next_state, bool final_state), + + TP_ARGS(state, event, next_state, final_state), + + TP_STRUCT__entry( + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + __array( char, next_state, MAX_DA_NAME_LEN ) + __field( bool, final_state ) + ), + + 
TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); + __entry->final_state = final_state; + ), + + TP_printk("%s x %s -> %s %s", + __entry->state, + __entry->event, + __entry->next_state, + __entry->final_state ? "(final)" : "") +); + +DECLARE_EVENT_CLASS(error_da_monitor, + + TP_PROTO(char *state, char *event), + + TP_ARGS(state, event), + + TP_STRUCT__entry( + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + ), + + TP_printk("event %s not expected in the state %s", + __entry->event, + __entry->state) +); + +#ifdef CONFIG_RV_MON_WIP +DEFINE_EVENT(event_da_monitor, event_wip, + TP_PROTO(char *state, char *event, char *next_state, bool final_state), + TP_ARGS(state, event, next_state, final_state)); + +DEFINE_EVENT(error_da_monitor, error_wip, + TP_PROTO(char *state, char *event), + TP_ARGS(state, event)); +#endif /* CONFIG_RV_MON_WIP */ +#endif /* CONFIG_DA_MON_EVENTS_IMPLICIT */ + +#ifdef CONFIG_DA_MON_EVENTS_ID +DECLARE_EVENT_CLASS(event_da_monitor_id, + + TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), + + TP_ARGS(id, state, event, next_state, final_state), + + TP_STRUCT__entry( + __field( int, id ) + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + __array( char, next_state, MAX_DA_NAME_LEN ) + __field( bool, final_state ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + memcpy(__entry->next_state, next_state, MAX_DA_NAME_LEN); + __entry->id = id; + __entry->final_state = final_state; + ), + + TP_printk("%d: %s x %s -> %s %s", + __entry->id, + __entry->state, + __entry->event, + __entry->next_state, + __entry->final_state ? "(final)" : "") +); + +DECLARE_EVENT_CLASS(error_da_monitor_id, + + TP_PROTO(int id, char *state, char *event), + + TP_ARGS(id, state, event), + + TP_STRUCT__entry( + __field( int, id ) + __array( char, state, MAX_DA_NAME_LEN ) + __array( char, event, MAX_DA_NAME_LEN ) + ), + + TP_fast_assign( + memcpy(__entry->state, state, MAX_DA_NAME_LEN); + memcpy(__entry->event, event, MAX_DA_NAME_LEN); + __entry->id = id; + ), + + TP_printk("%d: event %s not expected in the state %s", + __entry->id, + __entry->event, + __entry->state) +); + +#ifdef CONFIG_RV_MON_WWNR +/* id is the pid of the task */ +DEFINE_EVENT(event_da_monitor_id, event_wwnr, + TP_PROTO(int id, char *state, char *event, char *next_state, bool final_state), + TP_ARGS(id, state, event, next_state, final_state)); + +DEFINE_EVENT(error_da_monitor_id, error_wwnr, + TP_PROTO(int id, char *state, char *event), + TP_ARGS(id, state, event)); +#endif /* CONFIG_RV_MON_WWNR */ + +#endif /* CONFIG_DA_MON_EVENTS_ID */ +#endif /* _TRACE_RV_H */ + +/* This part ust be outside protection */ +#undef TRACE_INCLUDE_PATH +#include <trace/define_trace.h> diff --git a/include/trace/events/rwmmio.h b/include/trace/events/rwmmio.h new file mode 100644 index 000000000000..de41159216c1 --- /dev/null +++ b/include/trace/events/rwmmio.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rwmmio + +#if !defined(_TRACE_RWMMIO_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_RWMMIO_H + +#include <linux/tracepoint.h> + +DECLARE_EVENT_CLASS(rwmmio_rw_template, + + TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + + TP_ARGS(caller, val, width, addr), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, addr) + __field(u64, val) + __field(u8, width) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->val = val; + __entry->addr = (unsigned long)addr; + __entry->width = width; + ), + + TP_printk("%pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller, __entry->width, + __entry->val, __entry->addr) +); + +DEFINE_EVENT(rwmmio_rw_template, rwmmio_write, + TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + TP_ARGS(caller, val, width, addr) +); + +DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write, + TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + TP_ARGS(caller, val, width, addr) +); + +TRACE_EVENT(rwmmio_read, + + TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr), + + TP_ARGS(caller, width, addr), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, addr) + __field(u8, width) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->addr = (unsigned long)addr; + __entry->width = width; + ), + + TP_printk("%pS width=%d addr=%#lx", + (void *)__entry->caller, __entry->width, __entry->addr) +); + +TRACE_EVENT(rwmmio_post_read, + + TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr), + + TP_ARGS(caller, val, width, addr), + + TP_STRUCT__entry( + __field(unsigned long, caller) + __field(unsigned long, addr) + __field(u64, val) + __field(u8, width) + ), + + TP_fast_assign( + __entry->caller = caller; + __entry->val = val; + __entry->addr = (unsigned long)addr; + __entry->width = width; + ), + + TP_printk("%pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller, __entry->width, + __entry->val, __entry->addr) +); + +#endif /* _TRACE_RWMMIO_H */ + +#include <trace/define_trace.h> diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index 370ade0d4093..a2c7befd451a 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -166,6 +166,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start, __field( unsigned int, lun ) __field( unsigned int, opcode ) __field( unsigned int, cmd_len ) + __field( int, driver_tag) + __field( int, scheduler_tag) __field( unsigned int, data_sglen ) __field( unsigned int, prot_sglen ) __field( unsigned char, prot_op ) @@ -179,6 +181,8 @@ TRACE_EVENT(scsi_dispatch_cmd_start, __entry->lun = cmd->device->lun; __entry->opcode = cmd->cmnd[0]; __entry->cmd_len = cmd->cmd_len; + __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag; + __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag; __entry->data_sglen = scsi_sg_count(cmd); __entry->prot_sglen = scsi_prot_sg_count(cmd); __entry->prot_op = scsi_get_prot_op(cmd); @@ -186,11 +190,11 @@ TRACE_EVENT(scsi_dispatch_cmd_start, ), TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ - " prot_op=%s cmnd=(%s %s raw=%s)", + " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)", __entry->host_no, __entry->channel, __entry->id, __entry->lun, __entry->data_sglen, __entry->prot_sglen, - show_prot_op_name(__entry->prot_op), - show_opcode_name(__entry->opcode), + show_prot_op_name(__entry->prot_op), 
__entry->driver_tag, + __entry->scheduler_tag, show_opcode_name(__entry->opcode), __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len)) ); @@ -209,6 +213,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error, __field( int, rtn ) __field( unsigned int, opcode ) __field( unsigned int, cmd_len ) + __field( int, driver_tag) + __field( int, scheduler_tag) __field( unsigned int, data_sglen ) __field( unsigned int, prot_sglen ) __field( unsigned char, prot_op ) @@ -223,6 +229,8 @@ TRACE_EVENT(scsi_dispatch_cmd_error, __entry->rtn = rtn; __entry->opcode = cmd->cmnd[0]; __entry->cmd_len = cmd->cmd_len; + __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag; + __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag; __entry->data_sglen = scsi_sg_count(cmd); __entry->prot_sglen = scsi_prot_sg_count(cmd); __entry->prot_op = scsi_get_prot_op(cmd); @@ -230,11 +238,12 @@ TRACE_EVENT(scsi_dispatch_cmd_error, ), TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ - " prot_op=%s cmnd=(%s %s raw=%s) rtn=%d", + " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \ + " rtn=%d", __entry->host_no, __entry->channel, __entry->id, __entry->lun, __entry->data_sglen, __entry->prot_sglen, - show_prot_op_name(__entry->prot_op), - show_opcode_name(__entry->opcode), + show_prot_op_name(__entry->prot_op), __entry->driver_tag, + __entry->scheduler_tag, show_opcode_name(__entry->opcode), __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), __entry->rtn) @@ -254,6 +263,8 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, __field( int, result ) __field( unsigned int, opcode ) __field( unsigned int, cmd_len ) + __field( int, driver_tag) + __field( int, scheduler_tag) __field( unsigned int, data_sglen ) __field( unsigned int, prot_sglen ) __field( unsigned char, prot_op ) @@ -268,19 +279,21 @@ DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, __entry->result = cmd->result; __entry->opcode = cmd->cmnd[0]; __entry->cmd_len = cmd->cmd_len; + __entry->driver_tag = scsi_cmd_to_rq(cmd)->tag; + __entry->scheduler_tag = scsi_cmd_to_rq(cmd)->internal_tag; __entry->data_sglen = scsi_sg_count(cmd); __entry->prot_sglen = scsi_prot_sg_count(cmd); __entry->prot_op = scsi_get_prot_op(cmd); memcpy(__get_dynamic_array(cmnd), cmd->cmnd, cmd->cmd_len); ), - TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u " \ - "prot_sgl=%u prot_op=%s cmnd=(%s %s raw=%s) result=(driver=" \ - "%s host=%s message=%s status=%s)", + TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u " \ + "prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s) " \ + "result=(driver=%s host=%s message=%s status=%s)", __entry->host_no, __entry->channel, __entry->id, __entry->lun, __entry->data_sglen, __entry->prot_sglen, - show_prot_op_name(__entry->prot_op), - show_opcode_name(__entry->opcode), + show_prot_op_name(__entry->prot_op), __entry->driver_tag, + __entry->scheduler_tag, show_opcode_name(__entry->opcode), __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), "DRIVER_OK", diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index a477bf907498..45264e4bb254 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -9,92 +9,6 @@ #include <linux/netdevice.h> #include <linux/tracepoint.h> -#define TRACE_SKB_DROP_REASON \ - EM(SKB_DROP_REASON_NOT_SPECIFIED, NOT_SPECIFIED) \ - EM(SKB_DROP_REASON_NO_SOCKET, 
NO_SOCKET) \ - EM(SKB_DROP_REASON_PKT_TOO_SMALL, PKT_TOO_SMALL) \ - EM(SKB_DROP_REASON_TCP_CSUM, TCP_CSUM) \ - EM(SKB_DROP_REASON_SOCKET_FILTER, SOCKET_FILTER) \ - EM(SKB_DROP_REASON_UDP_CSUM, UDP_CSUM) \ - EM(SKB_DROP_REASON_NETFILTER_DROP, NETFILTER_DROP) \ - EM(SKB_DROP_REASON_OTHERHOST, OTHERHOST) \ - EM(SKB_DROP_REASON_IP_CSUM, IP_CSUM) \ - EM(SKB_DROP_REASON_IP_INHDR, IP_INHDR) \ - EM(SKB_DROP_REASON_IP_RPFILTER, IP_RPFILTER) \ - EM(SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST, \ - UNICAST_IN_L2_MULTICAST) \ - EM(SKB_DROP_REASON_XFRM_POLICY, XFRM_POLICY) \ - EM(SKB_DROP_REASON_IP_NOPROTO, IP_NOPROTO) \ - EM(SKB_DROP_REASON_SOCKET_RCVBUFF, SOCKET_RCVBUFF) \ - EM(SKB_DROP_REASON_PROTO_MEM, PROTO_MEM) \ - EM(SKB_DROP_REASON_TCP_MD5NOTFOUND, TCP_MD5NOTFOUND) \ - EM(SKB_DROP_REASON_TCP_MD5UNEXPECTED, \ - TCP_MD5UNEXPECTED) \ - EM(SKB_DROP_REASON_TCP_MD5FAILURE, TCP_MD5FAILURE) \ - EM(SKB_DROP_REASON_SOCKET_BACKLOG, SOCKET_BACKLOG) \ - EM(SKB_DROP_REASON_TCP_FLAGS, TCP_FLAGS) \ - EM(SKB_DROP_REASON_TCP_ZEROWINDOW, TCP_ZEROWINDOW) \ - EM(SKB_DROP_REASON_TCP_OLD_DATA, TCP_OLD_DATA) \ - EM(SKB_DROP_REASON_TCP_OVERWINDOW, TCP_OVERWINDOW) \ - EM(SKB_DROP_REASON_TCP_OFOMERGE, TCP_OFOMERGE) \ - EM(SKB_DROP_REASON_TCP_OFO_DROP, TCP_OFO_DROP) \ - EM(SKB_DROP_REASON_TCP_RFC7323_PAWS, TCP_RFC7323_PAWS) \ - EM(SKB_DROP_REASON_TCP_INVALID_SEQUENCE, \ - TCP_INVALID_SEQUENCE) \ - EM(SKB_DROP_REASON_TCP_RESET, TCP_RESET) \ - EM(SKB_DROP_REASON_TCP_INVALID_SYN, TCP_INVALID_SYN) \ - EM(SKB_DROP_REASON_TCP_CLOSE, TCP_CLOSE) \ - EM(SKB_DROP_REASON_TCP_FASTOPEN, TCP_FASTOPEN) \ - EM(SKB_DROP_REASON_TCP_OLD_ACK, TCP_OLD_ACK) \ - EM(SKB_DROP_REASON_TCP_TOO_OLD_ACK, TCP_TOO_OLD_ACK) \ - EM(SKB_DROP_REASON_TCP_ACK_UNSENT_DATA, \ - TCP_ACK_UNSENT_DATA) \ - EM(SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE, \ - TCP_OFO_QUEUE_PRUNE) \ - EM(SKB_DROP_REASON_IP_OUTNOROUTES, IP_OUTNOROUTES) \ - EM(SKB_DROP_REASON_BPF_CGROUP_EGRESS, \ - BPF_CGROUP_EGRESS) \ - EM(SKB_DROP_REASON_IPV6DISABLED, IPV6DISABLED) \ - EM(SKB_DROP_REASON_NEIGH_CREATEFAIL, NEIGH_CREATEFAIL) \ - EM(SKB_DROP_REASON_NEIGH_FAILED, NEIGH_FAILED) \ - EM(SKB_DROP_REASON_NEIGH_QUEUEFULL, NEIGH_QUEUEFULL) \ - EM(SKB_DROP_REASON_NEIGH_DEAD, NEIGH_DEAD) \ - EM(SKB_DROP_REASON_TC_EGRESS, TC_EGRESS) \ - EM(SKB_DROP_REASON_QDISC_DROP, QDISC_DROP) \ - EM(SKB_DROP_REASON_CPU_BACKLOG, CPU_BACKLOG) \ - EM(SKB_DROP_REASON_XDP, XDP) \ - EM(SKB_DROP_REASON_TC_INGRESS, TC_INGRESS) \ - EM(SKB_DROP_REASON_UNHANDLED_PROTO, UNHANDLED_PROTO) \ - EM(SKB_DROP_REASON_SKB_CSUM, SKB_CSUM) \ - EM(SKB_DROP_REASON_SKB_GSO_SEG, SKB_GSO_SEG) \ - EM(SKB_DROP_REASON_SKB_UCOPY_FAULT, SKB_UCOPY_FAULT) \ - EM(SKB_DROP_REASON_DEV_HDR, DEV_HDR) \ - EM(SKB_DROP_REASON_DEV_READY, DEV_READY) \ - EM(SKB_DROP_REASON_FULL_RING, FULL_RING) \ - EM(SKB_DROP_REASON_NOMEM, NOMEM) \ - EM(SKB_DROP_REASON_HDR_TRUNC, HDR_TRUNC) \ - EM(SKB_DROP_REASON_TAP_FILTER, TAP_FILTER) \ - EM(SKB_DROP_REASON_TAP_TXFILTER, TAP_TXFILTER) \ - EM(SKB_DROP_REASON_ICMP_CSUM, ICMP_CSUM) \ - EM(SKB_DROP_REASON_INVALID_PROTO, INVALID_PROTO) \ - EM(SKB_DROP_REASON_IP_INADDRERRORS, IP_INADDRERRORS) \ - EM(SKB_DROP_REASON_IP_INNOROUTES, IP_INNOROUTES) \ - EM(SKB_DROP_REASON_PKT_TOO_BIG, PKT_TOO_BIG) \ - EMe(SKB_DROP_REASON_MAX, MAX) - -#undef EM -#undef EMe - -#define EM(a, b) TRACE_DEFINE_ENUM(a); -#define EMe(a, b) TRACE_DEFINE_ENUM(a); - -TRACE_SKB_DROP_REASON - -#undef EM -#undef EMe -#define EM(a, b) { a, #b }, -#define EMe(a, b) { a, #b } - /* * Tracepoint for free an sk_buff: */ @@ -121,8 +35,7 @@ TRACE_EVENT(kfree_skb, 
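The kfree_skb event below now resolves the drop reason by indexing a central drop_reasons[] array of strings exported by the networking core, instead of the per-header __print_symbolic() table being removed above. Reduced to a hypothetical standalone form (the enum and array names here are illustrative, not the kernel's):

/* Hypothetical reduction of the string-table lookup used by the tracepoint. */
enum example_drop_reason {
        EXAMPLE_DROP_NOT_SPECIFIED,
        EXAMPLE_DROP_NO_SOCKET,
        EXAMPLE_DROP_MAX,
};

static const char * const example_drop_reasons[] = {
        [EXAMPLE_DROP_NOT_SPECIFIED]    = "NOT_SPECIFIED",
        [EXAMPLE_DROP_NO_SOCKET]        = "NO_SOCKET",
};

static const char *example_drop_reason_str(enum example_drop_reason r)
{
        return r < EXAMPLE_DROP_MAX ? example_drop_reasons[r] : "UNKNOWN";
}

Keeping the strings in one shared table means a new drop reason only has to be added in one place rather than in every trace header that prints it.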
TP_printk("skbaddr=%p protocol=%u location=%p reason: %s", __entry->skbaddr, __entry->protocol, __entry->location, - __print_symbolic(__entry->reason, - TRACE_SKB_DROP_REASON)) + drop_reasons[__entry->reason]) ); TRACE_EVENT(consume_skb, diff --git a/include/trace/events/spmi.h b/include/trace/events/spmi.h index 8b60efe18ba6..a6819fd85cdf 100644 --- a/include/trace/events/spmi.h +++ b/include/trace/events/spmi.h @@ -21,15 +21,15 @@ TRACE_EVENT(spmi_write_begin, __field ( u8, sid ) __field ( u16, addr ) __field ( u8, len ) - __dynamic_array ( u8, buf, len + 1 ) + __dynamic_array ( u8, buf, len ) ), TP_fast_assign( __entry->opcode = opcode; __entry->sid = sid; __entry->addr = addr; - __entry->len = len + 1; - memcpy(__get_dynamic_array(buf), buf, len + 1); + __entry->len = len; + memcpy(__get_dynamic_array(buf), buf, len); ), TP_printk("opc=%d sid=%02d addr=0x%04x len=%d buf=0x[%*phD]", @@ -92,7 +92,7 @@ TRACE_EVENT(spmi_read_end, __field ( u16, addr ) __field ( int, ret ) __field ( u8, len ) - __dynamic_array ( u8, buf, len + 1 ) + __dynamic_array ( u8, buf, len ) ), TP_fast_assign( @@ -100,8 +100,8 @@ TRACE_EVENT(spmi_read_end, __entry->sid = sid; __entry->addr = addr; __entry->ret = ret; - __entry->len = len + 1; - memcpy(__get_dynamic_array(buf), buf, len + 1); + __entry->len = len; + memcpy(__get_dynamic_array(buf), buf, len); ), TP_printk("opc=%d sid=%02d addr=0x%04x ret=%d len=%02d buf=0x[%*phD]", diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index b61d9c90fa26..f48f2ab9d238 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -1266,6 +1266,26 @@ TRACE_EVENT(xprt_reserve, ) ); +TRACE_EVENT(xs_data_ready, + TP_PROTO( + const struct rpc_xprt *xprt + ), + + TP_ARGS(xprt), + + TP_STRUCT__entry( + __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk("peer=[%s]:%s", __get_str(addr), __get_str(port)) +); + TRACE_EVENT(xs_stream_read_data, TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total), @@ -1989,20 +2009,24 @@ TRACE_EVENT(svc_wake_up, TRACE_EVENT(svc_alloc_arg_err, TP_PROTO( - unsigned int pages + unsigned int requested, + unsigned int allocated ), - TP_ARGS(pages), + TP_ARGS(requested, allocated), TP_STRUCT__entry( - __field(unsigned int, pages) + __field(unsigned int, requested) + __field(unsigned int, allocated) ), TP_fast_assign( - __entry->pages = pages; + __entry->requested = requested; + __entry->allocated = allocated; ), - TP_printk("pages=%u", __entry->pages) + TP_printk("requested=%u allocated=%u", + __entry->requested, __entry->allocated) ); DECLARE_EVENT_CLASS(svc_deferred_event, diff --git a/include/trace/stages/stage1_struct_define.h b/include/trace/stages/stage1_struct_define.h index a16783419687..1b7bab60434c 100644 --- a/include/trace/stages/stage1_struct_define.h +++ b/include/trace/stages/stage1_struct_define.h @@ -26,6 +26,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h index 42fd1e8813ec..1b7a8f764fdd 100644 --- a/include/trace/stages/stage2_data_offsets.h +++ 
b/include/trace/stages/stage2_data_offsets.h @@ -32,6 +32,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h index e80cdc397a43..a8fb25f39a99 100644 --- a/include/trace/stages/stage4_event_fields.h +++ b/include/trace/stages/stage4_event_fields.h @@ -2,16 +2,18 @@ /* Stage 4 definitions for creating trace events */ +#define ALIGN_STRUCTFIELD(type) ((int)(__alignof__(struct {type b;}))) + #undef __field_ext #define __field_ext(_type, _item, _filter_type) { \ .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ + .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \ .is_signed = is_signed_type(_type), .filter_type = _filter_type }, #undef __field_struct_ext #define __field_struct_ext(_type, _item, _filter_type) { \ .type = #_type, .name = #_item, \ - .size = sizeof(_type), .align = __alignof__(_type), \ + .size = sizeof(_type), .align = ALIGN_STRUCTFIELD(_type), \ 0, .filter_type = _filter_type }, #undef __field @@ -23,7 +25,7 @@ #undef __array #define __array(_type, _item, _len) { \ .type = #_type"["__stringify(_len)"]", .name = #_item, \ - .size = sizeof(_type[_len]), .align = __alignof__(_type), \ + .size = sizeof(_type[_len]), .align = ALIGN_STRUCTFIELD(_type), \ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, #undef __dynamic_array @@ -38,6 +40,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h index 7ee5931300e6..fba4c24ed9e6 100644 --- a/include/trace/stages/stage5_get_offsets.h +++ b/include/trace/stages/stage5_get_offsets.h @@ -39,6 +39,10 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, \ + __trace_event_vstr_len(fmt, ap)) + #undef __rel_dynamic_array #define __rel_dynamic_array(type, item, len) \ __item_length = (len) * sizeof(type); \ diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h index e1724f73594b..3c554a585320 100644 --- a/include/trace/stages/stage6_event_callback.h +++ b/include/trace/stages/stage6_event_callback.h @@ -24,6 +24,9 @@ #undef __string_len #define __string_len(item, src, len) __dynamic_array(char, item, -1) +#undef __vstring +#define __vstring(item, fmt, ap) __dynamic_array(char, item, -1) + #undef __assign_str #define __assign_str(dst, src) \ strcpy(__get_str(dst), (src) ? 
(const char *)(src) : "(null)"); @@ -35,6 +38,15 @@ __get_str(dst)[len] = '\0'; \ } while(0) +#undef __assign_vstr +#define __assign_vstr(dst, fmt, va) \ + do { \ + va_list __cp_va; \ + va_copy(__cp_va, *(va)); \ + vsnprintf(__get_str(dst), TRACE_EVENT_STR_MAX, fmt, __cp_va); \ + va_end(__cp_va); \ + } while (0) + #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) diff --git a/include/uapi/asm-generic/termbits-common.h b/include/uapi/asm-generic/termbits-common.h index 4d084fe8def5..4a6a79f28b21 100644 --- a/include/uapi/asm-generic/termbits-common.h +++ b/include/uapi/asm-generic/termbits-common.h @@ -46,6 +46,7 @@ typedef unsigned int speed_t; #define EXTA B19200 #define EXTB B38400 +#define ADDRB 0x20000000 /* address bit */ #define CMSPAR 0x40000000 /* mark or space (stick) parity */ #define CRTSCTS 0x80000000 /* flow control */ diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 18d3246d636e..c2c9c674a223 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -559,6 +559,10 @@ struct drm_amdgpu_gem_va { #define AMDGPU_HW_IP_VCE 4 #define AMDGPU_HW_IP_UVD_ENC 5 #define AMDGPU_HW_IP_VCN_DEC 6 +/* + * From VCN4, AMDGPU_HW_IP_VCN_ENC is re-used to support + * both encoding and decoding jobs. + */ #define AMDGPU_HW_IP_VCN_ENC 7 #define AMDGPU_HW_IP_VCN_JPEG 8 #define AMDGPU_HW_IP_NUM 9 @@ -1093,7 +1097,8 @@ struct drm_amdgpu_info_hw_ip { __u32 ib_size_alignment; /** Bitmask of available rings. Bit 0 means ring 0, etc. */ __u32 available_rings; - __u32 _pad; + /** version info: bits 23:16 major, 15:8 minor, 7:0 revision */ + __u32 ip_discovery_version; }; struct drm_amdgpu_info_num_handles { diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 0980678d502d..0206f812c569 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -559,7 +559,7 @@ extern "C" { * * The main surface is Y-tiled and is at plane index 0 whereas CCS is linear * and at index 1. The clear color is stored at index 2, and the pitch should - * be ignored. The clear color structure is 256 bits. The first 128 bits + * be 64 bytes aligned. The clear color structure is 256 bits. The first 128 bits * represents Raw Clear Color Red, Green, Blue and Alpha color each represented * by 32 bits. The raw clear color is consumed by the 3d engine and generates * the converted clear color of size 64 bits. The first 32 bits store the Lower @@ -612,9 +612,9 @@ extern "C" { * outside of the GEM object in a reserved memory area dedicated for the * storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The * main surface pitch is required to be a multiple of four Tile 4 widths. The - * clear color is stored at plane index 1 and the pitch should be ignored. The - * format of the 256 bits of clear color data matches the one used for the - * I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description + * clear color is stored at plane index 1 and the pitch should be 64 bytes + * aligned. The format of the 256 bits of clear color data matches the one used + * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description * for details. 
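Returning to the __vstring()/__assign_vstr() helpers added to the trace stage headers above: together they let an event reserve a dynamically sized string (sized via __trace_event_vstr_len() at stage 5) and fill it from a printf-style format plus va_list, bounded by TRACE_EVENT_STR_MAX. A hypothetical event using them might look like the sketch below; the event name is illustrative and, as usual, the definition would live in a trace header pulled in with CREATE_TRACE_POINTS:

TRACE_EVENT(example_dev_message,

        TP_PROTO(struct va_format *vaf),

        TP_ARGS(vaf),

        TP_STRUCT__entry(
                /* reserves exactly enough room for the formatted string */
                __vstring(msg, vaf->fmt, vaf->va)
        ),

        TP_fast_assign(
                /* vsnprintf()s into the reserved buffer via a va_copy */
                __assign_vstr(msg, vaf->fmt, vaf->va)
        ),

        TP_printk("%s", __get_str(msg))
);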
*/ #define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12) @@ -1363,6 +1363,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define AMD_FMT_MOD_TILE_VER_GFX9 1 #define AMD_FMT_MOD_TILE_VER_GFX10 2 #define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3 +#define AMD_FMT_MOD_TILE_VER_GFX11 4 /* * 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical @@ -1378,6 +1379,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25 #define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26 #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27 +#define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31 #define AMD_FMT_MOD_DCC_BLOCK_64B 0 #define AMD_FMT_MOD_DCC_BLOCK_128B 1 diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index b28ff5d88145..520ad2691a99 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -751,14 +751,27 @@ typedef struct drm_i915_irq_wait { /* Must be kept compact -- no holes and well documented */ -typedef struct drm_i915_getparam { +/** + * struct drm_i915_getparam - Driver parameter query structure. + */ +struct drm_i915_getparam { + /** @param: Driver parameter to query. */ __s32 param; - /* + + /** + * @value: Address of memory where queried value should be put. + * * WARNING: Using pointers instead of fixed-size u64 means we need to write * compat32 code. Don't repeat this mistake. */ int __user *value; -} drm_i915_getparam_t; +}; + +/** + * typedef drm_i915_getparam_t - Driver parameter query structure. + * See struct drm_i915_getparam. + */ +typedef struct drm_i915_getparam drm_i915_getparam_t; /* Ioctl to set kernel params: */ @@ -1239,76 +1252,119 @@ struct drm_i915_gem_exec_object2 { __u64 rsvd2; }; +/** + * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf + * ioctl. + * + * The request will wait for input fence to signal before submission. + * + * The returned output fence will be signaled after the completion of the + * request. + */ struct drm_i915_gem_exec_fence { - /** - * User's handle for a drm_syncobj to wait on or signal. - */ + /** @handle: User's handle for a drm_syncobj to wait on or signal. */ __u32 handle; + /** + * @flags: Supported flags are: + * + * I915_EXEC_FENCE_WAIT: + * Wait for the input fence before request submission. + * + * I915_EXEC_FENCE_SIGNAL: + * Return request completion fence as output + */ + __u32 flags; #define I915_EXEC_FENCE_WAIT (1<<0) #define I915_EXEC_FENCE_SIGNAL (1<<1) #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1)) - __u32 flags; }; -/* - * See drm_i915_gem_execbuffer_ext_timeline_fences. - */ -#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 - -/* +/** + * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences + * for execbuf ioctl. + * * This structure describes an array of drm_syncobj and associated points for * timeline variants of drm_syncobj. It is invalid to append this structure to * the execbuf if I915_EXEC_FENCE_ARRAY is set. */ struct drm_i915_gem_execbuffer_ext_timeline_fences { +#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 + /** @base: Extension link. See struct i915_user_extension. */ struct i915_user_extension base; /** - * Number of element in the handles_ptr & value_ptr arrays. + * @fence_count: Number of elements in the @handles_ptr & @value_ptr + * arrays. */ __u64 fence_count; /** - * Pointer to an array of struct drm_i915_gem_exec_fence of length - * fence_count. 
+ * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence + * of length @fence_count. */ __u64 handles_ptr; /** - * Pointer to an array of u64 values of length fence_count. Values - * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline - * drm_syncobj is invalid as it turns a drm_syncobj into a binary one. + * @values_ptr: Pointer to an array of u64 values of length + * @fence_count. + * Values must be 0 for a binary drm_syncobj. A Value of 0 for a + * timeline drm_syncobj is invalid as it turns a drm_syncobj into a + * binary one. */ __u64 values_ptr; }; +/** + * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2 + * ioctl. + */ struct drm_i915_gem_execbuffer2 { - /** - * List of gem_exec_object2 structs - */ + /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */ __u64 buffers_ptr; + + /** @buffer_count: Number of elements in @buffers_ptr array */ __u32 buffer_count; - /** Offset in the batchbuffer to start execution from. */ + /** + * @batch_start_offset: Offset in the batchbuffer to start execution + * from. + */ __u32 batch_start_offset; - /** Bytes used in batchbuffer from batch_start_offset */ + + /** + * @batch_len: Length in bytes of the batch buffer, starting from the + * @batch_start_offset. If 0, length is assumed to be the batch buffer + * object size. + */ __u32 batch_len; + + /** @DR1: deprecated */ __u32 DR1; + + /** @DR4: deprecated */ __u32 DR4; + + /** @num_cliprects: See @cliprects_ptr */ __u32 num_cliprects; + /** - * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY - * & I915_EXEC_USE_EXTENSIONS are not set. + * @cliprects_ptr: Kernel clipping was a DRI1 misfeature. + * + * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or + * I915_EXEC_USE_EXTENSIONS flags are not set. * * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array - * of struct drm_i915_gem_exec_fence and num_cliprects is the length - * of the array. + * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the + * array. * * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a - * single struct i915_user_extension and num_cliprects is 0. + * single &i915_user_extension and num_cliprects is 0. */ __u64 cliprects_ptr; + + /** @flags: Execbuf flags */ + __u64 flags; #define I915_EXEC_RING_MASK (0x3f) #define I915_EXEC_DEFAULT (0<<0) #define I915_EXEC_RENDER (1<<0) @@ -1326,10 +1382,6 @@ struct drm_i915_gem_execbuffer2 { #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */ #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6) #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */ - __u64 flags; - __u64 rsvd1; /* now used for context info */ - __u64 rsvd2; -}; /** Resets the SO write offset registers for transform feedback on gen7. */ #define I915_EXEC_GEN7_SOL_RESET (1<<8) @@ -1432,9 +1484,23 @@ struct drm_i915_gem_execbuffer2 { * drm_i915_gem_execbuffer_ext enum. */ #define I915_EXEC_USE_EXTENSIONS (1 << 21) - #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1)) + /** @rsvd1: Context id */ + __u64 rsvd1; + + /** + * @rsvd2: in and out sync_file file descriptors. + * + * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the + * lower 32 bits of this field will have the in sync_file fd (input). + * + * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this + * field will have the out sync_file fd (output). 
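As the reworked kernel-doc above spells out, @rsvd1 carries the context id and @rsvd2 multiplexes the in/out sync_file fds. A hedged userspace sketch of submitting with an output fence, assuming libdrm's drmIoctl() and the _WR flavour of the execbuffer ioctl; objects, ctx_id and fd are caller-provided and error handling is trimmed:

#include <stdint.h>
#include <errno.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Sketch: submit a batch and return the out-fence fd (or -errno). */
static int example_submit_with_out_fence(int fd, __u32 ctx_id,
                                         struct drm_i915_gem_exec_object2 *objects,
                                         unsigned int nobjects)
{
        struct drm_i915_gem_execbuffer2 execbuf = {
                .buffers_ptr = (__u64)(uintptr_t)objects,
                .buffer_count = nobjects,
                .flags = I915_EXEC_RENDER | I915_EXEC_FENCE_OUT,
        };

        i915_execbuffer2_set_context_id(execbuf, ctx_id);       /* fills rsvd1 */

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf))
                return -errno;

        /* With I915_EXEC_FENCE_OUT, the upper 32 bits of rsvd2 hold the fd. */
        return (int)(execbuf.rsvd2 >> 32);
}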
+ */ + __u64 rsvd2; +}; + #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK @@ -1814,19 +1880,58 @@ struct drm_i915_gem_context_create { __u32 pad; }; +/** + * struct drm_i915_gem_context_create_ext - Structure for creating contexts. + */ struct drm_i915_gem_context_create_ext { - __u32 ctx_id; /* output: id of new context*/ + /** @ctx_id: Id of the created context (output) */ + __u32 ctx_id; + + /** + * @flags: Supported flags are: + * + * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS: + * + * Extensions may be appended to this structure and driver must check + * for those. See @extensions. + * + * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE + * + * Created context will have single timeline. + */ __u32 flags; #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) #define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1) #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1)) + + /** + * @extensions: Zero-terminated chain of extensions. + * + * I915_CONTEXT_CREATE_EXT_SETPARAM: + * Context parameter to set or query during context creation. + * See struct drm_i915_gem_context_create_ext_setparam. + * + * I915_CONTEXT_CREATE_EXT_CLONE: + * This extension has been removed. On the off chance someone somewhere + * has attempted to use it, never re-use this extension number. + */ __u64 extensions; +#define I915_CONTEXT_CREATE_EXT_SETPARAM 0 +#define I915_CONTEXT_CREATE_EXT_CLONE 1 }; +/** + * struct drm_i915_gem_context_param - Context parameter to set or query. + */ struct drm_i915_gem_context_param { + /** @ctx_id: Context id */ __u32 ctx_id; + + /** @size: Size of the parameter @value */ __u32 size; + + /** @param: Parameter to set or query */ __u64 param; #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 /* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance @@ -1973,6 +2078,7 @@ struct drm_i915_gem_context_param { #define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd /* Must be kept compact -- no holes and well documented */ + /** @value: Context parameter value to be set or queried */ __u64 value; }; @@ -2371,23 +2477,29 @@ struct i915_context_param_engines { struct i915_engine_class_instance engines[N__]; \ } __attribute__((packed)) name__ +/** + * struct drm_i915_gem_context_create_ext_setparam - Context parameter + * to set or query during context creation. + */ struct drm_i915_gem_context_create_ext_setparam { -#define I915_CONTEXT_CREATE_EXT_SETPARAM 0 + /** @base: Extension link. See struct i915_user_extension. */ struct i915_user_extension base; + + /** + * @param: Context parameter to set or query. + * See struct drm_i915_gem_context_param. + */ struct drm_i915_gem_context_param param; }; -/* This API has been removed. On the off chance someone somewhere has - * attempted to use it, never re-use this extension number. - */ -#define I915_CONTEXT_CREATE_EXT_CLONE 1 - struct drm_i915_gem_context_destroy { __u32 ctx_id; __u32 pad; }; -/* +/** + * struct drm_i915_gem_vm_control - Structure to create or destroy VM. + * * DRM_I915_GEM_VM_CREATE - * * Create a new virtual memory address space (ppGTT) for use within a context @@ -2397,20 +2509,23 @@ struct drm_i915_gem_context_destroy { * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is * returned in the outparam @id. * - * No flags are defined, with all bits reserved and must be zero. 
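The newly documented extension chain can be exercised by hanging a drm_i915_gem_context_create_ext_setparam off @extensions, for instance to bind the new context to a previously created VM. A sketch under the assumption that libdrm's drmIoctl() and the standard DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT ioctl are used, with vm_id coming from an earlier VM_CREATE call:

#include <stdint.h>
#include <errno.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int example_create_context_with_vm(int fd, __u32 vm_id, __u32 *ctx_id)
{
        struct drm_i915_gem_context_create_ext_setparam setparam = {
                .base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
                .param = {
                        .param = I915_CONTEXT_PARAM_VM,
                        .value = vm_id,
                },
        };
        struct drm_i915_gem_context_create_ext create = {
                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
                /* single-element, zero-terminated extension chain */
                .extensions = (__u64)(uintptr_t)&setparam,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
                return -errno;

        *ctx_id = create.ctx_id;
        return 0;
}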
- * * An extension chain maybe provided, starting with @extensions, and terminated * by the @next_extension being 0. Currently, no extensions are defined. * * DRM_I915_GEM_VM_DESTROY - * - * Destroys a previously created VM id, specified in @id. + * Destroys a previously created VM id, specified in @vm_id. * * No extensions or flags are allowed currently, and so must be zero. */ struct drm_i915_gem_vm_control { + /** @extensions: Zero-terminated chain of extensions. */ __u64 extensions; + + /** @flags: reserved for future usage, currently MBZ */ __u32 flags; + + /** @vm_id: Id of the VM created or to be destroyed */ __u32 vm_id; }; @@ -3207,36 +3322,6 @@ struct drm_i915_gem_memory_class_instance { * struct drm_i915_memory_region_info - Describes one region as known to the * driver. * - * Note that we reserve some stuff here for potential future work. As an example - * we might want expose the capabilities for a given region, which could include - * things like if the region is CPU mappable/accessible, what are the supported - * mapping types etc. - * - * Note that to extend struct drm_i915_memory_region_info and struct - * drm_i915_query_memory_regions in the future the plan is to do the following: - * - * .. code-block:: C - * - * struct drm_i915_memory_region_info { - * struct drm_i915_gem_memory_class_instance region; - * union { - * __u32 rsvd0; - * __u32 new_thing1; - * }; - * ... - * union { - * __u64 rsvd1[8]; - * struct { - * __u64 new_thing2; - * __u64 new_thing3; - * ... - * }; - * }; - * }; - * - * With this things should remain source compatible between versions for - * userspace, even as we add new fields. - * * Note this is using both struct drm_i915_query_item and struct drm_i915_query. * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS * at &drm_i915_query_item.query_id. @@ -3248,14 +3333,81 @@ struct drm_i915_memory_region_info { /** @rsvd0: MBZ */ __u32 rsvd0; - /** @probed_size: Memory probed by the driver (-1 = unknown) */ + /** + * @probed_size: Memory probed by the driver + * + * Note that it should not be possible to ever encounter a zero value + * here, also note that no current region type will ever return -1 here. + * Although for future region types, this might be a possibility. The + * same applies to the other size fields. + */ __u64 probed_size; - /** @unallocated_size: Estimate of memory remaining (-1 = unknown) */ + /** + * @unallocated_size: Estimate of memory remaining + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting. + * Without this (or if this is an older kernel) the value here will + * always equal the @probed_size. Note this is only currently tracked + * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here + * will always equal the @probed_size). + */ __u64 unallocated_size; - /** @rsvd1: MBZ */ - __u64 rsvd1[8]; + union { + /** @rsvd1: MBZ */ + __u64 rsvd1[8]; + struct { + /** + * @probed_cpu_visible_size: Memory probed by the driver + * that is CPU accessible. + * + * This will be always be <= @probed_size, and the + * remainder (if there is any) will not be CPU + * accessible. + * + * On systems without small BAR, the @probed_size will + * always equal the @probed_cpu_visible_size, since all + * of it will be CPU accessible. + * + * Note this is only tracked for + * I915_MEMORY_CLASS_DEVICE regions (for other types the + * value here will always equal the @probed_size). 
+ * + * Note that if the value returned here is zero, then + * this must be an old kernel which lacks the relevant + * small-bar uAPI support (including + * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on + * such systems we should never actually end up with a + * small BAR configuration, assuming we are able to load + * the kernel module. Hence it should be safe to treat + * this the same as when @probed_cpu_visible_size == + * @probed_size. + */ + __u64 probed_cpu_visible_size; + + /** + * @unallocated_cpu_visible_size: Estimate of CPU + * visible memory remaining. + * + * Note this is only tracked for + * I915_MEMORY_CLASS_DEVICE regions (for other types the + * value here will always equal the + * @probed_cpu_visible_size). + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable + * accounting. Without this the value here will always + * equal the @probed_cpu_visible_size. Note this is only + * currently tracked for I915_MEMORY_CLASS_DEVICE + * regions (for other types the value here will also + * always equal the @probed_cpu_visible_size). + * + * If this is an older kernel the value here will be + * zero, see also @probed_cpu_visible_size. + */ + __u64 unallocated_cpu_visible_size; + }; + }; }; /** @@ -3329,11 +3481,11 @@ struct drm_i915_query_memory_regions { * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added * extension support using struct i915_user_extension. * - * Note that in the future we want to have our buffer flags here, at least for - * the stuff that is immutable. Previously we would have two ioctls, one to - * create the object with gem_create, and another to apply various parameters, - * however this creates some ambiguity for the params which are considered - * immutable. Also in general we're phasing out the various SET/GET ioctls. + * Note that new buffer flags should be added here, at least for the stuff that + * is immutable. Previously we would have two ioctls, one to create the object + * with gem_create, and another to apply various parameters, however this + * creates some ambiguity for the params which are considered immutable. Also in + * general we're phasing out the various SET/GET ioctls. */ struct drm_i915_gem_create_ext { /** @@ -3341,7 +3493,6 @@ struct drm_i915_gem_create_ext { * * The (page-aligned) allocated size for the object will be returned. * - * * DG2 64K min page size implications: * * On discrete platforms, starting from DG2, we have to contend with GTT @@ -3353,7 +3504,9 @@ struct drm_i915_gem_create_ext { * * Note that the returned size here will always reflect any required * rounding up done by the kernel, i.e 4K will now become 64K on devices - * such as DG2. + * such as DG2. The kernel will always select the largest minimum + * page-size for the set of possible placements as the value to use when + * rounding up the @size. * * Special DG2 GTT address alignment requirement: * @@ -3377,14 +3530,58 @@ struct drm_i915_gem_create_ext { * is deemed to be a good compromise. */ __u64 size; + /** * @handle: Returned handle for the object. * * Object handles are nonzero. */ __u32 handle; - /** @flags: MBZ */ + + /** + * @flags: Optional flags. + * + * Supported values: + * + * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that + * the object will need to be accessed via the CPU. 
+ * + * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only + * strictly required on configurations where some subset of the device + * memory is directly visible/mappable through the CPU (which we also + * call small BAR), like on some DG2+ systems. Note that this is quite + * undesirable, but due to various factors like the client CPU, BIOS etc + * it's something we can expect to see in the wild. See + * &drm_i915_memory_region_info.probed_cpu_visible_size for how to + * determine if this system applies. + * + * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to + * ensure the kernel can always spill the allocation to system memory, + * if the object can't be allocated in the mappable part of + * I915_MEMORY_CLASS_DEVICE. + * + * Also note that since the kernel only supports flat-CCS on objects + * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore + * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with + * flat-CCS. + * + * Without this hint, the kernel will assume that non-mappable + * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the + * kernel can still migrate the object to the mappable part, as a last + * resort, if userspace ever CPU faults this object, but this might be + * expensive, and so ideally should be avoided. + * + * On older kernels which lack the relevant small-bar uAPI support (see + * also &drm_i915_memory_region_info.probed_cpu_visible_size), + * usage of the flag will result in an error, but it should NEVER be + * possible to end up with a small BAR configuration, assuming we can + * also successfully load the i915 kernel module. In such cases the + * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as + * such there are zero restrictions on where the object can be placed. + */ +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0) __u32 flags; + /** * @extensions: The chain of extensions to apply to this object. * @@ -3443,6 +3640,22 @@ struct drm_i915_gem_create_ext { * At which point we get the object handle in &drm_i915_gem_create_ext.handle, * along with the final object size in &drm_i915_gem_create_ext.size, which * should account for any rounding up, if required. + * + * Note that userspace has no means of knowing the current backing region + * for objects where @num_regions is larger than one. The kernel will only + * ensure that the priority order of the @regions array is honoured, either + * when initially placing the object, or when moving memory around due to + * memory pressure + * + * On Flat-CCS capable HW, compression is supported for the objects residing + * in I915_MEMORY_CLASS_DEVICE. When such objects (compressed) have other + * memory class in @regions and migrated (by i915, due to memory + * constraints) to the non I915_MEMORY_CLASS_DEVICE region, then i915 needs to + * decompress the content. But i915 doesn't have the required information to + * decompress the userspace compressed objects. + * + * So i915 supports Flat-CCS, on the objects which can reside only on + * I915_MEMORY_CLASS_DEVICE regions. */ struct drm_i915_gem_create_ext_memory_regions { /** @base: Extension link. See struct i915_user_extension. 
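Tying the small-BAR pieces together: a CPU-mappable object would be created with I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS and a placement list that includes I915_MEMORY_CLASS_SYSTEM so the kernel can always spill. A hedged userspace sketch, assuming libdrm, DRM_IOCTL_I915_GEM_CREATE_EXT and memory instance 0 (real placements should come from the memory-regions query), with error handling trimmed:

#include <stdint.h>
#include <errno.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int example_create_cpu_visible_bo(int fd, __u64 size, __u32 *handle)
{
        struct drm_i915_gem_memory_class_instance placements[] = {
                { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
                { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
        };
        struct drm_i915_gem_create_ext_memory_regions regions = {
                .base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
                .num_regions = 2,
                .regions = (__u64)(uintptr_t)placements,
        };
        struct drm_i915_gem_create_ext create = {
                .size = size,   /* may be rounded up, e.g. to 64K on DG2 */
                .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
                .extensions = (__u64)(uintptr_t)&regions,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
                return -errno;

        *handle = create.handle;
        return 0;
}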
*/ diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 986333cf5bbe..e72e4de8f452 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -287,6 +287,7 @@ enum transaction_flags { TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */ + TF_UPDATE_TXN = 0x40, /* update the outdated pending async txn */ }; struct binder_transaction_data { diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h new file mode 100644 index 000000000000..5135027b93c1 --- /dev/null +++ b/include/uapi/linux/atm_zatm.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* atm_zatm.h - Driver-specific declarations of the ZATM driver (for use by + driver-specific utilities) */ + +/* Written 1995-1999 by Werner Almesberger, EPFL LRC/ICA */ + + +#ifndef LINUX_ATM_ZATM_H +#define LINUX_ATM_ZATM_H + +/* + * Note: non-kernel programs including this file must also include + * sys/types.h for struct timeval + */ + +#include <linux/atmapi.h> +#include <linux/atmioc.h> + +#define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) + /* get pool statistics */ +#define ZATM_GETPOOLZ _IOW('a',ATMIOC_SARPRV+2,struct atmif_sioc) + /* get statistics and zero */ +#define ZATM_SETPOOL _IOW('a',ATMIOC_SARPRV+3,struct atmif_sioc) + /* set pool parameters */ + +struct zatm_pool_info { + int ref_count; /* free buffer pool usage counters */ + int low_water,high_water; /* refill parameters */ + int rqa_count,rqu_count; /* queue condition counters */ + int offset,next_off; /* alignment optimizations: offset */ + int next_cnt,next_thres; /* repetition counter and threshold */ +}; + +struct zatm_pool_req { + int pool_num; /* pool number */ + struct zatm_pool_info info; /* actual information */ +}; + +#define ZATM_OAM_POOL 0 /* free buffer pool for OAM cells */ +#define ZATM_AAL0_POOL 1 /* free buffer pool for AAL0 cells */ +#define ZATM_AAL5_POOL_BASE 2 /* first AAL5 free buffer pool */ +#define ZATM_LAST_POOL ZATM_AAL5_POOL_BASE+10 /* max. 64 kB */ + +#define ZATM_TIMER_HISTORY_SIZE 16 /* number of timer adjustments to + record; must be 2^n */ + +#endif diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ff225aa3f16a..7bf9ba1329be 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -998,6 +998,7 @@ enum bpf_attach_type { BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, BPF_PERF_EVENT, BPF_TRACE_KPROBE_MULTI, + BPF_LSM_CGROUP, __MAX_BPF_ATTACH_TYPE }; @@ -1431,6 +1432,7 @@ union bpf_attr { __u32 attach_flags; __aligned_u64 prog_ids; __u32 prog_cnt; + __aligned_u64 prog_attach_flags; /* output: per-program attach_flags */ } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ @@ -2359,7 +2361,8 @@ union bpf_attr { * Pull in non-linear data in case the *skb* is non-linear and not * all of *len* are part of the linear section. Make *len* bytes * from *skb* readable and writable. If a zero value is passed for - * *len*, then the whole length of the *skb* is pulled. + * *len*, then all bytes in the linear part of *skb* will be made + * readable and writable. * * This helper is only needed for reading and writing with direct * packet access. @@ -3597,10 +3600,11 @@ union bpf_attr { * * *iph* points to the start of the IPv4 or IPv6 header, while * *iph_len* contains **sizeof**\ (**struct iphdr**) or - * **sizeof**\ (**struct ip6hdr**). 
+ * **sizeof**\ (**struct ipv6hdr**). * * *th* points to the start of the TCP header, while *th_len* - * contains **sizeof**\ (**struct tcphdr**). + * contains the length of the TCP header (at least + * **sizeof**\ (**struct tcphdr**)). * Return * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. @@ -3783,10 +3787,11 @@ union bpf_attr { * * *iph* points to the start of the IPv4 or IPv6 header, while * *iph_len* contains **sizeof**\ (**struct iphdr**) or - * **sizeof**\ (**struct ip6hdr**). + * **sizeof**\ (**struct ipv6hdr**). * * *th* points to the start of the TCP header, while *th_len* - * contains the length of the TCP header. + * contains the length of the TCP header with options (at least + * **sizeof**\ (**struct tcphdr**)). * Return * On success, lower 32 bits hold the generated SYN cookie in * followed by 16 bits which hold the MSS value for that cookie, @@ -5252,6 +5257,80 @@ union bpf_attr { * Pointer to the underlying dynptr data, NULL if the dynptr is * read-only, if the dynptr is invalid, or if the offset and length * is out of bounds. + * + * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len) + * Description + * Try to issue a SYN cookie for the packet with corresponding + * IPv4/TCP headers, *iph* and *th*, without depending on a + * listening socket. + * + * *iph* points to the IPv4 header. + * + * *th* points to the start of the TCP header, while *th_len* + * contains the length of the TCP header (at least + * **sizeof**\ (**struct tcphdr**)). + * Return + * On success, lower 32 bits hold the generated SYN cookie in + * followed by 16 bits which hold the MSS value for that cookie, + * and the top 16 bits are unused. + * + * On failure, the returned value is one of the following: + * + * **-EINVAL** if *th_len* is invalid. + * + * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len) + * Description + * Try to issue a SYN cookie for the packet with corresponding + * IPv6/TCP headers, *iph* and *th*, without depending on a + * listening socket. + * + * *iph* points to the IPv6 header. + * + * *th* points to the start of the TCP header, while *th_len* + * contains the length of the TCP header (at least + * **sizeof**\ (**struct tcphdr**)). + * Return + * On success, lower 32 bits hold the generated SYN cookie in + * followed by 16 bits which hold the MSS value for that cookie, + * and the top 16 bits are unused. + * + * On failure, the returned value is one of the following: + * + * **-EINVAL** if *th_len* is invalid. + * + * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. + * + * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th) + * Description + * Check whether *iph* and *th* contain a valid SYN cookie ACK + * without depending on a listening socket. + * + * *iph* points to the IPv4 header. + * + * *th* points to the TCP header. + * Return + * 0 if *iph* and *th* are a valid SYN cookie ACK. + * + * On failure, the returned value is one of the following: + * + * **-EACCES** if the SYN cookie is not valid. + * + * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th) + * Description + * Check whether *iph* and *th* contain a valid SYN cookie ACK + * without depending on a listening socket. + * + * *iph* points to the IPv6 header. + * + * *th* points to the TCP header. + * Return + * 0 if *iph* and *th* are a valid SYN cookie ACK. 
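The raw SYN-cookie helpers documented here let a program issue and validate cookies without a listening socket, e.g. for SYN-proxying from XDP or tc. A rough BPF-side sketch of the IPv4 path, assuming iph/tcph were already parsed and bounds-checked and that the helper declarations are provided by a sufficiently new libbpf/kernel toolchain:

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>

static __always_inline int example_gen_cookie(struct iphdr *iph,
                                              struct tcphdr *tcph,
                                              __u32 tcp_hdr_len,
                                              __u32 *cookie, __u16 *mss)
{
        __s64 ret = bpf_tcp_raw_gen_syncookie_ipv4(iph, tcph, tcp_hdr_len);

        if (ret < 0)
                return -1;              /* e.g. -EINVAL for a bad th_len */

        *cookie = (__u32)ret;           /* lower 32 bits: the cookie */
        *mss = (__u16)(ret >> 32);      /* next 16 bits: MSS used for it */
        return 0;
}

static __always_inline int example_check_cookie(struct iphdr *iph,
                                                struct tcphdr *tcph)
{
        /* 0 means the ACK carries a valid cookie, -EACCES otherwise. */
        return bpf_tcp_raw_check_syncookie_ipv4(iph, tcph);
}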
+ * + * On failure, the returned value is one of the following: + * + * **-EACCES** if the SYN cookie is not valid. + * + * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -5458,6 +5537,10 @@ union bpf_attr { FN(dynptr_read), \ FN(dynptr_write), \ FN(dynptr_data), \ + FN(tcp_raw_gen_syncookie_ipv4), \ + FN(tcp_raw_gen_syncookie_ipv6), \ + FN(tcp_raw_check_syncookie_ipv4), \ + FN(tcp_raw_check_syncookie_ipv6), \ /* */ /* integer value in 'imm' field of BPF_CALL instruction selects which helper @@ -5998,6 +6081,8 @@ struct bpf_prog_info { __u64 run_cnt; __u64 recursion_misses; __u32 verified_insns; + __u32 attach_btf_obj_id; + __u32 attach_btf_id; } __attribute__((aligned(8))); struct bpf_map_info { @@ -6705,6 +6790,7 @@ enum bpf_core_relo_kind { BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */ + BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */ }; /* diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index a9162a6c0284..ec1798b6d3ff 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -36,10 +36,10 @@ struct btf_type { * bits 24-28: kind (e.g. int, ptr, array...etc) * bits 29-30: unused * bit 31: kind_flag, currently used by - * struct, union and fwd + * struct, union, enum, fwd and enum64 */ __u32 info; - /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC. + /* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64. * "size" tells the size of the type it is describing. * * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, @@ -63,7 +63,7 @@ enum { BTF_KIND_ARRAY = 3, /* Array */ BTF_KIND_STRUCT = 4, /* Struct */ BTF_KIND_UNION = 5, /* Union */ - BTF_KIND_ENUM = 6, /* Enumeration */ + BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */ BTF_KIND_FWD = 7, /* Forward */ BTF_KIND_TYPEDEF = 8, /* Typedef */ BTF_KIND_VOLATILE = 9, /* Volatile */ @@ -76,6 +76,7 @@ enum { BTF_KIND_FLOAT = 16, /* Floating point */ BTF_KIND_DECL_TAG = 17, /* Decl Tag */ BTF_KIND_TYPE_TAG = 18, /* Type Tag */ + BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */ NR_BTF_KINDS, BTF_KIND_MAX = NR_BTF_KINDS - 1, @@ -186,4 +187,14 @@ struct btf_decl_tag { __s32 component_idx; }; +/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64". + * The exact number of btf_enum64 is stored in the vlen (of the + * info in "struct btf_type"). + */ +struct btf_enum64 { + __u32 name_off; + __u32 val_lo32; + __u32 val_hi32; +}; + #endif /* _UAPI__LINUX_BTF_H__ */ diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 3d0edbe3b991..7ada84e4a3ed 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -777,11 +777,19 @@ struct btrfs_ioctl_received_subvol_args { */ #define BTRFS_SEND_FLAG_VERSION 0x8 +/* + * Send compressed data using the ENCODED_WRITE command instead of decompressing + * the data and sending it with the WRITE command. This requires protocol + * version >= 2. 
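Since BTF_KIND_ENUM64 stores each 64-bit enumerator as two 32-bit halves, a consumer reassembles the value from val_lo32/val_hi32; the kind_flag then says whether to interpret the result as signed. A small sketch of that reassembly (the helper name is illustrative):

#include <linux/btf.h>

/* Recombine the split 64-bit enumerator value of a BTF_KIND_ENUM64 member. */
static inline __u64 example_btf_enum64_value(const struct btf_enum64 *e)
{
        return ((__u64)e->val_hi32 << 32) | e->val_lo32;
}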
+ */ +#define BTRFS_SEND_FLAG_COMPRESSED 0x10 + #define BTRFS_SEND_FLAG_MASK \ (BTRFS_SEND_FLAG_NO_FILE_DATA | \ BTRFS_SEND_FLAG_OMIT_STREAM_HEADER | \ BTRFS_SEND_FLAG_OMIT_END_CMD | \ - BTRFS_SEND_FLAG_VERSION) + BTRFS_SEND_FLAG_VERSION | \ + BTRFS_SEND_FLAG_COMPRESSED) struct btrfs_ioctl_send_args { __s64 send_fd; /* in */ diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h index 34633283de64..acc1ac393d2a 100644 --- a/include/uapi/linux/can/error.h +++ b/include/uapi/linux/can/error.h @@ -57,6 +57,8 @@ #define CAN_ERR_BUSOFF 0x00000040U /* bus off */ #define CAN_ERR_BUSERROR 0x00000080U /* bus error (may flood!) */ #define CAN_ERR_RESTARTED 0x00000100U /* controller restarted */ +#define CAN_ERR_CNT 0x00000200U /* TX error counter / data[6] */ + /* RX error counter / data[7] */ /* arbitration lost in bit ... / data[0] */ #define CAN_ERR_LOSTARB_UNSPEC 0x00 /* unspecified */ @@ -120,6 +122,22 @@ #define CAN_ERR_TRX_CANL_SHORT_TO_GND 0x70 /* 0111 0000 */ #define CAN_ERR_TRX_CANL_SHORT_TO_CANH 0x80 /* 1000 0000 */ -/* controller specific additional information / data[5..7] */ +/* data[5] is reserved (do not use) */ + +/* TX error counter / data[6] */ +/* RX error counter / data[7] */ + +/* CAN state thresholds + * + * Error counter Error state + * ----------------------------------- + * 0 - 95 Error-active + * 96 - 127 Error-warning + * 128 - 255 Error-passive + * 256 and greater Bus-off + */ +#define CAN_ERROR_WARNING_THRESHOLD 96 +#define CAN_ERROR_PASSIVE_THRESHOLD 128 +#define CAN_BUS_OFF_THRESHOLD 256 #endif /* _UAPI_CAN_ERROR_H */ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index b3d40a5d72ff..2f24b53a87a5 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -136,6 +136,9 @@ enum devlink_command { DEVLINK_CMD_LINECARD_NEW, DEVLINK_CMD_LINECARD_DEL, + DEVLINK_CMD_SELFTESTS_GET, /* can dump */ + DEVLINK_CMD_SELFTESTS_RUN, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -276,6 +279,30 @@ enum { #define DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS \ (_BITUL(__DEVLINK_FLASH_OVERWRITE_MAX_BIT) - 1) +enum devlink_attr_selftest_id { + DEVLINK_ATTR_SELFTEST_ID_UNSPEC, + DEVLINK_ATTR_SELFTEST_ID_FLASH, /* flag */ + + __DEVLINK_ATTR_SELFTEST_ID_MAX, + DEVLINK_ATTR_SELFTEST_ID_MAX = __DEVLINK_ATTR_SELFTEST_ID_MAX - 1 +}; + +enum devlink_selftest_status { + DEVLINK_SELFTEST_STATUS_SKIP, + DEVLINK_SELFTEST_STATUS_PASS, + DEVLINK_SELFTEST_STATUS_FAIL +}; + +enum devlink_attr_selftest_result { + DEVLINK_ATTR_SELFTEST_RESULT_UNSPEC, + DEVLINK_ATTR_SELFTEST_RESULT, /* nested */ + DEVLINK_ATTR_SELFTEST_RESULT_ID, /* u32, enum devlink_attr_selftest_id */ + DEVLINK_ATTR_SELFTEST_RESULT_STATUS, /* u8, enum devlink_selftest_status */ + + __DEVLINK_ATTR_SELFTEST_RESULT_MAX, + DEVLINK_ATTR_SELFTEST_RESULT_MAX = __DEVLINK_ATTR_SELFTEST_RESULT_MAX - 1 +}; + /** * enum devlink_trap_action - Packet trap action. 
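With CAN_ERR_CNT set, data[6]/data[7] of an error frame carry the TX/RX error counters, and the thresholds defined above map a counter to a controller state. A hedged decoding sketch; the state enum below is purely illustrative rather than the can_state values from the netlink uapi:

#include <linux/can/error.h>

enum example_can_state {
        EX_ERROR_ACTIVE,
        EX_ERROR_WARNING,
        EX_ERROR_PASSIVE,
        EX_BUS_OFF,
};

/* Classify one error counter (TX from data[6] or RX from data[7]).
 * A count of 256+ does not fit in the one-byte frame fields, so bus-off
 * is normally signalled via CAN_ERR_BUSOFF rather than derived here. */
static enum example_can_state example_classify(unsigned int err_cnt)
{
        if (err_cnt >= CAN_BUS_OFF_THRESHOLD)
                return EX_BUS_OFF;
        if (err_cnt >= CAN_ERROR_PASSIVE_THRESHOLD)
                return EX_ERROR_PASSIVE;
        if (err_cnt >= CAN_ERROR_WARNING_THRESHOLD)
                return EX_ERROR_WARNING;
        return EX_ERROR_ACTIVE;
}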
* @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not @@ -576,6 +603,10 @@ enum devlink_attr { DEVLINK_ATTR_LINECARD_TYPE, /* string */ DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES, /* nested */ + DEVLINK_ATTR_NESTED_DEVLINK, /* nested */ + + DEVLINK_ATTR_SELFTESTS, /* nested */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h index b1523cb8ab30..5a6fda66d9ad 100644 --- a/include/uapi/linux/dma-buf.h +++ b/include/uapi/linux/dma-buf.h @@ -85,6 +85,88 @@ struct dma_buf_sync { #define DMA_BUF_NAME_LEN 32 +/** + * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf + * + * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the + * current set of fences on a dma-buf file descriptor as a sync_file. CPU + * waits via poll() or other driver-specific mechanisms typically wait on + * whatever fences are on the dma-buf at the time the wait begins. This + * is similar except that it takes a snapshot of the current fences on the + * dma-buf for waiting later instead of waiting immediately. This is + * useful for modern graphics APIs such as Vulkan which assume an explicit + * synchronization model but still need to inter-operate with dma-buf. + * + * The intended usage pattern is the following: + * + * 1. Export a sync_file with flags corresponding to the expected GPU usage + * via DMA_BUF_IOCTL_EXPORT_SYNC_FILE. + * + * 2. Submit rendering work which uses the dma-buf. The work should wait on + * the exported sync file before rendering and produce another sync_file + * when complete. + * + * 3. Import the rendering-complete sync_file into the dma-buf with flags + * corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE. + * + * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl, + * the above is not a single atomic operation. If userspace wants to ensure + * ordering via these fences, it is the respnosibility of userspace to use + * locks or other mechanisms to ensure that no other context adds fences or + * submits work between steps 1 and 3 above. + */ +struct dma_buf_export_sync_file { + /** + * @flags: Read/write flags + * + * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. + * + * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, + * the returned sync file waits on any writers of the dma-buf to + * complete. Waiting on the returned sync file is equivalent to + * poll() with POLLIN. + * + * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on + * any users of the dma-buf (read or write) to complete. Waiting + * on the returned sync file is equivalent to poll() with POLLOUT. + * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this + * is equivalent to just DMA_BUF_SYNC_WRITE. + */ + __u32 flags; + /** @fd: Returned sync file descriptor */ + __s32 fd; +}; + +/** + * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf + * + * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a + * sync_file into a dma-buf for the purposes of implicit synchronization + * with other dma-buf consumers. This allows clients using explicitly + * synchronized APIs such as Vulkan to inter-op with dma-buf consumers + * which expect implicit synchronization such as OpenGL or most media + * drivers/video. + */ +struct dma_buf_import_sync_file { + /** + * @flags: Read/write flags + * + * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. 
+ * + * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, + * this inserts the sync_file as a read-only fence. Any subsequent + * implicitly synchronized writes to this dma-buf will wait on this + * fence but reads will not. + * + * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a + * write fence. All subsequent implicitly synchronized access to + * this dma-buf will wait on this fence. + */ + __u32 flags; + /** @fd: Sync file descriptor */ + __s32 fd; +}; + #define DMA_BUF_BASE 'b' #define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) @@ -94,5 +176,7 @@ struct dma_buf_sync { #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) #define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32) #define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64) +#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file) +#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file) #endif diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 2b9f5e9985e5..c7b056af9ef0 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -420,6 +420,7 @@ typedef struct elf64_shdr { #define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */ #define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */ #define NT_S390_RI_CB 0x30d /* s390 runtime instrumentation */ +#define NT_S390_PV_CPU_DATA 0x30e /* s390 protvirt cpu dump data */ #define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ #define NT_ARM_TLS 0x401 /* ARM TLS register */ #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h index 352a822d4370..3121d127d5aa 100644 --- a/include/uapi/linux/f2fs.h +++ b/include/uapi/linux/f2fs.h @@ -13,7 +13,7 @@ #define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2) #define F2FS_IOC_START_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 3) #define F2FS_IOC_RELEASE_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 4) -#define F2FS_IOC_ABORT_VOLATILE_WRITE _IO(F2FS_IOCTL_MAGIC, 5) +#define F2FS_IOC_ABORT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 5) #define F2FS_IOC_GARBAGE_COLLECT _IOW(F2FS_IOCTL_MAGIC, 6, __u32) #define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7) #define F2FS_IOC_DEFRAGMENT _IOWR(F2FS_IOCTL_MAGIC, 8, \ diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index d83f214b4134..ddba3ca01e39 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h @@ -87,6 +87,8 @@ enum { __CTRL_ATTR_MCAST_GRP_MAX, }; +#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1) + enum { CTRL_ATTR_POLICY_UNSPEC, CTRL_ATTR_POLICY_DO, @@ -96,7 +98,6 @@ enum { CTRL_ATTR_POLICY_DUMP_MAX = __CTRL_ATTR_POLICY_DUMP_MAX - 1 }; -#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1) - +#define CTRL_ATTR_POLICY_MAX (__CTRL_ATTR_POLICY_DUMP_MAX - 1) #endif /* _UAPI__LINUX_GENERIC_NETLINK_H */ diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index bce7c43657d5..095299c75828 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -89,14 +89,14 @@ enum iax_opcode { IAX_OPCODE_CRC64, IAX_OPCODE_ZERO_DECOMP_32 = 0x48, IAX_OPCODE_ZERO_DECOMP_16, - IAX_OPCODE_DECOMP_32 = 0x4c, - IAX_OPCODE_DECOMP_16, + IAX_OPCODE_ZERO_COMP_32 = 0x4c, + IAX_OPCODE_ZERO_COMP_16, IAX_OPCODE_SCAN = 0x50, IAX_OPCODE_SET_MEMBER, IAX_OPCODE_EXTRACT, IAX_OPCODE_SELECT, IAX_OPCODE_RLE_BURST, - IAX_OPCDE_FIND_UNIQUE, + IAX_OPCODE_FIND_UNIQUE, 
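Putting the two new dma-buf ioctls together with the usage pattern described in their kernel-doc: export a snapshot fence before submitting explicit-sync work (step 1), then import the work's completion fence back for implicit-sync consumers (step 3). A hedged userspace sketch, with the rendering submission itself and error handling omitted:

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Step 1: snapshot the current fences on the dma-buf as a sync_file fd. */
static int example_export_fence(int dmabuf_fd, __u32 flags)
{
        struct dma_buf_export_sync_file arg = { .flags = flags };

        if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
                return -1;
        return arg.fd;
}

/* Step 3: attach the render-complete sync_file back onto the dma-buf. */
static int example_import_fence(int dmabuf_fd, int sync_file_fd, __u32 flags)
{
        struct dma_buf_import_sync_file arg = {
                .flags = flags, /* DMA_BUF_SYNC_READ and/or DMA_BUF_SYNC_WRITE */
                .fd = sync_file_fd,
        };

        return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg);
}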
IAX_OPCODE_EXPAND, }; diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 1d0bccc3fa54..d370165bc621 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -116,6 +116,7 @@ #define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */ #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ #define ETH_P_DSA_8021Q 0xDADB /* Fake VLAN Header for DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_DSA_A5PSW 0xE001 /* A5PSW Tag Value [ NOT AN OFFICIALLY REGISTERED ID ] */ #define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */ #define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 5f58dcfe2787..e36d9d2c65a7 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -963,6 +963,7 @@ enum { IFLA_BOND_SLAVE_AD_AGGREGATOR_ID, IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE, IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE, + IFLA_BOND_SLAVE_PRIO, __IFLA_BOND_SLAVE_MAX, }; diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 42975e940758..42b60198b6c5 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -34,9 +34,12 @@ * - 1.6 - Query clear flags in SVM get_attr API * - 1.7 - Checkpoint Restore (CRIU) API * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs + * - 1.9 - Add available memory ioctl + * - 1.10 - Add SMI profiler event log + * - 1.11 - Add unified memory for ctx save/restore area */ #define KFD_IOCTL_MAJOR_VERSION 1 -#define KFD_IOCTL_MINOR_VERSION 8 +#define KFD_IOCTL_MINOR_VERSION 11 struct kfd_ioctl_get_version_args { __u32 major_version; /* from KFD */ @@ -100,6 +103,12 @@ struct kfd_ioctl_get_queue_wave_state_args { __u32 pad; }; +struct kfd_ioctl_get_available_memory_args { + __u64 available; /* from KFD */ + __u32 gpu_id; /* to KFD */ + __u32 pad; +}; + /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */ #define KFD_IOC_CACHE_POLICY_COHERENT 0 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1 @@ -463,6 +472,43 @@ enum kfd_smi_event { KFD_SMI_EVENT_THERMAL_THROTTLE = 2, KFD_SMI_EVENT_GPU_PRE_RESET = 3, KFD_SMI_EVENT_GPU_POST_RESET = 4, + KFD_SMI_EVENT_MIGRATE_START = 5, + KFD_SMI_EVENT_MIGRATE_END = 6, + KFD_SMI_EVENT_PAGE_FAULT_START = 7, + KFD_SMI_EVENT_PAGE_FAULT_END = 8, + KFD_SMI_EVENT_QUEUE_EVICTION = 9, + KFD_SMI_EVENT_QUEUE_RESTORE = 10, + KFD_SMI_EVENT_UNMAP_FROM_GPU = 11, + + /* + * max event number, used as a flag bit to request events from all + * processes; this requires superuser permission, otherwise no events + * will be received from any process. Without this flag, only events + * from the same process are received.
+ */ + KFD_SMI_EVENT_ALL_PROCESS = 64 +}; + +enum KFD_MIGRATE_TRIGGERS { + KFD_MIGRATE_TRIGGER_PREFETCH, + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, + KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, + KFD_MIGRATE_TRIGGER_TTM_EVICTION +}; + +enum KFD_QUEUE_EVICTION_TRIGGERS { + KFD_QUEUE_EVICTION_TRIGGER_SVM, + KFD_QUEUE_EVICTION_TRIGGER_USERPTR, + KFD_QUEUE_EVICTION_TRIGGER_TTM, + KFD_QUEUE_EVICTION_TRIGGER_SUSPEND, + KFD_QUEUE_EVICTION_CRIU_CHECKPOINT, + KFD_QUEUE_EVICTION_CRIU_RESTORE +}; + +enum KFD_SVM_UNMAP_TRIGGERS { + KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY, + KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE, + KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU }; #define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1)) @@ -568,6 +614,8 @@ enum kfd_mmio_remap { #define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010 /* GPUs mostly read, may allow similar optimizations as RO, but writes fault */ #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020 +/* Keep GPU memory mapping always valid as if XNACK is disabled */ +#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED 0x00000040 /** * kfd_ioctl_svm_op - SVM ioctl operations @@ -826,7 +874,10 @@ struct kfd_ioctl_set_xnack_mode_args { #define AMDKFD_IOC_CRIU_OP \ AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args) +#define AMDKFD_IOC_AVAILABLE_MEMORY \ + AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x23 +#define AMDKFD_COMMAND_END 0x24 #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index cb6e3846d27b..eed0315a77a6 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -270,6 +270,8 @@ struct kvm_xen_exit { #define KVM_EXIT_X86_BUS_LOCK 33 #define KVM_EXIT_XEN 34 #define KVM_EXIT_RISCV_SBI 35 +#define KVM_EXIT_RISCV_CSR 36 +#define KVM_EXIT_NOTIFY 37 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -496,6 +498,18 @@ struct kvm_run { unsigned long args[6]; unsigned long ret[2]; } riscv_sbi; + /* KVM_EXIT_RISCV_CSR */ + struct { + unsigned long csr_num; + unsigned long new_value; + unsigned long write_mask; + unsigned long ret_value; + } riscv_csr; + /* KVM_EXIT_NOTIFY */ + struct { +#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0) + __u32 flags; + } notify; /* Fix the size of the union.
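Taken together, the amdkfd additions above are consumed from userspace roughly as follows; the event mask would be handed to the SMI event file descriptor obtained through the pre-existing AMDKFD_IOC_SMI_EVENTS ioctl (not part of this hunk), and gpu_id is assumed to come from the KFD topology:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>

    /* Migrations and GPU page faults from every process (needs root). */
    #define PROFILER_SMI_MASK \
            (KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_MIGRATE_START) | \
             KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_MIGRATE_END) | \
             KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_PAGE_FAULT_START) | \
             KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_PAGE_FAULT_END) | \
             KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS))

    /* How much memory is still available on the GPU identified by gpu_id. */
    static long long kfd_available_bytes(__u32 gpu_id)
    {
            struct kfd_ioctl_get_available_memory_args args = { .gpu_id = gpu_id };
            int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
            long long ret = -1;

            if (fd < 0)
                    return -1;
            if (!ioctl(fd, AMDKFD_IOC_AVAILABLE_MEMORY, &args))
                    ret = (long long)args.available;
            close(fd);
            return ret;
    }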
*/ char padding[256]; }; @@ -1157,6 +1171,12 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_VM_TSC_CONTROL 214 #define KVM_CAP_SYSTEM_EVENT_DATA 215 #define KVM_CAP_ARM_SYSTEM_SUSPEND 216 +#define KVM_CAP_S390_PROTECTED_DUMP 217 +#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218 +#define KVM_CAP_X86_NOTIFY_VMEXIT 219 +#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220 +#define KVM_CAP_S390_ZPCI_OP 221 +#define KVM_CAP_S390_CPU_TOPOLOGY 222 #ifdef KVM_CAP_IRQ_ROUTING @@ -1660,6 +1680,55 @@ struct kvm_s390_pv_unp { __u64 tweak; }; +enum pv_cmd_dmp_id { + KVM_PV_DUMP_INIT, + KVM_PV_DUMP_CONFIG_STOR_STATE, + KVM_PV_DUMP_COMPLETE, + KVM_PV_DUMP_CPU, +}; + +struct kvm_s390_pv_dmp { + __u64 subcmd; + __u64 buff_addr; + __u64 buff_len; + __u64 gaddr; /* For dump storage state */ + __u64 reserved[4]; +}; + +enum pv_cmd_info_id { + KVM_PV_INFO_VM, + KVM_PV_INFO_DUMP, +}; + +struct kvm_s390_pv_info_dump { + __u64 dump_cpu_buffer_len; + __u64 dump_config_mem_buffer_per_1m; + __u64 dump_config_finalize_len; +}; + +struct kvm_s390_pv_info_vm { + __u64 inst_calls_list[4]; + __u64 max_cpus; + __u64 max_guests; + __u64 max_guest_addr; + __u64 feature_indication; +}; + +struct kvm_s390_pv_info_header { + __u32 id; + __u32 len_max; + __u32 len_written; + __u32 reserved; +}; + +struct kvm_s390_pv_info { + struct kvm_s390_pv_info_header header; + union { + struct kvm_s390_pv_info_dump dump; + struct kvm_s390_pv_info_vm vm; + }; +}; + enum pv_cmd_id { KVM_PV_ENABLE, KVM_PV_DISABLE, @@ -1668,6 +1737,8 @@ enum pv_cmd_id { KVM_PV_VERIFY, KVM_PV_PREP_RESET, KVM_PV_UNSHARE_ALL, + KVM_PV_INFO, + KVM_PV_DUMP, }; struct kvm_pv_cmd { @@ -2119,4 +2190,41 @@ struct kvm_stats_desc { /* Available with KVM_CAP_XSAVE2 */ #define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) +/* Available with KVM_CAP_S390_PROTECTED_DUMP */ +#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd) + +/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */ +#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0) +#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1) + +/* Available with KVM_CAP_S390_ZPCI_OP */ +#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op) + +struct kvm_s390_zpci_op { + /* in */ + __u32 fh; /* target device */ + __u8 op; /* operation to perform */ + __u8 pad[3]; + union { + /* for KVM_S390_ZPCIOP_REG_AEN */ + struct { + __u64 ibv; /* Guest addr of interrupt bit vector */ + __u64 sb; /* Guest addr of summary bit */ + __u32 flags; + __u32 noi; /* Number of interrupts */ + __u8 isc; /* Guest interrupt subclass */ + __u8 sbo; /* Offset of guest summary bit vector */ + __u16 pad; + } reg_aen; + __u64 reserved[8]; + } u; +}; + +/* types for kvm_s390_zpci_op->op */ +#define KVM_S390_ZPCIOP_REG_AEN 0 +#define KVM_S390_ZPCIOP_DEREG_AEN 1 + +/* flags for kvm_s390_zpci_op->u.reg_aen.flags */ +#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index f724129c0425..6325d1d0e90f 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -98,12 +98,8 @@ /* Since UDF 2.01 is ISO 13346 based... 
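For the protected-virtualization additions above, a plausible query sequence is sketched below. It assumes the pre-existing KVM_S390_PV_COMMAND vm ioctl and the established struct kvm_pv_cmd layout (cmd/rc/rrc/data/flags), neither of which changes in this hunk:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Ask a protected VM for its dump buffer sizes (KVM_PV_INFO / KVM_PV_INFO_DUMP). */
    static int pv_query_dump_info(int vm_fd, struct kvm_s390_pv_info *info)
    {
            struct kvm_pv_cmd cmd = { .cmd = KVM_PV_INFO };

            /* Dump info is only meaningful when the dump capability is there. */
            if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PROTECTED_DUMP) <= 0)
                    return -1;

            memset(info, 0, sizeof(*info));
            info->header.id = KVM_PV_INFO_DUMP;
            info->header.len_max = sizeof(info->header) + sizeof(info->dump);
            cmd.data = (__u64)(unsigned long)info;

            /* On success, header.len_written says how much the kernel filled in. */
            return ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
    }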
*/ #define UDF_SUPER_MAGIC 0x15013346 -#define BALLOON_KVM_MAGIC 0x13661366 -#define ZSMALLOC_MAGIC 0x58295829 #define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */ #define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ -#define Z3FOLD_MAGIC 0x33 -#define PPC_CMM_MAGIC 0xc7571590 #define SECRETMEM_MAGIC 0x5345434d /* "SECM" */ #endif /* __LINUX_MAGIC_H__ */ diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h index 0dfc11ee243a..ec3323dbb927 100644 --- a/include/uapi/linux/media-bus-format.h +++ b/include/uapi/linux/media-bus-format.h @@ -34,7 +34,7 @@ #define MEDIA_BUS_FMT_FIXED 0x0001 -/* RGB - next is 0x101e */ +/* RGB - next is 0x1022 */ #define MEDIA_BUS_FMT_RGB444_1X12 0x1016 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 @@ -59,9 +59,13 @@ #define MEDIA_BUS_FMT_RGB888_3X8_DELTA 0x101d #define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG 0x1011 #define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012 +#define MEDIA_BUS_FMT_RGB666_1X30_CPADLO 0x101e +#define MEDIA_BUS_FMT_RGB888_1X30_CPADLO 0x101f #define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d #define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f #define MEDIA_BUS_FMT_RGB101010_1X30 0x1018 +#define MEDIA_BUS_FMT_RGB666_1X36_CPADLO 0x1020 +#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021 #define MEDIA_BUS_FMT_RGB121212_1X36 0x1019 #define MEDIA_BUS_FMT_RGB161616_1X48 0x101a diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index 39c565e460c7..a998bf761635 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -154,6 +154,7 @@ enum { NDTPA_QUEUE_LENBYTES, /* u32 */ NDTPA_MCAST_REPROBES, /* u32 */ NDTPA_PAD, + NDTPA_INTERVAL_PROBE_TIME_MS, /* u64, msecs */ __NDTPA_MAX }; #define NDTPA_MAX (__NDTPA_MAX - 1) diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h index 49ddcdc61c09..7bfb31a66fc9 100644 --- a/include/uapi/linux/netfilter/xt_IDLETIMER.h +++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h @@ -1,6 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* - * linux/include/linux/netfilter/xt_IDLETIMER.h - * * Header file for Xtables timer target module. * * Copyright (C) 2004, 2010 Nokia Corporation @@ -10,20 +9,6 @@ * by Luciano Coelho <luciano.coelho@nokia.com> * * Contact: Luciano Coelho <luciano.coelho@nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA */ #ifndef _XT_IDLETIMER_H diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h index 23e91a9c2583..0b7b16dbdec2 100644 --- a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h +++ b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h @@ -17,4 +17,4 @@ struct ip6t_log_info { char prefix[30]; }; -#endif /*_IPT_LOG_H*/ +#endif /* _IP6T_LOG_H */ diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index d9490e3062a7..ffb7c573e299 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -324,6 +324,17 @@ */ /** + * DOC: Multi-Link Operation + * + * In Multi-Link Operation, a connection between two MLDs utilizes multiple + * links. To use this in nl80211, various commands and responses now need + * to or will include the new %NL80211_ATTR_MLO_LINKS attribute. + * Additionally, various commands that need to operate on a specific link + * now need to be given the %NL80211_ATTR_MLO_LINK_ID attribute, e.g. to + * use %NL80211_CMD_START_AP or similar functions. + */ + +/** * enum nl80211_commands - supported nl80211 commands * * @NL80211_CMD_UNSPEC: unspecified command to catch errors @@ -753,6 +764,13 @@ * %NL80211_ATTR_CSA_C_OFFSETS_TX is an array of offsets to CSA * counters which will be updated to the current value. This attribute * is used during CSA period. + * For TX on an MLD, the frequency can be omitted and the link ID be + * specified, or if transmitting to a known peer MLD (with MLD addresses + * in the frame) both can be omitted and the link will be selected by + * lower layers. + * For RX notification, %NL80211_ATTR_RX_HW_TIMESTAMP may be included to + * indicate the frame RX timestamp and %NL80211_ATTR_TX_HW_TIMESTAMP may + * be included to indicate the ack TX timestamp. * @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this * command may be used with the corresponding cookie to cancel the wait * time if it is known that it is no longer necessary. This command is * @@ -763,7 +781,9 @@ * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies * the TX command and %NL80211_ATTR_FRAME includes the contents of the * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged - * the frame. + * the frame. %NL80211_ATTR_TX_HW_TIMESTAMP may be included to indicate the + * tx timestamp and %NL80211_ATTR_RX_HW_TIMESTAMP may be included to + * indicate the ack RX timestamp. * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for * backward compatibility. * @@ -1108,6 +1128,12 @@ * has been received. %NL80211_ATTR_FRAME is used to specify the * frame contents. The frame is the raw EAPoL data, without ethernet or * 802.11 headers. + * For an MLD transmitter, the %NL80211_ATTR_MLO_LINK_ID may be given and + * its effect will depend on the destination: If the destination is known + * to be an MLD, this will be used as a hint to select the link to transmit + * the frame on. If the destination is not an MLD, this will select both + * the link to transmit on and the source address will be set to the link + * address of that link.
* When used as an event indication %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT and %NL80211_ATTR_MAC are added * indicating the protocol type of the received frame; whether the frame @@ -1237,6 +1263,16 @@ * to describe the BSSID address of the AP and %NL80211_ATTR_TIMEOUT to * specify the timeout value. * + * @NL80211_CMD_ADD_LINK: Add a new link to an interface. The + * %NL80211_ATTR_MLO_LINK_ID attribute is used for the new link. + * @NL80211_CMD_REMOVE_LINK: Remove a link from an interface. This may come + * without %NL80211_ATTR_MLO_LINK_ID as an easy way to remove all links + * in preparation for e.g. roaming to a regular (non-MLO) AP. + * + * @NL80211_CMD_ADD_LINK_STA: Add a link to an MLD station + * @NL80211_CMD_MODIFY_LINK_STA: Modify a link of an MLD station + * @NL80211_CMD_REMOVE_LINK_STA: Remove a link of an MLD station + * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1481,6 +1517,13 @@ enum nl80211_commands { NL80211_CMD_ASSOC_COMEBACK, + NL80211_CMD_ADD_LINK, + NL80211_CMD_REMOVE_LINK, + + NL80211_CMD_ADD_LINK_STA, + NL80211_CMD_MODIFY_LINK_STA, + NL80211_CMD_REMOVE_LINK_STA, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -2340,8 +2383,10 @@ enum nl80211_commands { * * @NL80211_ATTR_IFTYPE_EXT_CAPA: Nested attribute of the following attributes: * %NL80211_ATTR_IFTYPE, %NL80211_ATTR_EXT_CAPA, - * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities per - * interface type. + * %NL80211_ATTR_EXT_CAPA_MASK, to specify the extended capabilities and + * other interface-type specific capabilities per interface type. For MLO, + * %NL80211_ATTR_EML_CAPABILITY and %NL80211_ATTR_MLD_CAPA_AND_OPS are + * present. * * @NL80211_ATTR_MU_MIMO_GROUP_DATA: array of 24 bytes that defines a MU-MIMO * groupID for monitor mode. @@ -2663,6 +2708,39 @@ enum nl80211_commands { * association request when used with NL80211_CMD_NEW_STATION). Can be set * only if %NL80211_STA_FLAG_WME is set. * + * @NL80211_ATTR_MLO_LINK_ID: A (u8) link ID for use with MLO, to be used with + * various commands that need a link ID to operate. + * @NL80211_ATTR_MLO_LINKS: A nested array of links, each containing some + * per-link information and a link ID. + * @NL80211_ATTR_MLD_ADDR: An MLD address, used with various commands such as + * authenticate/associate. + * + * @NL80211_ATTR_MLO_SUPPORT: Flag attribute to indicate user space supports MLO + * connection. Used with %NL80211_CMD_CONNECT. If this attribute is not + * included in NL80211_CMD_CONNECT drivers must not perform MLO connection. + * + * @NL80211_ATTR_MAX_NUM_AKM_SUITES: U16 attribute. Indicates maximum number of + * AKM suites allowed for %NL80211_CMD_CONNECT, %NL80211_CMD_ASSOCIATE and + * %NL80211_CMD_START_AP in %NL80211_CMD_GET_WIPHY response. If this + * attribute is not present userspace shall consider maximum number of AKM + * suites allowed as %NL80211_MAX_NR_AKM_SUITES which is the legacy maximum + * number prior to the introduction of this attribute. + * + * @NL80211_ATTR_EML_CAPABILITY: EML Capability information (u16) + * @NL80211_ATTR_MLD_CAPA_AND_OPS: MLD Capabilities and Operations (u16) + * + * @NL80211_ATTR_TX_HW_TIMESTAMP: Hardware timestamp for TX operation in + * nanoseconds (u64). This is the device clock timestamp so it will + * probably reset when the device is stopped or the firmware is reset. + * When used with %NL80211_CMD_FRAME_TX_STATUS, indicates the frame TX + * timestamp. 
When used with %NL80211_CMD_FRAME RX notification, indicates + * the ack TX timestamp. + * @NL80211_ATTR_RX_HW_TIMESTAMP: Hardware timestamp for RX operation in + * nanoseconds (u64). This is the device clock timestamp so it will + * probably reset when the device is stopped or the firmware is reset. + * When used with %NL80211_CMD_FRAME_TX_STATUS, indicates the ack RX + * timestamp. When used with %NL80211_CMD_FRAME RX notification, indicates + * the incoming frame RX timestamp. * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3177,6 +3255,20 @@ enum nl80211_attrs { NL80211_ATTR_DISABLE_EHT, + NL80211_ATTR_MLO_LINKS, + NL80211_ATTR_MLO_LINK_ID, + NL80211_ATTR_MLD_ADDR, + + NL80211_ATTR_MLO_SUPPORT, + + NL80211_ATTR_MAX_NUM_AKM_SUITES, + + NL80211_ATTR_EML_CAPABILITY, + NL80211_ATTR_MLD_CAPA_AND_OPS, + + NL80211_ATTR_TX_HW_TIMESTAMP, + NL80211_ATTR_RX_HW_TIMESTAMP, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3231,6 +3323,11 @@ enum nl80211_attrs { #define NL80211_HE_MIN_CAPABILITY_LEN 16 #define NL80211_HE_MAX_CAPABILITY_LEN 54 #define NL80211_MAX_NR_CIPHER_SUITES 5 + +/* + * NL80211_MAX_NR_AKM_SUITES is obsolete when %NL80211_ATTR_MAX_NUM_AKM_SUITES + * is present in the %NL80211_CMD_GET_WIPHY response. + */ #define NL80211_MAX_NR_AKM_SUITES 2 #define NL80211_EHT_MIN_CAPABILITY_LEN 13 #define NL80211_EHT_MAX_CAPABILITY_LEN 51 @@ -4853,6 +4950,7 @@ enum nl80211_bss_scan_width { * Contains a nested array of signal strength attributes (u8, dBm), * using the nesting index as the antenna number. * @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz + * @NL80211_BSS_MLO_LINK_ID: MLO link ID of the BSS (u8). * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -4878,6 +4976,7 @@ enum nl80211_bss { NL80211_BSS_PARENT_BSSID, NL80211_BSS_CHAIN_SIGNAL, NL80211_BSS_FREQUENCY_OFFSET, + NL80211_BSS_MLO_LINK_ID, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -5874,7 +5973,7 @@ enum nl80211_ap_sme_features { * @NL80211_FEATURE_INACTIVITY_TIMER: This driver takes care of freeing up * the connected inactive stations in AP mode. * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested - * to work properly to suppport receiving regulatory hints from + * to work properly to support receiving regulatory hints from * cellular base stations.
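A small sketch of the intended fallback for the new AKM-suites attribute, assuming libnl-3 style attribute parsing of a NL80211_CMD_GET_WIPHY response:

    #include <netlink/attr.h>
    #include <linux/nl80211.h>

    /* Prefer the advertised limit, fall back to the legacy compile-time maximum. */
    static unsigned int wiphy_max_akm_suites(struct nlattr *tb[NL80211_ATTR_MAX + 1])
    {
            if (tb[NL80211_ATTR_MAX_NUM_AKM_SUITES])
                    return nla_get_u16(tb[NL80211_ATTR_MAX_NUM_AKM_SUITES]);
            return NL80211_MAX_NR_AKM_SUITES;
    }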
* @NL80211_FEATURE_P2P_DEVICE_NEEDS_CHANNEL: (no longer available, only * here to reserve the value for API/ABI compatibility) diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 108f8523fa04..57b8e2ffb1dd 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -737,7 +737,8 @@ #define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */ #define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ #define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ -#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT +#define PCI_EXT_CAP_ID_DOE 0x2E /* Data Object Exchange */ +#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_DOE #define PCI_EXT_CAP_DSN_SIZEOF 12 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 @@ -1103,4 +1104,30 @@ #define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0 #define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4 +/* Data Object Exchange */ +#define PCI_DOE_CAP 0x04 /* DOE Capabilities Register */ +#define PCI_DOE_CAP_INT_SUP 0x00000001 /* Interrupt Support */ +#define PCI_DOE_CAP_INT_MSG_NUM 0x00000ffe /* Interrupt Message Number */ +#define PCI_DOE_CTRL 0x08 /* DOE Control Register */ +#define PCI_DOE_CTRL_ABORT 0x00000001 /* DOE Abort */ +#define PCI_DOE_CTRL_INT_EN 0x00000002 /* DOE Interrupt Enable */ +#define PCI_DOE_CTRL_GO 0x80000000 /* DOE Go */ +#define PCI_DOE_STATUS 0x0c /* DOE Status Register */ +#define PCI_DOE_STATUS_BUSY 0x00000001 /* DOE Busy */ +#define PCI_DOE_STATUS_INT_STATUS 0x00000002 /* DOE Interrupt Status */ +#define PCI_DOE_STATUS_ERROR 0x00000004 /* DOE Error */ +#define PCI_DOE_STATUS_DATA_OBJECT_READY 0x80000000 /* Data Object Ready */ +#define PCI_DOE_WRITE 0x10 /* DOE Write Data Mailbox Register */ +#define PCI_DOE_READ 0x14 /* DOE Read Data Mailbox Register */ + +/* DOE Data Object - note not actually registers */ +#define PCI_DOE_DATA_OBJECT_HEADER_1_VID 0x0000ffff +#define PCI_DOE_DATA_OBJECT_HEADER_1_TYPE 0x00ff0000 +#define PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH 0x0003ffff + +#define PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX 0x000000ff +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID 0x0000ffff +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL 0x00ff0000 +#define PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX 0xff000000 + #endif /* LINUX_PCI_REGS_H */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index ffbe230ef90b..877309d6ca3c 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -589,6 +589,9 @@ enum { TCA_FLOWER_KEY_NUM_OF_VLANS, /* u8 */ + TCA_FLOWER_KEY_PPPOE_SID, /* be16 */ + TCA_FLOWER_KEY_PPP_PROTO, /* be16 */ + __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index a74294211290..ae78791372b8 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -35,6 +35,8 @@ enum { SEG6_IPTUN_MODE_INLINE, SEG6_IPTUN_MODE_ENCAP, SEG6_IPTUN_MODE_L2ENCAP, + SEG6_IPTUN_MODE_ENCAP_RED, + SEG6_IPTUN_MODE_L2ENCAP_RED, }; #endif diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index fa6b16e5fdd8..cea06924b295 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h @@ -126,10 +126,26 @@ struct serial_rs485 { #define SER_RS485_TERMINATE_BUS (1 << 5) /* Enable bus termination (if supported) */ + +/* RS-485 addressing mode */ +#define SER_RS485_ADDRB (1 << 6) /* Enable addressing mode */ +#define SER_RS485_ADDR_RECV (1 << 7) /* Receive address filter */ +#define SER_RS485_ADDR_DEST (1 << 8) /* Destination address */ + __u32 
delay_rts_before_send; /* Delay before send (milliseconds) */ __u32 delay_rts_after_send; /* Delay after send (milliseconds) */ - __u32 padding[5]; /* Memory is cheap, new structs - are a royal PITA .. */ + + /* The fields below are defined by flags */ + union { + __u32 padding[5]; /* Memory is cheap, new structs are a pain */ + + struct { + __u8 addr_recv; + __u8 addr_dest; + __u8 padding0[2]; + __u32 padding1[4]; + }; + }; }; /* diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 6faf502b7860..3ba34d8378bd 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -124,10 +124,6 @@ /* TXX9 type number */ #define PORT_TXX9 64 -/* NEC VR4100 series SIU/DSIU */ -#define PORT_VR41XX_SIU 65 -#define PORT_VR41XX_DSIU 66 - /* Samsung S3C2400 SoC */ #define PORT_S3C2400 67 diff --git a/include/uapi/linux/serial_reg.h b/include/uapi/linux/serial_reg.h index f51bc8f36813..bab3b39266cc 100644 --- a/include/uapi/linux/serial_reg.h +++ b/include/uapi/linux/serial_reg.h @@ -139,7 +139,7 @@ #define UART_LSR_PE 0x04 /* Parity error indicator */ #define UART_LSR_OE 0x02 /* Overrun error indicator */ #define UART_LSR_DR 0x01 /* Receiver data ready */ -#define UART_LSR_BRK_ERROR_BITS 0x1E /* BI, FE, PE, OE bits */ +#define UART_LSR_BRK_ERROR_BITS (UART_LSR_BI|UART_LSR_FE|UART_LSR_PE|UART_LSR_OE) #define UART_MSR 6 /* In: Modem Status Register */ #define UART_MSR_DCD 0x80 /* Data Carrier Detect */ @@ -150,7 +150,7 @@ #define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */ #define UART_MSR_DDSR 0x02 /* Delta DSR */ #define UART_MSR_DCTS 0x01 /* Delta CTS */ -#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */ +#define UART_MSR_ANY_DELTA (UART_MSR_DDCD|UART_MSR_TERI|UART_MSR_DDSR|UART_MSR_DCTS) #define UART_SCR 7 /* I/O: Scratch Register */ diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h index 693f549f6966..bb4dacca31e7 100644 --- a/include/uapi/linux/smc.h +++ b/include/uapi/linux/smc.h @@ -124,6 +124,7 @@ enum { SMC_NLA_LGR_R_V2, /* nest */ SMC_NLA_LGR_R_NET_COOKIE, /* u64 */ SMC_NLA_LGR_R_PAD, /* flag */ + SMC_NLA_LGR_R_BUF_TYPE, /* u8 */ __SMC_NLA_LGR_R_MAX, SMC_NLA_LGR_R_MAX = __SMC_NLA_LGR_R_MAX - 1 }; diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index 904909d020e2..4d7470036a8b 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -344,6 +344,8 @@ enum LINUX_MIB_TLSRXDEVICE, /* TlsRxDevice */ LINUX_MIB_TLSDECRYPTERROR, /* TlsDecryptError */ LINUX_MIB_TLSRXDEVICERESYNC, /* TlsRxDeviceResync */ + LINUX_MIB_TLSDECRYPTRETRY, /* TlsDecryptRetry */ + LINUX_MIB_TLSRXNOPADVIOL, /* TlsRxNoPadViolation */ __LINUX_MIB_TLSMAX }; diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 7272f85d6d6a..0723a9cce747 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -102,7 +102,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) #define __swab16(x) (__u16)__builtin_bswap16((__u16)(x)) #else #define __swab16(x) \ - (__builtin_constant_p((__u16)(x)) ? \ + (__u16)(__builtin_constant_p(x) ? \ ___constant_swab16(x) : \ __fswab16(x)) #endif @@ -115,7 +115,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) #define __swab32(x) (__u32)__builtin_bswap32((__u32)(x)) #else #define __swab32(x) \ - (__builtin_constant_p((__u32)(x)) ? \ + (__u32)(__builtin_constant_p(x) ? 
\ ___constant_swab32(x) : \ __fswab32(x)) #endif @@ -128,7 +128,7 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val) #define __swab64(x) (__u64)__builtin_bswap64((__u64)(x)) #else #define __swab64(x) \ - (__builtin_constant_p((__u64)(x)) ? \ + (__u64)(__builtin_constant_p(x) ? \ ___constant_swab64(x) : \ __fswab64(x)) #endif diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h index 6a3b194c50fe..8981f00204db 100644 --- a/include/uapi/linux/sysctl.h +++ b/include/uapi/linux/sysctl.h @@ -584,24 +584,25 @@ enum { /* /proc/sys/net/<protocol>/neigh/<dev> */ enum { - NET_NEIGH_MCAST_SOLICIT=1, - NET_NEIGH_UCAST_SOLICIT=2, - NET_NEIGH_APP_SOLICIT=3, - NET_NEIGH_RETRANS_TIME=4, - NET_NEIGH_REACHABLE_TIME=5, - NET_NEIGH_DELAY_PROBE_TIME=6, - NET_NEIGH_GC_STALE_TIME=7, - NET_NEIGH_UNRES_QLEN=8, - NET_NEIGH_PROXY_QLEN=9, - NET_NEIGH_ANYCAST_DELAY=10, - NET_NEIGH_PROXY_DELAY=11, - NET_NEIGH_LOCKTIME=12, - NET_NEIGH_GC_INTERVAL=13, - NET_NEIGH_GC_THRESH1=14, - NET_NEIGH_GC_THRESH2=15, - NET_NEIGH_GC_THRESH3=16, - NET_NEIGH_RETRANS_TIME_MS=17, - NET_NEIGH_REACHABLE_TIME_MS=18, + NET_NEIGH_MCAST_SOLICIT = 1, + NET_NEIGH_UCAST_SOLICIT = 2, + NET_NEIGH_APP_SOLICIT = 3, + NET_NEIGH_RETRANS_TIME = 4, + NET_NEIGH_REACHABLE_TIME = 5, + NET_NEIGH_DELAY_PROBE_TIME = 6, + NET_NEIGH_GC_STALE_TIME = 7, + NET_NEIGH_UNRES_QLEN = 8, + NET_NEIGH_PROXY_QLEN = 9, + NET_NEIGH_ANYCAST_DELAY = 10, + NET_NEIGH_PROXY_DELAY = 11, + NET_NEIGH_LOCKTIME = 12, + NET_NEIGH_GC_INTERVAL = 13, + NET_NEIGH_GC_THRESH1 = 14, + NET_NEIGH_GC_THRESH2 = 15, + NET_NEIGH_GC_THRESH3 = 16, + NET_NEIGH_RETRANS_TIME_MS = 17, + NET_NEIGH_REACHABLE_TIME_MS = 18, + NET_NEIGH_INTERVAL_PROBE_TIME_MS = 19, }; /* /proc/sys/net/dccp */ diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index bb8f80812b0b..f1157d8f4acd 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -40,6 +40,7 @@ #define TLS_TX 1 /* Set transmit parameters */ #define TLS_RX 2 /* Set receive parameters */ #define TLS_TX_ZEROCOPY_RO 3 /* TX zerocopy (only sendfile now) */ +#define TLS_RX_EXPECT_NO_PAD 4 /* Attempt opportunistic zero-copy */ /* Supported versions */ #define TLS_VERSION_MINOR(ver) ((ver) & 0xFF) @@ -162,6 +163,7 @@ enum { TLS_INFO_TXCONF, TLS_INFO_RXCONF, TLS_INFO_ZC_RO_TX, + TLS_INFO_RX_NO_PAD, __TLS_INFO_MAX, }; #define TLS_INFO_MAX (__TLS_INFO_MAX - 1) diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h index ca33092354ab..677edaab2b66 100644 --- a/include/uapi/linux/ublk_cmd.h +++ b/include/uapi/linux/ublk_cmd.h @@ -15,6 +15,8 @@ #define UBLK_CMD_DEL_DEV 0x05 #define UBLK_CMD_START_DEV 0x06 #define UBLK_CMD_STOP_DEV 0x07 +#define UBLK_CMD_SET_PARAMS 0x08 +#define UBLK_CMD_GET_PARAMS 0x09 /* * IO commands, issued by ublk server, and handled by ublk driver. @@ -28,12 +30,21 @@ * this IO request, request's handling result is committed to ublk * driver, meantime FETCH_REQ is piggyback, and FETCH_REQ has to be * handled before completing io request. + * + * NEED_GET_DATA: only used for write requests to set io addr and copy data + * When NEED_GET_DATA is set, ublksrv has to issue UBLK_IO_NEED_GET_DATA + * command after ublk driver returns UBLK_IO_RES_NEED_GET_DATA. + * + * It is only used if ublksrv set UBLK_F_NEED_GET_DATA flag + * while starting a ublk device. 
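The TLS_RX_EXPECT_NO_PAD option added to tls.h above is a per-socket opt-in for the TLS 1.3 no-padding receive optimization. A rough sketch, assuming an already-configured kTLS socket and defining SOL_TLS locally since libc headers may not provide it:

    #include <sys/socket.h>
    #include <linux/tls.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282     /* socket level used by kernel TLS */
    #endif

    /* Tell the kernel that TLS 1.3 records are expected without padding. */
    static int tls_rx_expect_no_pad(int sock)
    {
            int val = 1;

            return setsockopt(sock, SOL_TLS, TLS_RX_EXPECT_NO_PAD,
                              &val, sizeof(val));
    }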
*/ #define UBLK_IO_FETCH_REQ 0x20 #define UBLK_IO_COMMIT_AND_FETCH_REQ 0x21 +#define UBLK_IO_NEED_GET_DATA 0x22 /* only ABORT means that no re-fetch */ #define UBLK_IO_RES_OK 0 +#define UBLK_IO_RES_NEED_GET_DATA 1 #define UBLK_IO_RES_ABORT (-ENODEV) #define UBLKSRV_CMD_BUF_OFFSET 0 @@ -54,6 +65,15 @@ */ #define UBLK_F_URING_CMD_COMP_IN_TASK (1ULL << 1) +/* + * User should issue io cmd again for write requests to + * set io buffer address and copy data from bio vectors + * to the userspace io buffer. + * + * In this mode, task_work is not used. + */ +#define UBLK_F_NEED_GET_DATA (1UL << 2) + /* device state */ #define UBLK_S_DEV_DEAD 0 #define UBLK_S_DEV_LIVE 1 @@ -78,22 +98,23 @@ struct ublksrv_ctrl_cmd { struct ublksrv_ctrl_dev_info { __u16 nr_hw_queues; __u16 queue_depth; - __u16 block_size; __u16 state; + __u16 pad0; - __u32 rq_max_blocks; + __u32 max_io_buf_bytes; __u32 dev_id; - __u64 dev_blocks; - __s32 ublksrv_pid; - __s32 reserved0; + __u32 pad1; + __u64 flags; - __u64 flags_reserved; /* For ublksrv internal use, invisible to ublk driver */ __u64 ublksrv_flags; - __u64 reserved1[9]; + + __u64 reserved0; + __u64 reserved1; + __u64 reserved2; }; #define UBLK_IO_OP_READ 0 @@ -158,4 +179,49 @@ struct ublksrv_io_cmd { __u64 addr; }; +struct ublk_param_basic { +#define UBLK_ATTR_READ_ONLY (1 << 0) +#define UBLK_ATTR_ROTATIONAL (1 << 1) +#define UBLK_ATTR_VOLATILE_CACHE (1 << 2) +#define UBLK_ATTR_FUA (1 << 3) + __u32 attrs; + __u8 logical_bs_shift; + __u8 physical_bs_shift; + __u8 io_opt_shift; + __u8 io_min_shift; + + __u32 max_sectors; + __u32 chunk_sectors; + + __u64 dev_sectors; + __u64 virt_boundary_mask; +}; + +struct ublk_param_discard { + __u32 discard_alignment; + + __u32 discard_granularity; + __u32 max_discard_sectors; + + __u32 max_write_zeroes_sectors; + __u16 max_discard_segments; + __u16 reserved0; +}; + +struct ublk_params { + /* + * Total length of parameters, userspace has to set 'len' for both + * SET_PARAMS and GET_PARAMS command, and driver may update len + * if two sides use different version of 'ublk_params', same with + * 'types' fields. 
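The basic parameter block above encodes sizes as shifts and, by convention, 512-byte sectors (the unit is not spelled out in the header); converting them back to byte values might look like this sketch:

    #include <linux/ublk_cmd.h>

    /* Logical block size in bytes, as the block layer would see it. */
    static inline unsigned int ublk_logical_block_size(const struct ublk_param_basic *p)
    {
            return 1U << p->logical_bs_shift;
    }

    /* Device capacity in bytes, assuming dev_sectors counts 512-byte sectors. */
    static inline unsigned long long ublk_device_bytes(const struct ublk_param_basic *p)
    {
            return (unsigned long long)p->dev_sectors << 9;
    }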
+ */ + __u32 len; +#define UBLK_PARAM_TYPE_BASIC (1 << 0) +#define UBLK_PARAM_TYPE_DISCARD (1 << 1) + __u32 types; /* types of parameter included */ + + struct ublk_param_basic basic; + struct ublk_param_discard discard; +}; + #endif diff --git a/include/uapi/linux/usb/cdc.h b/include/uapi/linux/usb/cdc.h index acf3852bb676..1924cf665448 100644 --- a/include/uapi/linux/usb/cdc.h +++ b/include/uapi/linux/usb/cdc.h @@ -271,6 +271,10 @@ struct usb_cdc_line_coding { __u8 bDataBits; } __attribute__ ((packed)); +/* Control Signal Bitmap Values from 6.2.14 SetControlLineState */ +#define USB_CDC_CTRL_DTR (1 << 0) +#define USB_CDC_CTRL_RTS (1 << 1) + /* table 62; bits in multicast filter */ #define USB_CDC_PACKET_TYPE_PROMISCUOUS (1 << 0) #define USB_CDC_PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */ @@ -302,6 +306,15 @@ struct usb_cdc_notification { __le16 wLength; } __attribute__ ((packed)); +/* UART State Bitmap Values from 6.3.5 SerialState */ +#define USB_CDC_SERIAL_STATE_DCD (1 << 0) +#define USB_CDC_SERIAL_STATE_DSR (1 << 1) +#define USB_CDC_SERIAL_STATE_BREAK (1 << 2) +#define USB_CDC_SERIAL_STATE_RING_SIGNAL (1 << 3) +#define USB_CDC_SERIAL_STATE_FRAMING (1 << 4) +#define USB_CDC_SERIAL_STATE_PARITY (1 << 5) +#define USB_CDC_SERIAL_STATE_OVERRUN (1 << 6) + struct usb_cdc_speed_change { __le32 DLBitRRate; /* contains the downlink bit rate (IN pipe) */ __le32 ULBitRate; /* contains the uplink bit rate (OUT pipe) */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index dfff69ed88f7..5f46bf4a570c 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -1997,6 +1997,465 @@ struct v4l2_ctrl_mpeg2_quantisation { __u8 chroma_non_intra_quantiser_matrix[64]; }; +#define V4L2_CID_STATELESS_HEVC_SPS (V4L2_CID_CODEC_STATELESS_BASE + 400) +#define V4L2_CID_STATELESS_HEVC_PPS (V4L2_CID_CODEC_STATELESS_BASE + 401) +#define V4L2_CID_STATELESS_HEVC_SLICE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 402) +#define V4L2_CID_STATELESS_HEVC_SCALING_MATRIX (V4L2_CID_CODEC_STATELESS_BASE + 403) +#define V4L2_CID_STATELESS_HEVC_DECODE_PARAMS (V4L2_CID_CODEC_STATELESS_BASE + 404) +#define V4L2_CID_STATELESS_HEVC_DECODE_MODE (V4L2_CID_CODEC_STATELESS_BASE + 405) +#define V4L2_CID_STATELESS_HEVC_START_CODE (V4L2_CID_CODEC_STATELESS_BASE + 406) +#define V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS (V4L2_CID_CODEC_STATELESS_BASE + 407) + +enum v4l2_stateless_hevc_decode_mode { + V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED, + V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED, +}; + +enum v4l2_stateless_hevc_start_code { + V4L2_STATELESS_HEVC_START_CODE_NONE, + V4L2_STATELESS_HEVC_START_CODE_ANNEX_B, +}; + +#define V4L2_HEVC_SLICE_TYPE_B 0 +#define V4L2_HEVC_SLICE_TYPE_P 1 +#define V4L2_HEVC_SLICE_TYPE_I 2 + +#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0) +#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1) +#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2) +#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3) +#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4) +#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5) +#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6) +#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7) +#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8) + +/** + * struct v4l2_ctrl_hevc_sps - ITU-T Rec. 
H.265: Sequence parameter set + * + * @video_parameter_set_id: specifies the value of the + * vps_video_parameter_set_id of the active VPS + * @seq_parameter_set_id: provides an identifier for the SPS for + * reference by other syntax elements + * @pic_width_in_luma_samples: specifies the width of each decoded picture + * in units of luma samples + * @pic_height_in_luma_samples: specifies the height of each decoded picture + * in units of luma samples + * @bit_depth_luma_minus8: this value plus 8 specifies the bit depth of the + * samples of the luma array + * @bit_depth_chroma_minus8: this value plus 8 specifies the bit depth of the + * samples of the chroma arrays + * @log2_max_pic_order_cnt_lsb_minus4: this value plus 4 specifies the value of + * the variable MaxPicOrderCntLsb + * @sps_max_dec_pic_buffering_minus1: this value plus 1 specifies the maximum + * required size of the decoded picture + * buffer for the codec video sequence + * @sps_max_num_reorder_pics: indicates the maximum allowed number of pictures + * @sps_max_latency_increase_plus1: when not equal to 0, used to compute the + * value of the SpsMaxLatencyPictures array + * @log2_min_luma_coding_block_size_minus3: plus 3 specifies the minimum + * luma coding block size + * @log2_diff_max_min_luma_coding_block_size: specifies the difference between + * the maximum and minimum luma + * coding block size + * @log2_min_luma_transform_block_size_minus2: plus 2 specifies the minimum luma + * transform block size + * @log2_diff_max_min_luma_transform_block_size: specifies the difference between + * the maximum and minimum luma + * transform block size + * @max_transform_hierarchy_depth_inter: specifies the maximum hierarchy + * depth for transform units of + * coding units coded in inter + * prediction mode + * @max_transform_hierarchy_depth_intra: specifies the maximum hierarchy + * depth for transform units of + * coding units coded in intra + * prediction mode + * @pcm_sample_bit_depth_luma_minus1: this value plus 1 specifies the number of + * bits used to represent each of PCM sample + * values of the luma component + * @pcm_sample_bit_depth_chroma_minus1: this value plus 1 specifies the number + * of bits used to represent each of PCM + * sample values of the chroma components + * @log2_min_pcm_luma_coding_block_size_minus3: this value plus 3 specifies the + * minimum size of coding blocks + * @log2_diff_max_min_pcm_luma_coding_block_size: specifies the difference between + * the maximum and minimum size of + * coding blocks + * @num_short_term_ref_pic_sets: specifies the number of st_ref_pic_set() + * syntax structures included in the SPS + * @num_long_term_ref_pics_sps: specifies the number of candidate long-term + * reference pictures that are specified in the SPS + * @chroma_format_idc: specifies the chroma sampling + * @sps_max_sub_layers_minus1: this value plus 1 specifies the maximum number + * of temporal sub-layers + * @reserved: padding field. Should be zeroed by applications.
+ * @flags: see V4L2_HEVC_SPS_FLAG_{} + */ +struct v4l2_ctrl_hevc_sps { + __u8 video_parameter_set_id; + __u8 seq_parameter_set_id; + __u16 pic_width_in_luma_samples; + __u16 pic_height_in_luma_samples; + __u8 bit_depth_luma_minus8; + __u8 bit_depth_chroma_minus8; + __u8 log2_max_pic_order_cnt_lsb_minus4; + __u8 sps_max_dec_pic_buffering_minus1; + __u8 sps_max_num_reorder_pics; + __u8 sps_max_latency_increase_plus1; + __u8 log2_min_luma_coding_block_size_minus3; + __u8 log2_diff_max_min_luma_coding_block_size; + __u8 log2_min_luma_transform_block_size_minus2; + __u8 log2_diff_max_min_luma_transform_block_size; + __u8 max_transform_hierarchy_depth_inter; + __u8 max_transform_hierarchy_depth_intra; + __u8 pcm_sample_bit_depth_luma_minus1; + __u8 pcm_sample_bit_depth_chroma_minus1; + __u8 log2_min_pcm_luma_coding_block_size_minus3; + __u8 log2_diff_max_min_pcm_luma_coding_block_size; + __u8 num_short_term_ref_pic_sets; + __u8 num_long_term_ref_pics_sps; + __u8 chroma_format_idc; + __u8 sps_max_sub_layers_minus1; + + __u8 reserved[6]; + __u64 flags; +}; + +#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED (1ULL << 0) +#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1) +#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2) +#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3) +#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4) +#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5) +#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6) +#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7) +#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8) +#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9) +#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10) +#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11) +#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12) +#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13) +#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14) +#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15) +#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16) +#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17) +#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18) +#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT (1ULL << 19) +#define V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING (1ULL << 20) + +/** + * struct v4l2_ctrl_hevc_pps - ITU-T Rec. H.265: Picture parameter set + * + * @pic_parameter_set_id: identifies the PPS for reference by other + * syntax elements + * @num_extra_slice_header_bits: specifies the number of extra slice header + * bits that are present in the slice header RBSP + * for coded pictures referring to the PPS. 
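The SPS fields above are enough to recover the coding-tree-block geometry a driver typically needs; a sketch following the usual H.265 derivations for CtbLog2SizeY and PicWidthInCtbsY:

    #include <linux/v4l2-controls.h>

    /* CTB size and picture dimensions in CTBs, derived from the SPS. */
    static void hevc_ctb_geometry(const struct v4l2_ctrl_hevc_sps *sps,
                                  unsigned int *ctb_size,
                                  unsigned int *width_in_ctbs,
                                  unsigned int *height_in_ctbs)
    {
            unsigned int min_cb_log2 = sps->log2_min_luma_coding_block_size_minus3 + 3;
            unsigned int ctb_log2 = min_cb_log2 +
                                    sps->log2_diff_max_min_luma_coding_block_size;

            *ctb_size = 1U << ctb_log2;
            *width_in_ctbs  = (sps->pic_width_in_luma_samples  + *ctb_size - 1) / *ctb_size;
            *height_in_ctbs = (sps->pic_height_in_luma_samples + *ctb_size - 1) / *ctb_size;
    }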
+ * @num_ref_idx_l0_default_active_minus1: this value plus 1 specifies the + * inferred value of num_ref_idx_l0_active_minus1 + * @num_ref_idx_l1_default_active_minus1: this value plus 1 specifies the + * inferred value of num_ref_idx_l1_active_minus1 + * @init_qp_minus26: this value plus 26 specifies the initial value of SliceQpY for + * each slice referring to the PPS + * @diff_cu_qp_delta_depth: specifies the difference between the luma coding + * tree block size and the minimum luma coding block + * size of coding units that convey cu_qp_delta_abs + * and cu_qp_delta_sign_flag + * @pps_cb_qp_offset: specifies the offsets to the luma quantization parameter Cb + * @pps_cr_qp_offset: specifies the offsets to the luma quantization parameter Cr + * @num_tile_columns_minus1: this value plus 1 specifies the number of tile columns + * partitioning the picture + * @num_tile_rows_minus1: this value plus 1 specifies the number of tile rows partitioning + * the picture + * @column_width_minus1: this value plus 1 specifies the width of each tile column in + * units of coding tree blocks + * @row_height_minus1: this value plus 1 specifies the height of each tile row in + * units of coding tree blocks + * @pps_beta_offset_div2: specifies the default deblocking parameter offsets for + * beta divided by 2 + * @pps_tc_offset_div2: specifies the default deblocking parameter offsets for tC + * divided by 2 + * @log2_parallel_merge_level_minus2: this value plus 2 specifies the value of + * the variable Log2ParMrgLevel + * @reserved: padding field. Should be zeroed by applications. + * @flags: see V4L2_HEVC_PPS_FLAG_{} + */ +struct v4l2_ctrl_hevc_pps { + __u8 pic_parameter_set_id; + __u8 num_extra_slice_header_bits; + __u8 num_ref_idx_l0_default_active_minus1; + __u8 num_ref_idx_l1_default_active_minus1; + __s8 init_qp_minus26; + __u8 diff_cu_qp_delta_depth; + __s8 pps_cb_qp_offset; + __s8 pps_cr_qp_offset; + __u8 num_tile_columns_minus1; + __u8 num_tile_rows_minus1; + __u8 column_width_minus1[20]; + __u8 row_height_minus1[22]; + __s8 pps_beta_offset_div2; + __s8 pps_tc_offset_div2; + __u8 log2_parallel_merge_level_minus2; + __u8 reserved; + __u64 flags; +}; + +#define V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE 0x01 + +#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME 0 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_FIELD 1 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_FIELD 2 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM 3 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP 4 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_BOTTOM_TOP 5 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM 6 +#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING 7 +#define V4L2_HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING 8 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_PREVIOUS_BOTTOM 9 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_PREVIOUS_TOP 10 +#define V4L2_HEVC_SEI_PIC_STRUCT_TOP_PAIRED_NEXT_BOTTOM 11 +#define V4L2_HEVC_SEI_PIC_STRUCT_BOTTOM_PAIRED_NEXT_TOP 12 + +#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16 + +/** + * struct v4l2_hevc_dpb_entry - HEVC decoded picture buffer entry + * + * @timestamp: timestamp of the V4L2 capture buffer to use as reference. + * @flags: long term flag for the reference frame + * @field_pic: whether the reference is a field picture or a frame. + * @reserved: padding field. Should be zeroed by applications. + * @pic_order_cnt_val: the picture order count of the current picture.
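Filling a DPB entry (struct defined just below) appears to follow the same convention as the other stateless codec controls: the 64-bit timestamp is the reference capture buffer's timestamp expressed in nanoseconds, i.e. what v4l2_timeval_to_ns() produces in the kernel. A sketch under that assumption:

    #include <linux/videodev2.h>

    /* Point a DPB entry at a previously queued CAPTURE buffer. */
    static void hevc_dpb_set_ref(struct v4l2_hevc_dpb_entry *e,
                                 const struct v4l2_buffer *ref_buf,
                                 int long_term)
    {
            e->timestamp = ref_buf->timestamp.tv_sec * 1000000000ULL +
                           ref_buf->timestamp.tv_usec * 1000ULL;
            e->flags = long_term ? V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE : 0;
    }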
+ */ +struct v4l2_hevc_dpb_entry { + __u64 timestamp; + __u8 flags; + __u8 field_pic; + __u16 reserved; + __s32 pic_order_cnt_val; +}; + +/** + * struct v4l2_hevc_pred_weight_table - HEVC weighted prediction parameters + * + * @delta_luma_weight_l0: the difference of the weighting factor applied + * to the luma prediction value for list 0 + * @luma_offset_l0: the additive offset applied to the luma prediction value + * for list 0 + * @delta_chroma_weight_l0: the difference of the weighting factor applied + * to the chroma prediction values for list 0 + * @chroma_offset_l0: the difference of the additive offset applied to + * the chroma prediction values for list 0 + * @delta_luma_weight_l1: the difference of the weighting factor applied + * to the luma prediction value for list 1 + * @luma_offset_l1: the additive offset applied to the luma prediction value + * for list 1 + * @delta_chroma_weight_l1: the difference of the weighting factor applied + * to the chroma prediction values for list 1 + * @chroma_offset_l1: the difference of the additive offset applied to + * the chroma prediction values for list 1 + * @luma_log2_weight_denom: the base 2 logarithm of the denominator for + * all luma weighting factors + * @delta_chroma_log2_weight_denom: the difference of the base 2 logarithm + * of the denominator for all chroma + * weighting factors + */ +struct v4l2_hevc_pred_weight_table { + __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + + __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; + + __u8 luma_log2_weight_denom; + __s8 delta_chroma_log2_weight_denom; +}; + +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8) +#define V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 9) + +/** + * struct v4l2_ctrl_hevc_slice_params - HEVC slice parameters + * + * This control is a dynamically sized 1-dimensional array, + * V4L2_CTRL_FLAG_DYNAMIC_ARRAY flag must be set when using it. + * + * @bit_size: size (in bits) of the current slice data + * @data_byte_offset: offset (in bytes) to the video data in the current slice data + * @num_entry_point_offsets: specifies the number of entry point offset syntax + * elements in the slice header. 
+ * @nal_unit_type: specifies the coding type of the slice (B, P or I) + * @nuh_temporal_id_plus1: this value minus 1 specifies a temporal identifier for the NAL unit + * @slice_type: see V4L2_HEVC_SLICE_TYPE_{} + * @colour_plane_id: specifies the colour plane associated with the current slice + * @slice_pic_order_cnt: specifies the picture order count + * @num_ref_idx_l0_active_minus1: this value plus 1 specifies the maximum + * reference index for reference picture list 0 + * that may be used to decode the slice + * @num_ref_idx_l1_active_minus1: this value plus 1 specifies the maximum + * reference index for reference picture list 1 + * that may be used to decode the slice + * @collocated_ref_idx: specifies the reference index of the collocated picture used + * for temporal motion vector prediction + * @five_minus_max_num_merge_cand: specifies the maximum number of merging + * motion vector prediction candidates supported in + * the slice subtracted from 5 + * @slice_qp_delta: specifies the initial value of QpY to be used for the coding + * blocks in the slice + * @slice_cb_qp_offset: specifies a difference to be added to the value of pps_cb_qp_offset + * @slice_cr_qp_offset: specifies a difference to be added to the value of pps_cr_qp_offset + * @slice_act_y_qp_offset: screen content extension parameters + * @slice_act_cb_qp_offset: screen content extension parameters + * @slice_act_cr_qp_offset: screen content extension parameters + * @slice_beta_offset_div2: specifies the deblocking parameter offsets for beta divided by 2 + * @slice_tc_offset_div2: specifies the deblocking parameter offsets for tC divided by 2 + * @pic_struct: indicates whether a picture should be displayed as a frame or as one or + * more fields + * @reserved0: padding field. Should be zeroed by applications. + * @slice_segment_addr: specifies the address of the first coding tree block in + * the slice segment + * @ref_idx_l0: the list of L0 reference elements as indices in the DPB + * @ref_idx_l1: the list of L1 reference elements as indices in the DPB + * @short_term_ref_pic_set_size: specifies the size of short-term reference + * pictures set included in the SPS + * @long_term_ref_pic_set_size: specifies the size of long-term reference + * pictures set included in the SPS + * @pred_weight_table: the prediction weight coefficients for inter-picture + * prediction + * @reserved1: padding field. Should be zeroed by applications. + * @flags: see V4L2_HEVC_SLICE_PARAMS_FLAG_{} + */ +struct v4l2_ctrl_hevc_slice_params { + __u32 bit_size; + __u32 data_byte_offset; + __u32 num_entry_point_offsets; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ + __u8 nal_unit_type; + __u8 nuh_temporal_id_plus1; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ + __u8 slice_type; + __u8 colour_plane_id; + __s32 slice_pic_order_cnt; + __u8 num_ref_idx_l0_active_minus1; + __u8 num_ref_idx_l1_active_minus1; + __u8 collocated_ref_idx; + __u8 five_minus_max_num_merge_cand; + __s8 slice_qp_delta; + __s8 slice_cb_qp_offset; + __s8 slice_cr_qp_offset; + __s8 slice_act_y_qp_offset; + __s8 slice_act_cb_qp_offset; + __s8 slice_act_cr_qp_offset; + __s8 slice_beta_offset_div2; + __s8 slice_tc_offset_div2; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */ + __u8 pic_struct; + + __u8 reserved0[3]; + /* ISO/IEC 23008-2, ITU-T Rec.
H.265: General slice segment header */ + __u32 slice_segment_addr; + __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u16 short_term_ref_pic_set_size; + __u16 long_term_ref_pic_set_size; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */ + struct v4l2_hevc_pred_weight_table pred_weight_table; + + __u8 reserved1[2]; + __u64 flags; +}; + +#define V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC 0x1 +#define V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC 0x2 +#define V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR 0x4 + +/** + * struct v4l2_ctrl_hevc_decode_params - HEVC decode parameters + * + * @pic_order_cnt_val: picture order count + * @short_term_ref_pic_set_size: specifies the size of short-term reference + * pictures set included in the SPS of the first slice + * @long_term_ref_pic_set_size: specifies the size of long-term reference + * pictures set included in the SPS of the first slice + * @num_active_dpb_entries: the number of entries in the DPB + * @num_poc_st_curr_before: the number of reference pictures in the short-term + * set that come before the current frame + * @num_poc_st_curr_after: the number of reference pictures in the short-term + * set that come after the current frame + * @num_poc_lt_curr: the number of reference pictures in the long-term set + * @poc_st_curr_before: provides the index of the short term before references + * in DPB array + * @poc_st_curr_after: provides the index of the short term after references + * in DPB array + * @poc_lt_curr: provides the index of the long term references in DPB array + * @reserved: padding field. Should be zeroed by applications. + * @dpb: the decoded picture buffer, for meta-data about reference frames + * @flags: see V4L2_HEVC_DECODE_PARAM_FLAG_{} + */ +struct v4l2_ctrl_hevc_decode_params { + __s32 pic_order_cnt_val; + __u16 short_term_ref_pic_set_size; + __u16 long_term_ref_pic_set_size; + __u8 num_active_dpb_entries; + __u8 num_poc_st_curr_before; + __u8 num_poc_st_curr_after; + __u8 num_poc_lt_curr; + __u8 poc_st_curr_before[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_st_curr_after[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 poc_lt_curr[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u8 reserved[4]; + struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; + __u64 flags; +}; + +/** + * struct v4l2_ctrl_hevc_scaling_matrix - HEVC scaling lists parameters + * + * @scaling_list_4x4: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_8x8: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_16x16: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_32x32: scaling list is used for the scaling process for + * transform coefficients. The values on each scaling + * list are expected in raster scan order + * @scaling_list_dc_coef_16x16: scaling list is used for the scaling process + * for transform coefficients. The values on each + * scaling list are expected in raster scan order. + * @scaling_list_dc_coef_32x32: scaling list is used for the scaling process + * for transform coefficients. The values on each + * scaling list are expected in raster scan order.
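On the userspace side, the new HEVC controls are passed through the usual extended-controls interface; a minimal sketch setting the SPS (real stateless decoders would normally attach this to a media request via V4L2_CTRL_WHICH_REQUEST_VAL instead of setting the current value directly):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Hand a parsed SPS to a stateless HEVC decoder node. */
    static int set_hevc_sps(int video_fd, const struct v4l2_ctrl_hevc_sps *sps)
    {
            struct v4l2_ext_control ctrl;
            struct v4l2_ext_controls ctrls;

            memset(&ctrl, 0, sizeof(ctrl));
            ctrl.id = V4L2_CID_STATELESS_HEVC_SPS;
            ctrl.size = sizeof(*sps);
            ctrl.ptr = (void *)sps;

            memset(&ctrls, 0, sizeof(ctrls));
            ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
            ctrls.count = 1;
            ctrls.controls = &ctrl;

            return ioctl(video_fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }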
+ */ +struct v4l2_ctrl_hevc_scaling_matrix { + __u8 scaling_list_4x4[6][16]; + __u8 scaling_list_8x8[6][64]; + __u8 scaling_list_16x16[6][64]; + __u8 scaling_list_32x32[2][64]; + __u8 scaling_list_dc_coef_16x16[6]; + __u8 scaling_list_dc_coef_32x32[2]; +}; + #define V4L2_CID_COLORIMETRY_CLASS_BASE (V4L2_CTRL_CLASS_COLORIMETRY | 0x900) #define V4L2_CID_COLORIMETRY_CLASS (V4L2_CTRL_CLASS_COLORIMETRY | 1) diff --git a/include/uapi/linux/vduse.h b/include/uapi/linux/vduse.h index 7cfe1c1280c0..11bd48c72c6c 100644 --- a/include/uapi/linux/vduse.h +++ b/include/uapi/linux/vduse.h @@ -210,6 +210,53 @@ struct vduse_vq_eventfd { */ #define VDUSE_VQ_INJECT_IRQ _IOW(VDUSE_BASE, 0x17, __u32) +/** + * struct vduse_iova_umem - userspace memory configuration for one IOVA region + * @uaddr: start address of userspace memory, which must be aligned to the page size + * @iova: start of the IOVA region + * @size: size of the IOVA region + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_IOTLB_REG_UMEM and VDUSE_IOTLB_DEREG_UMEM + * ioctls to register/de-register userspace memory for IOVA regions + */ +struct vduse_iova_umem { + __u64 uaddr; + __u64 iova; + __u64 size; + __u64 reserved[3]; +}; + +/* Register userspace memory for IOVA regions */ +#define VDUSE_IOTLB_REG_UMEM _IOW(VDUSE_BASE, 0x18, struct vduse_iova_umem) + +/* De-register the userspace memory. Caller should set iova and size fields. */ +#define VDUSE_IOTLB_DEREG_UMEM _IOW(VDUSE_BASE, 0x19, struct vduse_iova_umem) + +/** + * struct vduse_iova_info - information of one IOVA region + * @start: start of the IOVA region + * @last: last of the IOVA region + * @capability: capability of the IOVA region + * @reserved: for future use, needs to be initialized to zero + * + * Structure used by VDUSE_IOTLB_GET_INFO ioctl to get information of + * one IOVA region. + */ +struct vduse_iova_info { + __u64 start; + __u64 last; +#define VDUSE_IOVA_CAP_UMEM (1 << 0) + __u64 capability; + __u64 reserved[3]; +}; + +/* + * Find the first IOVA region that overlaps with the range [start, last] + * and return some information on it. Caller should set start and last fields.
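Registering a userspace buffer behind an IOVA region then reduces to one ioctl on the VDUSE device fd; a sketch assuming buf is page-aligned and the target region advertises VDUSE_IOVA_CAP_UMEM:

    #include <sys/ioctl.h>
    #include <linux/vduse.h>

    /* Back the IOVA range [iova, iova + size) with an existing userspace buffer. */
    static int vduse_reg_umem(int dev_fd, void *buf, __u64 iova, __u64 size)
    {
            struct vduse_iova_umem umem = {
                    .uaddr = (__u64)(unsigned long)buf,
                    .iova  = iova,
                    .size  = size,
            };

            return ioctl(dev_fd, VDUSE_IOTLB_REG_UMEM, &umem);
    }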
+ */ +#define VDUSE_IOTLB_GET_INFO _IOWR(VDUSE_BASE, 0x1a, struct vduse_iova_info) + /* The control messages definition for read(2)/write(2) on /dev/vduse/$NAME */ /** diff --git a/include/uapi/linux/vfio_zdev.h b/include/uapi/linux/vfio_zdev.h index b4309397b6b2..77f2aff1f27e 100644 --- a/include/uapi/linux/vfio_zdev.h +++ b/include/uapi/linux/vfio_zdev.h @@ -29,6 +29,9 @@ struct vfio_device_info_cap_zpci_base { __u16 fmb_length; /* Measurement Block Length (in bytes) */ __u8 pft; /* PCI Function Type */ __u8 gid; /* PCI function group ID */ + /* End of version 1 */ + __u32 fh; /* PCI function handle */ + /* End of version 2 */ }; /** @@ -47,6 +50,10 @@ struct vfio_device_info_cap_zpci_group { __u16 noi; /* Maximum number of MSIs */ __u16 maxstbl; /* Maximum Store Block Length */ __u8 version; /* Supported PCI Version */ + /* End of version 1 */ + __u8 reserved; + __u16 imaxstbl; /* Maximum Interpreted Store Block Length */ + /* End of version 2 */ }; /** diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index cab645d4a645..f9f115a7c75b 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -171,4 +171,13 @@ #define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \ struct vhost_vring_state) +/* Suspend a device so it does not process virtqueue requests anymore + * + * After the return of ioctl the device must preserve all the necessary state + * (the virtqueue vring base plus the possible device specific states) that is + * required for restoring in the future. The device must not change its + * configuration after that point. + */ +#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D) + #endif diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h index 391331a10879..53601ce2c20a 100644 --- a/include/uapi/linux/vhost_types.h +++ b/include/uapi/linux/vhost_types.h @@ -161,5 +161,7 @@ struct vhost_vdpa_iova_range { * message */ #define VHOST_BACKEND_F_IOTLB_ASID 0x3 +/* Device can be suspended */ +#define VHOST_BACKEND_F_SUSPEND 0x4 #endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 343b95107fce..01e630f2ec78 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -245,6 +245,14 @@ enum v4l2_colorspace { /* DCI-P3 colorspace, used by cinema projectors */ V4L2_COLORSPACE_DCI_P3 = 12, + +#ifdef __KERNEL__ + /* + * Largest supported colorspace value, assigned by the compiler, used + * by the framework to check for invalid values. + */ + V4L2_COLORSPACE_LAST, +#endif }; /* @@ -283,6 +291,13 @@ enum v4l2_xfer_func { V4L2_XFER_FUNC_NONE = 5, V4L2_XFER_FUNC_DCI_P3 = 6, V4L2_XFER_FUNC_SMPTE2084 = 7, +#ifdef __KERNEL__ + /* + * Largest supported transfer function value, assigned by the compiler, + * used by the framework to check for invalid values. + */ + V4L2_XFER_FUNC_LAST, +#endif }; /* @@ -343,6 +358,13 @@ enum v4l2_ycbcr_encoding { /* SMPTE 240M -- Obsolete HDTV */ V4L2_YCBCR_ENC_SMPTE240M = 8, +#ifdef __KERNEL__ + /* + * Largest supported encoding value, assigned by the compiler, used by + * the framework to check for invalid values. 
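On the vhost side, the new suspend ioctl pairs with the VHOST_BACKEND_F_SUSPEND bit from vhost_types.h. A sketch of the expected negotiation, assuming vdpa_fd is an open vhost-vdpa character device and that VHOST_GET_BACKEND_FEATURES behaves as in the existing linux/vhost.h (not shown in this hunk):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* Suspend the device only if the backend advertises the capability. */
    static int vdpa_try_suspend(int vdpa_fd)
    {
            uint64_t features = 0;

            if (ioctl(vdpa_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
                    return -1;
            if (!(features & (1ULL << VHOST_BACKEND_F_SUSPEND)))
                    return -1;      /* suspend not supported by this backend */

            return ioctl(vdpa_fd, VHOST_VDPA_SUSPEND);
    }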
+ */ + V4L2_YCBCR_ENC_LAST, +#endif }; /* @@ -593,6 +615,8 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_XYUV32 v4l2_fourcc('X', 'Y', 'U', 'V') /* 32 XYUV-8-8-8-8 */ #define V4L2_PIX_FMT_VUYA32 v4l2_fourcc('V', 'U', 'Y', 'A') /* 32 VUYA-8-8-8-8 */ #define V4L2_PIX_FMT_VUYX32 v4l2_fourcc('V', 'U', 'Y', 'X') /* 32 VUYX-8-8-8-8 */ +#define V4L2_PIX_FMT_YUVA32 v4l2_fourcc('Y', 'U', 'V', 'A') /* 32 YUVA-8-8-8-8 */ +#define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */ #define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */ /* two planes -- one Y, one Cr + Cb interleaved */ @@ -602,6 +626,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */ #define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */ #define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */ +#define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */ /* two non contiguous planes - one Y, one Cr + Cb interleaved */ #define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */ @@ -629,6 +654,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_NV12_4L4 v4l2_fourcc('V', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 4x4 tiles */ #define V4L2_PIX_FMT_NV12_16L16 v4l2_fourcc('H', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 tiles */ #define V4L2_PIX_FMT_NV12_32L32 v4l2_fourcc('S', 'T', '1', '2') /* 12 Y/CbCr 4:2:0 32x32 tiles */ +#define V4L2_PIX_FMT_P010_4L4 v4l2_fourcc('T', '0', '1', '0') /* 12 Y/CbCr 4:2:0 10-bit 4x4 macroblocks */ /* Tiled YUV formats, non contiguous planes */ #define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 tiles */ @@ -711,6 +737,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */ #define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */ #define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */ +#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */ /* Vendor-specific formats */ #define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */ @@ -1767,6 +1794,11 @@ struct v4l2_ext_control { struct v4l2_ctrl_mpeg2_quantisation __user *p_mpeg2_quantisation; struct v4l2_ctrl_vp9_compressed_hdr __user *p_vp9_compressed_hdr_probs; struct v4l2_ctrl_vp9_frame __user *p_vp9_frame; + struct v4l2_ctrl_hevc_sps __user *p_hevc_sps; + struct v4l2_ctrl_hevc_pps __user *p_hevc_pps; + struct v4l2_ctrl_hevc_slice_params __user *p_hevc_slice_params; + struct v4l2_ctrl_hevc_scaling_matrix __user *p_hevc_scaling_matrix; + struct v4l2_ctrl_hevc_decode_params __user *p_hevc_decode_params; void __user *ptr; }; } __attribute__ ((packed)); @@ -1834,6 +1866,12 @@ enum v4l2_ctrl_type { V4L2_CTRL_TYPE_VP9_COMPRESSED_HDR = 0x0260, V4L2_CTRL_TYPE_VP9_FRAME = 0x0261, + + V4L2_CTRL_TYPE_HEVC_SPS = 0x0270, + V4L2_CTRL_TYPE_HEVC_PPS = 0x0271, + V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS = 0x0272, + V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX = 0x0273, + V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS = 0x0274, }; /* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */ @@ -1889,6 +1927,7 @@ struct v4l2_querymenu { #define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100 #define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200 #define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400 +#define V4L2_CTRL_FLAG_DYNAMIC_ARRAY 0x0800 /* Query flags, to be ORed with the 
control ID */ #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000 diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index f0fb0ae021c0..3c05162bc988 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -52,7 +52,7 @@ * rest are per-device feature bits. */ #define VIRTIO_TRANSPORT_F_START 28 -#define VIRTIO_TRANSPORT_F_END 38 +#define VIRTIO_TRANSPORT_F_END 41 #ifndef VIRTIO_CONFIG_NO_LEGACY /* Do we get callbacks when the ring is completely used, even if we've @@ -98,4 +98,9 @@ * Does the device support Single Root I/O Virtualization? */ #define VIRTIO_F_SR_IOV 37 + +/* + * This feature indicates that the driver can reset a queue individually. + */ +#define VIRTIO_F_RING_RESET 40 #endif /* _UAPI_LINUX_VIRTIO_CONFIG_H */ diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 3f55a4215f11..29ced55514d4 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -56,7 +56,7 @@ #define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ - +#define VIRTIO_NET_F_NOTF_COAL 53 /* Guest can handle notifications coalescing */ #define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */ #define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */ #define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */ @@ -355,4 +355,36 @@ struct virtio_net_hash_config { #define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5 #define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0 +/* + * Control notifications coalescing. + * + * Request the device to change the notifications coalescing parameters. + * + * Available with the VIRTIO_NET_F_NOTF_COAL feature bit. + */ +#define VIRTIO_NET_CTRL_NOTF_COAL 6 +/* + * Set the tx-usecs/tx-max-packets patameters. + * tx-usecs - Maximum number of usecs to delay a TX notification. + * tx-max-packets - Maximum number of packets to send before a TX notification. + */ +struct virtio_net_ctrl_coal_tx { + __le32 tx_max_packets; + __le32 tx_usecs; +}; + +#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0 + +/* + * Set the rx-usecs/rx-max-packets patameters. + * rx-usecs - Maximum number of usecs to delay a RX notification. + * rx-max-frames - Maximum number of packets to receive before a RX notification. + */ +struct virtio_net_ctrl_coal_rx { + __le32 rx_max_packets; + __le32 rx_usecs; +}; + +#define VIRTIO_NET_CTRL_NOTF_COAL_RX_SET 1 + #endif /* _UAPI_LINUX_VIRTIO_NET_H */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 3a86f36d7e3d..f703afc7ad31 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -202,6 +202,8 @@ struct virtio_pci_cfg_cap { #define VIRTIO_PCI_COMMON_Q_AVAILHI 44 #define VIRTIO_PCI_COMMON_Q_USEDLO 48 #define VIRTIO_PCI_COMMON_Q_USEDHI 52 +#define VIRTIO_PCI_COMMON_Q_NDATA 56 +#define VIRTIO_PCI_COMMON_Q_RESET 58 #endif /* VIRTIO_PCI_NO_MODERN */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index e8191e0c3b56..b1f3e6a8f11a 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -511,9 +511,9 @@ struct xfrm_user_offload { int ifindex; __u8 flags; }; -/* This flag was exposed without any kernel code that supporting it. - * Unfortunately, strongswan has the code that uses sets this flag, - * which makes impossible to reuse this bit. +/* This flag was exposed without any kernel code that supports it. 
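Returning to the virtio-net notification coalescing additions above: the two structures are what a driver places on the control virtqueue under class VIRTIO_NET_CTRL_NOTF_COAL. A hedged sketch of building the TX payload; the control-virtqueue plumbing itself is driver-specific and omitted, and htole32() stands in for the wire's little-endian encoding.

    #include <endian.h>
    #include <stdint.h>
    #include <linux/virtio_net.h>

    /* Prepare the body of a VIRTIO_NET_CTRL_NOTF_COAL / _TX_SET command:
     * notify after at most 'max_packets' sent packets or 'usecs' microseconds. */
    static void fill_tx_coal(struct virtio_net_ctrl_coal_tx *coal,
                             uint32_t max_packets, uint32_t usecs)
    {
            coal->tx_max_packets = htole32(max_packets);
            coal->tx_usecs = htole32(usecs);
    }

For Linux guests this ultimately surfaces through ethtool's coalescing parameters (tx-frames/tx-usecs and their RX counterparts).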
+ * Unfortunately, strongswan has the code that sets this flag, + * which makes it impossible to reuse this bit. * * So leave it here to make sure that it won't be reused by mistake. */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 52540d5b4fc9..5d06d5c74dd1 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -185,6 +185,285 @@ enum gaudi_queue_id { }; /* + * In GAUDI2 we have two modes of operation in regard to queues: + * 1. Legacy mode, where each QMAN exposes 4 streams to the user + * 2. F/W mode, where we use F/W to schedule the JOBS to the different queues. + * + * When in legacy mode, the user sends the queue id per JOB according to + * enum gaudi2_queue_id below. + * + * When in F/W mode, the user sends a stream id per Command Submission. The + * stream id is a running number from 0 up to (N-1), where N is the number + * of streams the F/W exposes and is passed to the user in + * struct hl_info_hw_ip_info + */ + +enum gaudi2_queue_id { + GAUDI2_QUEUE_ID_PDMA_0_0 = 0, + GAUDI2_QUEUE_ID_PDMA_0_1 = 1, + GAUDI2_QUEUE_ID_PDMA_0_2 = 2, + GAUDI2_QUEUE_ID_PDMA_0_3 = 3, + GAUDI2_QUEUE_ID_PDMA_1_0 = 4, + GAUDI2_QUEUE_ID_PDMA_1_1 = 5, + GAUDI2_QUEUE_ID_PDMA_1_2 = 6, + GAUDI2_QUEUE_ID_PDMA_1_3 = 7, + GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0 = 8, + GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1 = 9, + GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2 = 10, + GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3 = 11, + GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0 = 12, + GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1 = 13, + GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2 = 14, + GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3 = 15, + GAUDI2_QUEUE_ID_DCORE0_MME_0_0 = 16, + GAUDI2_QUEUE_ID_DCORE0_MME_0_1 = 17, + GAUDI2_QUEUE_ID_DCORE0_MME_0_2 = 18, + GAUDI2_QUEUE_ID_DCORE0_MME_0_3 = 19, + GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 = 20, + GAUDI2_QUEUE_ID_DCORE0_TPC_0_1 = 21, + GAUDI2_QUEUE_ID_DCORE0_TPC_0_2 = 22, + GAUDI2_QUEUE_ID_DCORE0_TPC_0_3 = 23, + GAUDI2_QUEUE_ID_DCORE0_TPC_1_0 = 24, + GAUDI2_QUEUE_ID_DCORE0_TPC_1_1 = 25, + GAUDI2_QUEUE_ID_DCORE0_TPC_1_2 = 26, + GAUDI2_QUEUE_ID_DCORE0_TPC_1_3 = 27, + GAUDI2_QUEUE_ID_DCORE0_TPC_2_0 = 28, + GAUDI2_QUEUE_ID_DCORE0_TPC_2_1 = 29, + GAUDI2_QUEUE_ID_DCORE0_TPC_2_2 = 30, + GAUDI2_QUEUE_ID_DCORE0_TPC_2_3 = 31, + GAUDI2_QUEUE_ID_DCORE0_TPC_3_0 = 32, + GAUDI2_QUEUE_ID_DCORE0_TPC_3_1 = 33, + GAUDI2_QUEUE_ID_DCORE0_TPC_3_2 = 34, + GAUDI2_QUEUE_ID_DCORE0_TPC_3_3 = 35, + GAUDI2_QUEUE_ID_DCORE0_TPC_4_0 = 36, + GAUDI2_QUEUE_ID_DCORE0_TPC_4_1 = 37, + GAUDI2_QUEUE_ID_DCORE0_TPC_4_2 = 38, + GAUDI2_QUEUE_ID_DCORE0_TPC_4_3 = 39, + GAUDI2_QUEUE_ID_DCORE0_TPC_5_0 = 40, + GAUDI2_QUEUE_ID_DCORE0_TPC_5_1 = 41, + GAUDI2_QUEUE_ID_DCORE0_TPC_5_2 = 42, + GAUDI2_QUEUE_ID_DCORE0_TPC_5_3 = 43, + GAUDI2_QUEUE_ID_DCORE0_TPC_6_0 = 44, + GAUDI2_QUEUE_ID_DCORE0_TPC_6_1 = 45, + GAUDI2_QUEUE_ID_DCORE0_TPC_6_2 = 46, + GAUDI2_QUEUE_ID_DCORE0_TPC_6_3 = 47, + GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0 = 48, + GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1 = 49, + GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2 = 50, + GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3 = 51, + GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0 = 52, + GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1 = 53, + GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2 = 54, + GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3 = 55, + GAUDI2_QUEUE_ID_DCORE1_MME_0_0 = 56, + GAUDI2_QUEUE_ID_DCORE1_MME_0_1 = 57, + GAUDI2_QUEUE_ID_DCORE1_MME_0_2 = 58, + GAUDI2_QUEUE_ID_DCORE1_MME_0_3 = 59, + GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 = 60, + GAUDI2_QUEUE_ID_DCORE1_TPC_0_1 = 61, + GAUDI2_QUEUE_ID_DCORE1_TPC_0_2 = 62, + GAUDI2_QUEUE_ID_DCORE1_TPC_0_3 = 63, + GAUDI2_QUEUE_ID_DCORE1_TPC_1_0 = 64, + GAUDI2_QUEUE_ID_DCORE1_TPC_1_1 = 
65, + GAUDI2_QUEUE_ID_DCORE1_TPC_1_2 = 66, + GAUDI2_QUEUE_ID_DCORE1_TPC_1_3 = 67, + GAUDI2_QUEUE_ID_DCORE1_TPC_2_0 = 68, + GAUDI2_QUEUE_ID_DCORE1_TPC_2_1 = 69, + GAUDI2_QUEUE_ID_DCORE1_TPC_2_2 = 70, + GAUDI2_QUEUE_ID_DCORE1_TPC_2_3 = 71, + GAUDI2_QUEUE_ID_DCORE1_TPC_3_0 = 72, + GAUDI2_QUEUE_ID_DCORE1_TPC_3_1 = 73, + GAUDI2_QUEUE_ID_DCORE1_TPC_3_2 = 74, + GAUDI2_QUEUE_ID_DCORE1_TPC_3_3 = 75, + GAUDI2_QUEUE_ID_DCORE1_TPC_4_0 = 76, + GAUDI2_QUEUE_ID_DCORE1_TPC_4_1 = 77, + GAUDI2_QUEUE_ID_DCORE1_TPC_4_2 = 78, + GAUDI2_QUEUE_ID_DCORE1_TPC_4_3 = 79, + GAUDI2_QUEUE_ID_DCORE1_TPC_5_0 = 80, + GAUDI2_QUEUE_ID_DCORE1_TPC_5_1 = 81, + GAUDI2_QUEUE_ID_DCORE1_TPC_5_2 = 82, + GAUDI2_QUEUE_ID_DCORE1_TPC_5_3 = 83, + GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0 = 84, + GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1 = 85, + GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2 = 86, + GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3 = 87, + GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0 = 88, + GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1 = 89, + GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2 = 90, + GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3 = 91, + GAUDI2_QUEUE_ID_DCORE2_MME_0_0 = 92, + GAUDI2_QUEUE_ID_DCORE2_MME_0_1 = 93, + GAUDI2_QUEUE_ID_DCORE2_MME_0_2 = 94, + GAUDI2_QUEUE_ID_DCORE2_MME_0_3 = 95, + GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 = 96, + GAUDI2_QUEUE_ID_DCORE2_TPC_0_1 = 97, + GAUDI2_QUEUE_ID_DCORE2_TPC_0_2 = 98, + GAUDI2_QUEUE_ID_DCORE2_TPC_0_3 = 99, + GAUDI2_QUEUE_ID_DCORE2_TPC_1_0 = 100, + GAUDI2_QUEUE_ID_DCORE2_TPC_1_1 = 101, + GAUDI2_QUEUE_ID_DCORE2_TPC_1_2 = 102, + GAUDI2_QUEUE_ID_DCORE2_TPC_1_3 = 103, + GAUDI2_QUEUE_ID_DCORE2_TPC_2_0 = 104, + GAUDI2_QUEUE_ID_DCORE2_TPC_2_1 = 105, + GAUDI2_QUEUE_ID_DCORE2_TPC_2_2 = 106, + GAUDI2_QUEUE_ID_DCORE2_TPC_2_3 = 107, + GAUDI2_QUEUE_ID_DCORE2_TPC_3_0 = 108, + GAUDI2_QUEUE_ID_DCORE2_TPC_3_1 = 109, + GAUDI2_QUEUE_ID_DCORE2_TPC_3_2 = 110, + GAUDI2_QUEUE_ID_DCORE2_TPC_3_3 = 111, + GAUDI2_QUEUE_ID_DCORE2_TPC_4_0 = 112, + GAUDI2_QUEUE_ID_DCORE2_TPC_4_1 = 113, + GAUDI2_QUEUE_ID_DCORE2_TPC_4_2 = 114, + GAUDI2_QUEUE_ID_DCORE2_TPC_4_3 = 115, + GAUDI2_QUEUE_ID_DCORE2_TPC_5_0 = 116, + GAUDI2_QUEUE_ID_DCORE2_TPC_5_1 = 117, + GAUDI2_QUEUE_ID_DCORE2_TPC_5_2 = 118, + GAUDI2_QUEUE_ID_DCORE2_TPC_5_3 = 119, + GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0 = 120, + GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1 = 121, + GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2 = 122, + GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3 = 123, + GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0 = 124, + GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1 = 125, + GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2 = 126, + GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3 = 127, + GAUDI2_QUEUE_ID_DCORE3_MME_0_0 = 128, + GAUDI2_QUEUE_ID_DCORE3_MME_0_1 = 129, + GAUDI2_QUEUE_ID_DCORE3_MME_0_2 = 130, + GAUDI2_QUEUE_ID_DCORE3_MME_0_3 = 131, + GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 = 132, + GAUDI2_QUEUE_ID_DCORE3_TPC_0_1 = 133, + GAUDI2_QUEUE_ID_DCORE3_TPC_0_2 = 134, + GAUDI2_QUEUE_ID_DCORE3_TPC_0_3 = 135, + GAUDI2_QUEUE_ID_DCORE3_TPC_1_0 = 136, + GAUDI2_QUEUE_ID_DCORE3_TPC_1_1 = 137, + GAUDI2_QUEUE_ID_DCORE3_TPC_1_2 = 138, + GAUDI2_QUEUE_ID_DCORE3_TPC_1_3 = 139, + GAUDI2_QUEUE_ID_DCORE3_TPC_2_0 = 140, + GAUDI2_QUEUE_ID_DCORE3_TPC_2_1 = 141, + GAUDI2_QUEUE_ID_DCORE3_TPC_2_2 = 142, + GAUDI2_QUEUE_ID_DCORE3_TPC_2_3 = 143, + GAUDI2_QUEUE_ID_DCORE3_TPC_3_0 = 144, + GAUDI2_QUEUE_ID_DCORE3_TPC_3_1 = 145, + GAUDI2_QUEUE_ID_DCORE3_TPC_3_2 = 146, + GAUDI2_QUEUE_ID_DCORE3_TPC_3_3 = 147, + GAUDI2_QUEUE_ID_DCORE3_TPC_4_0 = 148, + GAUDI2_QUEUE_ID_DCORE3_TPC_4_1 = 149, + GAUDI2_QUEUE_ID_DCORE3_TPC_4_2 = 150, + GAUDI2_QUEUE_ID_DCORE3_TPC_4_3 = 151, + GAUDI2_QUEUE_ID_DCORE3_TPC_5_0 = 152, + GAUDI2_QUEUE_ID_DCORE3_TPC_5_1 = 153, + GAUDI2_QUEUE_ID_DCORE3_TPC_5_2 = 154, + 
GAUDI2_QUEUE_ID_DCORE3_TPC_5_3 = 155, + GAUDI2_QUEUE_ID_NIC_0_0 = 156, + GAUDI2_QUEUE_ID_NIC_0_1 = 157, + GAUDI2_QUEUE_ID_NIC_0_2 = 158, + GAUDI2_QUEUE_ID_NIC_0_3 = 159, + GAUDI2_QUEUE_ID_NIC_1_0 = 160, + GAUDI2_QUEUE_ID_NIC_1_1 = 161, + GAUDI2_QUEUE_ID_NIC_1_2 = 162, + GAUDI2_QUEUE_ID_NIC_1_3 = 163, + GAUDI2_QUEUE_ID_NIC_2_0 = 164, + GAUDI2_QUEUE_ID_NIC_2_1 = 165, + GAUDI2_QUEUE_ID_NIC_2_2 = 166, + GAUDI2_QUEUE_ID_NIC_2_3 = 167, + GAUDI2_QUEUE_ID_NIC_3_0 = 168, + GAUDI2_QUEUE_ID_NIC_3_1 = 169, + GAUDI2_QUEUE_ID_NIC_3_2 = 170, + GAUDI2_QUEUE_ID_NIC_3_3 = 171, + GAUDI2_QUEUE_ID_NIC_4_0 = 172, + GAUDI2_QUEUE_ID_NIC_4_1 = 173, + GAUDI2_QUEUE_ID_NIC_4_2 = 174, + GAUDI2_QUEUE_ID_NIC_4_3 = 175, + GAUDI2_QUEUE_ID_NIC_5_0 = 176, + GAUDI2_QUEUE_ID_NIC_5_1 = 177, + GAUDI2_QUEUE_ID_NIC_5_2 = 178, + GAUDI2_QUEUE_ID_NIC_5_3 = 179, + GAUDI2_QUEUE_ID_NIC_6_0 = 180, + GAUDI2_QUEUE_ID_NIC_6_1 = 181, + GAUDI2_QUEUE_ID_NIC_6_2 = 182, + GAUDI2_QUEUE_ID_NIC_6_3 = 183, + GAUDI2_QUEUE_ID_NIC_7_0 = 184, + GAUDI2_QUEUE_ID_NIC_7_1 = 185, + GAUDI2_QUEUE_ID_NIC_7_2 = 186, + GAUDI2_QUEUE_ID_NIC_7_3 = 187, + GAUDI2_QUEUE_ID_NIC_8_0 = 188, + GAUDI2_QUEUE_ID_NIC_8_1 = 189, + GAUDI2_QUEUE_ID_NIC_8_2 = 190, + GAUDI2_QUEUE_ID_NIC_8_3 = 191, + GAUDI2_QUEUE_ID_NIC_9_0 = 192, + GAUDI2_QUEUE_ID_NIC_9_1 = 193, + GAUDI2_QUEUE_ID_NIC_9_2 = 194, + GAUDI2_QUEUE_ID_NIC_9_3 = 195, + GAUDI2_QUEUE_ID_NIC_10_0 = 196, + GAUDI2_QUEUE_ID_NIC_10_1 = 197, + GAUDI2_QUEUE_ID_NIC_10_2 = 198, + GAUDI2_QUEUE_ID_NIC_10_3 = 199, + GAUDI2_QUEUE_ID_NIC_11_0 = 200, + GAUDI2_QUEUE_ID_NIC_11_1 = 201, + GAUDI2_QUEUE_ID_NIC_11_2 = 202, + GAUDI2_QUEUE_ID_NIC_11_3 = 203, + GAUDI2_QUEUE_ID_NIC_12_0 = 204, + GAUDI2_QUEUE_ID_NIC_12_1 = 205, + GAUDI2_QUEUE_ID_NIC_12_2 = 206, + GAUDI2_QUEUE_ID_NIC_12_3 = 207, + GAUDI2_QUEUE_ID_NIC_13_0 = 208, + GAUDI2_QUEUE_ID_NIC_13_1 = 209, + GAUDI2_QUEUE_ID_NIC_13_2 = 210, + GAUDI2_QUEUE_ID_NIC_13_3 = 211, + GAUDI2_QUEUE_ID_NIC_14_0 = 212, + GAUDI2_QUEUE_ID_NIC_14_1 = 213, + GAUDI2_QUEUE_ID_NIC_14_2 = 214, + GAUDI2_QUEUE_ID_NIC_14_3 = 215, + GAUDI2_QUEUE_ID_NIC_15_0 = 216, + GAUDI2_QUEUE_ID_NIC_15_1 = 217, + GAUDI2_QUEUE_ID_NIC_15_2 = 218, + GAUDI2_QUEUE_ID_NIC_15_3 = 219, + GAUDI2_QUEUE_ID_NIC_16_0 = 220, + GAUDI2_QUEUE_ID_NIC_16_1 = 221, + GAUDI2_QUEUE_ID_NIC_16_2 = 222, + GAUDI2_QUEUE_ID_NIC_16_3 = 223, + GAUDI2_QUEUE_ID_NIC_17_0 = 224, + GAUDI2_QUEUE_ID_NIC_17_1 = 225, + GAUDI2_QUEUE_ID_NIC_17_2 = 226, + GAUDI2_QUEUE_ID_NIC_17_3 = 227, + GAUDI2_QUEUE_ID_NIC_18_0 = 228, + GAUDI2_QUEUE_ID_NIC_18_1 = 229, + GAUDI2_QUEUE_ID_NIC_18_2 = 230, + GAUDI2_QUEUE_ID_NIC_18_3 = 231, + GAUDI2_QUEUE_ID_NIC_19_0 = 232, + GAUDI2_QUEUE_ID_NIC_19_1 = 233, + GAUDI2_QUEUE_ID_NIC_19_2 = 234, + GAUDI2_QUEUE_ID_NIC_19_3 = 235, + GAUDI2_QUEUE_ID_NIC_20_0 = 236, + GAUDI2_QUEUE_ID_NIC_20_1 = 237, + GAUDI2_QUEUE_ID_NIC_20_2 = 238, + GAUDI2_QUEUE_ID_NIC_20_3 = 239, + GAUDI2_QUEUE_ID_NIC_21_0 = 240, + GAUDI2_QUEUE_ID_NIC_21_1 = 241, + GAUDI2_QUEUE_ID_NIC_21_2 = 242, + GAUDI2_QUEUE_ID_NIC_21_3 = 243, + GAUDI2_QUEUE_ID_NIC_22_0 = 244, + GAUDI2_QUEUE_ID_NIC_22_1 = 245, + GAUDI2_QUEUE_ID_NIC_22_2 = 246, + GAUDI2_QUEUE_ID_NIC_22_3 = 247, + GAUDI2_QUEUE_ID_NIC_23_0 = 248, + GAUDI2_QUEUE_ID_NIC_23_1 = 249, + GAUDI2_QUEUE_ID_NIC_23_2 = 250, + GAUDI2_QUEUE_ID_NIC_23_3 = 251, + GAUDI2_QUEUE_ID_ROT_0_0 = 252, + GAUDI2_QUEUE_ID_ROT_0_1 = 253, + GAUDI2_QUEUE_ID_ROT_0_2 = 254, + GAUDI2_QUEUE_ID_ROT_0_3 = 255, + GAUDI2_QUEUE_ID_ROT_1_0 = 256, + GAUDI2_QUEUE_ID_ROT_1_1 = 257, + GAUDI2_QUEUE_ID_ROT_1_2 = 258, + GAUDI2_QUEUE_ID_ROT_1_3 = 259, + GAUDI2_QUEUE_ID_CPU_PQ = 
260, + GAUDI2_QUEUE_ID_SIZE +}; + +/* * Engine Numbering * * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle' @@ -242,6 +521,85 @@ enum gaudi_engine_id { GAUDI_ENGINE_ID_SIZE }; +enum gaudi2_engine_id { + GAUDI2_DCORE0_ENGINE_ID_EDMA_0 = 0, + GAUDI2_DCORE0_ENGINE_ID_EDMA_1, + GAUDI2_DCORE0_ENGINE_ID_MME, + GAUDI2_DCORE0_ENGINE_ID_TPC_0, + GAUDI2_DCORE0_ENGINE_ID_TPC_1, + GAUDI2_DCORE0_ENGINE_ID_TPC_2, + GAUDI2_DCORE0_ENGINE_ID_TPC_3, + GAUDI2_DCORE0_ENGINE_ID_TPC_4, + GAUDI2_DCORE0_ENGINE_ID_TPC_5, + GAUDI2_DCORE0_ENGINE_ID_DEC_0, + GAUDI2_DCORE0_ENGINE_ID_DEC_1, + GAUDI2_DCORE1_ENGINE_ID_EDMA_0, + GAUDI2_DCORE1_ENGINE_ID_EDMA_1, + GAUDI2_DCORE1_ENGINE_ID_MME, + GAUDI2_DCORE1_ENGINE_ID_TPC_0, + GAUDI2_DCORE1_ENGINE_ID_TPC_1, + GAUDI2_DCORE1_ENGINE_ID_TPC_2, + GAUDI2_DCORE1_ENGINE_ID_TPC_3, + GAUDI2_DCORE1_ENGINE_ID_TPC_4, + GAUDI2_DCORE1_ENGINE_ID_TPC_5, + GAUDI2_DCORE1_ENGINE_ID_DEC_0, + GAUDI2_DCORE1_ENGINE_ID_DEC_1, + GAUDI2_DCORE2_ENGINE_ID_EDMA_0, + GAUDI2_DCORE2_ENGINE_ID_EDMA_1, + GAUDI2_DCORE2_ENGINE_ID_MME, + GAUDI2_DCORE2_ENGINE_ID_TPC_0, + GAUDI2_DCORE2_ENGINE_ID_TPC_1, + GAUDI2_DCORE2_ENGINE_ID_TPC_2, + GAUDI2_DCORE2_ENGINE_ID_TPC_3, + GAUDI2_DCORE2_ENGINE_ID_TPC_4, + GAUDI2_DCORE2_ENGINE_ID_TPC_5, + GAUDI2_DCORE2_ENGINE_ID_DEC_0, + GAUDI2_DCORE2_ENGINE_ID_DEC_1, + GAUDI2_DCORE3_ENGINE_ID_EDMA_0, + GAUDI2_DCORE3_ENGINE_ID_EDMA_1, + GAUDI2_DCORE3_ENGINE_ID_MME, + GAUDI2_DCORE3_ENGINE_ID_TPC_0, + GAUDI2_DCORE3_ENGINE_ID_TPC_1, + GAUDI2_DCORE3_ENGINE_ID_TPC_2, + GAUDI2_DCORE3_ENGINE_ID_TPC_3, + GAUDI2_DCORE3_ENGINE_ID_TPC_4, + GAUDI2_DCORE3_ENGINE_ID_TPC_5, + GAUDI2_DCORE3_ENGINE_ID_DEC_0, + GAUDI2_DCORE3_ENGINE_ID_DEC_1, + GAUDI2_DCORE0_ENGINE_ID_TPC_6, + GAUDI2_ENGINE_ID_PDMA_0, + GAUDI2_ENGINE_ID_PDMA_1, + GAUDI2_ENGINE_ID_ROT_0, + GAUDI2_ENGINE_ID_ROT_1, + GAUDI2_PCIE_ENGINE_ID_DEC_0, + GAUDI2_PCIE_ENGINE_ID_DEC_1, + GAUDI2_ENGINE_ID_NIC0_0, + GAUDI2_ENGINE_ID_NIC0_1, + GAUDI2_ENGINE_ID_NIC1_0, + GAUDI2_ENGINE_ID_NIC1_1, + GAUDI2_ENGINE_ID_NIC2_0, + GAUDI2_ENGINE_ID_NIC2_1, + GAUDI2_ENGINE_ID_NIC3_0, + GAUDI2_ENGINE_ID_NIC3_1, + GAUDI2_ENGINE_ID_NIC4_0, + GAUDI2_ENGINE_ID_NIC4_1, + GAUDI2_ENGINE_ID_NIC5_0, + GAUDI2_ENGINE_ID_NIC5_1, + GAUDI2_ENGINE_ID_NIC6_0, + GAUDI2_ENGINE_ID_NIC6_1, + GAUDI2_ENGINE_ID_NIC7_0, + GAUDI2_ENGINE_ID_NIC7_1, + GAUDI2_ENGINE_ID_NIC8_0, + GAUDI2_ENGINE_ID_NIC8_1, + GAUDI2_ENGINE_ID_NIC9_0, + GAUDI2_ENGINE_ID_NIC9_1, + GAUDI2_ENGINE_ID_NIC10_0, + GAUDI2_ENGINE_ID_NIC10_1, + GAUDI2_ENGINE_ID_NIC11_0, + GAUDI2_ENGINE_ID_NIC11_1, + GAUDI2_ENGINE_ID_SIZE +}; + /* * ASIC specific PLL index * @@ -275,6 +633,49 @@ enum hl_gaudi_pll_index { HL_GAUDI_PLL_MAX }; +enum hl_gaudi2_pll_index { + HL_GAUDI2_CPU_PLL = 0, + HL_GAUDI2_PCI_PLL, + HL_GAUDI2_SRAM_PLL, + HL_GAUDI2_HBM_PLL, + HL_GAUDI2_NIC_PLL, + HL_GAUDI2_DMA_PLL, + HL_GAUDI2_MESH_PLL, + HL_GAUDI2_MME_PLL, + HL_GAUDI2_TPC_PLL, + HL_GAUDI2_IF_PLL, + HL_GAUDI2_VID_PLL, + HL_GAUDI2_MSS_PLL, + HL_GAUDI2_PLL_MAX +}; + +/** + * enum hl_goya_dma_direction - Direction of DMA operation inside a LIN_DMA packet that is + * submitted to the GOYA's DMA QMAN. This attribute is not relevant + * to the H/W but the kernel driver use it to parse the packet's + * addresses and patch/validate them. + * @HL_DMA_HOST_TO_DRAM: DMA operation from Host memory to GOYA's DDR. + * @HL_DMA_HOST_TO_SRAM: DMA operation from Host memory to GOYA's SRAM. + * @HL_DMA_DRAM_TO_SRAM: DMA operation from GOYA's DDR to GOYA's SRAM. + * @HL_DMA_SRAM_TO_DRAM: DMA operation from GOYA's SRAM to GOYA's DDR. 
+ * @HL_DMA_SRAM_TO_HOST: DMA operation from GOYA's SRAM to Host memory. + * @HL_DMA_DRAM_TO_HOST: DMA operation from GOYA's DDR to Host memory. + * @HL_DMA_DRAM_TO_DRAM: DMA operation from GOYA's DDR to GOYA's DDR. + * @HL_DMA_SRAM_TO_SRAM: DMA operation from GOYA's SRAM to GOYA's SRAM. + * @HL_DMA_ENUM_MAX: number of values in enum + */ +enum hl_goya_dma_direction { + HL_DMA_HOST_TO_DRAM, + HL_DMA_HOST_TO_SRAM, + HL_DMA_DRAM_TO_SRAM, + HL_DMA_SRAM_TO_DRAM, + HL_DMA_SRAM_TO_HOST, + HL_DMA_DRAM_TO_HOST, + HL_DMA_DRAM_TO_DRAM, + HL_DMA_SRAM_TO_SRAM, + HL_DMA_ENUM_MAX +}; + /** * enum hl_device_status - Device status information. * @HL_DEVICE_STATUS_OPERATIONAL: Device is operational. @@ -283,6 +684,8 @@ enum hl_gaudi_pll_index { * @HL_DEVICE_STATUS_NEEDS_RESET: Device needs reset because auto reset was disabled. * @HL_DEVICE_STATUS_IN_DEVICE_CREATION: Device is operational but its creation is still in * progress. + * @HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE: Device is currently during reset that was + * triggered because the user released the device * @HL_DEVICE_STATUS_LAST: Last status. */ enum hl_device_status { @@ -291,7 +694,8 @@ enum hl_device_status { HL_DEVICE_STATUS_MALFUNCTION, HL_DEVICE_STATUS_NEEDS_RESET, HL_DEVICE_STATUS_IN_DEVICE_CREATION, - HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_DEVICE_CREATION + HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE, + HL_DEVICE_STATUS_LAST = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE }; enum hl_server_type { @@ -299,7 +703,8 @@ enum hl_server_type { HL_SERVER_GAUDI_HLS1 = 1, HL_SERVER_GAUDI_HLS1H = 2, HL_SERVER_GAUDI_TYPE1 = 3, - HL_SERVER_GAUDI_TYPE2 = 4 + HL_SERVER_GAUDI_TYPE2 = 4, + HL_SERVER_GAUDI2_HLS2 = 5 }; /* Opcode for management ioctl @@ -352,6 +757,7 @@ enum hl_server_type { * HL_INFO_REGISTER_EVENTFD - Register eventfd for event notifications. * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd * HL_INFO_GET_EVENTS - Retrieve the last occurred events + * HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information. */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -380,6 +786,7 @@ enum hl_server_type { #define HL_INFO_REGISTER_EVENTFD 28 #define HL_INFO_UNREGISTER_EVENTFD 29 #define HL_INFO_GET_EVENTS 30 +#define HL_INFO_UNDEFINED_OPCODE_EVENT 31 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -399,8 +806,10 @@ enum hl_server_type { * @device_id: PCI device ID of the ASIC. * @module_id: Module ID of the ASIC for mezzanine cards in servers * (From OCP spec). + * @decoder_enabled_mask: Bit-mask that represents which decoders are enabled. * @first_available_interrupt_id: The first available interrupt ID for the user * to be used when it works with user interrupts. + * Relevant for Gaudi2 and later. * @server_type: Server type that the Gaudi ASIC is currently installed in. * The value is according to enum hl_server_type * @cpld_version: CPLD version on the board. @@ -412,9 +821,15 @@ enum hl_server_type { * @tpc_enabled_mask: Bit-mask that represents which TPCs are enabled. Relevant * for Goya/Gaudi only. * @dram_enabled: Whether the DRAM is enabled. + * @mme_master_slave_mode: Indicate whether the MME is working in master/slave + * configuration. Relevant for Greco and later. * @cpucp_version: The CPUCP f/w version. * @card_name: The card name as passed by the f/w. + * @tpc_enabled_mask_ext: Bit-mask that represents which TPCs are enabled. + * Relevant for Greco and later. * @dram_page_size: The DRAM physical page size. 
+ * @edma_enabled_mask: Bit-mask that represents which EDMAs are enabled. + * Relevant for Gaudi2 and later. * @number_of_user_interrupts: The number of interrupts that are available to the userspace * application to use. Relevant for Gaudi2 and later. * @device_mem_alloc_default_page_size: default page size used in device memory allocation. @@ -427,7 +842,7 @@ struct hl_info_hw_ip_info { __u32 num_of_events; __u32 device_id; __u32 module_id; - __u32 reserved; + __u32 decoder_enabled_mask; __u16 first_available_interrupt_id; __u16 server_type; __u32 cpld_version; @@ -437,12 +852,13 @@ struct hl_info_hw_ip_info { __u32 psoc_pci_pll_div_factor; __u8 tpc_enabled_mask; __u8 dram_enabled; - __u8 pad[2]; + __u8 reserved; + __u8 mme_master_slave_mode; __u8 cpucp_version[HL_INFO_VERSION_MAX_LEN]; __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN]; - __u64 reserved2; + __u64 tpc_enabled_mask_ext; __u64 dram_page_size; - __u32 reserved3; + __u32 edma_enabled_mask; __u16 number_of_user_interrupts; __u16 pad2; __u64 reserved4; @@ -656,6 +1072,34 @@ struct hl_info_razwi_event { __u8 pad[2]; }; +#define MAX_QMAN_STREAMS_INFO 4 +#define OPCODE_INFO_MAX_ADDR_SIZE 8 +/** + * struct hl_info_undefined_opcode_event - info about last undefined opcode error + * @timestamp: timestamp of the undefined opcode error + * @cb_addr_streams: CB addresses (per stream) that are currently exists in the PQ + * entiers. In case all streams array entries are + * filled with values, it means the execution was in Lower-CP. + * @cq_addr: the address of the current handled command buffer + * @cq_size: the size of the current handled command buffer + * @cb_addr_streams_len: num of streams - actual len of cb_addr_streams array. + * should be equal to 1 incase of undefined opcode + * in Upper-CP (specific stream) and equal to 4 incase + * of undefined opcode in Lower-CP. + * @engine_id: engine-id that the error occurred on + * @stream_id: the stream id the error occurred on. In case the stream equals to + * MAX_QMAN_STREAMS_INFO it means the error occurred on a Lower-CP. + */ +struct hl_info_undefined_opcode_event { + __s64 timestamp; + __u64 cb_addr_streams[MAX_QMAN_STREAMS_INFO][OPCODE_INFO_MAX_ADDR_SIZE]; + __u64 cq_addr; + __u32 cq_size; + __u32 cb_addr_streams_len; + __u32 engine_id; + __u32 stream_id; +}; + /** * struct hl_info_dev_memalloc_page_sizes - valid page sizes in device mem alloc information. * @page_order_bitmask: bitmap in which a set bit represents the order of the supported page size @@ -764,16 +1208,16 @@ union hl_cb_args { /* HL_CS_CHUNK_FLAGS_ values * * HL_CS_CHUNK_FLAGS_USER_ALLOC_CB: - * Indicates if the CB was allocated and mapped by userspace. - * User allocated CB is a command buffer allocated by the user, via malloc - * (or similar). After allocating the CB, the user invokes “memory ioctl” - * to map the user memory into a device virtual address. The user provides - * this address via the cb_handle field. The interface provides the - * ability to create a large CBs, Which aren’t limited to - * “HL_MAX_CB_SIZE”. Therefore, it increases the PCI-DMA queues - * throughput. This CB allocation method also reduces the use of Linux - * DMA-able memory pool. Which are limited and used by other Linux - * sub-systems. + * Indicates if the CB was allocated and mapped by userspace + * (relevant to greco and above). User allocated CB is a command buffer, + * allocated by the user, via malloc (or similar). After allocating the + * CB, the user invokes - “memory ioctl” to map the user memory into a + * device virtual address. 
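A sketch of how the new HL_INFO_UNDEFINED_OPCODE_EVENT opcode above might be queried from userspace. struct hl_info_args and HL_IOCTL_INFO come from the existing parts of habanalabs.h that this hunk does not show, so treat those field names as assumptions; error handling is omitted.

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>

    /* Fetch the last undefined-opcode error recorded by the driver. */
    static int get_undef_opcode_event(int dev_fd,
                                      struct hl_info_undefined_opcode_event *ev)
    {
            struct hl_info_args args;

            memset(&args, 0, sizeof(args));
            args.op = HL_INFO_UNDEFINED_OPCODE_EVENT;
            args.return_pointer = (uintptr_t)ev;    /* assumed field names */
            args.return_size = sizeof(*ev);

            return ioctl(dev_fd, HL_IOCTL_INFO, &args);
    }

Per the kernel-doc above, a cb_addr_streams_len of 1 then indicates an Upper-CP error on a specific stream, while 4 indicates a Lower-CP error.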
The user provides this address via the + * cb_handle field. The interface provides the ability to create a + * large CBs, Which aren’t limited to “HL_MAX_CB_SIZE”. Therefore, it + * increases the PCI-DMA queues throughput. This CB allocation method + * also reduces the use of Linux DMA-able memory pool. Which are limited + * and used by other Linux sub-systems. */ #define HL_CS_CHUNK_FLAGS_USER_ALLOC_CB 0x1 @@ -783,12 +1227,17 @@ union hl_cb_args { */ struct hl_cs_chunk { union { - /* For external queue, this represents a Handle of CB on the + /* Goya/Gaudi: + * For external queue, this represents a Handle of CB on the * Host. * For internal queue in Goya, this represents an SRAM or * a DRAM address of the internal CB. In Gaudi, this might also * represent a mapped host address of the CB. * + * Greco onwards: + * For H/W queue, this represents either a Handle of CB on the + * Host, or an SRAM, a DRAM, or a mapped host address of the CB. + * * A mapped host address is in the device address space, after * a host address was mapped by the device MMU. */ @@ -853,11 +1302,12 @@ struct hl_cs_chunk { __u32 pad[10]; }; -/* SIGNAL and WAIT/COLLECTIVE_WAIT flags are mutually exclusive */ +/* SIGNAL/WAIT/COLLECTIVE_WAIT flags are mutually exclusive */ #define HL_CS_FLAGS_FORCE_RESTORE 0x1 #define HL_CS_FLAGS_SIGNAL 0x2 #define HL_CS_FLAGS_WAIT 0x4 #define HL_CS_FLAGS_COLLECTIVE_WAIT 0x8 + #define HL_CS_FLAGS_TIMESTAMP 0x20 #define HL_CS_FLAGS_STAGED_SUBMISSION 0x40 #define HL_CS_FLAGS_STAGED_SUBMISSION_FIRST 0x80 @@ -949,6 +1399,7 @@ struct hl_cs_in { /* Context ID - Currently not in use */ __u32 ctx_id; + __u8 pad[4]; }; struct hl_cs_out { @@ -994,6 +1445,8 @@ union hl_cs_args { #define HL_WAIT_CS_FLAGS_INTERRUPT 0x2 #define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000 +#define HL_WAIT_CS_FLAGS_ANY_CQ_INTERRUPT 0xFFF00000 +#define HL_WAIT_CS_FLAGS_ANY_DEC_INTERRUPT 0xFFE00000 #define HL_WAIT_CS_FLAGS_MULTI_CS 0x4 #define HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ 0x10 #define HL_WAIT_CS_FLAGS_REGISTER_INTERRUPT 0x20 @@ -1043,8 +1496,13 @@ struct hl_wait_cs_in { /* HL_WAIT_CS_FLAGS_* * If HL_WAIT_CS_FLAGS_INTERRUPT is set, this field should include - * interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK, in order - * not to specify an interrupt id ,set mask to all 1s. + * interrupt id according to HL_WAIT_CS_FLAGS_INTERRUPT_MASK + * + * in order to wait for any CQ interrupt, set interrupt value to + * HL_WAIT_CS_FLAGS_ANY_CQ_INTERRUPT. + * + * in order to wait for any decoder interrupt, set interrupt value to + * HL_WAIT_CS_FLAGS_ANY_DEC_INTERRUPT. 
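As the comment just above spells out, waiting on "any" interrupt is expressed purely through the flags word. A minimal illustration; the remaining hl_wait_cs_in fields and the actual HL_IOCTL_WAIT_CS call are unchanged from the existing interface and left out.

    #include <misc/habanalabs.h>

    /* Wait on any completion-queue interrupt instead of a specific interrupt ID. */
    static void request_any_cq_wait(struct hl_wait_cs_in *in)
    {
            in->flags |= HL_WAIT_CS_FLAGS_INTERRUPT | HL_WAIT_CS_FLAGS_ANY_CQ_INTERRUPT;
    }

    /* Or, to wait on any decoder interrupt only. */
    static void request_any_dec_wait(struct hl_wait_cs_in *in)
    {
            in->flags |= HL_WAIT_CS_FLAGS_INTERRUPT | HL_WAIT_CS_FLAGS_ANY_DEC_INTERRUPT;
    }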
*/ __u32 flags; @@ -1116,14 +1574,19 @@ union hl_wait_cs_args { /* Opcode to allocate device memory */ #define HL_MEM_OP_ALLOC 0 + /* Opcode to free previously allocated device memory */ #define HL_MEM_OP_FREE 1 + /* Opcode to map host and device memory */ #define HL_MEM_OP_MAP 2 + /* Opcode to unmap previously mapped host and device memory */ #define HL_MEM_OP_UNMAP 3 + /* Opcode to map a hw block */ #define HL_MEM_OP_MAP_BLOCK 4 + /* Opcode to create DMA-BUF object for an existing device memory allocation * and to export an FD of that DMA-BUF back to the caller */ @@ -1342,7 +1805,16 @@ struct hl_debug_params_bmon { /* Trace source ID */ __u32 id; - __u32 pad; + + /* Control register */ + __u32 control; + + /* Two more address ranges that the user can request to filter */ + __u64 start_addr2; + __u64 end_addr2; + + __u64 start_addr3; + __u64 end_addr3; }; struct hl_debug_params_spmu { @@ -1351,7 +1823,11 @@ struct hl_debug_params_spmu { /* Number of event types selection */ __u32 event_types_num; - __u32 pad; + + /* TRC configuration register values */ + __u32 pmtrc_val; + __u32 trc_ctrl_host_val; + __u32 trc_en_host_val; }; /* Opcode for ETR component */ @@ -1401,9 +1877,17 @@ struct hl_debug_args { /* * Notifier event values - for the notification mechanism and the HL_INFO_GET_EVENTS command * - * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event + * HL_NOTIFIER_EVENT_TPC_ASSERT - Indicates TPC assert event + * HL_NOTIFIER_EVENT_UNDEFINED_OPCODE - Indicates undefined operation code + * HL_NOTIFIER_EVENT_DEVICE_RESET - Indicates device requires a reset + * HL_NOTIFIER_EVENT_CS_TIMEOUT - Indicates CS timeout error + * HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable */ -#define HL_NOTIFIER_EVENT_TPC_ASSERT (1 << 0) +#define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0) +#define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1) +#define HL_NOTIFIER_EVENT_DEVICE_RESET (1ULL << 2) +#define HL_NOTIFIER_EVENT_CS_TIMEOUT (1ULL << 3) +#define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4) /* * Various information operations such as: @@ -1458,16 +1942,23 @@ struct hl_debug_args { * (or if its the first CS for this context). The user can also order the * driver to run the "restore" phase explicitly * + * Goya/Gaudi: * There are two types of queues - external and internal. External queues * are DMA queues which transfer data from/to the Host. All other queues are * internal. The driver will get completion notifications from the device only * on JOBS which are enqueued in the external queues. * + * Greco onwards: + * There is a single type of queue for all types of engines, either DMA engines + * for transfers from/to the host or inside the device, or compute engines. + * The driver will get completion notifications from the device for all queues. + * * For jobs on external queues, the user needs to create command buffers * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on * internal queues, the user needs to prepare a "command buffer" with packets * on either the device SRAM/DRAM or the host, and give the device address of * that buffer to the CS ioctl. + * For jobs on H/W queues both options of command buffers are valid. * * This IOCTL is asynchronous in regard to the actual execution of the CS. 
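One more habanalabs note: the HL_NOTIFIER_EVENT_* values earlier in this hunk are now 64-bit flags meant to be fetched after an eventfd registered through HL_INFO_REGISTER_EVENTFD fires. A hedged sketch, assuming (this hunk does not show it) that HL_INFO_GET_EVENTS returns a single __u64 bitmap through hl_info_args:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>

    /* After the registered eventfd fires, read and test the pending event bits. */
    static int device_needs_reset(int dev_fd)
    {
            struct hl_info_args args;
            uint64_t events = 0;

            memset(&args, 0, sizeof(args));
            args.op = HL_INFO_GET_EVENTS;
            args.return_pointer = (uintptr_t)&events;   /* assumed field names */
            args.return_size = sizeof(events);
            if (ioctl(dev_fd, HL_IOCTL_INFO, &args) < 0)
                    return -1;

            return !!(events & HL_NOTIFIER_EVENT_DEVICE_RESET);
    }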
This * means it returns immediately after ALL the JOBS were enqueued on their @@ -1476,7 +1967,7 @@ struct hl_debug_args { * * Upon successful enqueue, the IOCTL returns a sequence number which the user * can use with the "Wait for CS" IOCTL to check whether the handle's CS - * external JOBS have been completed. Note that if the CS has internal JOBS + * non-internal JOBS have been completed. Note that if the CS has internal JOBS * which can execute AFTER the external JOBS have finished, the driver might * report that the CS has finished executing BEFORE the internal JOBS have * actually finished executing. diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index b869990c2db2..890d9e5b76d7 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -69,8 +69,8 @@ enum { * struct mtd_write_req - data structure for requesting a write operation * * @start: start address - * @len: length of data buffer - * @ooblen: length of OOB buffer + * @len: length of data buffer (only lower 32 bits are used) + * @ooblen: length of OOB buffer (only lower 32 bits are used) * @usr_data: user-provided data buffer * @usr_oob: user-provided OOB buffer * @mode: MTD mode (see "MTD operation modes") diff --git a/include/uapi/rdma/erdma-abi.h b/include/uapi/rdma/erdma-abi.h new file mode 100644 index 000000000000..b7a0222f978f --- /dev/null +++ b/include/uapi/rdma/erdma-abi.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Copyright (c) 2020-2022, Alibaba Group. + */ + +#ifndef __ERDMA_USER_H__ +#define __ERDMA_USER_H__ + +#include <linux/types.h> + +#define ERDMA_ABI_VERSION 1 + +struct erdma_ureq_create_cq { + __aligned_u64 db_record_va; + __aligned_u64 qbuf_va; + __u32 qbuf_len; + __u32 rsvd0; +}; + +struct erdma_uresp_create_cq { + __u32 cq_id; + __u32 num_cqe; +}; + +struct erdma_ureq_create_qp { + __aligned_u64 db_record_va; + __aligned_u64 qbuf_va; + __u32 qbuf_len; + __u32 rsvd0; +}; + +struct erdma_uresp_create_qp { + __u32 qp_id; + __u32 num_sqe; + __u32 num_rqe; + __u32 rq_offset; +}; + +struct erdma_uresp_alloc_ctx { + __u32 dev_id; + __u32 pad; + __u32 sdb_type; + __u32 sdb_offset; + __aligned_u64 sdb; + __aligned_u64 rdb; + __aligned_u64 cdb; +}; + +#endif diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 3072e5d6b692..7dd56210226f 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -250,6 +250,7 @@ enum rdma_driver_id { RDMA_DRIVER_QIB, RDMA_DRIVER_EFA, RDMA_DRIVER_SIW, + RDMA_DRIVER_ERDMA, }; enum ib_uverbs_gid_type { diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index e539c84d63f1..3bee490eb585 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -228,6 +228,7 @@ enum mlx5_ib_objects { MLX5_IB_OBJECT_VAR, MLX5_IB_OBJECT_PP, MLX5_IB_OBJECT_UAR, + MLX5_IB_OBJECT_STEERING_ANCHOR, }; enum mlx5_ib_flow_matcher_create_attrs { @@ -248,6 +249,22 @@ enum mlx5_ib_flow_matcher_methods { MLX5_IB_METHOD_FLOW_MATCHER_DESTROY, }; +enum mlx5_ib_flow_steering_anchor_create_attrs { + MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE, + MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY, + MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID, +}; + +enum mlx5_ib_flow_steering_anchor_destroy_attrs { + MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT), +}; + +enum 
mlx5_ib_steering_anchor_methods { + MLX5_IB_METHOD_STEERING_ANCHOR_CREATE = (1U << UVERBS_ID_NS_SHIFT), + MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY, +}; + enum mlx5_ib_device_query_context_attrs { MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT), }; diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index a21ca8ece8db..7af9e09ea556 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -63,6 +63,7 @@ enum mlx5_ib_uapi_dm_type { MLX5_IB_UAPI_DM_TYPE_MEMIC, MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM, MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM, + MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM, }; enum mlx5_ib_uapi_devx_create_event_channel_flags { diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index 9555f31c8425..3aef123dbd7f 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -123,7 +123,7 @@ struct snd_compr_codec_caps { } __attribute__((packed, aligned(4))); /** - * enum sndrv_compress_encoder + * enum sndrv_compress_encoder - encoder metadata key * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the * end of the track * @SNDRV_COMPRESS_ENCODER_DELAY: no of samples inserted by the encoder at the diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h index 79b14389ae41..726361716919 100644 --- a/include/uapi/sound/compress_params.h +++ b/include/uapi/sound/compress_params.h @@ -250,7 +250,7 @@ struct snd_enc_wma { /** - * struct snd_enc_vorbis + * struct snd_enc_vorbis - Vorbis encoder parameters * @quality: Sets encoding quality to n, between -1 (low) and 10 (high). * In the default mode of operation, the quality level is 3. * Normal quality range is 0 - 10. @@ -279,7 +279,7 @@ struct snd_enc_vorbis { /** - * struct snd_enc_real + * struct snd_enc_real - RealAudio encoder parameters * @quant_bits: number of coupling quantization bits in the stream * @start_region: coupling start region in the stream * @num_regions: number of regions value @@ -294,7 +294,7 @@ struct snd_enc_real { } __attribute__((packed, aligned(4))); /** - * struct snd_enc_flac + * struct snd_enc_flac - FLAC encoder parameters * @num: serial number, valid only for OGG formats * needs to be set by application * @gain: Add replay gain tags diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h index 0e7dccdc25fd..3566630ca965 100644 --- a/include/uapi/sound/sof/abi.h +++ b/include/uapi/sound/sof/abi.h @@ -24,9 +24,11 @@ #ifndef __INCLUDE_UAPI_SOUND_SOF_ABI_H__ #define __INCLUDE_UAPI_SOUND_SOF_ABI_H__ +#include <linux/types.h> + /* SOF ABI version major, minor and patch numbers */ #define SOF_ABI_MAJOR 3 -#define SOF_ABI_MINOR 21 +#define SOF_ABI_MINOR 23 #define SOF_ABI_PATCH 0 /* SOF ABI version number. 
Format within 32bit word is MMmmmppp */ diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h index dbf137516522..e9bba93a5399 100644 --- a/include/uapi/sound/sof/header.h +++ b/include/uapi/sound/sof/header.h @@ -26,4 +26,34 @@ struct sof_abi_hdr { __u32 data[]; /**< Component data - opaque to core */ } __packed; +#define SOF_MANIFEST_DATA_TYPE_NHLT 1 + +/** + * struct sof_manifest_tlv - SOF manifest TLV data + * @type: type of data + * @size: data size (not including the size of this struct) + * @data: payload data + */ +struct sof_manifest_tlv { + __le32 type; + __le32 size; + __u8 data[]; +}; + +/** + * struct sof_manifest - SOF topology manifest + * @abi_major: Major ABI version + * @abi_minor: Minor ABI version + * @abi_patch: ABI patch + * @count: count of tlv items + * @items: consecutive variable size tlv items + */ +struct sof_manifest { + __le16 abi_major; + __le16 abi_minor; + __le16 abi_patch; + __le16 count; + struct sof_manifest_tlv items[]; +}; + #endif diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index b72fa385bebf..5caf75cadaf8 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -52,11 +52,17 @@ #define SOF_TKN_SCHED_FRAMES 204 #define SOF_TKN_SCHED_TIME_DOMAIN 205 #define SOF_TKN_SCHED_DYNAMIC_PIPELINE 206 +#define SOF_TKN_SCHED_LP_MODE 207 +#define SOF_TKN_SCHED_MEM_USAGE 208 /* volume */ #define SOF_TKN_VOLUME_RAMP_STEP_TYPE 250 #define SOF_TKN_VOLUME_RAMP_STEP_MS 251 +#define SOF_TKN_GAIN_RAMP_TYPE 260 +#define SOF_TKN_GAIN_RAMP_DURATION 261 +#define SOF_TKN_GAIN_VAL 262 + /* SRC */ #define SOF_TKN_SRC_RATE_IN 300 #define SOF_TKN_SRC_RATE_OUT 301 @@ -79,6 +85,9 @@ */ #define SOF_TKN_COMP_CORE_ID 404 #define SOF_TKN_COMP_UUID 405 +#define SOF_TKN_COMP_CPC 406 +#define SOF_TKN_COMP_IS_PAGES 409 +#define SOF_TKN_COMP_NUM_AUDIO_FORMATS 410 /* SSP */ #define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500 @@ -145,4 +154,39 @@ #define SOF_TKN_MEDIATEK_AFE_CH 1601 #define SOF_TKN_MEDIATEK_AFE_FORMAT 1602 +/* MIXER */ +#define SOF_TKN_MIXER_TYPE 1700 + +/* ACPDMIC */ +#define SOF_TKN_AMD_ACPDMIC_RATE 1800 +#define SOF_TKN_AMD_ACPDMIC_CH 1801 + +/* CAVS AUDIO FORMAT */ +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_RATE 1900 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_BIT_DEPTH 1901 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_VALID_BIT 1902 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CHANNELS 1903 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_MAP 1904 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_CH_CFG 1905 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE 1906 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG 1907 +#define SOF_TKN_CAVS_AUDIO_FORMAT_IN_SAMPLE_TYPE 1908 +/* intentional token numbering discontinuity, reserved for future use */ +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE 1930 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH 1931 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_VALID_BIT 1932 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CHANNELS 1933 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_MAP 1934 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_CH_CFG 1935 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE 1936 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG 1937 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_SAMPLE_TYPE 1938 +/* intentional token numbering discontinuity, reserved for future use */ +#define SOF_TKN_CAVS_AUDIO_FORMAT_IBS 1970 +#define SOF_TKN_CAVS_AUDIO_FORMAT_OBS 1971 +#define SOF_TKN_CAVS_AUDIO_FORMAT_DMA_BUFFER_SIZE 1972 + +/* COPIER */ +#define SOF_TKN_INTEL_COPIER_NODE_TYPE 1980 + #endif diff --git a/include/ufs/ufshcd.h 
b/include/ufs/ufshcd.h index a92271421718..7fe1a926cd99 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -577,6 +577,18 @@ enum ufshcd_quirks { * support physical host configuration. */ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16, + + /* + * This quirk needs to be enabled if the host controller has + * 64-bit addressing supported capability but it doesn't work. + */ + UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17, + + /* + * This quirk needs to be enabled if the host controller has + * auto-hibernate capability but it's FASTAUTO only. + */ + UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18, }; enum ufshcd_caps { @@ -1087,6 +1099,7 @@ extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, u32 *mib_val, u8 peer); extern int ufshcd_config_pwr_mode(struct ufs_hba *hba, struct ufs_pa_layer_attr *desired_pwr_mode); +extern int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode); /* UIC command interfaces for DME primitives */ #define DME_LOCAL 0 @@ -1186,6 +1199,8 @@ void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); +int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg); + int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, @@ -1217,14 +1232,14 @@ static inline int ufshcd_vops_phy_initialization(struct ufs_hba *hba) return 0; } -extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; +extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix); int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask); int ufshcd_write_ee_control(struct ufs_hba *hba); -int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, u16 *other_mask, - u16 set, u16 clr); +int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask, + const u16 *other_mask, u16 set, u16 clr); #endif /* End of Header */ diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h index 0521f887e3ac..6c553f98fe57 100644 --- a/include/ufs/unipro.h +++ b/include/ufs/unipro.h @@ -38,6 +38,18 @@ /* * M-RX Configuration Attributes */ +#define RX_HS_G1_SYNC_LENGTH_CAP 0x008B +#define RX_HS_G1_PREP_LENGTH_CAP 0x008C +#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F +#define RX_HIBERN8TIME_CAPABILITY 0x0092 +#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094 +#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095 +#define RX_HS_G2_PREP_LENGTH_CAP 0x0096 +#define RX_HS_G3_PREP_LENGTH_CAP 0x0097 +#define RX_ADV_GRANULARITY_CAP 0x0098 +#define RX_HIBERN8TIME_CAP 0x0092 +#define RX_ADV_HIBERN8TIME_CAP 0x0099 +#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A #define RX_MODE 0x00A1 #define RX_HSRATE_SERIES 0x00A2 #define RX_HSGEAR 0x00A3 @@ -47,32 +59,19 @@ #define RX_ENTER_HIBERN8 0x00A7 #define RX_BYPASS_8B10B_ENABLE 0x00A8 #define RX_TERMINATION_FORCE_ENABLE 0x00A9 -#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F -#define RX_HIBERN8TIME_CAPABILITY 0x0092 +#define RXCALCTRL 0x00B4 +#define RXSQCTRL 0x00B5 +#define CFGRXCDR8 0x00BA +#define CFGRXOVR8 0x00BD +#define CFGRXOVR6 0x00BF +#define RXDIRECTCTRL2 0x00C7 +#define CFGRXOVR4 0x00E9 #define RX_REFCLKFREQ 0x00EB #define RX_CFGCLKFREQVAL 0x00EC #define CFGWIDEINLN 0x00F0 -#define CFGRXCDR8 0x00BA #define ENARXDIRECTCFG4 0x00F2 -#define CFGRXOVR8 0x00BD -#define RXDIRECTCTRL2 0x00C7 #define ENARXDIRECTCFG3 0x00F3 -#define RXCALCTRL 0x00B4 #define ENARXDIRECTCFG2 0x00F4 -#define CFGRXOVR4 0x00E9 -#define RXSQCTRL 0x00B5 -#define CFGRXOVR6 0x00BF -#define 
RX_HS_G1_SYNC_LENGTH_CAP 0x008B -#define RX_HS_G1_PREP_LENGTH_CAP 0x008C -#define RX_HS_G2_SYNC_LENGTH_CAP 0x0094 -#define RX_HS_G3_SYNC_LENGTH_CAP 0x0095 -#define RX_HS_G2_PREP_LENGTH_CAP 0x0096 -#define RX_HS_G3_PREP_LENGTH_CAP 0x0097 -#define RX_ADV_GRANULARITY_CAP 0x0098 -#define RX_MIN_ACTIVATETIME_CAP 0x008F -#define RX_HIBERN8TIME_CAP 0x0092 -#define RX_ADV_HIBERN8TIME_CAP 0x0099 -#define RX_ADV_MIN_ACTIVATETIME_CAP 0x009A #define is_mphy_tx_attr(attr) (attr < RX_MODE) @@ -103,47 +102,50 @@ /* * PHY Adapter attributes */ -#define PA_ACTIVETXDATALANES 0x1560 -#define PA_ACTIVERXDATALANES 0x1580 -#define PA_TXTRAILINGCLOCKS 0x1564 #define PA_PHY_TYPE 0x1500 #define PA_AVAILTXDATALANES 0x1520 -#define PA_AVAILRXDATALANES 0x1540 -#define PA_MINRXTRAILINGCLOCKS 0x1543 -#define PA_TXPWRSTATUS 0x1567 -#define PA_RXPWRSTATUS 0x1582 -#define PA_TXFORCECLOCK 0x1562 -#define PA_TXPWRMODE 0x1563 -#define PA_LEGACYDPHYESCDL 0x1570 #define PA_MAXTXSPEEDFAST 0x1521 #define PA_MAXTXSPEEDSLOW 0x1522 #define PA_MAXRXSPEEDFAST 0x1541 #define PA_MAXRXSPEEDSLOW 0x1542 #define PA_TXLINKSTARTUPHS 0x1544 +#define PA_AVAILRXDATALANES 0x1540 +#define PA_MINRXTRAILINGCLOCKS 0x1543 #define PA_LOCAL_TX_LCC_ENABLE 0x155E +#define PA_ACTIVETXDATALANES 0x1560 +#define PA_CONNECTEDTXDATALANES 0x1561 +#define PA_TXFORCECLOCK 0x1562 +#define PA_TXPWRMODE 0x1563 +#define PA_TXTRAILINGCLOCKS 0x1564 #define PA_TXSPEEDFAST 0x1565 #define PA_TXSPEEDSLOW 0x1566 -#define PA_REMOTEVERINFO 0x15A0 +#define PA_TXPWRSTATUS 0x1567 #define PA_TXGEAR 0x1568 #define PA_TXTERMINATION 0x1569 #define PA_HSSERIES 0x156A +#define PA_LEGACYDPHYESCDL 0x1570 #define PA_PWRMODE 0x1571 +#define PA_ACTIVERXDATALANES 0x1580 +#define PA_CONNECTEDRXDATALANES 0x1581 +#define PA_RXPWRSTATUS 0x1582 #define PA_RXGEAR 0x1583 #define PA_RXTERMINATION 0x1584 #define PA_MAXRXPWMGEAR 0x1586 #define PA_MAXRXHSGEAR 0x1587 +#define PA_PACPREQTIMEOUT 0x1590 +#define PA_PACPREQEOBTIMEOUT 0x1591 +#define PA_REMOTEVERINFO 0x15A0 +#define PA_LOGICALLANEMAP 0x15A1 +#define PA_SLEEPNOCONFIGTIME 0x15A2 +#define PA_STALLNOCONFIGTIME 0x15A3 +#define PA_SAVECONFIGTIME 0x15A4 #define PA_RXHSUNTERMCAP 0x15A5 #define PA_RXLSTERMCAP 0x15A6 #define PA_GRANULARITY 0x15AA -#define PA_PACPREQTIMEOUT 0x1590 -#define PA_PACPREQEOBTIMEOUT 0x1591 #define PA_HIBERN8TIME 0x15A7 #define PA_LOCALVERINFO 0x15A9 #define PA_GRANULARITY 0x15AA #define PA_TACTIVATE 0x15A8 -#define PA_PACPFRAMECOUNT 0x15C0 -#define PA_PACPERRORCOUNT 0x15C1 -#define PA_PHYTESTCONTROL 0x15C2 #define PA_PWRMODEUSERDATA0 0x15B0 #define PA_PWRMODEUSERDATA1 0x15B1 #define PA_PWRMODEUSERDATA2 0x15B2 @@ -156,12 +158,9 @@ #define PA_PWRMODEUSERDATA9 0x15B9 #define PA_PWRMODEUSERDATA10 0x15BA #define PA_PWRMODEUSERDATA11 0x15BB -#define PA_CONNECTEDTXDATALANES 0x1561 -#define PA_CONNECTEDRXDATALANES 0x1581 -#define PA_LOGICALLANEMAP 0x15A1 -#define PA_SLEEPNOCONFIGTIME 0x15A2 -#define PA_STALLNOCONFIGTIME 0x15A3 -#define PA_SAVECONFIGTIME 0x15A4 +#define PA_PACPFRAMECOUNT 0x15C0 +#define PA_PACPERRORCOUNT 0x15C1 +#define PA_PHYTESTCONTROL 0x15C2 #define PA_TXHSADAPTTYPE 0x15D4 /* Adpat type for PA_TXHSADAPTTYPE attribute */ @@ -173,9 +172,9 @@ #define PA_HIBERN8_TIME_UNIT_US 100 /*Other attributes*/ +#define VS_POWERSTATE 0xD083 #define VS_MPHYCFGUPDT 0xD085 #define VS_DEBUGOMC 0xD09E -#define VS_POWERSTATE 0xD083 #define PA_GRANULARITY_MIN_VAL 1 #define PA_GRANULARITY_MAX_VAL 6 @@ -229,6 +228,7 @@ enum ufs_hs_gear_tag { UFS_HS_G2, /* HS Gear 2 */ UFS_HS_G3, /* HS Gear 3 */ UFS_HS_G4, /* HS Gear 4 */ + UFS_HS_G5 /* 
HS Gear 5 */ }; enum ufs_unipro_ver { @@ -246,27 +246,27 @@ enum ufs_unipro_ver { /* * Data Link Layer Attributes */ +#define DL_TXPREEMPTIONCAP 0x2000 +#define DL_TC0TXMAXSDUSIZE 0x2001 +#define DL_TC0RXINITCREDITVAL 0x2002 +#define DL_TC1TXMAXSDUSIZE 0x2003 +#define DL_TC1RXINITCREDITVAL 0x2004 +#define DL_TC0TXBUFFERSIZE 0x2005 +#define DL_TC1TXBUFFERSIZE 0x2006 #define DL_TC0TXFCTHRESHOLD 0x2040 #define DL_FC0PROTTIMEOUTVAL 0x2041 #define DL_TC0REPLAYTIMEOUTVAL 0x2042 #define DL_AFC0REQTIMEOUTVAL 0x2043 #define DL_AFC0CREDITTHRESHOLD 0x2044 #define DL_TC0OUTACKTHRESHOLD 0x2045 +#define DL_PEERTC0PRESENT 0x2046 +#define DL_PEERTC0RXINITCREVAL 0x2047 #define DL_TC1TXFCTHRESHOLD 0x2060 #define DL_FC1PROTTIMEOUTVAL 0x2061 #define DL_TC1REPLAYTIMEOUTVAL 0x2062 #define DL_AFC1REQTIMEOUTVAL 0x2063 #define DL_AFC1CREDITTHRESHOLD 0x2064 #define DL_TC1OUTACKTHRESHOLD 0x2065 -#define DL_TXPREEMPTIONCAP 0x2000 -#define DL_TC0TXMAXSDUSIZE 0x2001 -#define DL_TC0RXINITCREDITVAL 0x2002 -#define DL_TC0TXBUFFERSIZE 0x2005 -#define DL_PEERTC0PRESENT 0x2046 -#define DL_PEERTC0RXINITCREVAL 0x2047 -#define DL_TC1TXMAXSDUSIZE 0x2003 -#define DL_TC1RXINITCREDITVAL 0x2004 -#define DL_TC1TXBUFFERSIZE 0x2006 #define DL_PEERTC1PRESENT 0x2066 #define DL_PEERTC1RXINITCREVAL 0x2067 diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 80546960f8b7..dae0f350c678 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -5,6 +5,7 @@ #include <linux/percpu.h> #include <linux/notifier.h> #include <linux/efi.h> +#include <linux/virtio_anchor.h> #include <xen/features.h> #include <asm/xen/interface.h> #include <xen/interface/vcpu.h> @@ -217,6 +218,7 @@ static inline void xen_preemptible_hcall_end(void) { } #ifdef CONFIG_XEN_GRANT_DMA_OPS void xen_grant_setup_dma_ops(struct device *dev); bool xen_is_grant_dma_device(struct device *dev); +bool xen_virtio_mem_acc(struct virtio_device *dev); #else static inline void xen_grant_setup_dma_ops(struct device *dev) { @@ -225,6 +227,13 @@ static inline bool xen_is_grant_dma_device(struct device *dev) { return false; } + +struct virtio_device; + +static inline bool xen_virtio_mem_acc(struct virtio_device *dev) +{ + return false; +} #endif /* CONFIG_XEN_GRANT_DMA_OPS */ #endif /* INCLUDE_XEN_OPS_H */ diff --git a/include/xen/xen.h b/include/xen/xen.h index 0780a81e140d..a99bab817523 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -52,14 +52,6 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, extern u64 xen_saved_max_mem_size; #endif -#include <linux/platform-feature.h> - -static inline void xen_set_restricted_virtio_memory_access(void) -{ - if (IS_ENABLED(CONFIG_XEN_VIRTIO) && xen_domain()) - platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); -} - #ifdef CONFIG_XEN_UNPOPULATED_ALLOC int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages); |
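Finally, on the Xen changes at the end: the removed xen_set_restricted_virtio_memory_access() helper is superseded by the virtio_anchor callback mechanism now pulled in by xen-ops.h. A kernel-side sketch of how platform setup code might wire up the new xen_virtio_mem_acc() hook, assuming virtio_set_mem_acc_cb() from <linux/virtio_anchor.h> (not shown in this diff); the function name below is illustrative only.

    #include <linux/init.h>
    #include <linux/virtio_anchor.h>
    #include <xen/xen-ops.h>

    /* Let the virtio core ask Xen, per device, whether restricted (grant-based)
     * memory access must be enforced, instead of flipping a global platform flag. */
    static void __init xen_virtio_init_example(void)
    {
            virtio_set_mem_acc_cb(xen_virtio_mem_acc);
    }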