author     Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2020-06-29 13:15:51 +0300
committer  Maarten Lankhorst <maarten.lankhorst@linux.intel.com>   2020-06-29 13:16:26 +0300
commit     60e9eabf41fa916d2ef68c5bf929197975917578
tree       39ce456390ed34d2624aed1260203f43fff94d38 /include
parent     84e543bc9d1dc550132ba25b72df28d40cc44333
parent     0a19b068acc47d05212f03e494381926dc0381e2
download   linux-60e9eabf41fa916d2ef68c5bf929197975917578.tar.xz
Backmerge remote-tracking branch 'drm/drm-next' into drm-misc-next
Some conflicts with ttm_bo->offset removal, but drm-misc-next needs updating to v5.8.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Diffstat (limited to 'include')
791 files changed, 23263 insertions, 5947 deletions
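One note before the diff itself: nearly all of its bulk is a single mechanical substitution in the asm-generic/atomic-instrumented.h hunks below, where every wrapper changes from inline to __always_inline and every open-coded kasan_check_read()/kasan_check_write() becomes instrument_atomic_read()/instrument_atomic_write(). The bodies of those helpers are not part of this diff, so the following is only a sketch of what the <linux/instrumented.h> helpers are assumed to look like in this era of the tree: one annotation fanning out to every enabled sanitizer instead of hard-coding KASAN at each call site.

#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/kcsan-checks.h>

/*
 * Sketch (assumed, not quoted from this diff) of the helpers the
 * atomic wrappers switch to: forward one instrumentation call to
 * both KASAN and KCSAN rather than calling KASAN directly.
 */
static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
{
	kasan_check_read(v, size);
	kcsan_check_atomic_read(v, size);
}

static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
{
	kasan_check_write(v, size);
	kcsan_check_atomic_write(v, size);
}

The inline to __always_inline change keeps these thin wrappers from ever being emitted out of line, so a caller that must remain uninstrumented cannot end up branching into an instrumented copy.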
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index a92bea7184a8..5afb6ceb284f 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -44,6 +44,7 @@ acpi_status acpi_execute_simple_method(acpi_handle handle, char *method, u64 arg);
 acpi_status acpi_evaluate_ej0(acpi_handle handle);
 acpi_status acpi_evaluate_lck(acpi_handle handle, int lock);
+acpi_status acpi_evaluate_reg(acpi_handle handle, u8 space_id, u32 function);
 bool acpi_ata_match(acpi_handle handle);
 bool acpi_bay_match(acpi_handle handle);
 bool acpi_dock_match(acpi_handle handle);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 49b519f36b69..459d6981ca96 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION			0x20200326
+#define ACPI_CA_VERSION			0x20200528
 
 #include <acpi/acconfig.h>
 #include <acpi/actypes.h>
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 4defed58ea33..aa236b9e6f24 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -815,8 +815,9 @@ typedef u8 acpi_adr_space_type;
 #define ACPI_ADR_SPACE_GPIO		(acpi_adr_space_type) 8
 #define ACPI_ADR_SPACE_GSBUS		(acpi_adr_space_type) 9
 #define ACPI_ADR_SPACE_PLATFORM_COMM	(acpi_adr_space_type) 10
+#define ACPI_ADR_SPACE_PLATFORM_RT	(acpi_adr_space_type) 11
 
-#define ACPI_NUM_PREDEFINED_REGIONS	11
+#define ACPI_NUM_PREDEFINED_REGIONS	12
 
 /*
  * Special Address Spaces
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index e3f1cddb4ac8..517a5231cc1b 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -33,6 +33,9 @@ struct ghes_estatus_node {
 	struct llist_node llnode;
 	struct acpi_hest_generic *generic;
 	struct ghes *ghes;
+
+	int task_work_cpu;
+	struct callback_head task_work;
 };
 
 struct ghes_estatus_cache {
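The acpi_bus.h hunk above exports a new helper, acpi_evaluate_reg(), for evaluating an ACPI _REG method, which notifies AML that an operation-region handler has been connected or disconnected. A hedged usage sketch follows; the function signature matches the hunk, but the handle and the locally defined connect/disconnect constants are illustrative only (the ACPI spec defines _REG's second argument as 0 = disconnect, 1 = connect):

#include <linux/acpi.h>

/* Illustrative values for _REG's "connection status" argument; per the
 * ACPI spec, 0 = handler disconnected, 1 = handler connected. */
#define EXAMPLE_REG_DISCONNECT	0
#define EXAMPLE_REG_CONNECT	1

/* Hypothetical caller: tell AML that a handler for the Embedded
 * Controller address space is now available on this device. */
static acpi_status example_connect_ec_region(acpi_handle handle)
{
	return acpi_evaluate_reg(handle, ACPI_ADR_SPACE_EC,
				 EXAMPLE_REG_CONNECT);
}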
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
deleted file mode 100644
index 4c74b1c1d13b..000000000000
--- a/include/asm-generic/5level-fixup.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _5LEVEL_FIXUP_H
-#define _5LEVEL_FIXUP_H
-
-#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED 1
-
-#define P4D_SHIFT			PGDIR_SHIFT
-#define P4D_SIZE			PGDIR_SIZE
-#define P4D_MASK			PGDIR_MASK
-#define MAX_PTRS_PER_P4D		1
-#define PTRS_PER_P4D			1
-
-#define p4d_t				pgd_t
-
-#define pud_alloc(mm, p4d, address) \
-	((unlikely(pgd_none(*(p4d))) && __pud_alloc(mm, p4d, address)) ? \
-		NULL : pud_offset(p4d, address))
-
-#define p4d_alloc(mm, pgd, address)	(pgd)
-#define p4d_offset(pgd, start)		(pgd)
-
-#ifndef __ASSEMBLY__
-static inline int p4d_none(p4d_t p4d)
-{
-	return 0;
-}
-
-static inline int p4d_bad(p4d_t p4d)
-{
-	return 0;
-}
-
-static inline int p4d_present(p4d_t p4d)
-{
-	return 1;
-}
-#endif
-
-#define p4d_ERROR(p4d)			do { } while (0)
-#define p4d_clear(p4d)			pgd_clear(p4d)
-#define p4d_val(p4d)			pgd_val(p4d)
-#define p4d_populate(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
-#define p4d_populate_safe(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
-#define p4d_page(p4d)			pgd_page(p4d)
-#define p4d_page_vaddr(p4d)		pgd_page_vaddr(p4d)
-
-#define __p4d(x)			__pgd(x)
-#define set_p4d(p4dp, p4d)		set_pgd(p4dp, p4d)
-
-#undef p4d_free_tlb
-#define p4d_free_tlb(tlb, x, addr)	do { } while (0)
-#define p4d_free(mm, x)			do { } while (0)
-
-#undef p4d_addr_end
-#define p4d_addr_end(addr, end)		(end)
-
-#endif
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 36341dfded70..44ec80e70518 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -56,6 +56,7 @@ mandatory-y += topology.h
 mandatory-y += trace_clock.h
 mandatory-y += uaccess.h
 mandatory-y += unaligned.h
+mandatory-y += vermagic.h
 mandatory-y += vga.h
 mandatory-y += word-at-a-time.h
 mandatory-y += xor.h
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index e8730c6b9fe2..379986e40159 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -18,1623 +18,1624 @@
 #define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
 
 #include <linux/build_bug.h>
-#include <linux/kasan-checks.h>
+#include <linux/compiler.h>
+#include <linux/instrumented.h>
 
-static inline int
+static __always_inline int
 atomic_read(const atomic_t *v)
 {
-	kasan_check_read(v, sizeof(*v));
+	instrument_atomic_read(v, sizeof(*v));
 	return arch_atomic_read(v);
 }
 #define atomic_read atomic_read
 
 #if defined(arch_atomic_read_acquire)
-static inline int
+static __always_inline int
 atomic_read_acquire(const atomic_t *v)
 {
-	kasan_check_read(v, sizeof(*v));
+	instrument_atomic_read(v, sizeof(*v));
 	return arch_atomic_read_acquire(v);
 }
 #define atomic_read_acquire atomic_read_acquire
 #endif
 
-static inline void
+static __always_inline void
 atomic_set(atomic_t *v, int i)
 {
-	kasan_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_set(v, i);
 }
 #define atomic_set atomic_set
 
 #if defined(arch_atomic_set_release)
-static inline void
+static __always_inline void
 atomic_set_release(atomic_t *v, int i)
 {
-	kasan_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_set_release(v, i);
 }
 #define atomic_set_release atomic_set_release
 #endif
 
-static inline void
+static __always_inline void
 atomic_add(int i, atomic_t *v)
 {
-	kasan_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	arch_atomic_add(i, v);
 }
 #define atomic_add atomic_add
 
 #if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
-static inline int
+static __always_inline int
 atomic_add_return(int i, atomic_t *v)
 {
-	kasan_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return arch_atomic_add_return(i, v);
 }
 #define atomic_add_return atomic_add_return
 #endif
 
 #if defined(arch_atomic_add_return_acquire)
-static inline int
+static __always_inline int
 atomic_add_return_acquire(int i, atomic_t *v)
 {
-	kasan_check_write(v, sizeof(*v));
+	instrument_atomic_write(v, sizeof(*v));
 	return
arch_atomic_add_return_acquire(i, v); } #define atomic_add_return_acquire atomic_add_return_acquire #endif #if defined(arch_atomic_add_return_release) -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_release(i, v); } #define atomic_add_return_release atomic_add_return_release #endif #if defined(arch_atomic_add_return_relaxed) -static inline int +static __always_inline int atomic_add_return_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_return_relaxed(i, v); } #define atomic_add_return_relaxed atomic_add_return_relaxed #endif #if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add) -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add(i, v); } #define atomic_fetch_add atomic_fetch_add #endif #if defined(arch_atomic_fetch_add_acquire) -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_acquire(i, v); } #define atomic_fetch_add_acquire atomic_fetch_add_acquire #endif #if defined(arch_atomic_fetch_add_release) -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_release(i, v); } #define atomic_fetch_add_release atomic_fetch_add_release #endif #if defined(arch_atomic_fetch_add_relaxed) -static inline int +static __always_inline int atomic_fetch_add_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_relaxed(i, v); } #define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #endif -static inline void +static __always_inline void atomic_sub(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_sub(i, v); } #define atomic_sub atomic_sub #if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return) -static inline int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return(i, v); } #define atomic_sub_return atomic_sub_return #endif #if defined(arch_atomic_sub_return_acquire) -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_acquire(i, v); } #define atomic_sub_return_acquire atomic_sub_return_acquire #endif #if defined(arch_atomic_sub_return_release) -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_release(i, v); } #define atomic_sub_return_release atomic_sub_return_release #endif #if defined(arch_atomic_sub_return_relaxed) -static inline int +static __always_inline int atomic_sub_return_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_return_relaxed(i, v); } 
#define atomic_sub_return_relaxed atomic_sub_return_relaxed #endif #if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub) -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub(i, v); } #define atomic_fetch_sub atomic_fetch_sub #endif #if defined(arch_atomic_fetch_sub_acquire) -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_acquire(i, v); } #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire #endif #if defined(arch_atomic_fetch_sub_release) -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_release(i, v); } #define atomic_fetch_sub_release atomic_fetch_sub_release #endif #if defined(arch_atomic_fetch_sub_relaxed) -static inline int +static __always_inline int atomic_fetch_sub_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_sub_relaxed(i, v); } #define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #endif #if defined(arch_atomic_inc) -static inline void +static __always_inline void atomic_inc(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_inc(v); } #define atomic_inc atomic_inc #endif #if defined(arch_atomic_inc_return) -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return(v); } #define atomic_inc_return atomic_inc_return #endif #if defined(arch_atomic_inc_return_acquire) -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_acquire(v); } #define atomic_inc_return_acquire atomic_inc_return_acquire #endif #if defined(arch_atomic_inc_return_release) -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_release(v); } #define atomic_inc_return_release atomic_inc_return_release #endif #if defined(arch_atomic_inc_return_relaxed) -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_return_relaxed(v); } #define atomic_inc_return_relaxed atomic_inc_return_relaxed #endif #if defined(arch_atomic_fetch_inc) -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc(v); } #define atomic_fetch_inc atomic_fetch_inc #endif #if defined(arch_atomic_fetch_inc_acquire) -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_acquire(v); } #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire #endif #if defined(arch_atomic_fetch_inc_release) -static inline int +static __always_inline int 
atomic_fetch_inc_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_release(v); } #define atomic_fetch_inc_release atomic_fetch_inc_release #endif #if defined(arch_atomic_fetch_inc_relaxed) -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_inc_relaxed(v); } #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed #endif #if defined(arch_atomic_dec) -static inline void +static __always_inline void atomic_dec(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_dec(v); } #define atomic_dec atomic_dec #endif #if defined(arch_atomic_dec_return) -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return(v); } #define atomic_dec_return atomic_dec_return #endif #if defined(arch_atomic_dec_return_acquire) -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_acquire(v); } #define atomic_dec_return_acquire atomic_dec_return_acquire #endif #if defined(arch_atomic_dec_return_release) -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_release(v); } #define atomic_dec_return_release atomic_dec_return_release #endif #if defined(arch_atomic_dec_return_relaxed) -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_return_relaxed(v); } #define atomic_dec_return_relaxed atomic_dec_return_relaxed #endif #if defined(arch_atomic_fetch_dec) -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec(v); } #define atomic_fetch_dec atomic_fetch_dec #endif #if defined(arch_atomic_fetch_dec_acquire) -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_acquire(v); } #define atomic_fetch_dec_acquire atomic_fetch_dec_acquire #endif #if defined(arch_atomic_fetch_dec_release) -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_release(v); } #define atomic_fetch_dec_release atomic_fetch_dec_release #endif #if defined(arch_atomic_fetch_dec_relaxed) -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_dec_relaxed(v); } #define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed #endif -static inline void +static __always_inline void atomic_and(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_and(i, v); } #define atomic_and atomic_and #if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and) -static 
inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and(i, v); } #define atomic_fetch_and atomic_fetch_and #endif #if defined(arch_atomic_fetch_and_acquire) -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_acquire(i, v); } #define atomic_fetch_and_acquire atomic_fetch_and_acquire #endif #if defined(arch_atomic_fetch_and_release) -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_release(i, v); } #define atomic_fetch_and_release atomic_fetch_and_release #endif #if defined(arch_atomic_fetch_and_relaxed) -static inline int +static __always_inline int atomic_fetch_and_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_and_relaxed(i, v); } #define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #endif #if defined(arch_atomic_andnot) -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_andnot(i, v); } #define atomic_andnot atomic_andnot #endif #if defined(arch_atomic_fetch_andnot) -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot(i, v); } #define atomic_fetch_andnot atomic_fetch_andnot #endif #if defined(arch_atomic_fetch_andnot_acquire) -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_acquire(i, v); } #define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire #endif #if defined(arch_atomic_fetch_andnot_release) -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_release(i, v); } #define atomic_fetch_andnot_release atomic_fetch_andnot_release #endif #if defined(arch_atomic_fetch_andnot_relaxed) -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_andnot_relaxed(i, v); } #define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed #endif -static inline void +static __always_inline void atomic_or(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_or(i, v); } #define atomic_or atomic_or #if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or) -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or(i, v); } #define atomic_fetch_or atomic_fetch_or #endif #if defined(arch_atomic_fetch_or_acquire) -static inline int +static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, 
sizeof(*v)); return arch_atomic_fetch_or_acquire(i, v); } #define atomic_fetch_or_acquire atomic_fetch_or_acquire #endif #if defined(arch_atomic_fetch_or_release) -static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_release(i, v); } #define atomic_fetch_or_release atomic_fetch_or_release #endif #if defined(arch_atomic_fetch_or_relaxed) -static inline int +static __always_inline int atomic_fetch_or_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_or_relaxed(i, v); } #define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #endif -static inline void +static __always_inline void atomic_xor(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic_xor(i, v); } #define atomic_xor atomic_xor #if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor) -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor(i, v); } #define atomic_fetch_xor atomic_fetch_xor #endif #if defined(arch_atomic_fetch_xor_acquire) -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_acquire(i, v); } #define atomic_fetch_xor_acquire atomic_fetch_xor_acquire #endif #if defined(arch_atomic_fetch_xor_release) -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_release(i, v); } #define atomic_fetch_xor_release atomic_fetch_xor_release #endif #if defined(arch_atomic_fetch_xor_relaxed) -static inline int +static __always_inline int atomic_fetch_xor_relaxed(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_xor_relaxed(i, v); } #define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #endif #if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg) -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg(v, i); } #define atomic_xchg atomic_xchg #endif #if defined(arch_atomic_xchg_acquire) -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_acquire(v, i); } #define atomic_xchg_acquire atomic_xchg_acquire #endif #if defined(arch_atomic_xchg_release) -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_release(v, i); } #define atomic_xchg_release atomic_xchg_release #endif #if defined(arch_atomic_xchg_relaxed) -static inline int +static __always_inline int atomic_xchg_relaxed(atomic_t *v, int i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_xchg_relaxed(v, i); } #define atomic_xchg_relaxed atomic_xchg_relaxed #endif #if !defined(arch_atomic_cmpxchg_relaxed) || 
defined(arch_atomic_cmpxchg) -static inline int +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg(v, old, new); } #define atomic_cmpxchg atomic_cmpxchg #endif #if defined(arch_atomic_cmpxchg_acquire) -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_acquire(v, old, new); } #define atomic_cmpxchg_acquire atomic_cmpxchg_acquire #endif #if defined(arch_atomic_cmpxchg_release) -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_release(v, old, new); } #define atomic_cmpxchg_release atomic_cmpxchg_release #endif #if defined(arch_atomic_cmpxchg_relaxed) -static inline int +static __always_inline int atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_cmpxchg_relaxed(v, old, new); } #define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed #endif #if defined(arch_atomic_try_cmpxchg) -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg(v, old, new); } #define atomic_try_cmpxchg atomic_try_cmpxchg #endif #if defined(arch_atomic_try_cmpxchg_acquire) -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_acquire(v, old, new); } #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire #endif #if defined(arch_atomic_try_cmpxchg_release) -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_release(v, old, new); } #define atomic_try_cmpxchg_release atomic_try_cmpxchg_release #endif #if defined(arch_atomic_try_cmpxchg_relaxed) -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic_try_cmpxchg_relaxed(v, old, new); } #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed #endif #if defined(arch_atomic_sub_and_test) -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_sub_and_test(i, v); } #define atomic_sub_and_test atomic_sub_and_test #endif #if defined(arch_atomic_dec_and_test) -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return 
arch_atomic_dec_and_test(v); } #define atomic_dec_and_test atomic_dec_and_test #endif #if defined(arch_atomic_inc_and_test) -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_and_test(v); } #define atomic_inc_and_test atomic_inc_and_test #endif #if defined(arch_atomic_add_negative) -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_negative(i, v); } #define atomic_add_negative atomic_add_negative #endif #if defined(arch_atomic_fetch_add_unless) -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_fetch_add_unless(v, a, u); } #define atomic_fetch_add_unless atomic_fetch_add_unless #endif #if defined(arch_atomic_add_unless) -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_add_unless(v, a, u); } #define atomic_add_unless atomic_add_unless #endif #if defined(arch_atomic_inc_not_zero) -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_not_zero(v); } #define atomic_inc_not_zero atomic_inc_not_zero #endif #if defined(arch_atomic_inc_unless_negative) -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_inc_unless_negative(v); } #define atomic_inc_unless_negative atomic_inc_unless_negative #endif #if defined(arch_atomic_dec_unless_positive) -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_unless_positive(v); } #define atomic_dec_unless_positive atomic_dec_unless_positive #endif #if defined(arch_atomic_dec_if_positive) -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic_dec_if_positive(v); } #define atomic_dec_if_positive atomic_dec_if_positive #endif -static inline s64 +static __always_inline s64 atomic64_read(const atomic64_t *v) { - kasan_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read(v); } #define atomic64_read atomic64_read #if defined(arch_atomic64_read_acquire) -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { - kasan_check_read(v, sizeof(*v)); + instrument_atomic_read(v, sizeof(*v)); return arch_atomic64_read_acquire(v); } #define atomic64_read_acquire atomic64_read_acquire #endif -static inline void +static __always_inline void atomic64_set(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_set(v, i); } #define atomic64_set atomic64_set #if defined(arch_atomic64_set_release) -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); 
arch_atomic64_set_release(v, i); } #define atomic64_set_release atomic64_set_release #endif -static inline void +static __always_inline void atomic64_add(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_add(i, v); } #define atomic64_add atomic64_add #if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return) -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return(i, v); } #define atomic64_add_return atomic64_add_return #endif #if defined(arch_atomic64_add_return_acquire) -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_acquire(i, v); } #define atomic64_add_return_acquire atomic64_add_return_acquire #endif #if defined(arch_atomic64_add_return_release) -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_release(i, v); } #define atomic64_add_return_release atomic64_add_return_release #endif #if defined(arch_atomic64_add_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_add_return_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_return_relaxed(i, v); } #define atomic64_add_return_relaxed atomic64_add_return_relaxed #endif #if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add) -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add(i, v); } #define atomic64_fetch_add atomic64_fetch_add #endif #if defined(arch_atomic64_fetch_add_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_acquire(i, v); } #define atomic64_fetch_add_acquire atomic64_fetch_add_acquire #endif #if defined(arch_atomic64_fetch_add_release) -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_release(i, v); } #define atomic64_fetch_add_release atomic64_fetch_add_release #endif #if defined(arch_atomic64_fetch_add_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_relaxed(i, v); } #define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #endif -static inline void +static __always_inline void atomic64_sub(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_sub(i, v); } #define atomic64_sub atomic64_sub #if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return) -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return 
arch_atomic64_sub_return(i, v); } #define atomic64_sub_return atomic64_sub_return #endif #if defined(arch_atomic64_sub_return_acquire) -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_acquire(i, v); } #define atomic64_sub_return_acquire atomic64_sub_return_acquire #endif #if defined(arch_atomic64_sub_return_release) -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_release(i, v); } #define atomic64_sub_return_release atomic64_sub_return_release #endif #if defined(arch_atomic64_sub_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_sub_return_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_return_relaxed(i, v); } #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #endif #if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub) -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub(i, v); } #define atomic64_fetch_sub atomic64_fetch_sub #endif #if defined(arch_atomic64_fetch_sub_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_acquire(i, v); } #define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire #endif #if defined(arch_atomic64_fetch_sub_release) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_release(i, v); } #define atomic64_fetch_sub_release atomic64_fetch_sub_release #endif #if defined(arch_atomic64_fetch_sub_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_sub_relaxed(i, v); } #define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed #endif #if defined(arch_atomic64_inc) -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_inc(v); } #define atomic64_inc atomic64_inc #endif #if defined(arch_atomic64_inc_return) -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return(v); } #define atomic64_inc_return atomic64_inc_return #endif #if defined(arch_atomic64_inc_return_acquire) -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_acquire(v); } #define atomic64_inc_return_acquire atomic64_inc_return_acquire #endif #if defined(arch_atomic64_inc_return_release) -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + 
instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_release(v); } #define atomic64_inc_return_release atomic64_inc_return_release #endif #if defined(arch_atomic64_inc_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_return_relaxed(v); } #define atomic64_inc_return_relaxed atomic64_inc_return_relaxed #endif #if defined(arch_atomic64_fetch_inc) -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc(v); } #define atomic64_fetch_inc atomic64_fetch_inc #endif #if defined(arch_atomic64_fetch_inc_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_acquire(v); } #define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire #endif #if defined(arch_atomic64_fetch_inc_release) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_release(v); } #define atomic64_fetch_inc_release atomic64_fetch_inc_release #endif #if defined(arch_atomic64_fetch_inc_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_inc_relaxed(v); } #define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed #endif #if defined(arch_atomic64_dec) -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_dec(v); } #define atomic64_dec atomic64_dec #endif #if defined(arch_atomic64_dec_return) -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return(v); } #define atomic64_dec_return atomic64_dec_return #endif #if defined(arch_atomic64_dec_return_acquire) -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_acquire(v); } #define atomic64_dec_return_acquire atomic64_dec_return_acquire #endif #if defined(arch_atomic64_dec_return_release) -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_release(v); } #define atomic64_dec_return_release atomic64_dec_return_release #endif #if defined(arch_atomic64_dec_return_relaxed) -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_return_relaxed(v); } #define atomic64_dec_return_relaxed atomic64_dec_return_relaxed #endif #if defined(arch_atomic64_fetch_dec) -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec(v); } #define 
atomic64_fetch_dec atomic64_fetch_dec #endif #if defined(arch_atomic64_fetch_dec_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_acquire(v); } #define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire #endif #if defined(arch_atomic64_fetch_dec_release) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_release(v); } #define atomic64_fetch_dec_release atomic64_fetch_dec_release #endif #if defined(arch_atomic64_fetch_dec_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_dec_relaxed(v); } #define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed #endif -static inline void +static __always_inline void atomic64_and(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_and(i, v); } #define atomic64_and atomic64_and #if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and) -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and(i, v); } #define atomic64_fetch_and atomic64_fetch_and #endif #if defined(arch_atomic64_fetch_and_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_acquire(i, v); } #define atomic64_fetch_and_acquire atomic64_fetch_and_acquire #endif #if defined(arch_atomic64_fetch_and_release) -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_release(i, v); } #define atomic64_fetch_and_release atomic64_fetch_and_release #endif #if defined(arch_atomic64_fetch_and_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_and_relaxed(i, v); } #define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed #endif #if defined(arch_atomic64_andnot) -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_andnot(i, v); } #define atomic64_andnot atomic64_andnot #endif #if defined(arch_atomic64_fetch_andnot) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot(i, v); } #define atomic64_fetch_andnot atomic64_fetch_andnot #endif #if defined(arch_atomic64_fetch_andnot_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_acquire(i, v); } #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire #endif 
#if defined(arch_atomic64_fetch_andnot_release) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_release(i, v); } #define atomic64_fetch_andnot_release atomic64_fetch_andnot_release #endif #if defined(arch_atomic64_fetch_andnot_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_andnot_relaxed(i, v); } #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed #endif -static inline void +static __always_inline void atomic64_or(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_or(i, v); } #define atomic64_or atomic64_or #if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or) -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or(i, v); } #define atomic64_fetch_or atomic64_fetch_or #endif #if defined(arch_atomic64_fetch_or_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or_acquire(i, v); } #define atomic64_fetch_or_acquire atomic64_fetch_or_acquire #endif #if defined(arch_atomic64_fetch_or_release) -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or_release(i, v); } #define atomic64_fetch_or_release atomic64_fetch_or_release #endif #if defined(arch_atomic64_fetch_or_relaxed) -static inline s64 +static __always_inline s64 atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_or_relaxed(i, v); } #define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed #endif -static inline void +static __always_inline void atomic64_xor(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); arch_atomic64_xor(i, v); } #define atomic64_xor atomic64_xor #if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor) -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor(i, v); } #define atomic64_fetch_xor atomic64_fetch_xor #endif #if defined(arch_atomic64_fetch_xor_acquire) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_acquire(i, v); } #define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire #endif #if defined(arch_atomic64_fetch_xor_release) -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_release(i, v); } #define atomic64_fetch_xor_release atomic64_fetch_xor_release #endif #if defined(arch_atomic64_fetch_xor_relaxed) 
-static inline s64 +static __always_inline s64 atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_xor_relaxed(i, v); } #define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed #endif #if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg) -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg(v, i); } #define atomic64_xchg atomic64_xchg #endif #if defined(arch_atomic64_xchg_acquire) -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_acquire(v, i); } #define atomic64_xchg_acquire atomic64_xchg_acquire #endif #if defined(arch_atomic64_xchg_release) -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_release(v, i); } #define atomic64_xchg_release atomic64_xchg_release #endif #if defined(arch_atomic64_xchg_relaxed) -static inline s64 +static __always_inline s64 atomic64_xchg_relaxed(atomic64_t *v, s64 i) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_xchg_relaxed(v, i); } #define atomic64_xchg_relaxed atomic64_xchg_relaxed #endif #if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg) -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg(v, old, new); } #define atomic64_cmpxchg atomic64_cmpxchg #endif #if defined(arch_atomic64_cmpxchg_acquire) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_acquire(v, old, new); } #define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire #endif #if defined(arch_atomic64_cmpxchg_release) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_release(v, old, new); } #define atomic64_cmpxchg_release atomic64_cmpxchg_release #endif #if defined(arch_atomic64_cmpxchg_relaxed) -static inline s64 +static __always_inline s64 atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_cmpxchg_relaxed(v, old, new); } #define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed #endif #if defined(arch_atomic64_try_cmpxchg) -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg(v, old, new); } #define atomic64_try_cmpxchg atomic64_try_cmpxchg #endif #if defined(arch_atomic64_try_cmpxchg_acquire) -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - 
kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_acquire(v, old, new); } #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire #endif #if defined(arch_atomic64_try_cmpxchg_release) -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_release(v, old, new); } #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release #endif #if defined(arch_atomic64_try_cmpxchg_relaxed) -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { - kasan_check_write(v, sizeof(*v)); - kasan_check_write(old, sizeof(*old)); + instrument_atomic_write(v, sizeof(*v)); + instrument_atomic_write(old, sizeof(*old)); return arch_atomic64_try_cmpxchg_relaxed(v, old, new); } #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed #endif #if defined(arch_atomic64_sub_and_test) -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_sub_and_test(i, v); } #define atomic64_sub_and_test atomic64_sub_and_test #endif #if defined(arch_atomic64_dec_and_test) -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_and_test(v); } #define atomic64_dec_and_test atomic64_dec_and_test #endif #if defined(arch_atomic64_inc_and_test) -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_and_test(v); } #define atomic64_inc_and_test atomic64_inc_and_test #endif #if defined(arch_atomic64_add_negative) -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_negative(i, v); } #define atomic64_add_negative atomic64_add_negative #endif #if defined(arch_atomic64_fetch_add_unless) -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_fetch_add_unless(v, a, u); } #define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif #if defined(arch_atomic64_add_unless) -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_add_unless(v, a, u); } #define atomic64_add_unless atomic64_add_unless #endif #if defined(arch_atomic64_inc_not_zero) -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_not_zero(v); } #define atomic64_inc_not_zero atomic64_inc_not_zero #endif #if defined(arch_atomic64_inc_unless_negative) -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + 
instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_inc_unless_negative(v); } #define atomic64_inc_unless_negative atomic64_inc_unless_negative #endif #if defined(arch_atomic64_dec_unless_positive) -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_unless_positive(v); } #define atomic64_dec_unless_positive atomic64_dec_unless_positive #endif #if defined(arch_atomic64_dec_if_positive) -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { - kasan_check_write(v, sizeof(*v)); + instrument_atomic_write(v, sizeof(*v)); return arch_atomic64_dec_if_positive(v); } #define atomic64_dec_if_positive atomic64_dec_if_positive @@ -1644,7 +1645,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1653,7 +1654,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1662,7 +1663,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1671,7 +1672,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define xchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1680,7 +1681,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1689,7 +1690,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1698,7 +1699,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1707,7 +1708,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1716,7 +1717,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1725,7 +1726,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_acquire(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1734,7 +1735,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_release(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1743,7 +1744,7 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg64_relaxed(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ }) #endif @@ -1751,28 +1752,28 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg64_local(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ }) #define sync_cmpxchg(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ }) #define cmpxchg_double(ptr, ...) \ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ }) @@ -1780,9 +1781,9 @@ atomic64_dec_if_positive(atomic64_t *v) #define cmpxchg_double_local(ptr, ...) 
\ ({ \ typeof(ptr) __ai_ptr = (ptr); \ - kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ }) #endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */ -// b29b625d5de9280f680e42c7be859b55b15e5f6a +// 89bf97f3a7509b740845e51ddf31055b48a81f40 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h index 881c7e27af28..073cf40f431b 100644 --- a/include/asm-generic/atomic-long.h +++ b/include/asm-generic/atomic-long.h @@ -6,6 +6,7 @@ #ifndef _ASM_GENERIC_ATOMIC_LONG_H #define _ASM_GENERIC_ATOMIC_LONG_H +#include <linux/compiler.h> #include <asm/types.h> #ifdef CONFIG_64BIT @@ -22,493 +23,493 @@ typedef atomic_t atomic_long_t; #ifdef CONFIG_64BIT -static inline long +static __always_inline long atomic_long_read(const atomic_long_t *v) { return atomic64_read(v); } -static inline long +static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic64_read_acquire(v); } -static inline void +static __always_inline void atomic_long_set(atomic_long_t *v, long i) { atomic64_set(v, i); } -static inline void +static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic64_set_release(v, i); } -static inline void +static __always_inline void atomic_long_add(long i, atomic_long_t *v) { atomic64_add(i, v); } -static inline long +static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic64_add_return(i, v); } -static inline long +static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic64_add_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic64_add_return_release(i, v); } -static inline long +static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic64_add_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic64_fetch_add(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic64_fetch_add_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic64_fetch_add_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_add_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { atomic64_sub(i, v); } -static inline long +static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic64_sub_return(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic64_sub_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { return atomic64_sub_return_release(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic64_sub_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic64_fetch_sub(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return 
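As context for the conversion running through atomic-instrumented.h: the instrument_atomic_*() helpers that replace the bare kasan_check_*() calls feed both KASAN and KCSAN from a single call site, and the try_cmpxchg hunks instrument *old as a write as well, because a failed try_cmpxchg stores the current value back through that pointer. A minimal sketch of the helpers, assuming the v5.8 names from <linux/kcsan-checks.h>:

	/* Sketch of <linux/instrumented.h>: one hook, two checkers. */
	static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
	{
		kasan_check_read(v, size);		/* KASAN: validity of the access */
		kcsan_check_atomic_read(v, size);	/* KCSAN: treat as atomic, not a race */
	}

	static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
	{
		kasan_check_write(v, size);
		kcsan_check_atomic_write(v, size);
	}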
atomic64_fetch_sub_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic64_fetch_sub_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_sub_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_inc(atomic_long_t *v) { atomic64_inc(v); } -static inline long +static __always_inline long atomic_long_inc_return(atomic_long_t *v) { return atomic64_inc_return(v); } -static inline long +static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic64_inc_return_acquire(v); } -static inline long +static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic64_inc_return_release(v); } -static inline long +static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic64_inc_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic64_fetch_inc(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic64_fetch_inc_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic64_fetch_inc_release(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic64_fetch_inc_relaxed(v); } -static inline void +static __always_inline void atomic_long_dec(atomic_long_t *v) { atomic64_dec(v); } -static inline long +static __always_inline long atomic_long_dec_return(atomic_long_t *v) { return atomic64_dec_return(v); } -static inline long +static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic64_dec_return_acquire(v); } -static inline long +static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic64_dec_return_release(v); } -static inline long +static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic64_dec_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic64_fetch_dec(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic64_fetch_dec_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic64_fetch_dec_release(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic64_fetch_dec_relaxed(v); } -static inline void +static __always_inline void atomic_long_and(long i, atomic_long_t *v) { atomic64_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic64_fetch_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic64_fetch_and_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic64_fetch_and_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_and_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic64_andnot(i, v); } -static inline 
long +static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic64_fetch_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic64_fetch_andnot_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic64_fetch_andnot_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_andnot_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_or(long i, atomic_long_t *v) { atomic64_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic64_fetch_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic64_fetch_or_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic64_fetch_or_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_or_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { atomic64_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic64_fetch_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic64_fetch_xor_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic64_fetch_xor_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic64_fetch_xor_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic64_xchg(v, i); } -static inline long +static __always_inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic64_xchg_acquire(v, i); } -static inline long +static __always_inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic64_xchg_release(v, i); } -static inline long +static __always_inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic64_xchg_relaxed(v, i); } -static inline long +static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_acquire(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_release(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic64_cmpxchg_relaxed(v, old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); } -static inline bool +static 
__always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_release(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); } -static inline bool +static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic64_sub_and_test(i, v); } -static inline bool +static __always_inline bool atomic_long_dec_and_test(atomic_long_t *v) { return atomic64_dec_and_test(v); } -static inline bool +static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic64_inc_and_test(v); } -static inline bool +static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic64_add_negative(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic64_fetch_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic64_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic64_inc_not_zero(v); } -static inline bool +static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic64_inc_unless_negative(v); } -static inline bool +static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic64_dec_unless_positive(v); } -static inline long +static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic64_dec_if_positive(v); @@ -516,493 +517,493 @@ atomic_long_dec_if_positive(atomic_long_t *v) #else /* CONFIG_64BIT */ -static inline long +static __always_inline long atomic_long_read(const atomic_long_t *v) { return atomic_read(v); } -static inline long +static __always_inline long atomic_long_read_acquire(const atomic_long_t *v) { return atomic_read_acquire(v); } -static inline void +static __always_inline void atomic_long_set(atomic_long_t *v, long i) { atomic_set(v, i); } -static inline void +static __always_inline void atomic_long_set_release(atomic_long_t *v, long i) { atomic_set_release(v, i); } -static inline void +static __always_inline void atomic_long_add(long i, atomic_long_t *v) { atomic_add(i, v); } -static inline long +static __always_inline long atomic_long_add_return(long i, atomic_long_t *v) { return atomic_add_return(i, v); } -static inline long +static __always_inline long atomic_long_add_return_acquire(long i, atomic_long_t *v) { return atomic_add_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_add_return_release(long i, atomic_long_t *v) { return atomic_add_return_release(i, v); } -static inline long +static __always_inline long atomic_long_add_return_relaxed(long i, atomic_long_t *v) { return atomic_add_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add(long i, atomic_long_t *v) { return atomic_fetch_add(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_acquire(long i, atomic_long_t *v) { return atomic_fetch_add_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_release(long i, atomic_long_t *v) { return atomic_fetch_add_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) { return 
atomic_fetch_add_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_sub(long i, atomic_long_t *v) { atomic_sub(i, v); } -static inline long +static __always_inline long atomic_long_sub_return(long i, atomic_long_t *v) { return atomic_sub_return(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_acquire(long i, atomic_long_t *v) { return atomic_sub_return_acquire(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_release(long i, atomic_long_t *v) { return atomic_sub_return_release(i, v); } -static inline long +static __always_inline long atomic_long_sub_return_relaxed(long i, atomic_long_t *v) { return atomic_sub_return_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub(long i, atomic_long_t *v) { return atomic_fetch_sub(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) { return atomic_fetch_sub_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_release(long i, atomic_long_t *v) { return atomic_fetch_sub_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) { return atomic_fetch_sub_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_inc(atomic_long_t *v) { atomic_inc(v); } -static inline long +static __always_inline long atomic_long_inc_return(atomic_long_t *v) { return atomic_inc_return(v); } -static inline long +static __always_inline long atomic_long_inc_return_acquire(atomic_long_t *v) { return atomic_inc_return_acquire(v); } -static inline long +static __always_inline long atomic_long_inc_return_release(atomic_long_t *v) { return atomic_inc_return_release(v); } -static inline long +static __always_inline long atomic_long_inc_return_relaxed(atomic_long_t *v) { return atomic_inc_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_inc(atomic_long_t *v) { return atomic_fetch_inc(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_acquire(atomic_long_t *v) { return atomic_fetch_inc_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_release(atomic_long_t *v) { return atomic_fetch_inc_release(v); } -static inline long +static __always_inline long atomic_long_fetch_inc_relaxed(atomic_long_t *v) { return atomic_fetch_inc_relaxed(v); } -static inline void +static __always_inline void atomic_long_dec(atomic_long_t *v) { atomic_dec(v); } -static inline long +static __always_inline long atomic_long_dec_return(atomic_long_t *v) { return atomic_dec_return(v); } -static inline long +static __always_inline long atomic_long_dec_return_acquire(atomic_long_t *v) { return atomic_dec_return_acquire(v); } -static inline long +static __always_inline long atomic_long_dec_return_release(atomic_long_t *v) { return atomic_dec_return_release(v); } -static inline long +static __always_inline long atomic_long_dec_return_relaxed(atomic_long_t *v) { return atomic_dec_return_relaxed(v); } -static inline long +static __always_inline long atomic_long_fetch_dec(atomic_long_t *v) { return atomic_fetch_dec(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_acquire(atomic_long_t *v) { return atomic_fetch_dec_acquire(v); } -static inline long +static __always_inline long atomic_long_fetch_dec_release(atomic_long_t *v) { return atomic_fetch_dec_release(v); } -static inline long +static 
__always_inline long atomic_long_fetch_dec_relaxed(atomic_long_t *v) { return atomic_fetch_dec_relaxed(v); } -static inline void +static __always_inline void atomic_long_and(long i, atomic_long_t *v) { atomic_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and(long i, atomic_long_t *v) { return atomic_fetch_and(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_acquire(long i, atomic_long_t *v) { return atomic_fetch_and_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_release(long i, atomic_long_t *v) { return atomic_fetch_and_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) { return atomic_fetch_and_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_andnot(long i, atomic_long_t *v) { atomic_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot(long i, atomic_long_t *v) { return atomic_fetch_andnot(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) { return atomic_fetch_andnot_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_release(long i, atomic_long_t *v) { return atomic_fetch_andnot_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) { return atomic_fetch_andnot_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_or(long i, atomic_long_t *v) { atomic_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or(long i, atomic_long_t *v) { return atomic_fetch_or(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_acquire(long i, atomic_long_t *v) { return atomic_fetch_or_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_release(long i, atomic_long_t *v) { return atomic_fetch_or_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) { return atomic_fetch_or_relaxed(i, v); } -static inline void +static __always_inline void atomic_long_xor(long i, atomic_long_t *v) { atomic_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor(long i, atomic_long_t *v) { return atomic_fetch_xor(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) { return atomic_fetch_xor_acquire(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_release(long i, atomic_long_t *v) { return atomic_fetch_xor_release(i, v); } -static inline long +static __always_inline long atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) { return atomic_fetch_xor_relaxed(i, v); } -static inline long +static __always_inline long atomic_long_xchg(atomic_long_t *v, long i) { return atomic_xchg(v, i); } -static inline long +static __always_inline long atomic_long_xchg_acquire(atomic_long_t *v, long i) { return atomic_xchg_acquire(v, i); } -static inline long +static __always_inline long atomic_long_xchg_release(atomic_long_t *v, long i) { return atomic_xchg_release(v, i); } -static inline long +static __always_inline long atomic_long_xchg_relaxed(atomic_long_t *v, long i) { return atomic_xchg_relaxed(v, i); } -static inline long +static __always_inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new) { return atomic_cmpxchg(v, 
old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_acquire(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_release(v, old, new); } -static inline long +static __always_inline long atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) { return atomic_cmpxchg_relaxed(v, old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_acquire(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_release(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) { return atomic_try_cmpxchg_relaxed(v, (int *)old, new); } -static inline bool +static __always_inline bool atomic_long_sub_and_test(long i, atomic_long_t *v) { return atomic_sub_and_test(i, v); } -static inline bool +static __always_inline bool atomic_long_dec_and_test(atomic_long_t *v) { return atomic_dec_and_test(v); } -static inline bool +static __always_inline bool atomic_long_inc_and_test(atomic_long_t *v) { return atomic_inc_and_test(v); } -static inline bool +static __always_inline bool atomic_long_add_negative(long i, atomic_long_t *v) { return atomic_add_negative(i, v); } -static inline long +static __always_inline long atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) { return atomic_fetch_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_add_unless(atomic_long_t *v, long a, long u) { return atomic_add_unless(v, a, u); } -static inline bool +static __always_inline bool atomic_long_inc_not_zero(atomic_long_t *v) { return atomic_inc_not_zero(v); } -static inline bool +static __always_inline bool atomic_long_inc_unless_negative(atomic_long_t *v) { return atomic_inc_unless_negative(v); } -static inline bool +static __always_inline bool atomic_long_dec_unless_positive(atomic_long_t *v) { return atomic_dec_unless_positive(v); } -static inline long +static __always_inline long atomic_long_dec_if_positive(atomic_long_t *v) { return atomic_dec_if_positive(v); @@ -1010,4 +1011,4 @@ atomic_long_dec_if_positive(atomic_long_t *v) #endif /* CONFIG_64BIT */ #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ -// 77558968132ce4f911ad53f6f52ce423006f6268 +// a624200981f552b2c6be4f32fe44da8289f30d87 diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 85b28eb80b11..2eacaf7d62f6 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -128,10 +128,10 @@ do { \ #ifndef __smp_load_acquire #define __smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ + __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ __smp_mb(); \ - ___p1; \ + (typeof(*p))___p1; \ }) #endif @@ -183,10 +183,10 @@ do { \ #ifndef smp_load_acquire #define smp_load_acquire(p) \ ({ \ - typeof(*p) ___p1 = READ_ONCE(*p); \ + __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ barrier(); \ - ___p1; \ + (typeof(*p))___p1; \ }) #endif @@ -229,14 
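The barrier.h hunks around this point change the temporary from typeof(*p) to __unqual_scalar_typeof(*p): when p points to volatile storage, the local no longer inherits the volatile qualifier (which would needlessly constrain the compiler), and the cast back to typeof(*p) on the result keeps the expression type callers see unchanged. A sketch of the _Generic-based definition this relies on, as added to <linux/compiler_types.h> in the same series:

	/* Picks an unqualified scalar type for x; non-scalars fall through
	 * to 'default' and keep their qualifiers. */
	#define __scalar_type_to_expr_cases(type)	\
		unsigned type:	(unsigned type)0,	\
		signed type:	(signed type)0

	#define __unqual_scalar_typeof(x) typeof(		\
		_Generic((x),					\
			char:	(char)0,			\
			__scalar_type_to_expr_cases(char),	\
			__scalar_type_to_expr_cases(short),	\
			__scalar_type_to_expr_cases(int),	\
			__scalar_type_to_expr_cases(long),	\
			__scalar_type_to_expr_cases(long long),	\
			default: (x)))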
+229,14 @@ do { \ #ifndef smp_cond_load_relaxed #define smp_cond_load_relaxed(ptr, cond_expr) ({ \ typeof(ptr) __PTR = (ptr); \ - typeof(*ptr) VAL; \ + __unqual_scalar_typeof(*ptr) VAL; \ for (;;) { \ VAL = READ_ONCE(*__PTR); \ if (cond_expr) \ break; \ cpu_relax(); \ } \ - VAL; \ + (typeof(*ptr))VAL; \ }) #endif @@ -250,10 +250,10 @@ do { \ */ #ifndef smp_cond_load_acquire #define smp_cond_load_acquire(ptr, cond_expr) ({ \ - typeof(*ptr) _val; \ + __unqual_scalar_typeof(*ptr) _val; \ _val = smp_cond_load_relaxed(ptr, cond_expr); \ smp_acquire__after_ctrl_dep(); \ - _val; \ + (typeof(*ptr))_val; \ }) #endif diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h index 18ce3c9e8eec..fb2cb33a4013 100644 --- a/include/asm-generic/bitops/instrumented-atomic.h +++ b/include/asm-generic/bitops/instrumented-atomic.h @@ -11,7 +11,7 @@ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H -#include <linux/kasan-checks.h> +#include <linux/instrumented.h> /** * set_bit - Atomically set a bit in memory @@ -25,7 +25,7 @@ */ static inline void set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_set_bit(nr, addr); } @@ -38,7 +38,7 @@ static inline void set_bit(long nr, volatile unsigned long *addr) */ static inline void clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit(nr, addr); } @@ -54,7 +54,7 @@ static inline void clear_bit(long nr, volatile unsigned long *addr) */ static inline void change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_change_bit(nr, addr); } @@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr) */ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit(nr, addr); } @@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr) */ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_clear_bit(nr, addr); } @@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) */ static inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_change_bit(nr, addr); } diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h index ec53fdeea9ec..b9bec468ae03 100644 --- a/include/asm-generic/bitops/instrumented-lock.h +++ b/include/asm-generic/bitops/instrumented-lock.h @@ -11,7 +11,7 @@ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H -#include <linux/kasan-checks.h> +#include <linux/instrumented.h> /** * clear_bit_unlock - Clear a bit in memory, for unlock @@ -22,7 +22,7 @@ */ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) { - 
kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit_unlock(nr, addr); } @@ -37,7 +37,7 @@ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) */ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit_unlock(nr, addr); } @@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) */ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit_lock(nr, addr); } @@ -71,7 +71,7 @@ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); return arch_clear_bit_unlock_is_negative_byte(nr, addr); } /* Let everybody know we have it. */ diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h index 95ff28d128a1..20f788a25ef9 100644 --- a/include/asm-generic/bitops/instrumented-non-atomic.h +++ b/include/asm-generic/bitops/instrumented-non-atomic.h @@ -11,7 +11,7 @@ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H -#include <linux/kasan-checks.h> +#include <linux/instrumented.h> /** * __set_bit - Set a bit in memory @@ -24,7 +24,7 @@ */ static inline void __set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___set_bit(nr, addr); } @@ -39,7 +39,7 @@ static inline void __set_bit(long nr, volatile unsigned long *addr) */ static inline void __clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit(nr, addr); } @@ -54,7 +54,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr) */ static inline void __change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___change_bit(nr, addr); } @@ -68,7 +68,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr) */ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_set_bit(nr, addr); } @@ -82,7 +82,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr) */ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); return arch___test_and_clear_bit(nr, addr); } @@ -96,7 +96,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr) */ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) { - kasan_check_write(addr + BIT_WORD(nr), sizeof(long)); + instrument_write(addr + BIT_WORD(nr), sizeof(long)); return 
arch___test_and_change_bit(nr, addr); } @@ -107,7 +107,7 @@ static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr) */ static inline bool test_bit(long nr, const volatile unsigned long *addr) { - kasan_check_read(addr + BIT_WORD(nr), sizeof(long)); + instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long)); return arch_test_bit(nr, addr); } diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 384b5c835ced..c94e33ae3e7b 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -83,14 +83,19 @@ extern __printf(4, 5) void warn_slowpath_fmt(const char *file, const int line, unsigned taint, const char *fmt, ...); #define __WARN() __WARN_printf(TAINT_WARN, NULL) -#define __WARN_printf(taint, arg...) \ - warn_slowpath_fmt(__FILE__, __LINE__, taint, arg) +#define __WARN_printf(taint, arg...) do { \ + instrumentation_begin(); \ + warn_slowpath_fmt(__FILE__, __LINE__, taint, arg); \ + instrumentation_end(); \ + } while (0) #else extern __printf(1, 2) void __warn_printk(const char *fmt, ...); #define __WARN() __WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN)) #define __WARN_printf(taint, arg...) do { \ + instrumentation_begin(); \ __warn_printk(arg); \ __WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\ + instrumentation_end(); \ } while (0) #define WARN_ON_ONCE(condition) ({ \ int __ret_warn_on = !!(condition); \ diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index cac7404b2bdd..907fa5d16494 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h @@ -1,11 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_CACHEFLUSH_H -#define __ASM_CACHEFLUSH_H - -/* Keep includes the same across arches. */ -#include <linux/mm.h> - -#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 +#ifndef _ASM_GENERIC_CACHEFLUSH_H +#define _ASM_GENERIC_CACHEFLUSH_H /* * The cache doesn't need to be flushed when TLB entries change when @@ -45,12 +40,14 @@ static inline void flush_cache_page(struct vm_area_struct *vma, } #endif -#ifndef flush_dcache_page +#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE static inline void flush_dcache_page(struct page *page) { } +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 #endif + #ifndef flush_dcache_mmap_lock static inline void flush_dcache_mmap_lock(struct address_space *mapping) { @@ -69,6 +66,10 @@ static inline void flush_icache_range(unsigned long start, unsigned long end) } #endif +#ifndef flush_icache_user_range +#define flush_icache_user_range flush_icache_range +#endif + #ifndef flush_icache_page static inline void flush_icache_page(struct vm_area_struct *vma, struct page *page) @@ -76,8 +77,8 @@ static inline void flush_icache_page(struct vm_area_struct *vma, } #endif -#ifndef flush_icache_user_range -static inline void flush_icache_user_range(struct vm_area_struct *vma, +#ifndef flush_icache_user_page +static inline void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, unsigned long addr, int len) { @@ -100,7 +101,7 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ - flush_icache_user_range(vma, page, vaddr, len); \ + flush_icache_user_page(vma, page, vaddr, len); \ } while (0) #endif @@ -109,4 +110,4 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end) memcpy(dst, src, len) #endif -#endif /* __ASM_CACHEFLUSH_H */ +#endif /* _ASM_GENERIC_CACHEFLUSH_H */ diff --git a/include/asm-generic/checksum.h 
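One distinction worth noting in the bitops conversions above: the atomic variants use instrument_atomic_write(), while the non-atomic __set_bit() family and __clear_bit_unlock() switch to plain instrument_write(), so KCSAN treats them as ordinary accesses that can race (test_bit() keeps an atomic read, since it may legitimately run concurrently with the atomic bitops). A sketch of the plain-access helpers, under the same assumptions as the instrumented-atomic sketch earlier:

	/* Plain-access hooks: KCSAN checks these as ordinary reads/writes. */
	static __always_inline void instrument_read(const volatile void *v, size_t size)
	{
		kasan_check_read(v, size);
		kcsan_check_read(v, size);
	}

	static __always_inline void instrument_write(const volatile void *v, size_t size)
	{
		kasan_check_write(v, size);
		kcsan_check_write(v, size);
	}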
b/include/asm-generic/checksum.h index 34785c0f57b0..5a80f8e54300 100644 --- a/include/asm-generic/checksum.h +++ b/include/asm-generic/checksum.h @@ -25,15 +25,6 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); */ extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); -/* - * the same as csum_partial_copy, but copies from user space. - * - * here even more important to align src and dst on a 32-bit (or even - * better 64-bit) boundary - */ -extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, - int len, __wsum sum, int *csum_err); - #ifndef csum_partial_copy_nocheck #define csum_partial_copy_nocheck(src, dst, len, sum) \ csum_partial_copy((src), (dst), (len), (sum)) diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 822f433ac95c..8e1e6244a89d 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -122,7 +122,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, #ifndef __HAVE_ARCH_HUGE_PTEP_GET static inline pte_t huge_ptep_get(pte_t *ptep) { - return *ptep; + return ptep_get(ptep); } #endif diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h new file mode 100644 index 000000000000..e73a11850055 --- /dev/null +++ b/include/asm-generic/hyperv-tlfs.h @@ -0,0 +1,497 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This file contains definitions from Hyper-V Hypervisor Top-Level Functional + * Specification (TLFS): + * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs + */ + +#ifndef _ASM_GENERIC_HYPERV_TLFS_H +#define _ASM_GENERIC_HYPERV_TLFS_H + +#include <linux/types.h> +#include <linux/bits.h> +#include <linux/time64.h> + +/* + * While not explicitly listed in the TLFS, Hyper-V always runs with a page size + * of 4096. These definitions are used when communicating with Hyper-V using + * guest physical pages and guest physical page addresses, since the guest page + * size may not be 4096 on all architectures. + */ +#define HV_HYP_PAGE_SHIFT 12 +#define HV_HYP_PAGE_SIZE BIT(HV_HYP_PAGE_SHIFT) +#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) + +/* + * Hyper-V provides two categories of flags relevant to guest VMs. The + * "Features" category indicates specific functionality that is available + * to guests on this particular instance of Hyper-V. The "Features" + * are presented in four groups, each of which is 32 bits. The group A + * and B definitions are common across architectures and are listed here. + * However, not all flags are relevant on all architectures. + * + * Groups C and D vary across architectures and are listed in the + * architecture specific portion of hyperv-tlfs.h. Some of these flags exist + * on multiple architectures, but the bit positions are different so they + * cannot appear in the generic portion of hyperv-tlfs.h. + * + * The "Enlightenments" category provides recommendations on whether to use + * specific enlightenments that are available. The Enlightenments are a single + * group of 32 bits, but they vary across architectures and are listed in + * the architecture specific portion of hyperv-tlfs.h. + */ + +/* + * Group A Features. + */
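Stepping back to the hugetlb.h hunk above: huge_ptep_get() now goes through ptep_get() instead of dereferencing the pointer directly, so the PTE is read exactly once and without tearing even if hardware updates it concurrently. The generic ptep_get() is essentially a READ_ONCE() wrapper; sketched here:

	/* Sketch of the generic ptep_get() from <linux/pgtable.h>. */
	#ifndef __HAVE_ARCH_PTEP_GET
	static inline pte_t ptep_get(pte_t *ptep)
	{
		return READ_ONCE(*ptep);
	}
	#endif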
+ +/* VP Runtime register available */ +#define HV_MSR_VP_RUNTIME_AVAILABLE BIT(0) +/* Partition Reference Counter available */ +#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1) +/* Basic SynIC register available */ +#define HV_MSR_SYNIC_AVAILABLE BIT(2) +/* Synthetic Timer registers available */ +#define HV_MSR_SYNTIMER_AVAILABLE BIT(3) +/* Virtual APIC assist and VP assist page registers available */ +#define HV_MSR_APIC_ACCESS_AVAILABLE BIT(4) +/* Hypercall and Guest OS ID registers available */ +#define HV_MSR_HYPERCALL_AVAILABLE BIT(5) +/* Access virtual processor index register available */ +#define HV_MSR_VP_INDEX_AVAILABLE BIT(6) +/* Virtual system reset register available */ +#define HV_MSR_RESET_AVAILABLE BIT(7) +/* Access statistics page registers available */ +#define HV_MSR_STAT_PAGES_AVAILABLE BIT(8) +/* Partition reference TSC register is available */ +#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9) +/* Partition Guest IDLE register is available */ +#define HV_MSR_GUEST_IDLE_AVAILABLE BIT(10) +/* Partition local APIC and TSC frequency registers available */ +#define HV_ACCESS_FREQUENCY_MSRS BIT(11) +/* AccessReenlightenmentControls privilege */ +#define HV_ACCESS_REENLIGHTENMENT BIT(13) +/* AccessTscInvariantControls privilege */ +#define HV_ACCESS_TSC_INVARIANT BIT(15) + +/* + * Group B features. + */ +#define HV_CREATE_PARTITIONS BIT(0) +#define HV_ACCESS_PARTITION_ID BIT(1) +#define HV_ACCESS_MEMORY_POOL BIT(2) +#define HV_ADJUST_MESSAGE_BUFFERS BIT(3) +#define HV_POST_MESSAGES BIT(4) +#define HV_SIGNAL_EVENTS BIT(5) +#define HV_CREATE_PORT BIT(6) +#define HV_CONNECT_PORT BIT(7) +#define HV_ACCESS_STATS BIT(8) +#define HV_DEBUGGING BIT(11) +#define HV_CPU_POWER_MANAGEMENT BIT(12) + + +/* + * TSC page layout. + */ +struct ms_hyperv_tsc_page { + volatile u32 tsc_sequence; + u32 reserved1; + volatile u64 tsc_scale; + volatile s64 tsc_offset; +} __packed; + +/* + * The guest OS needs to register the guest ID with the hypervisor. + * The guest ID is a 64-bit entity and the structure of this ID is + * specified in the Hyper-V specification: + * + * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx + * + * While the current guideline does not specify how Linux guest ID(s) + * need to be generated, our plan is to publish the guidelines for + * Linux and other guest operating systems that currently are hosted + * on Hyper-V. The implementation here conforms to these as-yet + * unpublished guidelines. + * + * + * Bit(s) + * 63 - Indicates if the OS is Open Source or not; 1 is Open Source + * 62:56 - OS Type; Linux is 0x100 + * 55:48 - Distro specific identification + * 47:16 - Linux kernel version number + * 15:0 - Distro specific identification + * + * + */ + +#define HV_LINUX_VENDOR_ID 0x8100 + +/* + * Crash notification flags. + */ +#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62) +#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63) +
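Given the bit layout documented above, composing a guest ID is just a matter of shifting the fields into place; HV_LINUX_VENDOR_ID supplies the open-source bit together with the OS-type field. A hypothetical helper for illustration (the function and parameter names are not part of this header):

	static inline u64 hv_compose_guest_id(u64 kernel_version, u64 distro_id)
	{
		u64 guest_id;

		guest_id  = (u64)HV_LINUX_VENDOR_ID << 48;	/* bit 63 + OS type */
		guest_id |= kernel_version << 16;		/* bits 47:16 */
		guest_id |= distro_id & 0xFFFF;			/* bits 15:0 */

		return guest_id;
	}

+/* Declare the various hypercall operations. 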
*/ +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 +#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 +#define HVCALL_SEND_IPI 0x000b +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014 +#define HVCALL_SEND_IPI_EX 0x0015 +#define HVCALL_GET_VP_REGISTERS 0x0050 +#define HVCALL_SET_VP_REGISTERS 0x0051 +#define HVCALL_POST_MESSAGE 0x005c +#define HVCALL_SIGNAL_EVENT 0x005d +#define HVCALL_POST_DEBUG_DATA 0x0069 +#define HVCALL_RETRIEVE_DEBUG_DATA 0x006a +#define HVCALL_RESET_DEBUG_SESSION 0x006b +#define HVCALL_RETARGET_INTERRUPT 0x007e +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0 + +#define HV_FLUSH_ALL_PROCESSORS BIT(0) +#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1) +#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2) +#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3) + +enum HV_GENERIC_SET_FORMAT { + HV_GENERIC_SET_SPARSE_4K, + HV_GENERIC_SET_ALL, +}; + +#define HV_PARTITION_ID_SELF ((u64)-1) +#define HV_VP_INDEX_SELF ((u32)-2) + +#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0) +#define HV_HYPERCALL_FAST_BIT BIT(16) +#define HV_HYPERCALL_VARHEAD_OFFSET 17 +#define HV_HYPERCALL_REP_COMP_OFFSET 32 +#define HV_HYPERCALL_REP_COMP_1 BIT_ULL(32) +#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32) +#define HV_HYPERCALL_REP_START_OFFSET 48 +#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48) + +/* hypercall status code */ +#define HV_STATUS_SUCCESS 0 +#define HV_STATUS_INVALID_HYPERCALL_CODE 2 +#define HV_STATUS_INVALID_HYPERCALL_INPUT 3 +#define HV_STATUS_INVALID_ALIGNMENT 4 +#define HV_STATUS_INVALID_PARAMETER 5 +#define HV_STATUS_OPERATION_DENIED 8 +#define HV_STATUS_INSUFFICIENT_MEMORY 11 +#define HV_STATUS_INVALID_PORT_ID 17 +#define HV_STATUS_INVALID_CONNECTION_ID 18 +#define HV_STATUS_INSUFFICIENT_BUFFERS 19 + +/* + * The Hyper-V TimeRefCount register and the TSC + * page provide a guest VM clock with 100ns tick rate + */ +#define HV_CLOCK_HZ (NSEC_PER_SEC/100) + +/* Define the number of synthetic interrupt sources. */ +#define HV_SYNIC_SINT_COUNT (16) +/* Define the expected SynIC version. */ +#define HV_SYNIC_VERSION_1 (0x1) +/* Valid SynIC vectors are 16-255. */ +#define HV_SYNIC_FIRST_VALID_VECTOR (16) + +#define HV_SYNIC_CONTROL_ENABLE (1ULL << 0) +#define HV_SYNIC_SIMP_ENABLE (1ULL << 0) +#define HV_SYNIC_SIEFP_ENABLE (1ULL << 0) +#define HV_SYNIC_SINT_MASKED (1ULL << 16) +#define HV_SYNIC_SINT_AUTO_EOI (1ULL << 17) +#define HV_SYNIC_SINT_VECTOR_MASK (0xFF) + +#define HV_SYNIC_STIMER_COUNT (4) + +/* Define synthetic interrupt controller message constants. */ +#define HV_MESSAGE_SIZE (256) +#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) +#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) + +/* Define synthetic interrupt controller message flags. */ +union hv_message_flags { + __u8 asu8; + struct { + __u8 msg_pending:1; + __u8 reserved:7; + } __packed; +}; + +/* Define port identifier type. */ +union hv_port_id { + __u32 asu32; + struct { + __u32 id:24; + __u32 reserved:8; + } __packed u; +}; + +/* Define synthetic interrupt controller message header. */ +struct hv_message_header { + __u32 message_type; + __u8 payload_size; + union hv_message_flags message_flags; + __u8 reserved[2]; + union { + __u64 sender; + union hv_port_id port; + }; +} __packed; + +/* Define synthetic interrupt controller message format. 
*/ +struct hv_message { + struct hv_message_header header; + union { + __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; + } u; +} __packed; + +/* Define the synthetic interrupt message page layout. */ +struct hv_message_page { + struct hv_message sint_message[HV_SYNIC_SINT_COUNT]; +} __packed; + +/* Define timer message payload structure. */ +struct hv_timer_message_payload { + __u32 timer_index; + __u32 reserved; + __u64 expiration_time; /* When the timer expired */ + __u64 delivery_time; /* When the message was delivered */ +} __packed; + + +/* Define synthetic interrupt controller flag constants. */ +#define HV_EVENT_FLAGS_COUNT (256 * 8) +#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long)) + +/* + * Synthetic timer configuration. + */ +union hv_stimer_config { + u64 as_uint64; + struct { + u64 enable:1; + u64 periodic:1; + u64 lazy:1; + u64 auto_enable:1; + u64 apic_vector:8; + u64 direct_mode:1; + u64 reserved_z0:3; + u64 sintx:4; + u64 reserved_z1:44; + } __packed; +}; + + +/* Define the synthetic interrupt controller event flags format. */ +union hv_synic_event_flags { + unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT]; +}; + +/* Define SynIC control register. */ +union hv_synic_scontrol { + u64 as_uint64; + struct { + u64 enable:1; + u64 reserved:63; + } __packed; +}; + +/* Define synthetic interrupt source. */ +union hv_synic_sint { + u64 as_uint64; + struct { + u64 vector:8; + u64 reserved1:8; + u64 masked:1; + u64 auto_eoi:1; + u64 polling:1; + u64 reserved2:45; + } __packed; +}; + +/* Define the format of the SIMP register */ +union hv_synic_simp { + u64 as_uint64; + struct { + u64 simp_enabled:1; + u64 preserved:11; + u64 base_simp_gpa:52; + } __packed; +}; + +/* Define the format of the SIEFP register */ +union hv_synic_siefp { + u64 as_uint64; + struct { + u64 siefp_enabled:1; + u64 preserved:11; + u64 base_siefp_gpa:52; + } __packed; +}; + +struct hv_vpset { + u64 format; + u64 valid_bank_mask; + u64 bank_contents[]; +} __packed; + +/* HvCallSendSyntheticClusterIpi hypercall */ +struct hv_send_ipi { + u32 vector; + u32 reserved; + u64 cpu_mask; +} __packed; + +/* HvCallSendSyntheticClusterIpiEx hypercall */ +struct hv_send_ipi_ex { + u32 vector; + u32 reserved; + struct hv_vpset vp_set; +} __packed; + +/* HvFlushGuestPhysicalAddressSpace hypercalls */ +struct hv_guest_mapping_flush { + u64 address_space; + u64 flags; +} __packed; + +/* + * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited + * by the bitwidth of "additional_pages" in union hv_gpa_page_range. + */ +#define HV_MAX_FLUSH_PAGES (2048) + +/* HvFlushGuestPhysicalAddressList hypercall */ +union hv_gpa_page_range { + u64 address_space; + struct { + u64 additional_pages:11; + u64 largepage:1; + u64 basepfn:52; + } page; +}; + +/* + * All input flush parameters should be in a single page. The max flush + * count equals how many entries of union hv_gpa_page_range can + * be populated into the input parameter page. 
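To make the limit concrete: union hv_gpa_page_range is a single u64 (8 bytes), and the input page gives up two u64 header fields (address_space and flags), so with the 4K hypervisor page defined earlier the count works out to (4096 - 2 * 8) / 8 = 510 entries. A sanity-check sketch, illustrative rather than part of the header:

	_Static_assert(sizeof(union hv_gpa_page_range) == 8, "one u64 per rep entry");
	_Static_assert((4096 - 2 * 8) / 8 == 510, "510 flush entries per 4K input page");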
+ */ +#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) / \ + sizeof(union hv_gpa_page_range)) + +struct hv_guest_mapping_flush_list { + u64 address_space; + u64 flags; + union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT]; +}; + +/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */ +struct hv_tlb_flush { + u64 address_space; + u64 flags; + u64 processor_mask; + u64 gva_list[]; +} __packed; + +/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */ +struct hv_tlb_flush_ex { + u64 address_space; + u64 flags; + struct hv_vpset hv_vp_set; + u64 gva_list[]; +} __packed; + +/* HvRetargetDeviceInterrupt hypercall */ +union hv_msi_entry { + u64 as_uint64; + struct { + u32 address; + u32 data; + } __packed; +}; + +struct hv_interrupt_entry { + u32 source; /* 1 for MSI(-X) */ + u32 reserved1; + union hv_msi_entry msi_entry; +} __packed; + +/* + * flags for hv_device_interrupt_target.flags + */ +#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 +#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 + +struct hv_device_interrupt_target { + u32 vector; + u32 flags; + union { + u64 vp_mask; + struct hv_vpset vp_set; + }; +} __packed; + +struct hv_retarget_device_interrupt { + u64 partition_id; /* use "self" */ + u64 device_id; + struct hv_interrupt_entry int_entry; + u64 reserved2; + struct hv_device_interrupt_target int_target; +} __packed __aligned(8); + + +/* HvGetVpRegisters hypercall input with variable size reg name list*/ +struct hv_get_vp_registers_input { + struct { + u64 partitionid; + u32 vpindex; + u8 inputvtl; + u8 padding[3]; + } header; + struct input { + u32 name0; + u32 name1; + } element[]; +} __packed; + + +/* HvGetVpRegisters returns an array of these output elements */ +struct hv_get_vp_registers_output { + union { + struct { + u32 a; + u32 b; + u32 c; + u32 d; + } as32 __packed; + struct { + u64 low; + u64 high; + } as64 __packed; + }; +}; + +/* HvSetVpRegisters hypercall with variable size reg name/value list*/ +struct hv_set_vp_registers_input { + struct { + u64 partitionid; + u32 vpindex; + u8 inputvtl; + u8 padding[3]; + } header; + struct { + u32 name; + u32 padding1; + u64 padding2; + u64 valuelow; + u64 valuehigh; + } element[]; +} __packed; + +#endif diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index d39ac997dda8..8b1e020e9a03 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -448,17 +448,15 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #define IO_SPACE_LIMIT 0xffff #endif -#include <linux/logic_pio.h> - /* * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be * implemented on hardware that needs an additional delay for I/O accesses to * take effect. 
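*/

In the io.h refactor just below, each generic port accessor is split into an always-defined _inb()/_outb()-style helper plus a macro alias that is installed only when the architecture has not claimed the name, and <linux/logic_pio.h> moves below the helpers so it can rely on them either way. An architecture override then reduces to something like this hypothetical fragment (the my_arch_* names are illustrative):

	/* Hypothetical arch header: claim inl() only, keep the generic rest. */
	#define inl my_arch_inl
	static inline u32 my_arch_inl(unsigned long addr)
	{
		return my_arch_port_read32(addr);	/* assumed arch primitive */
	}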
-#ifndef inb -#define inb inb -static inline u8 inb(unsigned long addr) +#if !defined(inb) && !defined(_inb) +#define _inb _inb +static inline u8 _inb(unsigned long addr) { u8 val; @@ -469,9 +467,9 @@ static inline u8 inb(unsigned long addr) } #endif -#ifndef inw -#define inw inw -static inline u16 inw(unsigned long addr) +#if !defined(inw) && !defined(_inw) +#define _inw _inw +static inline u16 _inw(unsigned long addr) { u16 val; @@ -482,9 +480,9 @@ static inline u16 inw(unsigned long addr) } #endif -#ifndef inl -#define inl inl -static inline u32 inl(unsigned long addr) +#if !defined(inl) && !defined(_inl) +#define _inl _inl +static inline u32 _inl(unsigned long addr) { u32 val; @@ -495,9 +493,9 @@ static inline u32 inl(unsigned long addr) } #endif -#ifndef outb -#define outb outb -static inline void outb(u8 value, unsigned long addr) +#if !defined(outb) && !defined(_outb) +#define _outb _outb +static inline void _outb(u8 value, unsigned long addr) { __io_pbw(); __raw_writeb(value, PCI_IOBASE + addr); @@ -505,9 +503,9 @@ static inline void outb(u8 value, unsigned long addr) } #endif -#ifndef outw -#define outw outw -static inline void outw(u16 value, unsigned long addr) +#if !defined(outw) && !defined(_outw) +#define _outw _outw +static inline void _outw(u16 value, unsigned long addr) { __io_pbw(); __raw_writew(cpu_to_le16(value), PCI_IOBASE + addr); @@ -515,9 +513,9 @@ static inline void outw(u16 value, unsigned long addr) } #endif -#ifndef outl -#define outl outl -static inline void outl(u32 value, unsigned long addr) +#if !defined(outl) && !defined(_outl) +#define _outl _outl +static inline void _outl(u32 value, unsigned long addr) { __io_pbw(); __raw_writel(cpu_to_le32(value), PCI_IOBASE + addr); @@ -525,6 +523,32 @@ static inline void outl(u32 value, unsigned long addr) } #endif +#include <linux/logic_pio.h> + +#ifndef inb +#define inb _inb +#endif + +#ifndef inw +#define inw _inw +#endif + +#ifndef inl +#define inl _inl +#endif + +#ifndef outb +#define outb _outb +#endif + +#ifndef outw +#define outw _outw +#endif + +#ifndef outl +#define outl _outl +#endif + #ifndef inb_p #define inb_p inb_p static inline u8 inb_p(unsigned long addr) @@ -948,7 +972,7 @@ static inline void iounmap(void __iomem *addr) } #endif #elif defined(CONFIG_GENERIC_IOREMAP) -#include <asm/pgtable.h> +#include <linux/pgtable.h> void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot); void iounmap(volatile void __iomem *addr); diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index b3f1082cc435..1c4fd950f091 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -163,7 +163,7 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset, return nr_bank; } -void hyperv_report_panic(struct pt_regs *regs, long err); +void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die); void hyperv_report_panic_msg(phys_addr_t pa, size_t size); bool hv_is_hyperv_initialized(void); bool hv_is_hibernation_supported(void); diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h deleted file mode 100644 index 829bdb0d6327..000000000000 --- a/include/asm-generic/pgtable-nop4d-hack.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _PGTABLE_NOP4D_HACK_H -#define _PGTABLE_NOP4D_HACK_H - -#ifndef __ASSEMBLY__ -#include <asm-generic/5level-fixup.h> - -#define __PAGETABLE_PUD_FOLDED 1 - -/* - * Having the pud type consist of a pgd gets the size right, and allows - * us to 
conceptually access the pgd entry that this pud is folded into - * without casting. - */ -typedef struct { pgd_t pgd; } pud_t; - -#define PUD_SHIFT PGDIR_SHIFT -#define PTRS_PER_PUD 1 -#define PUD_SIZE (1UL << PUD_SHIFT) -#define PUD_MASK (~(PUD_SIZE-1)) - -/* - * The "pgd_xxx()" functions here are trivial for a folded two-level - * setup: the pud is never bad, and a pud always exists (as it's folded - * into the pgd entry) - */ -static inline int pgd_none(pgd_t pgd) { return 0; } -static inline int pgd_bad(pgd_t pgd) { return 0; } -static inline int pgd_present(pgd_t pgd) { return 1; } -static inline void pgd_clear(pgd_t *pgd) { } -#define pud_ERROR(pud) (pgd_ERROR((pud).pgd)) - -#define pgd_populate(mm, pgd, pud) do { } while (0) -#define pgd_populate_safe(mm, pgd, pud) do { } while (0) -/* - * (puds are folded into pgds so this doesn't get actually called, - * but the define is needed for a generic inline function.) - */ -#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval }) - -static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address) -{ - return (pud_t *)pgd; -} - -#define pud_val(x) (pgd_val((x).pgd)) -#define __pud(x) ((pud_t) { __pgd(x) }) - -#define pgd_page(pgd) (pud_page((pud_t){ pgd })) -#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd })) - -/* - * allocating and freeing a pud is trivial: the 1-entry pud is - * inside the pgd, so has no extra memory associated with it. - */ -#define pud_alloc_one(mm, address) NULL -#define pud_free(mm, x) do { } while (0) -#define __pud_free_tlb(tlb, x, a) do { } while (0) - -#undef pud_addr_end -#define pud_addr_end(addr, end) (end) - -#endif /* __ASSEMBLY__ */ -#endif /* _PGTABLE_NOP4D_HACK_H */ diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h index 0d9b28cba16d..3e13acd019ae 100644 --- a/include/asm-generic/pgtable-nopmd.h +++ b/include/asm-generic/pgtable-nopmd.h @@ -45,6 +45,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address) { return (pmd_t *)pud; } +#define pmd_offset pmd_offset #define pmd_val(x) (pud_val((x).pud)) #define __pmd(x) ((pmd_t) { __pud(x) } ) diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h index d3776cb494c0..a9d751fbda9e 100644 --- a/include/asm-generic/pgtable-nopud.h +++ b/include/asm-generic/pgtable-nopud.h @@ -4,9 +4,6 @@ #ifndef __ASSEMBLY__ -#ifdef __ARCH_USE_5LEVEL_HACK -#include <asm-generic/pgtable-nop4d-hack.h> -#else #include <asm-generic/pgtable-nop4d.h> #define __PAGETABLE_PUD_FOLDED 1 @@ -46,6 +43,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) { return (pud_t *)p4d; } +#define pud_offset pud_offset #define pud_val(x) (p4d_val((x).p4d)) #define __pud(x) ((pud_t) { __p4d(x) }) @@ -65,5 +63,4 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) #define pud_addr_end(addr, end) (end) #endif /* __ASSEMBLY__ */ -#endif /* !__ARCH_USE_5LEVEL_HACK */ #endif /* _PGTABLE_NOPUD_H */ diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index d1779d442aa5..66397ed10acb 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -53,6 +53,9 @@ extern char __ctors_start[], __ctors_end[]; /* Start and end of .opd section - used for function descriptors. 
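With pgtable-nop4d-hack.h and its 5level-fixup.h companion gone, every architecture walks the same five logical levels, and folding is expressed only through the pgtable-nop*d headers; the pmd_offset/pud_offset self-defines added above give generic code an #ifdef-able way to detect an architecture's own implementation. A folded walk compiles unchanged, as in this sketch (pgd_none()/pgd_bad()-style checks elided for brevity):

	static pte_t *walk_example(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d = p4d_offset(pgd, addr);	/* folded: passes the pgd through */
		pud_t *pud = pud_offset(p4d, addr);
		pmd_t *pmd = pmd_offset(pud, addr);

		return pte_offset_kernel(pmd, addr);
	}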
*/ extern char __start_opd[], __end_opd[]; +/* Start and end of instrumentation protected text section */ +extern char __noinstr_text_start[], __noinstr_text_end[]; + extern __visible const void __nosave_begin, __nosave_end; /* Function descriptor handling (if any). Override in asm/sections.h */ diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h index 238873739550..5aa8705df87e 100644 --- a/include/asm-generic/topology.h +++ b/include/asm-generic/topology.h @@ -48,7 +48,7 @@ #ifdef CONFIG_NEED_MULTIPLE_NODES #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask) #else - #define cpumask_of_node(node) ((void)node, cpu_online_mask) + #define cpumask_of_node(node) ((void)(node), cpu_online_mask) #endif #endif #ifndef pcibus_to_node diff --git a/include/asm-generic/vermagic.h b/include/asm-generic/vermagic.h new file mode 100644 index 000000000000..084274a1219e --- /dev/null +++ b/include/asm-generic/vermagic.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _ASM_GENERIC_VERMAGIC_H +#define _ASM_GENERIC_VERMAGIC_H + +#define MODULE_ARCH_VERMAGIC "" + +#endif /* _ASM_GENERIC_VERMAGIC_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 71e387a5fe90..db600ef218d7 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -541,6 +541,15 @@ __end_rodata = .; /* + * Non-instrumentable text section + */ +#define NOINSTR_TEXT \ + ALIGN_FUNCTION(); \ + __noinstr_text_start = .; \ + *(.noinstr.text) \ + __noinstr_text_end = .; + +/* * .text section. Map to function alignment to avoid address changes * during second ld run in second ld pass when generating System.map * @@ -551,6 +560,7 @@ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ + NOINSTR_TEXT \ *(.text..refcount) \ *(.ref.text) \ MEM_KEEP(init.text*) \ diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index d873f999b334..2b4d2b06ccbd 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -157,7 +157,7 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) { type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_ACOMPRESS; - mask |= CRYPTO_ALG_TYPE_MASK; + mask |= CRYPTO_ALG_TYPE_ACOMPRESS_MASK; return crypto_has_alg(alg_name, type, mask); } diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index e115f9215ed5..00a9cf98debe 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -125,6 +125,8 @@ int crypto_inst_setname(struct crypto_instance *inst, const char *name, void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); int crypto_enqueue_request(struct crypto_queue *queue, struct crypto_async_request *request); +void crypto_enqueue_request_head(struct crypto_queue *queue, + struct crypto_async_request *request); struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); static inline unsigned int crypto_queue_len(struct crypto_queue *queue) { diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h index 8c9af21efce1..c4165126937e 100644 --- a/include/crypto/drbg.h +++ b/include/crypto/drbg.h @@ -184,11 +184,7 @@ static inline size_t drbg_max_addtl(struct drbg_state *drbg) static inline size_t drbg_max_requests(struct drbg_state *drbg) { /* SP800-90A requires 2**48 maximum requests before reseeding */ -#if (__BITS_PER_LONG == 32) - return SIZE_MAX; -#else - return (1UL<<48); -#endif + return (1<<20); } /* diff --git 
a/include/crypto/engine.h b/include/crypto/engine.h index e29cd67f93c7..3f06e40d063a 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -24,7 +24,9 @@ * @idling: the engine is entering idle state * @busy: request pump is busy * @running: the engine is running - * @cur_req_prepared: current request is prepared + * @retry_support: indication that the hardware allows re-execution + * of a failed backlog request; such a request is enqueued back to the + * crypto-engine, in head position, to keep order * @list: link with the global crypto engine list * @queue_lock: spinlock to synchronise access to request queue * @queue: the crypto queue of the engine @@ -35,6 +37,8 @@ * @unprepare_crypt_hardware: there are currently no more requests on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call + * @do_batch_requests: execute a batch of requests; depends on support for + * multiple requests * @kworker: kthread worker struct for request pump * @pump_requests: work struct for scheduling work to the request pump * @priv_data: the engine private data @@ -45,7 +49,8 @@ struct crypto_engine { bool idling; bool busy; bool running; - bool cur_req_prepared; + + bool retry_support; struct list_head list; spinlock_t queue_lock; @@ -56,6 +61,8 @@ struct crypto_engine { int (*prepare_crypt_hardware)(struct crypto_engine *engine); int (*unprepare_crypt_hardware)(struct crypto_engine *engine); + int (*do_batch_requests)(struct crypto_engine *engine); + struct kthread_worker *kworker; struct kthread_work pump_requests; @@ -102,6 +109,10 @@ void crypto_finalize_skcipher_request(struct crypto_engine *engine, int crypto_engine_start(struct crypto_engine *engine); int crypto_engine_stop(struct crypto_engine *engine); struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); +struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, + bool retry_support, + int (*cbk_do_batch)(struct crypto_engine *engine), + bool rt, int qlen); int crypto_engine_exit(struct crypto_engine *engine); #endif /* _CRYPTO_ENGINE_H */ diff --git a/include/crypto/hash.h b/include/crypto/hash.h index cee446c59497..4829d2367eda 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -856,6 +856,25 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out); /** + * crypto_shash_tfm_digest() - calculate message digest for buffer + * @tfm: hash transformation object + * @data: see crypto_shash_update() + * @len: see crypto_shash_update() + * @out: see crypto_shash_final() + * + * This is a simplified version of crypto_shash_digest() for users who don't + * want to allocate their own hash descriptor (shash_desc). Instead, + * crypto_shash_tfm_digest() takes a hash transformation object (crypto_shash) + * directly, and it allocates a hash descriptor on the stack internally. + * Note that this stack allocation may be fairly large. + * + * Context: Any context. + * Return: 0 on success; < 0 if an error occurred.
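+ *
+ * A minimal usage sketch (an illustration only, assuming @tfm was allocated
+ * via crypto_alloc_shash("sha256", 0, 0) and digest[] holds
+ * SHA256_DIGEST_SIZE bytes):
+ *
+ *	int err = crypto_shash_tfm_digest(tfm, data, len, digest);
+ *	if (err)
+ *		return err;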
+ */ +int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data, + unsigned int len, u8 *out); + +/** * crypto_shash_export() - extract operational state for message digest * @desc: reference to the operational state handle whose state is exported * @out: output buffer of sufficient size that can hold the hash state diff --git a/include/crypto/sha.h b/include/crypto/sha.h index 5c2132c71900..10753ff71d46 100644 --- a/include/crypto/sha.h +++ b/include/crypto/sha.h @@ -114,6 +114,16 @@ extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash); /* + * An implementation of SHA-1's compression function. Don't use in new code! + * You shouldn't be using SHA-1, and even if you *have* to use SHA-1, this isn't + * the correct way to hash something with SHA-1 (use crypto_shash instead). + */ +#define SHA1_DIGEST_WORDS (SHA1_DIGEST_SIZE / 4) +#define SHA1_WORKSPACE_WORDS 16 +void sha1_init(__u32 *buf); +void sha1_transform(__u32 *digest, const char *data, __u32 *W); + +/* * Stand-alone implementation of the SHA256 algorithm. It is designed to * have as little dependencies as possible so it can be used in the * kexec_file purgatory. In other cases you should generally use the @@ -123,7 +133,7 @@ extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data, * For details see lib/crypto/sha256.c */ -static inline int sha256_init(struct sha256_state *sctx) +static inline void sha256_init(struct sha256_state *sctx) { sctx->state[0] = SHA256_H0; sctx->state[1] = SHA256_H1; @@ -134,14 +144,11 @@ static inline int sha256_init(struct sha256_state *sctx) sctx->state[6] = SHA256_H6; sctx->state[7] = SHA256_H7; sctx->count = 0; - - return 0; } -extern int sha256_update(struct sha256_state *sctx, const u8 *input, - unsigned int length); -extern int sha256_final(struct sha256_state *sctx, u8 *hash); +void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha256_final(struct sha256_state *sctx, u8 *out); -static inline int sha224_init(struct sha256_state *sctx) +static inline void sha224_init(struct sha256_state *sctx) { sctx->state[0] = SHA224_H0; sctx->state[1] = SHA224_H1; @@ -152,11 +159,8 @@ static inline int sha224_init(struct sha256_state *sctx) sctx->state[6] = SHA224_H6; sctx->state[7] = SHA224_H7; sctx->count = 0; - - return 0; } -extern int sha224_update(struct sha256_state *sctx, const u8 *input, - unsigned int length); -extern int sha224_final(struct sha256_state *sctx, u8 *hash); +void sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len); +void sha224_final(struct sha256_state *sctx, u8 *out); #endif diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h index cea60cff80bd..6ded110783ae 100644 --- a/include/crypto/sha256_base.h +++ b/include/crypto/sha256_base.h @@ -22,14 +22,16 @@ static inline int sha224_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - return sha224_init(sctx); + sha224_init(sctx); + return 0; } static inline int sha256_base_init(struct shash_desc *desc) { struct sha256_state *sctx = shash_desc_ctx(desc); - return sha256_init(sctx); + sha256_init(sctx); + return 0; } static inline int sha256_base_do_update(struct shash_desc *desc, diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h index 27bdd273fc4e..77941efb5426 100644 --- a/include/drm/drm_displayid.h +++ b/include/drm/drm_displayid.h @@ -89,7 +89,7 @@ struct displayid_detailed_timings_1 { struct displayid_detailed_timing_block { struct 
displayid_block base; - struct displayid_detailed_timings_1 timings[0]; + struct displayid_detailed_timings_1 timings[]; }; #define for_each_displayid_db(displayid, block, idx, length) \ diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 92b8d326ab4a..e47dc22ebf50 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h @@ -1218,6 +1218,139 @@ struct dp_sdp { #define EDP_VSC_PSR_UPDATE_RFB (1<<1) #define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2) +/** + * enum dp_pixelformat - drm DP Pixel encoding formats + * + * This enum is used to indicate DP VSC SDP Pixel encoding formats. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * + * @DP_PIXELFORMAT_RGB: RGB pixel encoding format + * @DP_PIXELFORMAT_YUV444: YCbCr 4:4:4 pixel encoding format + * @DP_PIXELFORMAT_YUV422: YCbCr 4:2:2 pixel encoding format + * @DP_PIXELFORMAT_YUV420: YCbCr 4:2:0 pixel encoding format + * @DP_PIXELFORMAT_Y_ONLY: Y Only pixel encoding format + * @DP_PIXELFORMAT_RAW: RAW pixel encoding format + * @DP_PIXELFORMAT_RESERVED: Reserved pixel encoding format + */ +enum dp_pixelformat { + DP_PIXELFORMAT_RGB = 0, + DP_PIXELFORMAT_YUV444 = 0x1, + DP_PIXELFORMAT_YUV422 = 0x2, + DP_PIXELFORMAT_YUV420 = 0x3, + DP_PIXELFORMAT_Y_ONLY = 0x4, + DP_PIXELFORMAT_RAW = 0x5, + DP_PIXELFORMAT_RESERVED = 0x6, +}; + +/** + * enum dp_colorimetry - drm DP Colorimetry formats + * + * This enum is used to indicate DP VSC SDP Colorimetry formats. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] and a name of enum member follows DRM_MODE_COLORIMETRY definition. + * + * @DP_COLORIMETRY_DEFAULT: sRGB (IEC 61966-2-1) or + * ITU-R BT.601 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FIXED: RGB wide gamut fixed point colorimetry format + * @DP_COLORIMETRY_BT709_YCC: ITU-R BT.709 colorimetry format + * @DP_COLORIMETRY_RGB_WIDE_FLOAT: RGB wide gamut floating point + * (scRGB (IEC 61966-2-2)) colorimetry format + * @DP_COLORIMETRY_XVYCC_601: xvYCC601 colorimetry format + * @DP_COLORIMETRY_OPRGB: OpRGB colorimetry format + * @DP_COLORIMETRY_XVYCC_709: xvYCC709 colorimetry format + * @DP_COLORIMETRY_DCI_P3_RGB: DCI-P3 (SMPTE RP 431-2) colorimetry format + * @DP_COLORIMETRY_SYCC_601: sYCC601 colorimetry format + * @DP_COLORIMETRY_RGB_CUSTOM: RGB Custom Color Profile colorimetry format + * @DP_COLORIMETRY_OPYCC_601: opYCC601 colorimetry format + * @DP_COLORIMETRY_BT2020_RGB: ITU-R BT.2020 R' G' B' colorimetry format + * @DP_COLORIMETRY_BT2020_CYCC: ITU-R BT.2020 Y'c C'bc C'rc colorimetry format + * @DP_COLORIMETRY_BT2020_YCC: ITU-R BT.2020 Y' C'b C'r colorimetry format + */ +enum dp_colorimetry { + DP_COLORIMETRY_DEFAULT = 0, + DP_COLORIMETRY_RGB_WIDE_FIXED = 0x1, + DP_COLORIMETRY_BT709_YCC = 0x1, + DP_COLORIMETRY_RGB_WIDE_FLOAT = 0x2, + DP_COLORIMETRY_XVYCC_601 = 0x2, + DP_COLORIMETRY_OPRGB = 0x3, + DP_COLORIMETRY_XVYCC_709 = 0x3, + DP_COLORIMETRY_DCI_P3_RGB = 0x4, + DP_COLORIMETRY_SYCC_601 = 0x4, + DP_COLORIMETRY_RGB_CUSTOM = 0x5, + DP_COLORIMETRY_OPYCC_601 = 0x5, + DP_COLORIMETRY_BT2020_RGB = 0x6, + DP_COLORIMETRY_BT2020_CYCC = 0x6, + DP_COLORIMETRY_BT2020_YCC = 0x7, +}; + +/** + * enum dp_dynamic_range - drm DP Dynamic Range + * + * This enum is used to indicate DP VSC SDP Dynamic Range. 
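+ * (VESA range corresponds to full-range quantization; CTA range is the
+ * limited range defined in CTA-861.)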
+ * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * + * @DP_DYNAMIC_RANGE_VESA: VESA range + * @DP_DYNAMIC_RANGE_CTA: CTA range + */ +enum dp_dynamic_range { + DP_DYNAMIC_RANGE_VESA = 0, + DP_DYNAMIC_RANGE_CTA = 1, +}; + +/** + * enum dp_content_type - drm DP Content Type + * + * This enum is used to indicate DP VSC SDP Content Types. + * It is based on DP 1.4 spec [Table 2-117: VSC SDP Payload for DB16 through + * DB18] + * CTA-861-G defines content types and expected processing by a sink device + * + * @DP_CONTENT_TYPE_NOT_DEFINED: Not defined type + * @DP_CONTENT_TYPE_GRAPHICS: Graphics type + * @DP_CONTENT_TYPE_PHOTO: Photo type + * @DP_CONTENT_TYPE_VIDEO: Video type + * @DP_CONTENT_TYPE_GAME: Game type + */ +enum dp_content_type { + DP_CONTENT_TYPE_NOT_DEFINED = 0x00, + DP_CONTENT_TYPE_GRAPHICS = 0x01, + DP_CONTENT_TYPE_PHOTO = 0x02, + DP_CONTENT_TYPE_VIDEO = 0x03, + DP_CONTENT_TYPE_GAME = 0x04, +}; + +/** + * struct drm_dp_vsc_sdp - drm DP VSC SDP + * + * This structure represents a DP VSC SDP as used by drm. + * It is based on DP 1.4 spec [Table 2-116: VSC SDP Header Bytes] and + * [Table 2-117: VSC SDP Payload for DB16 through DB18] + * + * @sdp_type: secondary-data packet type + * @revision: revision number + * @length: number of valid data bytes + * @pixelformat: pixel encoding format + * @colorimetry: colorimetry format + * @bpc: bits per color + * @dynamic_range: dynamic range information + * @content_type: CTA-861-G defines content types and expected processing by a sink device + */ +struct drm_dp_vsc_sdp { + unsigned char sdp_type; + unsigned char revision; + unsigned char length; + enum dp_pixelformat pixelformat; + enum dp_colorimetry colorimetry; + int bpc; + enum dp_dynamic_range dynamic_range; + enum dp_content_type content_type; +}; + +void drm_dp_vsc_sdp_log(const char *level, struct device *dev, + const struct drm_dp_vsc_sdp *vsc); + int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]); static inline int @@ -1565,6 +1698,13 @@ enum drm_dp_quirk { * capabilities advertised. */ DP_QUIRK_FORCE_DPCD_BACKLIGHT, + /** + * @DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS: + * + * The device supports a link rate of 3.24 Gbps (multiplier 0xc) despite + * the DP_MAX_LINK_RATE register reporting a lower max multiplier.
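+ * DP_MAX_LINK_RATE is expressed in units of 0.27 Gbps per lane, so the
+ * 0xc multiplier works out to 12 * 0.27 Gbps = 3.24 Gbps.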
+ */ + DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS, }; /** diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h index fc47a183526e..eee3c9de6c4f 100644 --- a/include/drm/drm_modes.h +++ b/include/drm/drm_modes.h @@ -48,7 +48,7 @@ struct videomode; * @MODE_HSYNC: hsync out of range * @MODE_VSYNC: vsync out of range * @MODE_H_ILLEGAL: mode has illegal horizontal timings - * @MODE_V_ILLEGAL: mode has illegal horizontal timings + * @MODE_V_ILLEGAL: mode has illegal vertical timings * @MODE_BAD_WIDTH: requires an unsupported linepitch * @MODE_NOMODE: no mode with a matching name * @MODE_NO_INTERLACE: interlaced mode not supported diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 1d2c12219f44..662d8351c87a 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -593,12 +593,16 @@ /* TGL */ #define INTEL_TGL_12_IDS(info) \ - INTEL_VGA_DEVICE(0x9A49, info), \ INTEL_VGA_DEVICE(0x9A40, info), \ + INTEL_VGA_DEVICE(0x9A49, info), \ INTEL_VGA_DEVICE(0x9A59, info), \ INTEL_VGA_DEVICE(0x9A60, info), \ INTEL_VGA_DEVICE(0x9A68, info), \ INTEL_VGA_DEVICE(0x9A70, info), \ - INTEL_VGA_DEVICE(0x9A78, info) + INTEL_VGA_DEVICE(0x9A78, info), \ + INTEL_VGA_DEVICE(0x9AC0, info), \ + INTEL_VGA_DEVICE(0x9AC9, info), \ + INTEL_VGA_DEVICE(0x9AD9, info), \ + INTEL_VGA_DEVICE(0x9AF8, info) #endif /* _I915_PCIIDS_H */ diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index a6ec6101d9ec..b1c705a93517 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -666,10 +666,6 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo); int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev); -void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot); - -void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot); - /** * ttm_bo_io * diff --git a/include/dt-bindings/clock/agilex-clock.h b/include/dt-bindings/clock/agilex-clock.h new file mode 100644 index 000000000000..f19cf8ccbdd2 --- /dev/null +++ b/include/dt-bindings/clock/agilex-clock.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019, Intel Corporation + */ + +#ifndef __AGILEX_CLOCK_H +#define __AGILEX_CLOCK_H + +/* fixed rate clocks */ +#define AGILEX_OSC1 0 +#define AGILEX_CB_INTOSC_HS_DIV2_CLK 1 +#define AGILEX_CB_INTOSC_LS_CLK 2 +#define AGILEX_L4_SYS_FREE_CLK 3 +#define AGILEX_F2S_FREE_CLK 4 + +/* PLL clocks */ +#define AGILEX_MAIN_PLL_CLK 5 +#define AGILEX_MAIN_PLL_C0_CLK 6 +#define AGILEX_MAIN_PLL_C1_CLK 7 +#define AGILEX_MAIN_PLL_C2_CLK 8 +#define AGILEX_MAIN_PLL_C3_CLK 9 +#define AGILEX_PERIPH_PLL_CLK 10 +#define AGILEX_PERIPH_PLL_C0_CLK 11 +#define AGILEX_PERIPH_PLL_C1_CLK 12 +#define AGILEX_PERIPH_PLL_C2_CLK 13 +#define AGILEX_PERIPH_PLL_C3_CLK 14 +#define AGILEX_MPU_FREE_CLK 15 +#define AGILEX_MPU_CCU_CLK 16 +#define AGILEX_BOOT_CLK 17 + +/* fixed factor clocks */ +#define AGILEX_L3_MAIN_FREE_CLK 18 +#define AGILEX_NOC_FREE_CLK 19 +#define AGILEX_S2F_USR0_CLK 20 +#define AGILEX_NOC_CLK 21 +#define AGILEX_EMAC_A_FREE_CLK 22 +#define AGILEX_EMAC_B_FREE_CLK 23 +#define AGILEX_EMAC_PTP_FREE_CLK 24 +#define AGILEX_GPIO_DB_FREE_CLK 25 +#define AGILEX_SDMMC_FREE_CLK 26 +#define AGILEX_S2F_USER0_FREE_CLK 27 +#define AGILEX_S2F_USER1_FREE_CLK 28 +#define AGILEX_PSI_REF_FREE_CLK 29 + +/* Gate clocks */ +#define AGILEX_MPU_CLK 30 +#define AGILEX_MPU_L2RAM_CLK 31 +#define AGILEX_MPU_PERIPH_CLK 32 +#define AGILEX_L4_MAIN_CLK 33 +#define AGILEX_L4_MP_CLK 34 +#define AGILEX_L4_SP_CLK 35 
+#define AGILEX_CS_AT_CLK 36 +#define AGILEX_CS_TRACE_CLK 37 +#define AGILEX_CS_PDBG_CLK 38 +#define AGILEX_CS_TIMER_CLK 39 +#define AGILEX_S2F_USER0_CLK 40 +#define AGILEX_EMAC0_CLK 41 +#define AGILEX_EMAC1_CLK 43 +#define AGILEX_EMAC2_CLK 44 +#define AGILEX_EMAC_PTP_CLK 45 +#define AGILEX_GPIO_DB_CLK 46 +#define AGILEX_NAND_CLK 47 +#define AGILEX_PSI_REF_CLK 48 +#define AGILEX_S2F_USER1_CLK 49 +#define AGILEX_SDMMC_CLK 50 +#define AGILEX_SPI_M_CLK 51 +#define AGILEX_USB_CLK 52 +#define AGILEX_NUM_CLKS 53 + +#endif /* __AGILEX_CLOCK_H */ diff --git a/include/dt-bindings/clock/at91.h b/include/dt-bindings/clock/at91.h index 38b5554153c8..eba17106608b 100644 --- a/include/dt-bindings/clock/at91.h +++ b/include/dt-bindings/clock/at91.h @@ -12,6 +12,7 @@ #define PMC_TYPE_SYSTEM 1 #define PMC_TYPE_PERIPHERAL 2 #define PMC_TYPE_GCK 3 +#define PMC_TYPE_PROGRAMMABLE 4 #define PMC_SLOW 0 #define PMC_MCK 1 @@ -20,6 +21,9 @@ #define PMC_MCK2 4 #define PMC_I2S0_MUX 5 #define PMC_I2S1_MUX 6 +#define PMC_PLLACK 7 +#define PMC_PLLBCK 8 +#define PMC_AUDIOPLLCK 9 #ifndef AT91_PMC_MOSCS #define AT91_PMC_MOSCS 0 /* MOSCS Flag */ diff --git a/include/dt-bindings/clock/bt1-ccu.h b/include/dt-bindings/clock/bt1-ccu.h new file mode 100644 index 000000000000..5f166d27a00a --- /dev/null +++ b/include/dt-bindings/clock/bt1-ccu.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC + * + * Baikal-T1 CCU clock indices + */ +#ifndef __DT_BINDINGS_CLOCK_BT1_CCU_H +#define __DT_BINDINGS_CLOCK_BT1_CCU_H + +#define CCU_CPU_PLL 0 +#define CCU_SATA_PLL 1 +#define CCU_DDR_PLL 2 +#define CCU_PCIE_PLL 3 +#define CCU_ETH_PLL 4 + +#define CCU_AXI_MAIN_CLK 0 +#define CCU_AXI_DDR_CLK 1 +#define CCU_AXI_SATA_CLK 2 +#define CCU_AXI_GMAC0_CLK 3 +#define CCU_AXI_GMAC1_CLK 4 +#define CCU_AXI_XGMAC_CLK 5 +#define CCU_AXI_PCIE_M_CLK 6 +#define CCU_AXI_PCIE_S_CLK 7 +#define CCU_AXI_USB_CLK 8 +#define CCU_AXI_HWA_CLK 9 +#define CCU_AXI_SRAM_CLK 10 + +#define CCU_SYS_SATA_REF_CLK 0 +#define CCU_SYS_APB_CLK 1 +#define CCU_SYS_GMAC0_TX_CLK 2 +#define CCU_SYS_GMAC0_PTP_CLK 3 +#define CCU_SYS_GMAC1_TX_CLK 4 +#define CCU_SYS_GMAC1_PTP_CLK 5 +#define CCU_SYS_XGMAC_REF_CLK 6 +#define CCU_SYS_XGMAC_PTP_CLK 7 +#define CCU_SYS_USB_CLK 8 +#define CCU_SYS_PVT_CLK 9 +#define CCU_SYS_HWA_CLK 10 +#define CCU_SYS_UART_CLK 11 +#define CCU_SYS_I2C1_CLK 12 +#define CCU_SYS_I2C2_CLK 13 +#define CCU_SYS_GPIO_CLK 14 +#define CCU_SYS_TIMER0_CLK 15 +#define CCU_SYS_TIMER1_CLK 16 +#define CCU_SYS_TIMER2_CLK 17 +#define CCU_SYS_WDT_CLK 18 + +#endif /* __DT_BINDINGS_CLOCK_BT1_CCU_H */ diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h index 38145bdcd975..b58370d146e2 100644 --- a/include/dt-bindings/clock/imx7ulp-clock.h +++ b/include/dt-bindings/clock/imx7ulp-clock.h @@ -58,7 +58,10 @@ #define IMX7ULP_CLK_HSRUN_SYS_SEL 44 #define IMX7ULP_CLK_HSRUN_CORE_DIV 45 -#define IMX7ULP_CLK_SCG1_END 46 +#define IMX7ULP_CLK_CORE 46 +#define IMX7ULP_CLK_HSRUN_CORE 47 + +#define IMX7ULP_CLK_SCG1_END 48 /* PCC2 */ #define IMX7ULP_CLK_DMA1 0 diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h index 47ab082238b4..7a23f289b27f 100644 --- a/include/dt-bindings/clock/imx8mp-clock.h +++ b/include/dt-bindings/clock/imx8mp-clock.h @@ -296,6 +296,94 @@ #define IMX8MP_CLK_ARM 287 #define IMX8MP_CLK_A53_CORE 288 -#define IMX8MP_CLK_END 289 +#define IMX8MP_SYS_PLL1_40M_CG 289 +#define IMX8MP_SYS_PLL1_80M_CG 290 +#define 
IMX8MP_SYS_PLL1_100M_CG 291 +#define IMX8MP_SYS_PLL1_133M_CG 292 +#define IMX8MP_SYS_PLL1_160M_CG 293 +#define IMX8MP_SYS_PLL1_200M_CG 294 +#define IMX8MP_SYS_PLL1_266M_CG 295 +#define IMX8MP_SYS_PLL1_400M_CG 296 +#define IMX8MP_SYS_PLL2_50M_CG 297 +#define IMX8MP_SYS_PLL2_100M_CG 298 +#define IMX8MP_SYS_PLL2_125M_CG 299 +#define IMX8MP_SYS_PLL2_166M_CG 300 +#define IMX8MP_SYS_PLL2_200M_CG 301 +#define IMX8MP_SYS_PLL2_250M_CG 302 +#define IMX8MP_SYS_PLL2_333M_CG 303 +#define IMX8MP_SYS_PLL2_500M_CG 304 + +#define IMX8MP_CLK_M7_CORE 305 +#define IMX8MP_CLK_ML_CORE 306 +#define IMX8MP_CLK_GPU3D_CORE 307 +#define IMX8MP_CLK_GPU3D_SHADER_CORE 308 +#define IMX8MP_CLK_GPU2D_CORE 309 +#define IMX8MP_CLK_AUDIO_AXI 310 +#define IMX8MP_CLK_HSIO_AXI 311 +#define IMX8MP_CLK_MEDIA_ISP 312 + +#define IMX8MP_CLK_END 313 + +#define IMX8MP_CLK_AUDIOMIX_SAI1_IPG 0 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1 1 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2 2 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK3 3 +#define IMX8MP_CLK_AUDIOMIX_SAI2_IPG 4 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1 5 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2 6 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK3 7 +#define IMX8MP_CLK_AUDIOMIX_SAI3_IPG 8 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1 9 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2 10 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK3 11 +#define IMX8MP_CLK_AUDIOMIX_SAI5_IPG 12 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1 13 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2 14 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK3 15 +#define IMX8MP_CLK_AUDIOMIX_SAI6_IPG 16 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1 17 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2 18 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK3 19 +#define IMX8MP_CLK_AUDIOMIX_SAI7_IPG 20 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1 21 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2 22 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK3 23 +#define IMX8MP_CLK_AUDIOMIX_ASRC_IPG 24 +#define IMX8MP_CLK_AUDIOMIX_PDM_IPG 25 +#define IMX8MP_CLK_AUDIOMIX_SDMA2_ROOT 26 +#define IMX8MP_CLK_AUDIOMIX_SDMA3_ROOT 27 +#define IMX8MP_CLK_AUDIOMIX_SPBA2_ROOT 28 +#define IMX8MP_CLK_AUDIOMIX_DSP_ROOT 29 +#define IMX8MP_CLK_AUDIOMIX_DSPDBG_ROOT 30 +#define IMX8MP_CLK_AUDIOMIX_EARC_IPG 31 +#define IMX8MP_CLK_AUDIOMIX_OCRAMA_IPG 32 +#define IMX8MP_CLK_AUDIOMIX_AUD2HTX_IPG 33 +#define IMX8MP_CLK_AUDIOMIX_EDMA_ROOT 34 +#define IMX8MP_CLK_AUDIOMIX_AUDPLL_ROOT 35 +#define IMX8MP_CLK_AUDIOMIX_MU2_ROOT 36 +#define IMX8MP_CLK_AUDIOMIX_MU3_ROOT 37 +#define IMX8MP_CLK_AUDIOMIX_EARC_PHY 38 +#define IMX8MP_CLK_AUDIOMIX_PDM_ROOT 39 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK1_SEL 40 +#define IMX8MP_CLK_AUDIOMIX_SAI1_MCLK2_SEL 41 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK1_SEL 42 +#define IMX8MP_CLK_AUDIOMIX_SAI2_MCLK2_SEL 43 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK1_SEL 44 +#define IMX8MP_CLK_AUDIOMIX_SAI3_MCLK2_SEL 45 +#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK1_SEL 46 +#define IMX8MP_CLK_AUDIOMIX_SAI4_MCLK2_SEL 47 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK1_SEL 48 +#define IMX8MP_CLK_AUDIOMIX_SAI5_MCLK2_SEL 49 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK1_SEL 50 +#define IMX8MP_CLK_AUDIOMIX_SAI6_MCLK2_SEL 51 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK1_SEL 52 +#define IMX8MP_CLK_AUDIOMIX_SAI7_MCLK2_SEL 53 +#define IMX8MP_CLK_AUDIOMIX_PDM_SEL 54 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_REF_SEL 55 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL 56 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_BYPASS 57 +#define IMX8MP_CLK_AUDIOMIX_SAI_PLL_OUT 58 + +#define IMX8MP_CLK_AUDIOMIX_END 59 #endif diff --git a/include/dt-bindings/clock/intel,lgm-clk.h b/include/dt-bindings/clock/intel,lgm-clk.h new file mode 100644 index 
000000000000..92f5be6490bb --- /dev/null +++ b/include/dt-bindings/clock/intel,lgm-clk.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2020 Intel Corporation. + * Lei Chuanhua <Chuanhua.lei@intel.com> + * Zhu Yixin <Yixin.zhu@intel.com> + */ +#ifndef __INTEL_LGM_CLK_H +#define __INTEL_LGM_CLK_H + +/* PLL clocks */ +#define LGM_CLK_OSC 1 +#define LGM_CLK_PLLPP 2 +#define LGM_CLK_PLL2 3 +#define LGM_CLK_PLL0CZ 4 +#define LGM_CLK_PLL0B 5 +#define LGM_CLK_PLL1 6 +#define LGM_CLK_LJPLL3 7 +#define LGM_CLK_LJPLL4 8 +#define LGM_CLK_PLL0CM0 9 +#define LGM_CLK_PLL0CM1 10 + +/* clocks from PLLs */ + +/* ROPLL clocks */ +#define LGM_CLK_PP_HW 15 +#define LGM_CLK_PP_UC 16 +#define LGM_CLK_PP_FXD 17 +#define LGM_CLK_PP_TBM 18 + +/* PLL2 clocks */ +#define LGM_CLK_DDR 20 + +/* PLL0CZ */ +#define LGM_CLK_CM 25 +#define LGM_CLK_IC 26 +#define LGM_CLK_SDXC3 27 + +/* PLL0B */ +#define LGM_CLK_NGI 30 +#define LGM_CLK_NOC4 31 +#define LGM_CLK_SW 32 +#define LGM_CLK_QSPI 33 +#define LGM_CLK_CQEM LGM_CLK_SW +#define LGM_CLK_EMMC5 LGM_CLK_NOC4 + +/* PLL1 */ +#define LGM_CLK_CT 35 +#define LGM_CLK_DSP 36 +#define LGM_CLK_VIF 37 + +/* LJPLL3 */ +#define LGM_CLK_CML 40 +#define LGM_CLK_SERDES 41 +#define LGM_CLK_POOL 42 +#define LGM_CLK_PTP 43 + +/* LJPLL4 */ +#define LGM_CLK_PCIE 45 +#define LGM_CLK_SATA LGM_CLK_PCIE + +/* PLL0CM0 */ +#define LGM_CLK_CPU0 50 + +/* PLL0CM1 */ +#define LGM_CLK_CPU1 55 + +/* Miscellaneous clocks */ +#define LGM_CLK_EMMC4 60 +#define LGM_CLK_SDXC2 61 +#define LGM_CLK_EMMC 62 +#define LGM_CLK_SDXC 63 +#define LGM_CLK_SLIC 64 +#define LGM_CLK_DCL 65 +#define LGM_CLK_DOCSIS 66 +#define LGM_CLK_PCM 67 +#define LGM_CLK_DDR_PHY 68 +#define LGM_CLK_PONDEF 69 +#define LGM_CLK_PL25M 70 +#define LGM_CLK_PL10M 71 +#define LGM_CLK_PL1544K 72 +#define LGM_CLK_PL2048K 73 +#define LGM_CLK_PL8K 74 +#define LGM_CLK_PON_NTR 75 +#define LGM_CLK_SYNC0 76 +#define LGM_CLK_SYNC1 77 +#define LGM_CLK_PROGDIV 78 +#define LGM_CLK_OD0 79 +#define LGM_CLK_OD1 80 +#define LGM_CLK_CBPHY0 81 +#define LGM_CLK_CBPHY1 82 +#define LGM_CLK_CBPHY2 83 +#define LGM_CLK_CBPHY3 84 + +/* Gate clocks */ +/* Gate CLK0 */ +#define LGM_GCLK_C55 100 +#define LGM_GCLK_QSPI 101 +#define LGM_GCLK_EIP197 102 +#define LGM_GCLK_VAULT 103 +#define LGM_GCLK_TOE 104 +#define LGM_GCLK_SDXC 105 +#define LGM_GCLK_EMMC 106 +#define LGM_GCLK_SPI_DBG 107 +#define LGM_GCLK_DMA3 108 + +/* Gate CLK1 */ +#define LGM_GCLK_DMA0 120 +#define LGM_GCLK_LEDC0 121 +#define LGM_GCLK_LEDC1 122 +#define LGM_GCLK_I2S0 123 +#define LGM_GCLK_I2S1 124 +#define LGM_GCLK_EBU 125 +#define LGM_GCLK_PWM 126 +#define LGM_GCLK_I2C0 127 +#define LGM_GCLK_I2C1 128 +#define LGM_GCLK_I2C2 129 +#define LGM_GCLK_I2C3 130 +#define LGM_GCLK_SSC0 131 +#define LGM_GCLK_SSC1 132 +#define LGM_GCLK_SSC2 133 +#define LGM_GCLK_SSC3 134 +#define LGM_GCLK_GPTC0 135 +#define LGM_GCLK_GPTC1 136 +#define LGM_GCLK_GPTC2 137 +#define LGM_GCLK_GPTC3 138 +#define LGM_GCLK_ASC0 139 +#define LGM_GCLK_ASC1 140 +#define LGM_GCLK_ASC2 141 +#define LGM_GCLK_ASC3 142 +#define LGM_GCLK_PCM0 143 +#define LGM_GCLK_PCM1 144 +#define LGM_GCLK_PCM2 145 + +/* Gate CLK2 */ +#define LGM_GCLK_PCIE10 150 +#define LGM_GCLK_PCIE11 151 +#define LGM_GCLK_PCIE30 152 +#define LGM_GCLK_PCIE31 153 +#define LGM_GCLK_PCIE20 154 +#define LGM_GCLK_PCIE21 155 +#define LGM_GCLK_PCIE40 156 +#define LGM_GCLK_PCIE41 157 +#define LGM_GCLK_XPCS0 158 +#define LGM_GCLK_XPCS1 159 +#define LGM_GCLK_XPCS2 160 +#define LGM_GCLK_XPCS3 161 +#define LGM_GCLK_SATA0 162 +#define 
LGM_GCLK_SATA1 163 +#define LGM_GCLK_SATA2 164 +#define LGM_GCLK_SATA3 165 + +/* Gate CLK3 */ +#define LGM_GCLK_ARCEM4 170 +#define LGM_GCLK_IDMAR1 171 +#define LGM_GCLK_IDMAT0 172 +#define LGM_GCLK_IDMAT1 173 +#define LGM_GCLK_IDMAT2 174 +#define LGM_GCLK_PPV4 175 +#define LGM_GCLK_GSWIPO 176 +#define LGM_GCLK_CQEM 177 +#define LGM_GCLK_XPCS5 178 +#define LGM_GCLK_USB1 179 +#define LGM_GCLK_USB2 180 + +#endif /* __INTEL_LGM_CLK_H */ diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h new file mode 100644 index 000000000000..20664776f497 --- /dev/null +++ b/include/dt-bindings/clock/marvell,mmp2-audio.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause) */ +#ifndef __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H +#define __DT_BINDINGS_CLOCK_MARVELL_MMP2_AUDIO_H + +#define MMP2_CLK_AUDIO_SYSCLK 0 +#define MMP2_CLK_AUDIO_SSPA0 1 +#define MMP2_CLK_AUDIO_SSPA1 2 + +#define MMP2_CLK_AUDIO_NR_CLKS 3 +#endif diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 06bb7fe4c62f..87f5ad5df72f 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -29,6 +29,8 @@ #define MMP3_CLK_PLL1_P 28 #define MMP3_CLK_PLL2_P 29 #define MMP3_CLK_PLL3 30 +#define MMP2_CLK_I2S0 31 +#define MMP2_CLK_I2S1 32 /* apb periphrals */ #define MMP2_CLK_TWSI0 60 @@ -87,6 +89,7 @@ #define MMP3_CLK_GPU_3D MMP2_CLK_GPU_3D #define MMP3_CLK_GPU_2D 125 #define MMP3_CLK_SDH4 126 +#define MMP2_CLK_AUDIO 127 #define MMP2_NR_CLKS 200 #endif diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h index 68862aaf977e..4c5965ae1df4 100644 --- a/include/dt-bindings/clock/meson8b-clkc.h +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -107,6 +107,7 @@ #define CLKID_PERIPH 126 #define CLKID_AXI 128 #define CLKID_L2_DRAM 130 +#define CLKID_HDMI_SYS 174 #define CLKID_VPU 190 #define CLKID_VDEC_1 196 #define CLKID_VDEC_HCODEC 199 diff --git a/include/dt-bindings/clock/mt6765-clk.h b/include/dt-bindings/clock/mt6765-clk.h new file mode 100644 index 000000000000..eb97e568518e --- /dev/null +++ b/include/dt-bindings/clock/mt6765-clk.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _DT_BINDINGS_CLK_MT6765_H +#define _DT_BINDINGS_CLK_MT6765_H + +/* FIX Clks */ +#define CLK_TOP_CLK26M 0 + +/* APMIXEDSYS */ +#define CLK_APMIXED_ARMPLL_L 0 +#define CLK_APMIXED_ARMPLL 1 +#define CLK_APMIXED_CCIPLL 2 +#define CLK_APMIXED_MAINPLL 3 +#define CLK_APMIXED_MFGPLL 4 +#define CLK_APMIXED_MMPLL 5 +#define CLK_APMIXED_UNIV2PLL 6 +#define CLK_APMIXED_MSDCPLL 7 +#define CLK_APMIXED_APLL1 8 +#define CLK_APMIXED_MPLL 9 +#define CLK_APMIXED_ULPOSC1 10 +#define CLK_APMIXED_ULPOSC2 11 +#define CLK_APMIXED_SSUSB26M 12 +#define CLK_APMIXED_APPLL26M 13 +#define CLK_APMIXED_MIPIC0_26M 14 +#define CLK_APMIXED_MDPLLGP26M 15 +#define CLK_APMIXED_MMSYS_F26M 16 +#define CLK_APMIXED_UFS26M 17 +#define CLK_APMIXED_MIPIC1_26M 18 +#define CLK_APMIXED_MEMPLL26M 19 +#define CLK_APMIXED_CLKSQ_LVPLL_26M 20 +#define CLK_APMIXED_MIPID0_26M 21 +#define CLK_APMIXED_NR_CLK 22 + +/* TOPCKGEN */ +#define CLK_TOP_SYSPLL 0 +#define CLK_TOP_SYSPLL_D2 1 +#define CLK_TOP_SYSPLL1_D2 2 +#define CLK_TOP_SYSPLL1_D4 3 +#define CLK_TOP_SYSPLL1_D8 4 +#define CLK_TOP_SYSPLL1_D16 5 +#define CLK_TOP_SYSPLL_D3 6 +#define CLK_TOP_SYSPLL2_D2 7 +#define CLK_TOP_SYSPLL2_D4 8 +#define CLK_TOP_SYSPLL2_D8 9 +#define CLK_TOP_SYSPLL_D5 10 +#define CLK_TOP_SYSPLL3_D2 11 +#define 
CLK_TOP_SYSPLL3_D4 12 +#define CLK_TOP_SYSPLL_D7 13 +#define CLK_TOP_SYSPLL4_D2 14 +#define CLK_TOP_SYSPLL4_D4 15 +#define CLK_TOP_USB20_192M 16 +#define CLK_TOP_USB20_192M_D4 17 +#define CLK_TOP_USB20_192M_D8 18 +#define CLK_TOP_USB20_192M_D16 19 +#define CLK_TOP_USB20_192M_D32 20 +#define CLK_TOP_UNIVPLL 21 +#define CLK_TOP_UNIVPLL_D2 22 +#define CLK_TOP_UNIVPLL1_D2 23 +#define CLK_TOP_UNIVPLL1_D4 24 +#define CLK_TOP_UNIVPLL_D3 25 +#define CLK_TOP_UNIVPLL2_D2 26 +#define CLK_TOP_UNIVPLL2_D4 27 +#define CLK_TOP_UNIVPLL2_D8 28 +#define CLK_TOP_UNIVPLL2_D32 29 +#define CLK_TOP_UNIVPLL_D5 30 +#define CLK_TOP_UNIVPLL3_D2 31 +#define CLK_TOP_UNIVPLL3_D4 32 +#define CLK_TOP_MMPLL 33 +#define CLK_TOP_MMPLL_D2 34 +#define CLK_TOP_MPLL 35 +#define CLK_TOP_DA_MPLL_104M_DIV 36 +#define CLK_TOP_DA_MPLL_52M_DIV 37 +#define CLK_TOP_MFGPLL 38 +#define CLK_TOP_MSDCPLL 39 +#define CLK_TOP_MSDCPLL_D2 40 +#define CLK_TOP_APLL1 41 +#define CLK_TOP_APLL1_D2 42 +#define CLK_TOP_APLL1_D4 43 +#define CLK_TOP_APLL1_D8 44 +#define CLK_TOP_ULPOSC1 45 +#define CLK_TOP_ULPOSC1_D2 46 +#define CLK_TOP_ULPOSC1_D4 47 +#define CLK_TOP_ULPOSC1_D8 48 +#define CLK_TOP_ULPOSC1_D16 49 +#define CLK_TOP_ULPOSC1_D32 50 +#define CLK_TOP_DMPLL 51 +#define CLK_TOP_F_FRTC 52 +#define CLK_TOP_F_F26M 53 +#define CLK_TOP_AXI 54 +#define CLK_TOP_MM 55 +#define CLK_TOP_SCP 56 +#define CLK_TOP_MFG 57 +#define CLK_TOP_F_FUART 58 +#define CLK_TOP_SPI 59 +#define CLK_TOP_MSDC50_0 60 +#define CLK_TOP_MSDC30_1 61 +#define CLK_TOP_AUDIO 62 +#define CLK_TOP_AUD_1 63 +#define CLK_TOP_AUD_ENGEN1 64 +#define CLK_TOP_F_FDISP_PWM 65 +#define CLK_TOP_SSPM 66 +#define CLK_TOP_DXCC 67 +#define CLK_TOP_I2C 68 +#define CLK_TOP_F_FPWM 69 +#define CLK_TOP_F_FSENINF 70 +#define CLK_TOP_AES_FDE 71 +#define CLK_TOP_F_BIST2FPC 72 +#define CLK_TOP_ARMPLL_DIVIDER_PLL0 73 +#define CLK_TOP_ARMPLL_DIVIDER_PLL1 74 +#define CLK_TOP_ARMPLL_DIVIDER_PLL2 75 +#define CLK_TOP_DA_USB20_48M_DIV 76 +#define CLK_TOP_DA_UNIV_48M_DIV 77 +#define CLK_TOP_APLL12_DIV0 78 +#define CLK_TOP_APLL12_DIV1 79 +#define CLK_TOP_APLL12_DIV2 80 +#define CLK_TOP_APLL12_DIV3 81 +#define CLK_TOP_ARMPLL_DIVIDER_PLL0_EN 82 +#define CLK_TOP_ARMPLL_DIVIDER_PLL1_EN 83 +#define CLK_TOP_ARMPLL_DIVIDER_PLL2_EN 84 +#define CLK_TOP_FMEM_OCC_DRC_EN 85 +#define CLK_TOP_USB20_48M_EN 86 +#define CLK_TOP_UNIVPLL_48M_EN 87 +#define CLK_TOP_MPLL_104M_EN 88 +#define CLK_TOP_MPLL_52M_EN 89 +#define CLK_TOP_F_UFS_MP_SAP_CFG_EN 90 +#define CLK_TOP_F_BIST2FPC_EN 91 +#define CLK_TOP_MD_32K 92 +#define CLK_TOP_MD_26M 93 +#define CLK_TOP_MD2_32K 94 +#define CLK_TOP_MD2_26M 95 +#define CLK_TOP_AXI_SEL 96 +#define CLK_TOP_MEM_SEL 97 +#define CLK_TOP_MM_SEL 98 +#define CLK_TOP_SCP_SEL 99 +#define CLK_TOP_MFG_SEL 100 +#define CLK_TOP_ATB_SEL 101 +#define CLK_TOP_CAMTG_SEL 102 +#define CLK_TOP_CAMTG1_SEL 103 +#define CLK_TOP_CAMTG2_SEL 104 +#define CLK_TOP_CAMTG3_SEL 105 +#define CLK_TOP_UART_SEL 106 +#define CLK_TOP_SPI_SEL 107 +#define CLK_TOP_MSDC50_0_HCLK_SEL 108 +#define CLK_TOP_MSDC50_0_SEL 109 +#define CLK_TOP_MSDC30_1_SEL 110 +#define CLK_TOP_AUDIO_SEL 111 +#define CLK_TOP_AUD_INTBUS_SEL 112 +#define CLK_TOP_AUD_1_SEL 113 +#define CLK_TOP_AUD_ENGEN1_SEL 114 +#define CLK_TOP_DISP_PWM_SEL 115 +#define CLK_TOP_SSPM_SEL 116 +#define CLK_TOP_DXCC_SEL 117 +#define CLK_TOP_USB_TOP_SEL 118 +#define CLK_TOP_SPM_SEL 119 +#define CLK_TOP_I2C_SEL 120 +#define CLK_TOP_PWM_SEL 121 +#define CLK_TOP_SENINF_SEL 122 +#define CLK_TOP_AES_FDE_SEL 123 +#define CLK_TOP_PWRAP_ULPOSC_SEL 124 +#define CLK_TOP_CAMTM_SEL 125 +#define 
CLK_TOP_NR_CLK 126 + +/* INFRACFG */ +#define CLK_IFR_ICUSB 0 +#define CLK_IFR_GCE 1 +#define CLK_IFR_THERM 2 +#define CLK_IFR_I2C_AP 3 +#define CLK_IFR_I2C_CCU 4 +#define CLK_IFR_I2C_SSPM 5 +#define CLK_IFR_I2C_RSV 6 +#define CLK_IFR_PWM_HCLK 7 +#define CLK_IFR_PWM1 8 +#define CLK_IFR_PWM2 9 +#define CLK_IFR_PWM3 10 +#define CLK_IFR_PWM4 11 +#define CLK_IFR_PWM5 12 +#define CLK_IFR_PWM 13 +#define CLK_IFR_UART0 14 +#define CLK_IFR_UART1 15 +#define CLK_IFR_GCE_26M 16 +#define CLK_IFR_CQ_DMA_FPC 17 +#define CLK_IFR_BTIF 18 +#define CLK_IFR_SPI0 19 +#define CLK_IFR_MSDC0 20 +#define CLK_IFR_MSDC1 21 +#define CLK_IFR_TRNG 22 +#define CLK_IFR_AUXADC 23 +#define CLK_IFR_CCIF1_AP 24 +#define CLK_IFR_CCIF1_MD 25 +#define CLK_IFR_AUXADC_MD 26 +#define CLK_IFR_AP_DMA 27 +#define CLK_IFR_DEVICE_APC 28 +#define CLK_IFR_CCIF_AP 29 +#define CLK_IFR_AUDIO 30 +#define CLK_IFR_CCIF_MD 31 +#define CLK_IFR_RG_PWM_FBCLK6 32 +#define CLK_IFR_DISP_PWM 33 +#define CLK_IFR_CLDMA_BCLK 34 +#define CLK_IFR_AUDIO_26M_BCLK 35 +#define CLK_IFR_SPI1 36 +#define CLK_IFR_I2C4 37 +#define CLK_IFR_SPI2 38 +#define CLK_IFR_SPI3 39 +#define CLK_IFR_I2C5 40 +#define CLK_IFR_I2C5_ARBITER 41 +#define CLK_IFR_I2C5_IMM 42 +#define CLK_IFR_I2C1_ARBITER 43 +#define CLK_IFR_I2C1_IMM 44 +#define CLK_IFR_I2C2_ARBITER 45 +#define CLK_IFR_I2C2_IMM 46 +#define CLK_IFR_SPI4 47 +#define CLK_IFR_SPI5 48 +#define CLK_IFR_CQ_DMA 49 +#define CLK_IFR_FAES_FDE 50 +#define CLK_IFR_MSDC0_SELF 51 +#define CLK_IFR_MSDC1_SELF 52 +#define CLK_IFR_I2C6 53 +#define CLK_IFR_AP_MSDC0 54 +#define CLK_IFR_MD_MSDC0 55 +#define CLK_IFR_MSDC0_SRC 56 +#define CLK_IFR_MSDC1_SRC 57 +#define CLK_IFR_AES_TOP0_BCLK 58 +#define CLK_IFR_MCU_PM_BCLK 59 +#define CLK_IFR_CCIF2_AP 60 +#define CLK_IFR_CCIF2_MD 61 +#define CLK_IFR_CCIF3_AP 62 +#define CLK_IFR_CCIF3_MD 63 +#define CLK_IFR_NR_CLK 64 + +/* AUDIO */ +#define CLK_AUDIO_AFE 0 +#define CLK_AUDIO_22M 1 +#define CLK_AUDIO_APLL_TUNER 2 +#define CLK_AUDIO_ADC 3 +#define CLK_AUDIO_DAC 4 +#define CLK_AUDIO_DAC_PREDIS 5 +#define CLK_AUDIO_TML 6 +#define CLK_AUDIO_I2S1_BCLK 7 +#define CLK_AUDIO_I2S2_BCLK 8 +#define CLK_AUDIO_I2S3_BCLK 9 +#define CLK_AUDIO_I2S4_BCLK 10 +#define CLK_AUDIO_NR_CLK 11 + +/* MIPI_RX_ANA_CSI0A */ + +#define CLK_MIPI0A_CSR_CSI_EN_0A 0 +#define CLK_MIPI0A_NR_CLK 1 + +/* MMSYS_CONFIG */ + +#define CLK_MM_MDP_RDMA0 0 +#define CLK_MM_MDP_CCORR0 1 +#define CLK_MM_MDP_RSZ0 2 +#define CLK_MM_MDP_RSZ1 3 +#define CLK_MM_MDP_TDSHP0 4 +#define CLK_MM_MDP_WROT0 5 +#define CLK_MM_MDP_WDMA0 6 +#define CLK_MM_DISP_OVL0 7 +#define CLK_MM_DISP_OVL0_2L 8 +#define CLK_MM_DISP_RSZ0 9 +#define CLK_MM_DISP_RDMA0 10 +#define CLK_MM_DISP_WDMA0 11 +#define CLK_MM_DISP_COLOR0 12 +#define CLK_MM_DISP_CCORR0 13 +#define CLK_MM_DISP_AAL0 14 +#define CLK_MM_DISP_GAMMA0 15 +#define CLK_MM_DISP_DITHER0 16 +#define CLK_MM_DSI0 17 +#define CLK_MM_FAKE_ENG 18 +#define CLK_MM_SMI_COMMON 19 +#define CLK_MM_SMI_LARB0 20 +#define CLK_MM_SMI_COMM0 21 +#define CLK_MM_SMI_COMM1 22 +#define CLK_MM_CAM_MDP 23 +#define CLK_MM_SMI_IMG 24 +#define CLK_MM_SMI_CAM 25 +#define CLK_MM_IMG_DL_RELAY 26 +#define CLK_MM_IMG_DL_ASYNC_TOP 27 +#define CLK_MM_DIG_DSI 28 +#define CLK_MM_F26M_HRTWT 29 +#define CLK_MM_NR_CLK 30 + +/* IMGSYS */ + +#define CLK_IMG_LARB2 0 +#define CLK_IMG_DIP 1 +#define CLK_IMG_FDVT 2 +#define CLK_IMG_DPE 3 +#define CLK_IMG_RSC 4 +#define CLK_IMG_NR_CLK 5 + +/* VENCSYS */ + +#define CLK_VENC_SET0_LARB 0 +#define CLK_VENC_SET1_VENC 1 +#define CLK_VENC_SET2_JPGENC 2 +#define CLK_VENC_SET3_VDEC 3 +#define CLK_VENC_NR_CLK 4 + 
+/* CAMSYS */ + +#define CLK_CAM_LARB3 0 +#define CLK_CAM_DFP_VAD 1 +#define CLK_CAM 2 +#define CLK_CAMTG 3 +#define CLK_CAM_SENINF 4 +#define CLK_CAMSV0 5 +#define CLK_CAMSV1 6 +#define CLK_CAMSV2 7 +#define CLK_CAM_CCU 8 +#define CLK_CAM_NR_CLK 9 + +#endif /* _DT_BINDINGS_CLK_MT6765_H */ diff --git a/include/dt-bindings/clock/qcom,gcc-msm8939.h b/include/dt-bindings/clock/qcom,gcc-msm8939.h new file mode 100644 index 000000000000..0634467c4ce5 --- /dev/null +++ b/include/dt-bindings/clock/qcom,gcc-msm8939.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2020 Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_MSM_GCC_8939_H +#define _DT_BINDINGS_CLK_MSM_GCC_8939_H + +#define GPLL0 0 +#define GPLL0_VOTE 1 +#define BIMC_PLL 2 +#define BIMC_PLL_VOTE 3 +#define GPLL1 4 +#define GPLL1_VOTE 5 +#define GPLL2 6 +#define GPLL2_VOTE 7 +#define PCNOC_BFDCD_CLK_SRC 8 +#define SYSTEM_NOC_BFDCD_CLK_SRC 9 +#define CAMSS_AHB_CLK_SRC 10 +#define APSS_AHB_CLK_SRC 11 +#define CSI0_CLK_SRC 12 +#define CSI1_CLK_SRC 13 +#define GFX3D_CLK_SRC 14 +#define VFE0_CLK_SRC 15 +#define BLSP1_QUP1_I2C_APPS_CLK_SRC 16 +#define BLSP1_QUP1_SPI_APPS_CLK_SRC 17 +#define BLSP1_QUP2_I2C_APPS_CLK_SRC 18 +#define BLSP1_QUP2_SPI_APPS_CLK_SRC 19 +#define BLSP1_QUP3_I2C_APPS_CLK_SRC 20 +#define BLSP1_QUP3_SPI_APPS_CLK_SRC 21 +#define BLSP1_QUP4_I2C_APPS_CLK_SRC 22 +#define BLSP1_QUP4_SPI_APPS_CLK_SRC 23 +#define BLSP1_QUP5_I2C_APPS_CLK_SRC 24 +#define BLSP1_QUP5_SPI_APPS_CLK_SRC 25 +#define BLSP1_QUP6_I2C_APPS_CLK_SRC 26 +#define BLSP1_QUP6_SPI_APPS_CLK_SRC 27 +#define BLSP1_UART1_APPS_CLK_SRC 28 +#define BLSP1_UART2_APPS_CLK_SRC 29 +#define CCI_CLK_SRC 30 +#define CAMSS_GP0_CLK_SRC 31 +#define CAMSS_GP1_CLK_SRC 32 +#define JPEG0_CLK_SRC 33 +#define MCLK0_CLK_SRC 34 +#define MCLK1_CLK_SRC 35 +#define CSI0PHYTIMER_CLK_SRC 36 +#define CSI1PHYTIMER_CLK_SRC 37 +#define CPP_CLK_SRC 38 +#define CRYPTO_CLK_SRC 39 +#define GP1_CLK_SRC 40 +#define GP2_CLK_SRC 41 +#define GP3_CLK_SRC 42 +#define BYTE0_CLK_SRC 43 +#define ESC0_CLK_SRC 44 +#define MDP_CLK_SRC 45 +#define PCLK0_CLK_SRC 46 +#define VSYNC_CLK_SRC 47 +#define PDM2_CLK_SRC 48 +#define SDCC1_APPS_CLK_SRC 49 +#define SDCC2_APPS_CLK_SRC 50 +#define APSS_TCU_CLK_SRC 51 +#define USB_HS_SYSTEM_CLK_SRC 52 +#define VCODEC0_CLK_SRC 53 +#define GCC_BLSP1_AHB_CLK 54 +#define GCC_BLSP1_SLEEP_CLK 55 +#define GCC_BLSP1_QUP1_I2C_APPS_CLK 56 +#define GCC_BLSP1_QUP1_SPI_APPS_CLK 57 +#define GCC_BLSP1_QUP2_I2C_APPS_CLK 58 +#define GCC_BLSP1_QUP2_SPI_APPS_CLK 59 +#define GCC_BLSP1_QUP3_I2C_APPS_CLK 60 +#define GCC_BLSP1_QUP3_SPI_APPS_CLK 61 +#define GCC_BLSP1_QUP4_I2C_APPS_CLK 62 +#define GCC_BLSP1_QUP4_SPI_APPS_CLK 63 +#define GCC_BLSP1_QUP5_I2C_APPS_CLK 64 +#define GCC_BLSP1_QUP5_SPI_APPS_CLK 65 +#define GCC_BLSP1_QUP6_I2C_APPS_CLK 66 +#define GCC_BLSP1_QUP6_SPI_APPS_CLK 67 +#define GCC_BLSP1_UART1_APPS_CLK 68 +#define GCC_BLSP1_UART2_APPS_CLK 69 +#define GCC_BOOT_ROM_AHB_CLK 70 +#define GCC_CAMSS_CCI_AHB_CLK 71 +#define GCC_CAMSS_CCI_CLK 72 +#define GCC_CAMSS_CSI0_AHB_CLK 73 +#define GCC_CAMSS_CSI0_CLK 74 +#define GCC_CAMSS_CSI0PHY_CLK 75 +#define GCC_CAMSS_CSI0PIX_CLK 76 +#define GCC_CAMSS_CSI0RDI_CLK 77 +#define GCC_CAMSS_CSI1_AHB_CLK 78 +#define GCC_CAMSS_CSI1_CLK 79 +#define GCC_CAMSS_CSI1PHY_CLK 80 +#define GCC_CAMSS_CSI1PIX_CLK 81 +#define GCC_CAMSS_CSI1RDI_CLK 82 +#define GCC_CAMSS_CSI_VFE0_CLK 83 +#define GCC_CAMSS_GP0_CLK 84 +#define GCC_CAMSS_GP1_CLK 85 +#define GCC_CAMSS_ISPIF_AHB_CLK 86 +#define GCC_CAMSS_JPEG0_CLK 87 +#define GCC_CAMSS_JPEG_AHB_CLK 88 
+#define GCC_CAMSS_JPEG_AXI_CLK 89 +#define GCC_CAMSS_MCLK0_CLK 90 +#define GCC_CAMSS_MCLK1_CLK 91 +#define GCC_CAMSS_MICRO_AHB_CLK 92 +#define GCC_CAMSS_CSI0PHYTIMER_CLK 93 +#define GCC_CAMSS_CSI1PHYTIMER_CLK 94 +#define GCC_CAMSS_AHB_CLK 95 +#define GCC_CAMSS_TOP_AHB_CLK 96 +#define GCC_CAMSS_CPP_AHB_CLK 97 +#define GCC_CAMSS_CPP_CLK 98 +#define GCC_CAMSS_VFE0_CLK 99 +#define GCC_CAMSS_VFE_AHB_CLK 100 +#define GCC_CAMSS_VFE_AXI_CLK 101 +#define GCC_CRYPTO_AHB_CLK 102 +#define GCC_CRYPTO_AXI_CLK 103 +#define GCC_CRYPTO_CLK 104 +#define GCC_OXILI_GMEM_CLK 105 +#define GCC_GP1_CLK 106 +#define GCC_GP2_CLK 107 +#define GCC_GP3_CLK 108 +#define GCC_MDSS_AHB_CLK 109 +#define GCC_MDSS_AXI_CLK 110 +#define GCC_MDSS_BYTE0_CLK 111 +#define GCC_MDSS_ESC0_CLK 112 +#define GCC_MDSS_MDP_CLK 113 +#define GCC_MDSS_PCLK0_CLK 114 +#define GCC_MDSS_VSYNC_CLK 115 +#define GCC_MSS_CFG_AHB_CLK 116 +#define GCC_OXILI_AHB_CLK 117 +#define GCC_OXILI_GFX3D_CLK 118 +#define GCC_PDM2_CLK 119 +#define GCC_PDM_AHB_CLK 120 +#define GCC_PRNG_AHB_CLK 121 +#define GCC_SDCC1_AHB_CLK 122 +#define GCC_SDCC1_APPS_CLK 123 +#define GCC_SDCC2_AHB_CLK 124 +#define GCC_SDCC2_APPS_CLK 125 +#define GCC_GTCU_AHB_CLK 126 +#define GCC_JPEG_TBU_CLK 127 +#define GCC_MDP_TBU_CLK 128 +#define GCC_SMMU_CFG_CLK 129 +#define GCC_VENUS_TBU_CLK 130 +#define GCC_VFE_TBU_CLK 131 +#define GCC_USB2A_PHY_SLEEP_CLK 132 +#define GCC_USB_HS_AHB_CLK 133 +#define GCC_USB_HS_SYSTEM_CLK 134 +#define GCC_VENUS0_AHB_CLK 135 +#define GCC_VENUS0_AXI_CLK 136 +#define GCC_VENUS0_VCODEC0_CLK 137 +#define BIMC_DDR_CLK_SRC 138 +#define GCC_APSS_TCU_CLK 139 +#define GCC_GFX_TCU_CLK 140 +#define BIMC_GPU_CLK_SRC 141 +#define GCC_BIMC_GFX_CLK 142 +#define GCC_BIMC_GPU_CLK 143 +#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144 +#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145 +#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146 +#define ULTAUDIO_XO_CLK_SRC 147 +#define ULTAUDIO_AHBFABRIC_CLK_SRC 148 +#define CODEC_DIGCODEC_CLK_SRC 149 +#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150 +#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151 +#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152 +#define GCC_ULTAUDIO_STC_XO_CLK 153 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154 +#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155 +#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156 +#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157 +#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158 +#define GCC_CODEC_DIGCODEC_CLK 159 +#define GCC_MSS_Q6_BIMC_AXI_CLK 160 +#define GPLL3 161 +#define GPLL3_VOTE 162 +#define GPLL4 163 +#define GPLL4_VOTE 164 +#define GPLL5 165 +#define GPLL5_VOTE 166 +#define GPLL6 167 +#define GPLL6_VOTE 168 +#define BYTE1_CLK_SRC 169 +#define GCC_MDSS_BYTE1_CLK 170 +#define ESC1_CLK_SRC 171 +#define GCC_MDSS_ESC1_CLK 172 +#define PCLK1_CLK_SRC 173 +#define GCC_MDSS_PCLK1_CLK 174 +#define GCC_GFX_TBU_CLK 175 +#define GCC_CPP_TBU_CLK 176 +#define GCC_MDP_RT_TBU_CLK 177 +#define USB_FS_SYSTEM_CLK_SRC 178 +#define USB_FS_IC_CLK_SRC 179 +#define GCC_USB_FS_AHB_CLK 180 +#define GCC_USB_FS_IC_CLK 181 +#define GCC_USB_FS_SYSTEM_CLK 182 +#define GCC_VENUS0_CORE0_VCODEC0_CLK 183 +#define GCC_VENUS0_CORE1_VCODEC0_CLK 184 +#define GCC_OXILI_TIMER_CLK 185 + +/* Indexes for GDSCs */ +#define BIMC_GDSC 0 +#define VENUS_GDSC 1 +#define MDSS_GDSC 2 +#define JPEG_GDSC 3 +#define VFE_GDSC 4 +#define OXILI_GDSC 5 +#define VENUS_CORE0_GDSC 6 +#define VENUS_CORE1_GDSC 7 + +#endif diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h index 63e02dc32a0b..6a73a174f049 100644 --- 
a/include/dt-bindings/clock/qcom,gcc-msm8998.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h @@ -183,6 +183,7 @@ #define GCC_MSS_SNOC_AXI_CLK 174 #define GCC_MSS_MNOC_BIMC_AXI_CLK 175 #define GCC_BIMC_GFX_CLK 176 +#define UFS_UNIPRO_CORE_CLK_SRC 177 #define PCIE_0_GDSC 0 #define UFS_GDSC 1 diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h index 1258fd05db68..992b67b7e5e4 100644 --- a/include/dt-bindings/clock/qcom,gcc-sc7180.h +++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h @@ -137,6 +137,7 @@ #define GCC_MSS_NAV_AXI_CLK 127 #define GCC_MSS_Q6_MEMNOC_AXI_CLK 128 #define GCC_MSS_SNOC_AXI_CLK 129 +#define GCC_SEC_CTRL_CLK_SRC 130 /* GCC resets */ #define GCC_QUSB2PHY_PRIM_BCR 0 diff --git a/include/dt-bindings/clock/r8a7742-cpg-mssr.h b/include/dt-bindings/clock/r8a7742-cpg-mssr.h new file mode 100644 index 000000000000..e68191c24881 --- /dev/null +++ b/include/dt-bindings/clock/r8a7742-cpg-mssr.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0+ + * + * Copyright (C) 2020 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__ +#define __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* r8a7742 CPG Core Clocks */ +#define R8A7742_CLK_Z 0 +#define R8A7742_CLK_Z2 1 +#define R8A7742_CLK_ZG 2 +#define R8A7742_CLK_ZTR 3 +#define R8A7742_CLK_ZTRD2 4 +#define R8A7742_CLK_ZT 5 +#define R8A7742_CLK_ZX 6 +#define R8A7742_CLK_ZS 7 +#define R8A7742_CLK_HP 8 +#define R8A7742_CLK_B 9 +#define R8A7742_CLK_LB 10 +#define R8A7742_CLK_P 11 +#define R8A7742_CLK_CL 12 +#define R8A7742_CLK_M2 13 +#define R8A7742_CLK_ZB3 14 +#define R8A7742_CLK_ZB3D2 15 +#define R8A7742_CLK_DDR 16 +#define R8A7742_CLK_SDH 17 +#define R8A7742_CLK_SD0 18 +#define R8A7742_CLK_SD1 19 +#define R8A7742_CLK_SD2 20 +#define R8A7742_CLK_SD3 21 +#define R8A7742_CLK_MMC0 22 +#define R8A7742_CLK_MMC1 23 +#define R8A7742_CLK_MP 24 +#define R8A7742_CLK_QSPI 25 +#define R8A7742_CLK_CP 26 +#define R8A7742_CLK_RCAN 27 +#define R8A7742_CLK_R 28 +#define R8A7742_CLK_OSC 29 + +#endif /* __DT_BINDINGS_CLOCK_R8A7742_CPG_MSSR_H__ */ diff --git a/include/dt-bindings/clock/sprd,sc9863a-clk.h b/include/dt-bindings/clock/sprd,sc9863a-clk.h index 901ba59676c2..4e030421641f 100644 --- a/include/dt-bindings/clock/sprd,sc9863a-clk.h +++ b/include/dt-bindings/clock/sprd,sc9863a-clk.h @@ -308,6 +308,11 @@ #define CLK_MCPHY_CFG_EB 14 #define CLK_MM_GATE_NUM (CLK_MCPHY_CFG_EB + 1) +#define CLK_MIPI_CSI 0 +#define CLK_MIPI_CSI_S 1 +#define CLK_MIPI_CSI_M 2 +#define CLK_MM_CLK_NUM (CLK_MIPI_CSI_M + 1) + #define CLK_SIM0_EB 0 #define CLK_IIS0_EB 1 #define CLK_IIS1_EB 2 diff --git a/include/dt-bindings/clock/tegra114-car.h b/include/dt-bindings/clock/tegra114-car.h index df59aaf5bf34..a93426f008ac 100644 --- a/include/dt-bindings/clock/tegra114-car.h +++ b/include/dt-bindings/clock/tegra114-car.h @@ -272,10 +272,10 @@ #define TEGRA114_CLK_AUDIO3 242 #define TEGRA114_CLK_AUDIO4 243 #define TEGRA114_CLK_SPDIF 244 -#define TEGRA114_CLK_CLK_OUT_1 245 -#define TEGRA114_CLK_CLK_OUT_2 246 -#define TEGRA114_CLK_CLK_OUT_3 247 -#define TEGRA114_CLK_BLINK 248 +/* 245 */ +/* 246 */ +/* 247 */ +/* 248 */ #define TEGRA114_CLK_OSC 249 /* 250 */ /* 251 */ @@ -335,9 +335,9 @@ #define TEGRA114_CLK_AUDIO3_MUX 303 #define TEGRA114_CLK_AUDIO4_MUX 304 #define TEGRA114_CLK_SPDIF_MUX 305 -#define TEGRA114_CLK_CLK_OUT_1_MUX 306 -#define TEGRA114_CLK_CLK_OUT_2_MUX 307 -#define TEGRA114_CLK_CLK_OUT_3_MUX 308 +/* 306 */ +/* 307 */ +/* 308 */ #define 
TEGRA114_CLK_DSIA_MUX 309 #define TEGRA114_CLK_DSIB_MUX 310 #define TEGRA114_CLK_XUSB_SS_DIV2 311 diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h index 2a9acd592bff..c59f9de01b4d 100644 --- a/include/dt-bindings/clock/tegra124-car-common.h +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -271,10 +271,10 @@ #define TEGRA124_CLK_AUDIO3 242 #define TEGRA124_CLK_AUDIO4 243 #define TEGRA124_CLK_SPDIF 244 -#define TEGRA124_CLK_CLK_OUT_1 245 -#define TEGRA124_CLK_CLK_OUT_2 246 -#define TEGRA124_CLK_CLK_OUT_3 247 -#define TEGRA124_CLK_BLINK 248 +/* 245 */ +/* 246 */ +/* 247 */ +/* 248 */ #define TEGRA124_CLK_OSC 249 /* 250 */ /* 251 */ @@ -334,9 +334,9 @@ #define TEGRA124_CLK_AUDIO3_MUX 303 #define TEGRA124_CLK_AUDIO4_MUX 304 #define TEGRA124_CLK_SPDIF_MUX 305 -#define TEGRA124_CLK_CLK_OUT_1_MUX 306 -#define TEGRA124_CLK_CLK_OUT_2_MUX 307 -#define TEGRA124_CLK_CLK_OUT_3_MUX 308 +/* 306 */ +/* 307 */ +/* 308 */ /* 309 */ /* 310 */ #define TEGRA124_CLK_SOR0_LVDS 311 /* deprecated */ diff --git a/include/dt-bindings/clock/tegra20-car.h b/include/dt-bindings/clock/tegra20-car.h index b21a0eb32921..fe541f627965 100644 --- a/include/dt-bindings/clock/tegra20-car.h +++ b/include/dt-bindings/clock/tegra20-car.h @@ -131,7 +131,7 @@ #define TEGRA20_CLK_CCLK 108 #define TEGRA20_CLK_HCLK 109 #define TEGRA20_CLK_PCLK 110 -#define TEGRA20_CLK_BLINK 111 +/* 111 */ #define TEGRA20_CLK_PLL_A 112 #define TEGRA20_CLK_PLL_A_OUT0 113 #define TEGRA20_CLK_PLL_C 114 diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h index 7a8f10b9a66d..ab8b8a737a0a 100644 --- a/include/dt-bindings/clock/tegra210-car.h +++ b/include/dt-bindings/clock/tegra210-car.h @@ -306,10 +306,10 @@ #define TEGRA210_CLK_AUDIO3 274 #define TEGRA210_CLK_AUDIO4 275 #define TEGRA210_CLK_SPDIF 276 -#define TEGRA210_CLK_CLK_OUT_1 277 -#define TEGRA210_CLK_CLK_OUT_2 278 -#define TEGRA210_CLK_CLK_OUT_3 279 -#define TEGRA210_CLK_BLINK 280 +/* 277 */ +/* 278 */ +/* 279 */ +/* 280 */ #define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */ #define TEGRA210_CLK_SOR0_OUT 281 #define TEGRA210_CLK_SOR1_OUT 282 @@ -351,14 +351,14 @@ #define TEGRA210_CLK_PLL_P_OUT_XUSB 317 #define TEGRA210_CLK_XUSB_SSP_SRC 318 #define TEGRA210_CLK_PLL_RE_OUT1 319 -/* 320 */ -/* 321 */ +#define TEGRA210_CLK_PLL_MB_UD 320 +#define TEGRA210_CLK_PLL_P_UD 321 #define TEGRA210_CLK_ISP 322 #define TEGRA210_CLK_PLL_A_OUT_ADSP 323 #define TEGRA210_CLK_PLL_A_OUT0_OUT_ADSP 324 /* 325 */ #define TEGRA210_CLK_OSC 326 -/* 327 */ +#define TEGRA210_CLK_CSI_TPG 327 /* 328 */ /* 329 */ /* 330 */ @@ -388,9 +388,9 @@ #define TEGRA210_CLK_AUDIO3_MUX 353 #define TEGRA210_CLK_AUDIO4_MUX 354 #define TEGRA210_CLK_SPDIF_MUX 355 -#define TEGRA210_CLK_CLK_OUT_1_MUX 356 -#define TEGRA210_CLK_CLK_OUT_2_MUX 357 -#define TEGRA210_CLK_CLK_OUT_3_MUX 358 +/* 356 */ +/* 357 */ +/* 358 */ #define TEGRA210_CLK_DSIA_MUX 359 #define TEGRA210_CLK_DSIB_MUX 360 /* 361 */ diff --git a/include/dt-bindings/clock/tegra30-car.h b/include/dt-bindings/clock/tegra30-car.h index 7b542c10fc27..f193663e6f28 100644 --- a/include/dt-bindings/clock/tegra30-car.h +++ b/include/dt-bindings/clock/tegra30-car.h @@ -232,11 +232,11 @@ #define TEGRA30_CLK_AUDIO3 204 #define TEGRA30_CLK_AUDIO4 205 #define TEGRA30_CLK_SPDIF 206 -#define TEGRA30_CLK_CLK_OUT_1 207 /* (extern1) */ -#define TEGRA30_CLK_CLK_OUT_2 208 /* (extern2) */ -#define TEGRA30_CLK_CLK_OUT_3 209 /* (extern3) */ +/* 207 */ +/* 208 */ +/* 209 */ #define TEGRA30_CLK_SCLK 210 
-#define TEGRA30_CLK_BLINK 211 +/* 211 */ #define TEGRA30_CLK_CCLK_G 212 #define TEGRA30_CLK_CCLK_LP 213 #define TEGRA30_CLK_TWD 214 @@ -262,9 +262,9 @@ /* 297 */ /* 298 */ /* 299 */ -#define TEGRA30_CLK_CLK_OUT_1_MUX 300 -#define TEGRA30_CLK_CLK_OUT_2_MUX 301 -#define TEGRA30_CLK_CLK_OUT_3_MUX 302 +/* 300 */ +/* 301 */ +/* 302 */ #define TEGRA30_CLK_AUDIO0_MUX 303 #define TEGRA30_CLK_AUDIO1_MUX 304 #define TEGRA30_CLK_AUDIO2_MUX 305 diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h index bbaebaf7adb9..0367c8c02e16 100644 --- a/include/dt-bindings/clock/x1000-cgu.h +++ b/include/dt-bindings/clock/x1000-cgu.h @@ -12,33 +12,41 @@ #ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__ #define __DT_BINDINGS_CLOCK_X1000_CGU_H__ -#define X1000_CLK_EXCLK 0 -#define X1000_CLK_RTCLK 1 -#define X1000_CLK_APLL 2 -#define X1000_CLK_MPLL 3 -#define X1000_CLK_SCLKA 4 -#define X1000_CLK_CPUMUX 5 -#define X1000_CLK_CPU 6 -#define X1000_CLK_L2CACHE 7 -#define X1000_CLK_AHB0 8 -#define X1000_CLK_AHB2PMUX 9 -#define X1000_CLK_AHB2 10 -#define X1000_CLK_PCLK 11 -#define X1000_CLK_DDR 12 -#define X1000_CLK_MAC 13 -#define X1000_CLK_MSCMUX 14 -#define X1000_CLK_MSC0 15 -#define X1000_CLK_MSC1 16 -#define X1000_CLK_SSIPLL 17 -#define X1000_CLK_SSIMUX 18 -#define X1000_CLK_SFC 19 -#define X1000_CLK_I2C0 20 -#define X1000_CLK_I2C1 21 -#define X1000_CLK_I2C2 22 -#define X1000_CLK_UART0 23 -#define X1000_CLK_UART1 24 -#define X1000_CLK_UART2 25 -#define X1000_CLK_SSI 26 -#define X1000_CLK_PDMA 27 +#define X1000_CLK_EXCLK 0 +#define X1000_CLK_RTCLK 1 +#define X1000_CLK_APLL 2 +#define X1000_CLK_MPLL 3 +#define X1000_CLK_OTGPHY 4 +#define X1000_CLK_SCLKA 5 +#define X1000_CLK_CPUMUX 6 +#define X1000_CLK_CPU 7 +#define X1000_CLK_L2CACHE 8 +#define X1000_CLK_AHB0 9 +#define X1000_CLK_AHB2PMUX 10 +#define X1000_CLK_AHB2 11 +#define X1000_CLK_PCLK 12 +#define X1000_CLK_DDR 13 +#define X1000_CLK_MAC 14 +#define X1000_CLK_LCD 15 +#define X1000_CLK_MSCMUX 16 +#define X1000_CLK_MSC0 17 +#define X1000_CLK_MSC1 18 +#define X1000_CLK_OTG 19 +#define X1000_CLK_SSIPLL 20 +#define X1000_CLK_SSIPLL_DIV2 21 +#define X1000_CLK_SSIMUX 22 +#define X1000_CLK_EMC 23 +#define X1000_CLK_EFUSE 24 +#define X1000_CLK_SFC 25 +#define X1000_CLK_I2C0 26 +#define X1000_CLK_I2C1 27 +#define X1000_CLK_I2C2 28 +#define X1000_CLK_UART0 29 +#define X1000_CLK_UART1 30 +#define X1000_CLK_UART2 31 +#define X1000_CLK_TCU 32 +#define X1000_CLK_SSI 33 +#define X1000_CLK_OST 34 +#define X1000_CLK_PDMA 35 #endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */ diff --git a/include/dt-bindings/clock/x1830-cgu.h b/include/dt-bindings/clock/x1830-cgu.h new file mode 100644 index 000000000000..801e1d09c881 --- /dev/null +++ b/include/dt-bindings/clock/x1830-cgu.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides clock numbers for the ingenic,x1830-cgu DT binding. 
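+ *
+ * A consumer references these indices from a device tree; for example
+ * (a sketch, assuming a &cgu node label for the CGU):
+ *
+ *	clocks = <&cgu X1830_CLK_UART0>;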
+ * + * They are roughly ordered as: + * - external clocks + * - PLLs + * - muxes/dividers in the order they appear in the x1830 programmers manual + * - gates in order of their bit in the CLKGR* registers + */ + +#ifndef __DT_BINDINGS_CLOCK_X1830_CGU_H__ +#define __DT_BINDINGS_CLOCK_X1830_CGU_H__ + +#define X1830_CLK_EXCLK 0 +#define X1830_CLK_RTCLK 1 +#define X1830_CLK_APLL 2 +#define X1830_CLK_MPLL 3 +#define X1830_CLK_EPLL 4 +#define X1830_CLK_VPLL 5 +#define X1830_CLK_OTGPHY 6 +#define X1830_CLK_SCLKA 7 +#define X1830_CLK_CPUMUX 8 +#define X1830_CLK_CPU 9 +#define X1830_CLK_L2CACHE 10 +#define X1830_CLK_AHB0 11 +#define X1830_CLK_AHB2PMUX 12 +#define X1830_CLK_AHB2 13 +#define X1830_CLK_PCLK 14 +#define X1830_CLK_DDR 15 +#define X1830_CLK_MAC 16 +#define X1830_CLK_LCD 17 +#define X1830_CLK_MSCMUX 18 +#define X1830_CLK_MSC0 19 +#define X1830_CLK_MSC1 20 +#define X1830_CLK_SSIPLL 21 +#define X1830_CLK_SSIPLL_DIV2 22 +#define X1830_CLK_SSIMUX 23 +#define X1830_CLK_EMC 24 +#define X1830_CLK_EFUSE 25 +#define X1830_CLK_OTG 26 +#define X1830_CLK_SSI0 27 +#define X1830_CLK_SMB0 28 +#define X1830_CLK_SMB1 29 +#define X1830_CLK_SMB2 30 +#define X1830_CLK_UART0 31 +#define X1830_CLK_UART1 32 +#define X1830_CLK_SSI1 33 +#define X1830_CLK_SFC 34 +#define X1830_CLK_PDMA 35 +#define X1830_CLK_TCU 36 +#define X1830_CLK_DTRNG 37 +#define X1830_CLK_OST 38 + +#endif /* __DT_BINDINGS_CLOCK_X1830_CGU_H__ */ diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h index 4e61f6485097..54278d5c1856 100644 --- a/include/dt-bindings/firmware/imx/rsrc.h +++ b/include/dt-bindings/firmware/imx/rsrc.h @@ -547,4 +547,88 @@ #define IMX_SC_R_ATTESTATION 545 #define IMX_SC_R_LAST 546 +/* + * Defines for SC PM CLK + */ +#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */ +#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */ +#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */ +#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */ +#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */ +#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */ +#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */ +#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */ +#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */ +#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */ +#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */ +#define IMX_SC_PM_CLK_PLL 4 /* PLL */ +#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */ + +/* + * Defines for SC CONTROL + */ +#define IMX_SC_C_TEMP 0 +#define IMX_SC_C_TEMP_HI 1 +#define IMX_SC_C_TEMP_LOW 2 +#define IMX_SC_C_PXL_LINK_MST1_ADDR 3 +#define IMX_SC_C_PXL_LINK_MST2_ADDR 4 +#define IMX_SC_C_PXL_LINK_MST_ENB 5 +#define IMX_SC_C_PXL_LINK_MST1_ENB 6 +#define IMX_SC_C_PXL_LINK_MST2_ENB 7 +#define IMX_SC_C_PXL_LINK_SLV1_ADDR 8 +#define IMX_SC_C_PXL_LINK_SLV2_ADDR 9 +#define IMX_SC_C_PXL_LINK_MST_VLD 10 +#define IMX_SC_C_PXL_LINK_MST1_VLD 11 +#define IMX_SC_C_PXL_LINK_MST2_VLD 12 +#define IMX_SC_C_SINGLE_MODE 13 +#define IMX_SC_C_ID 14 +#define IMX_SC_C_PXL_CLK_POLARITY 15 +#define IMX_SC_C_LINESTATE 16 +#define IMX_SC_C_PCIE_G_RST 17 +#define IMX_SC_C_PCIE_BUTTON_RST 18 +#define IMX_SC_C_PCIE_PERST 19 +#define IMX_SC_C_PHY_RESET 20 +#define IMX_SC_C_PXL_LINK_RATE_CORRECTION 21 +#define IMX_SC_C_PANIC 22 +#define IMX_SC_C_PRIORITY_GROUP 23 +#define IMX_SC_C_TXCLK 24 +#define IMX_SC_C_CLKDIV 25 +#define IMX_SC_C_DISABLE_50 26 +#define IMX_SC_C_DISABLE_125 27 +#define IMX_SC_C_SEL_125 28 +#define IMX_SC_C_MODE 29 +#define IMX_SC_C_SYNC_CTRL0 30 +#define IMX_SC_C_KACHUNK_CNT 31 +#define IMX_SC_C_KACHUNK_SEL 32 +#define 
IMX_SC_C_SYNC_CTRL1 33 +#define IMX_SC_C_DPI_RESET 34 +#define IMX_SC_C_MIPI_RESET 35 +#define IMX_SC_C_DUAL_MODE 36 +#define IMX_SC_C_VOLTAGE 37 +#define IMX_SC_C_PXL_LINK_SEL 38 +#define IMX_SC_C_OFS_SEL 39 +#define IMX_SC_C_OFS_AUDIO 40 +#define IMX_SC_C_OFS_PERIPH 41 +#define IMX_SC_C_OFS_IRQ 42 +#define IMX_SC_C_RST0 43 +#define IMX_SC_C_RST1 44 +#define IMX_SC_C_SEL0 45 +#define IMX_SC_C_CALIB0 46 +#define IMX_SC_C_CALIB1 47 +#define IMX_SC_C_CALIB2 48 +#define IMX_SC_C_IPG_DEBUG 49 +#define IMX_SC_C_IPG_DOZE 50 +#define IMX_SC_C_IPG_WAIT 51 +#define IMX_SC_C_IPG_STOP 52 +#define IMX_SC_C_IPG_STOP_MODE 53 +#define IMX_SC_C_IPG_STOP_ACK 54 +#define IMX_SC_C_SYNC_CTRL 55 +#define IMX_SC_C_OFS_AUDIO_ALT 56 +#define IMX_SC_C_DSP_BYP 57 +#define IMX_SC_C_CLK_GEN_EN 58 +#define IMX_SC_C_INTF_SEL 59 +#define IMX_SC_C_RXC_DLY 60 +#define IMX_SC_C_TIMER_SEL 61 +#define IMX_SC_C_LAST 62 + #endif /* __DT_BINDINGS_RSCRC_IMX_H */ diff --git a/include/dt-bindings/interconnect/imx8mm.h b/include/dt-bindings/interconnect/imx8mm.h new file mode 100644 index 000000000000..8f10bb06cb59 --- /dev/null +++ b/include/dt-bindings/interconnect/imx8mm.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Interconnect framework driver for i.MX SoC + * + * Copyright (c) 2019, BayLibre + * Copyright (c) 2019-2020, NXP + * Author: Alexandre Bailon <abailon@baylibre.com> + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MM_H +#define __DT_BINDINGS_INTERCONNECT_IMX8MM_H + +#define IMX8MM_ICN_NOC 1 +#define IMX8MM_ICS_DRAM 2 +#define IMX8MM_ICS_OCRAM 3 +#define IMX8MM_ICM_A53 4 + +#define IMX8MM_ICM_VPU_H1 5 +#define IMX8MM_ICM_VPU_G1 6 +#define IMX8MM_ICM_VPU_G2 7 +#define IMX8MM_ICN_VIDEO 8 + +#define IMX8MM_ICM_GPU2D 9 +#define IMX8MM_ICM_GPU3D 10 +#define IMX8MM_ICN_GPU 11 + +#define IMX8MM_ICM_CSI 12 +#define IMX8MM_ICM_LCDIF 13 +#define IMX8MM_ICN_MIPI 14 + +#define IMX8MM_ICM_USB1 15 +#define IMX8MM_ICM_USB2 16 +#define IMX8MM_ICM_PCIE 17 +#define IMX8MM_ICN_HSIO 18 + +#define IMX8MM_ICM_SDMA2 19 +#define IMX8MM_ICM_SDMA3 20 +#define IMX8MM_ICN_AUDIO 21 + +#define IMX8MM_ICN_ENET 22 +#define IMX8MM_ICM_ENET 23 + +#define IMX8MM_ICN_MAIN 24 +#define IMX8MM_ICM_NAND 25 +#define IMX8MM_ICM_SDMA1 26 +#define IMX8MM_ICM_USDHC1 27 +#define IMX8MM_ICM_USDHC2 28 +#define IMX8MM_ICM_USDHC3 29 + +#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MM_H */ diff --git a/include/dt-bindings/interconnect/imx8mn.h b/include/dt-bindings/interconnect/imx8mn.h new file mode 100644 index 000000000000..307b977100b6 --- /dev/null +++ b/include/dt-bindings/interconnect/imx8mn.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Interconnect framework driver for i.MX SoC + * + * Copyright (c) 2019-2020, NXP + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MN_H +#define __DT_BINDINGS_INTERCONNECT_IMX8MN_H + +#define IMX8MN_ICN_NOC 1 +#define IMX8MN_ICS_DRAM 2 +#define IMX8MN_ICS_OCRAM 3 +#define IMX8MN_ICM_A53 4 + +#define IMX8MN_ICM_GPU 5 +#define IMX8MN_ICN_GPU 6 + +#define IMX8MN_ICM_CSI1 7 +#define IMX8MN_ICM_CSI2 8 +#define IMX8MN_ICM_ISI 9 +#define IMX8MN_ICM_LCDIF 10 +#define IMX8MN_ICN_MIPI 11 + +#define IMX8MN_ICM_USB 12 + +#define IMX8MN_ICM_SDMA2 13 +#define IMX8MN_ICM_SDMA3 14 +#define IMX8MN_ICN_AUDIO 15 + +#define IMX8MN_ICN_ENET 16 +#define IMX8MN_ICM_ENET 17 + +#define IMX8MN_ICM_NAND 18 +#define IMX8MN_ICM_SDMA1 19 +#define IMX8MN_ICM_USDHC1 20 +#define IMX8MN_ICM_USDHC2 21 +#define IMX8MN_ICM_USDHC3 22 +#define IMX8MN_ICN_MAIN 23 + +#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MN_H */ diff --git 
a/include/dt-bindings/interconnect/imx8mq.h b/include/dt-bindings/interconnect/imx8mq.h new file mode 100644 index 000000000000..1a4cae7f8be2 --- /dev/null +++ b/include/dt-bindings/interconnect/imx8mq.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Interconnect framework driver for i.MX SoC + * + * Copyright (c) 2019-2020, NXP + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_IMX8MQ_H +#define __DT_BINDINGS_INTERCONNECT_IMX8MQ_H + +#define IMX8MQ_ICN_NOC 1 +#define IMX8MQ_ICS_DRAM 2 +#define IMX8MQ_ICS_OCRAM 3 +#define IMX8MQ_ICM_A53 4 + +#define IMX8MQ_ICM_VPU 5 +#define IMX8MQ_ICN_VIDEO 6 + +#define IMX8MQ_ICM_GPU 7 +#define IMX8MQ_ICN_GPU 8 + +#define IMX8MQ_ICM_DCSS 9 +#define IMX8MQ_ICN_DCSS 10 + +#define IMX8MQ_ICM_USB1 11 +#define IMX8MQ_ICM_USB2 12 +#define IMX8MQ_ICN_USB 13 + +#define IMX8MQ_ICM_CSI1 14 +#define IMX8MQ_ICM_CSI2 15 +#define IMX8MQ_ICM_LCDIF 16 +#define IMX8MQ_ICN_DISPLAY 17 + +#define IMX8MQ_ICM_SDMA2 18 +#define IMX8MQ_ICN_AUDIO 19 + +#define IMX8MQ_ICN_ENET 20 +#define IMX8MQ_ICM_ENET 21 + +#define IMX8MQ_ICM_SDMA1 22 +#define IMX8MQ_ICM_NAND 23 +#define IMX8MQ_ICM_USDHC1 24 +#define IMX8MQ_ICM_USDHC2 25 +#define IMX8MQ_ICM_PCIE1 26 +#define IMX8MQ_ICM_PCIE2 27 +#define IMX8MQ_ICN_MAIN 28 + +#endif /* __DT_BINDINGS_INTERCONNECT_IMX8MQ_H */ diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h new file mode 100644 index 000000000000..4c23eefed5f3 --- /dev/null +++ b/include/dt-bindings/mailbox/qcom-ipcc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + */ + +#ifndef __DT_BINDINGS_MAILBOX_IPCC_H +#define __DT_BINDINGS_MAILBOX_IPCC_H + +/* Signal IDs for MPROC protocol */ +#define IPCC_MPROC_SIGNAL_GLINK_QMP 0 +#define IPCC_MPROC_SIGNAL_SMP2P 2 +#define IPCC_MPROC_SIGNAL_PING 3 + +/* Client IDs */ +#define IPCC_CLIENT_AOP 0 +#define IPCC_CLIENT_TZ 1 +#define IPCC_CLIENT_MPSS 2 +#define IPCC_CLIENT_LPASS 3 +#define IPCC_CLIENT_SLPI 4 +#define IPCC_CLIENT_SDC 5 +#define IPCC_CLIENT_CDSP 6 +#define IPCC_CLIENT_NPU 7 +#define IPCC_CLIENT_APSS 8 +#define IPCC_CLIENT_GPU 9 +#define IPCC_CLIENT_CVP 10 +#define IPCC_CLIENT_CAM 11 +#define IPCC_CLIENT_VPU 12 +#define IPCC_CLIENT_PCIE0 13 +#define IPCC_CLIENT_PCIE1 14 +#define IPCC_CLIENT_PCIE2 15 +#define IPCC_CLIENT_SPSS 16 + +#endif diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h index 1f3f866fae7b..3727ef72138b 100644 --- a/include/dt-bindings/phy/phy.h +++ b/include/dt-bindings/phy/phy.h @@ -17,5 +17,6 @@ #define PHY_TYPE_USB3 4 #define PHY_TYPE_UFS 5 #define PHY_TYPE_DP 6 +#define PHY_TYPE_XPCS 7 #endif /* _DT_BINDINGS_PHY */ diff --git a/include/dt-bindings/pinctrl/pads-imx8dxl.h b/include/dt-bindings/pinctrl/pads-imx8dxl.h new file mode 100644 index 000000000000..b1d7b84c3e0a --- /dev/null +++ b/include/dt-bindings/pinctrl/pads-imx8dxl.h @@ -0,0 +1,639 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2019~2020 NXP + */ + +#ifndef _IMX8DXL_PADS_H +#define _IMX8DXL_PADS_H + +/* pin id */ +#define IMX8DXL_PCIE_CTRL0_PERST_B 0 +#define IMX8DXL_PCIE_CTRL0_CLKREQ_B 1 +#define IMX8DXL_PCIE_CTRL0_WAKE_B 2 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP 3 +#define IMX8DXL_USB_SS3_TC0 4 +#define IMX8DXL_USB_SS3_TC1 5 +#define IMX8DXL_USB_SS3_TC2 6 +#define IMX8DXL_USB_SS3_TC3 7 +#define IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO 8 +#define IMX8DXL_EMMC0_CLK 9 +#define IMX8DXL_EMMC0_CMD 10 +#define IMX8DXL_EMMC0_DATA0 11 +#define 
IMX8DXL_EMMC0_DATA1 12 +#define IMX8DXL_EMMC0_DATA2 13 +#define IMX8DXL_EMMC0_DATA3 14 +#define IMX8DXL_EMMC0_DATA4 15 +#define IMX8DXL_EMMC0_DATA5 16 +#define IMX8DXL_EMMC0_DATA6 17 +#define IMX8DXL_EMMC0_DATA7 18 +#define IMX8DXL_EMMC0_STROBE 19 +#define IMX8DXL_EMMC0_RESET_B 20 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 21 +#define IMX8DXL_USDHC1_RESET_B 22 +#define IMX8DXL_USDHC1_VSELECT 23 +#define IMX8DXL_CTL_NAND_RE_P_N 24 +#define IMX8DXL_USDHC1_WP 25 +#define IMX8DXL_USDHC1_CD_B 26 +#define IMX8DXL_CTL_NAND_DQS_P_N 27 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP 28 +#define IMX8DXL_ENET0_RGMII_TXC 29 +#define IMX8DXL_ENET0_RGMII_TX_CTL 30 +#define IMX8DXL_ENET0_RGMII_TXD0 31 +#define IMX8DXL_ENET0_RGMII_TXD1 32 +#define IMX8DXL_ENET0_RGMII_TXD2 33 +#define IMX8DXL_ENET0_RGMII_TXD3 34 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 35 +#define IMX8DXL_ENET0_RGMII_RXC 36 +#define IMX8DXL_ENET0_RGMII_RX_CTL 37 +#define IMX8DXL_ENET0_RGMII_RXD0 38 +#define IMX8DXL_ENET0_RGMII_RXD1 39 +#define IMX8DXL_ENET0_RGMII_RXD2 40 +#define IMX8DXL_ENET0_RGMII_RXD3 41 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 42 +#define IMX8DXL_ENET0_REFCLK_125M_25M 43 +#define IMX8DXL_ENET0_MDIO 44 +#define IMX8DXL_ENET0_MDC 45 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT 46 +#define IMX8DXL_ENET1_RGMII_TXC 47 +#define IMX8DXL_ENET1_RGMII_TXD2 48 +#define IMX8DXL_ENET1_RGMII_TX_CTL 49 +#define IMX8DXL_ENET1_RGMII_TXD3 50 +#define IMX8DXL_ENET1_RGMII_RXC 51 +#define IMX8DXL_ENET1_RGMII_RXD3 52 +#define IMX8DXL_ENET1_RGMII_RXD2 53 +#define IMX8DXL_ENET1_RGMII_RXD1 54 +#define IMX8DXL_ENET1_RGMII_TXD0 55 +#define IMX8DXL_ENET1_RGMII_TXD1 56 +#define IMX8DXL_ENET1_RGMII_RXD0 57 +#define IMX8DXL_ENET1_RGMII_RX_CTL 58 +#define IMX8DXL_ENET1_REFCLK_125M_25M 59 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB 60 +#define IMX8DXL_SPI3_SCK 61 +#define IMX8DXL_SPI3_SDO 62 +#define IMX8DXL_SPI3_SDI 63 +#define IMX8DXL_SPI3_CS0 64 +#define IMX8DXL_SPI3_CS1 65 +#define IMX8DXL_MCLK_IN1 66 +#define IMX8DXL_MCLK_IN0 67 +#define IMX8DXL_MCLK_OUT0 68 +#define IMX8DXL_UART1_TX 69 +#define IMX8DXL_UART1_RX 70 +#define IMX8DXL_UART1_RTS_B 71 +#define IMX8DXL_UART1_CTS_B 72 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK 73 +#define IMX8DXL_SPI0_SCK 74 +#define IMX8DXL_SPI0_SDI 75 +#define IMX8DXL_SPI0_SDO 76 +#define IMX8DXL_SPI0_CS1 77 +#define IMX8DXL_SPI0_CS0 78 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT 79 +#define IMX8DXL_ADC_IN1 80 +#define IMX8DXL_ADC_IN0 81 +#define IMX8DXL_ADC_IN3 82 +#define IMX8DXL_ADC_IN2 83 +#define IMX8DXL_ADC_IN5 84 +#define IMX8DXL_ADC_IN4 85 +#define IMX8DXL_FLEXCAN0_RX 86 +#define IMX8DXL_FLEXCAN0_TX 87 +#define IMX8DXL_FLEXCAN1_RX 88 +#define IMX8DXL_FLEXCAN1_TX 89 +#define IMX8DXL_FLEXCAN2_RX 90 +#define IMX8DXL_FLEXCAN2_TX 91 +#define IMX8DXL_UART0_RX 92 +#define IMX8DXL_UART0_TX 93 +#define IMX8DXL_UART2_TX 94 +#define IMX8DXL_UART2_RX 95 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH 96 +#define IMX8DXL_JTAG_TRST_B 97 +#define IMX8DXL_PMIC_I2C_SCL 98 +#define IMX8DXL_PMIC_I2C_SDA 99 +#define IMX8DXL_PMIC_INT_B 100 +#define IMX8DXL_SCU_GPIO0_00 101 +#define IMX8DXL_SCU_GPIO0_01 102 +#define IMX8DXL_SCU_PMIC_STANDBY 103 +#define IMX8DXL_SCU_BOOT_MODE1 104 +#define IMX8DXL_SCU_BOOT_MODE0 105 +#define IMX8DXL_SCU_BOOT_MODE2 106 +#define IMX8DXL_SNVS_TAMPER_OUT1 107 +#define IMX8DXL_SNVS_TAMPER_OUT2 108 +#define IMX8DXL_SNVS_TAMPER_OUT3 109 +#define IMX8DXL_SNVS_TAMPER_OUT4 110 +#define IMX8DXL_SNVS_TAMPER_IN0 111 +#define IMX8DXL_SNVS_TAMPER_IN1 112 +#define 
IMX8DXL_SNVS_TAMPER_IN2 113 +#define IMX8DXL_SNVS_TAMPER_IN3 114 +#define IMX8DXL_SPI1_SCK 115 +#define IMX8DXL_SPI1_SDO 116 +#define IMX8DXL_SPI1_SDI 117 +#define IMX8DXL_SPI1_CS0 118 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD 119 +#define IMX8DXL_QSPI0A_DATA1 120 +#define IMX8DXL_QSPI0A_DATA0 121 +#define IMX8DXL_QSPI0A_DATA3 122 +#define IMX8DXL_QSPI0A_DATA2 123 +#define IMX8DXL_QSPI0A_SS0_B 124 +#define IMX8DXL_QSPI0A_DQS 125 +#define IMX8DXL_QSPI0A_SCLK 126 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A 127 +#define IMX8DXL_QSPI0B_SCLK 128 +#define IMX8DXL_QSPI0B_DQS 129 +#define IMX8DXL_QSPI0B_DATA1 130 +#define IMX8DXL_QSPI0B_DATA0 131 +#define IMX8DXL_QSPI0B_DATA3 132 +#define IMX8DXL_QSPI0B_DATA2 133 +#define IMX8DXL_QSPI0B_SS0_B 134 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B 135 + +/* format: <pin_id mux_mode> */ +#define IMX8DXL_PCIE_CTRL0_PERST_B_HSIO_PCIE0_PERST_B IMX8DXL_PCIE_CTRL0_PERST_B 0 +#define IMX8DXL_PCIE_CTRL0_PERST_B_LSIO_GPIO4_IO00 IMX8DXL_PCIE_CTRL0_PERST_B 4 +#define IMX8DXL_PCIE_CTRL0_PERST_B_LSIO_GPIO7_IO00 IMX8DXL_PCIE_CTRL0_PERST_B 5 +#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_HSIO_PCIE0_CLKREQ_B IMX8DXL_PCIE_CTRL0_CLKREQ_B 0 +#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO4_IO01 IMX8DXL_PCIE_CTRL0_CLKREQ_B 4 +#define IMX8DXL_PCIE_CTRL0_CLKREQ_B_LSIO_GPIO7_IO01 IMX8DXL_PCIE_CTRL0_CLKREQ_B 5 +#define IMX8DXL_PCIE_CTRL0_WAKE_B_HSIO_PCIE0_WAKE_B IMX8DXL_PCIE_CTRL0_WAKE_B 0 +#define IMX8DXL_PCIE_CTRL0_WAKE_B_LSIO_GPIO4_IO02 IMX8DXL_PCIE_CTRL0_WAKE_B 4 +#define IMX8DXL_PCIE_CTRL0_WAKE_B_LSIO_GPIO7_IO02 IMX8DXL_PCIE_CTRL0_WAKE_B 5 +#define IMX8DXL_USB_SS3_TC0_ADMA_I2C1_SCL IMX8DXL_USB_SS3_TC0 0 +#define IMX8DXL_USB_SS3_TC0_CONN_USB_OTG1_PWR IMX8DXL_USB_SS3_TC0 1 +#define IMX8DXL_USB_SS3_TC0_CONN_USB_OTG2_PWR IMX8DXL_USB_SS3_TC0 2 +#define IMX8DXL_USB_SS3_TC0_LSIO_GPIO4_IO03 IMX8DXL_USB_SS3_TC0 4 +#define IMX8DXL_USB_SS3_TC0_LSIO_GPIO7_IO03 IMX8DXL_USB_SS3_TC0 5 +#define IMX8DXL_USB_SS3_TC1_ADMA_I2C1_SCL IMX8DXL_USB_SS3_TC1 0 +#define IMX8DXL_USB_SS3_TC1_CONN_USB_OTG2_PWR IMX8DXL_USB_SS3_TC1 1 +#define IMX8DXL_USB_SS3_TC1_LSIO_GPIO4_IO04 IMX8DXL_USB_SS3_TC1 4 +#define IMX8DXL_USB_SS3_TC1_LSIO_GPIO7_IO04 IMX8DXL_USB_SS3_TC1 5 +#define IMX8DXL_USB_SS3_TC2_ADMA_I2C1_SDA IMX8DXL_USB_SS3_TC2 0 +#define IMX8DXL_USB_SS3_TC2_CONN_USB_OTG1_OC IMX8DXL_USB_SS3_TC2 1 +#define IMX8DXL_USB_SS3_TC2_CONN_USB_OTG2_OC IMX8DXL_USB_SS3_TC2 2 +#define IMX8DXL_USB_SS3_TC2_LSIO_GPIO4_IO05 IMX8DXL_USB_SS3_TC2 4 +#define IMX8DXL_USB_SS3_TC2_LSIO_GPIO7_IO05 IMX8DXL_USB_SS3_TC2 5 +#define IMX8DXL_USB_SS3_TC3_ADMA_I2C1_SDA IMX8DXL_USB_SS3_TC3 0 +#define IMX8DXL_USB_SS3_TC3_CONN_USB_OTG2_OC IMX8DXL_USB_SS3_TC3 1 +#define IMX8DXL_USB_SS3_TC3_LSIO_GPIO4_IO06 IMX8DXL_USB_SS3_TC3 4 +#define IMX8DXL_USB_SS3_TC3_LSIO_GPIO7_IO06 IMX8DXL_USB_SS3_TC3 5 +#define IMX8DXL_EMMC0_CLK_CONN_EMMC0_CLK IMX8DXL_EMMC0_CLK 0 +#define IMX8DXL_EMMC0_CLK_CONN_NAND_READY_B IMX8DXL_EMMC0_CLK 1 +#define IMX8DXL_EMMC0_CLK_LSIO_GPIO4_IO07 IMX8DXL_EMMC0_CLK 4 +#define IMX8DXL_EMMC0_CMD_CONN_EMMC0_CMD IMX8DXL_EMMC0_CMD 0 +#define IMX8DXL_EMMC0_CMD_CONN_NAND_DQS IMX8DXL_EMMC0_CMD 1 +#define IMX8DXL_EMMC0_CMD_LSIO_GPIO4_IO08 IMX8DXL_EMMC0_CMD 4 +#define IMX8DXL_EMMC0_DATA0_CONN_EMMC0_DATA0 IMX8DXL_EMMC0_DATA0 0 +#define IMX8DXL_EMMC0_DATA0_CONN_NAND_DATA00 IMX8DXL_EMMC0_DATA0 1 +#define IMX8DXL_EMMC0_DATA0_LSIO_GPIO4_IO09 IMX8DXL_EMMC0_DATA0 4 +#define IMX8DXL_EMMC0_DATA1_CONN_EMMC0_DATA1 IMX8DXL_EMMC0_DATA1 0 +#define IMX8DXL_EMMC0_DATA1_CONN_NAND_DATA01 IMX8DXL_EMMC0_DATA1 1 +#define IMX8DXL_EMMC0_DATA1_LSIO_GPIO4_IO10 
IMX8DXL_EMMC0_DATA1 4 +#define IMX8DXL_EMMC0_DATA2_CONN_EMMC0_DATA2 IMX8DXL_EMMC0_DATA2 0 +#define IMX8DXL_EMMC0_DATA2_CONN_NAND_DATA02 IMX8DXL_EMMC0_DATA2 1 +#define IMX8DXL_EMMC0_DATA2_LSIO_GPIO4_IO11 IMX8DXL_EMMC0_DATA2 4 +#define IMX8DXL_EMMC0_DATA3_CONN_EMMC0_DATA3 IMX8DXL_EMMC0_DATA3 0 +#define IMX8DXL_EMMC0_DATA3_CONN_NAND_DATA03 IMX8DXL_EMMC0_DATA3 1 +#define IMX8DXL_EMMC0_DATA3_LSIO_GPIO4_IO12 IMX8DXL_EMMC0_DATA3 4 +#define IMX8DXL_EMMC0_DATA4_CONN_EMMC0_DATA4 IMX8DXL_EMMC0_DATA4 0 +#define IMX8DXL_EMMC0_DATA4_CONN_NAND_DATA04 IMX8DXL_EMMC0_DATA4 1 +#define IMX8DXL_EMMC0_DATA4_LSIO_GPIO4_IO13 IMX8DXL_EMMC0_DATA4 4 +#define IMX8DXL_EMMC0_DATA5_CONN_EMMC0_DATA5 IMX8DXL_EMMC0_DATA5 0 +#define IMX8DXL_EMMC0_DATA5_CONN_NAND_DATA05 IMX8DXL_EMMC0_DATA5 1 +#define IMX8DXL_EMMC0_DATA5_LSIO_GPIO4_IO14 IMX8DXL_EMMC0_DATA5 4 +#define IMX8DXL_EMMC0_DATA6_CONN_EMMC0_DATA6 IMX8DXL_EMMC0_DATA6 0 +#define IMX8DXL_EMMC0_DATA6_CONN_NAND_DATA06 IMX8DXL_EMMC0_DATA6 1 +#define IMX8DXL_EMMC0_DATA6_LSIO_GPIO4_IO15 IMX8DXL_EMMC0_DATA6 4 +#define IMX8DXL_EMMC0_DATA7_CONN_EMMC0_DATA7 IMX8DXL_EMMC0_DATA7 0 +#define IMX8DXL_EMMC0_DATA7_CONN_NAND_DATA07 IMX8DXL_EMMC0_DATA7 1 +#define IMX8DXL_EMMC0_DATA7_LSIO_GPIO4_IO16 IMX8DXL_EMMC0_DATA7 4 +#define IMX8DXL_EMMC0_STROBE_CONN_EMMC0_STROBE IMX8DXL_EMMC0_STROBE 0 +#define IMX8DXL_EMMC0_STROBE_CONN_NAND_CLE IMX8DXL_EMMC0_STROBE 1 +#define IMX8DXL_EMMC0_STROBE_LSIO_GPIO4_IO17 IMX8DXL_EMMC0_STROBE 4 +#define IMX8DXL_EMMC0_RESET_B_CONN_EMMC0_RESET_B IMX8DXL_EMMC0_RESET_B 0 +#define IMX8DXL_EMMC0_RESET_B_CONN_NAND_WP_B IMX8DXL_EMMC0_RESET_B 1 +#define IMX8DXL_EMMC0_RESET_B_LSIO_GPIO4_IO18 IMX8DXL_EMMC0_RESET_B 4 +#define IMX8DXL_USDHC1_RESET_B_CONN_USDHC1_RESET_B IMX8DXL_USDHC1_RESET_B 0 +#define IMX8DXL_USDHC1_RESET_B_CONN_NAND_RE_N IMX8DXL_USDHC1_RESET_B 1 +#define IMX8DXL_USDHC1_RESET_B_ADMA_SPI2_SCK IMX8DXL_USDHC1_RESET_B 2 +#define IMX8DXL_USDHC1_RESET_B_CONN_NAND_WE_B IMX8DXL_USDHC1_RESET_B 3 +#define IMX8DXL_USDHC1_RESET_B_LSIO_GPIO4_IO19 IMX8DXL_USDHC1_RESET_B 4 +#define IMX8DXL_USDHC1_RESET_B_LSIO_GPIO7_IO08 IMX8DXL_USDHC1_RESET_B 5 +#define IMX8DXL_USDHC1_VSELECT_CONN_USDHC1_VSELECT IMX8DXL_USDHC1_VSELECT 0 +#define IMX8DXL_USDHC1_VSELECT_CONN_NAND_RE_P IMX8DXL_USDHC1_VSELECT 1 +#define IMX8DXL_USDHC1_VSELECT_ADMA_SPI2_SDO IMX8DXL_USDHC1_VSELECT 2 +#define IMX8DXL_USDHC1_VSELECT_CONN_NAND_RE_B IMX8DXL_USDHC1_VSELECT 3 +#define IMX8DXL_USDHC1_VSELECT_LSIO_GPIO4_IO20 IMX8DXL_USDHC1_VSELECT 4 +#define IMX8DXL_USDHC1_VSELECT_LSIO_GPIO7_IO09 IMX8DXL_USDHC1_VSELECT 5 +#define IMX8DXL_USDHC1_WP_CONN_USDHC1_WP IMX8DXL_USDHC1_WP 0 +#define IMX8DXL_USDHC1_WP_CONN_NAND_DQS_N IMX8DXL_USDHC1_WP 1 +#define IMX8DXL_USDHC1_WP_ADMA_SPI2_SDI IMX8DXL_USDHC1_WP 2 +#define IMX8DXL_USDHC1_WP_CONN_NAND_ALE IMX8DXL_USDHC1_WP 3 +#define IMX8DXL_USDHC1_WP_LSIO_GPIO4_IO21 IMX8DXL_USDHC1_WP 4 +#define IMX8DXL_USDHC1_WP_LSIO_GPIO7_IO10 IMX8DXL_USDHC1_WP 5 +#define IMX8DXL_USDHC1_CD_B_CONN_USDHC1_CD_B IMX8DXL_USDHC1_CD_B 0 +#define IMX8DXL_USDHC1_CD_B_CONN_NAND_DQS_P IMX8DXL_USDHC1_CD_B 1 +#define IMX8DXL_USDHC1_CD_B_ADMA_SPI2_CS0 IMX8DXL_USDHC1_CD_B 2 +#define IMX8DXL_USDHC1_CD_B_CONN_NAND_DQS IMX8DXL_USDHC1_CD_B 3 +#define IMX8DXL_USDHC1_CD_B_LSIO_GPIO4_IO22 IMX8DXL_USDHC1_CD_B 4 +#define IMX8DXL_USDHC1_CD_B_LSIO_GPIO7_IO11 IMX8DXL_USDHC1_CD_B 5 +#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC IMX8DXL_ENET0_RGMII_TXC 0 +#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_OUT IMX8DXL_ENET0_RGMII_TXC 1 +#define IMX8DXL_ENET0_RGMII_TXC_CONN_ENET0_RCLK50M_IN IMX8DXL_ENET0_RGMII_TXC 2 
+#define IMX8DXL_ENET0_RGMII_TXC_CONN_NAND_CE1_B IMX8DXL_ENET0_RGMII_TXC 3 +#define IMX8DXL_ENET0_RGMII_TXC_LSIO_GPIO4_IO29 IMX8DXL_ENET0_RGMII_TXC 4 +#define IMX8DXL_ENET0_RGMII_TXC_CONN_USDHC2_CLK IMX8DXL_ENET0_RGMII_TXC 5 +#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL IMX8DXL_ENET0_RGMII_TX_CTL 0 +#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_USDHC1_RESET_B IMX8DXL_ENET0_RGMII_TX_CTL 3 +#define IMX8DXL_ENET0_RGMII_TX_CTL_LSIO_GPIO4_IO30 IMX8DXL_ENET0_RGMII_TX_CTL 4 +#define IMX8DXL_ENET0_RGMII_TX_CTL_CONN_USDHC2_CMD IMX8DXL_ENET0_RGMII_TX_CTL 5 +#define IMX8DXL_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 IMX8DXL_ENET0_RGMII_TXD0 0 +#define IMX8DXL_ENET0_RGMII_TXD0_CONN_USDHC1_VSELECT IMX8DXL_ENET0_RGMII_TXD0 3 +#define IMX8DXL_ENET0_RGMII_TXD0_LSIO_GPIO4_IO31 IMX8DXL_ENET0_RGMII_TXD0 4 +#define IMX8DXL_ENET0_RGMII_TXD0_CONN_USDHC2_DATA0 IMX8DXL_ENET0_RGMII_TXD0 5 +#define IMX8DXL_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 IMX8DXL_ENET0_RGMII_TXD1 0 +#define IMX8DXL_ENET0_RGMII_TXD1_CONN_USDHC1_WP IMX8DXL_ENET0_RGMII_TXD1 3 +#define IMX8DXL_ENET0_RGMII_TXD1_LSIO_GPIO5_IO00 IMX8DXL_ENET0_RGMII_TXD1 4 +#define IMX8DXL_ENET0_RGMII_TXD1_CONN_USDHC2_DATA1 IMX8DXL_ENET0_RGMII_TXD1 5 +#define IMX8DXL_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 IMX8DXL_ENET0_RGMII_TXD2 0 +#define IMX8DXL_ENET0_RGMII_TXD2_CONN_NAND_CE0_B IMX8DXL_ENET0_RGMII_TXD2 2 +#define IMX8DXL_ENET0_RGMII_TXD2_CONN_USDHC1_CD_B IMX8DXL_ENET0_RGMII_TXD2 3 +#define IMX8DXL_ENET0_RGMII_TXD2_LSIO_GPIO5_IO01 IMX8DXL_ENET0_RGMII_TXD2 4 +#define IMX8DXL_ENET0_RGMII_TXD2_CONN_USDHC2_DATA2 IMX8DXL_ENET0_RGMII_TXD2 5 +#define IMX8DXL_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 IMX8DXL_ENET0_RGMII_TXD3 0 +#define IMX8DXL_ENET0_RGMII_TXD3_CONN_NAND_RE_B IMX8DXL_ENET0_RGMII_TXD3 2 +#define IMX8DXL_ENET0_RGMII_TXD3_LSIO_GPIO5_IO02 IMX8DXL_ENET0_RGMII_TXD3 4 +#define IMX8DXL_ENET0_RGMII_TXD3_CONN_USDHC2_DATA3 IMX8DXL_ENET0_RGMII_TXD3 5 +#define IMX8DXL_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC IMX8DXL_ENET0_RGMII_RXC 0 +#define IMX8DXL_ENET0_RGMII_RXC_CONN_NAND_WE_B IMX8DXL_ENET0_RGMII_RXC 2 +#define IMX8DXL_ENET0_RGMII_RXC_CONN_USDHC1_CLK IMX8DXL_ENET0_RGMII_RXC 3 +#define IMX8DXL_ENET0_RGMII_RXC_LSIO_GPIO5_IO03 IMX8DXL_ENET0_RGMII_RXC 4 +#define IMX8DXL_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL IMX8DXL_ENET0_RGMII_RX_CTL 0 +#define IMX8DXL_ENET0_RGMII_RX_CTL_CONN_USDHC1_CMD IMX8DXL_ENET0_RGMII_RX_CTL 3 +#define IMX8DXL_ENET0_RGMII_RX_CTL_LSIO_GPIO5_IO04 IMX8DXL_ENET0_RGMII_RX_CTL 4 +#define IMX8DXL_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 IMX8DXL_ENET0_RGMII_RXD0 0 +#define IMX8DXL_ENET0_RGMII_RXD0_CONN_USDHC1_DATA0 IMX8DXL_ENET0_RGMII_RXD0 3 +#define IMX8DXL_ENET0_RGMII_RXD0_LSIO_GPIO5_IO05 IMX8DXL_ENET0_RGMII_RXD0 4 +#define IMX8DXL_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 IMX8DXL_ENET0_RGMII_RXD1 0 +#define IMX8DXL_ENET0_RGMII_RXD1_CONN_USDHC1_DATA1 IMX8DXL_ENET0_RGMII_RXD1 3 +#define IMX8DXL_ENET0_RGMII_RXD1_LSIO_GPIO5_IO06 IMX8DXL_ENET0_RGMII_RXD1 4 +#define IMX8DXL_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 IMX8DXL_ENET0_RGMII_RXD2 0 +#define IMX8DXL_ENET0_RGMII_RXD2_CONN_ENET0_RMII_RX_ER IMX8DXL_ENET0_RGMII_RXD2 1 +#define IMX8DXL_ENET0_RGMII_RXD2_CONN_USDHC1_DATA2 IMX8DXL_ENET0_RGMII_RXD2 3 +#define IMX8DXL_ENET0_RGMII_RXD2_LSIO_GPIO5_IO07 IMX8DXL_ENET0_RGMII_RXD2 4 +#define IMX8DXL_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 IMX8DXL_ENET0_RGMII_RXD3 0 +#define IMX8DXL_ENET0_RGMII_RXD3_CONN_NAND_ALE IMX8DXL_ENET0_RGMII_RXD3 2 +#define IMX8DXL_ENET0_RGMII_RXD3_CONN_USDHC1_DATA3 IMX8DXL_ENET0_RGMII_RXD3 3 +#define IMX8DXL_ENET0_RGMII_RXD3_LSIO_GPIO5_IO08 
IMX8DXL_ENET0_RGMII_RXD3 4 +#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_ENET0_REFCLK_125M_25M IMX8DXL_ENET0_REFCLK_125M_25M 0 +#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_ENET0_PPS IMX8DXL_ENET0_REFCLK_125M_25M 1 +#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_EQOS_PPS_IN IMX8DXL_ENET0_REFCLK_125M_25M 2 +#define IMX8DXL_ENET0_REFCLK_125M_25M_CONN_EQOS_PPS_OUT IMX8DXL_ENET0_REFCLK_125M_25M 3 +#define IMX8DXL_ENET0_REFCLK_125M_25M_LSIO_GPIO5_IO09 IMX8DXL_ENET0_REFCLK_125M_25M 4 +#define IMX8DXL_ENET0_MDIO_CONN_ENET0_MDIO IMX8DXL_ENET0_MDIO 0 +#define IMX8DXL_ENET0_MDIO_ADMA_I2C3_SDA IMX8DXL_ENET0_MDIO 1 +#define IMX8DXL_ENET0_MDIO_CONN_EQOS_MDIO IMX8DXL_ENET0_MDIO 2 +#define IMX8DXL_ENET0_MDIO_LSIO_GPIO5_IO10 IMX8DXL_ENET0_MDIO 4 +#define IMX8DXL_ENET0_MDIO_LSIO_GPIO7_IO16 IMX8DXL_ENET0_MDIO 5 +#define IMX8DXL_ENET0_MDC_CONN_ENET0_MDC IMX8DXL_ENET0_MDC 0 +#define IMX8DXL_ENET0_MDC_ADMA_I2C3_SCL IMX8DXL_ENET0_MDC 1 +#define IMX8DXL_ENET0_MDC_CONN_EQOS_MDC IMX8DXL_ENET0_MDC 2 +#define IMX8DXL_ENET0_MDC_LSIO_GPIO5_IO11 IMX8DXL_ENET0_MDC 4 +#define IMX8DXL_ENET0_MDC_LSIO_GPIO7_IO17 IMX8DXL_ENET0_MDC 5 +#define IMX8DXL_ENET1_RGMII_TXC_LSIO_GPIO0_IO00 IMX8DXL_ENET1_RGMII_TXC 0 +#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RCLK50M_OUT IMX8DXL_ENET1_RGMII_TXC 1 +#define IMX8DXL_ENET1_RGMII_TXC_ADMA_LCDIF_D00 IMX8DXL_ENET1_RGMII_TXC 2 +#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RGMII_TXC IMX8DXL_ENET1_RGMII_TXC 3 +#define IMX8DXL_ENET1_RGMII_TXC_CONN_EQOS_RCLK50M_IN IMX8DXL_ENET1_RGMII_TXC 4 +#define IMX8DXL_ENET1_RGMII_TXD2_ADMA_LCDIF_D01 IMX8DXL_ENET1_RGMII_TXD2 2 +#define IMX8DXL_ENET1_RGMII_TXD2_CONN_EQOS_RGMII_TXD2 IMX8DXL_ENET1_RGMII_TXD2 3 +#define IMX8DXL_ENET1_RGMII_TXD2_LSIO_GPIO0_IO01 IMX8DXL_ENET1_RGMII_TXD2 4 +#define IMX8DXL_ENET1_RGMII_TX_CTL_ADMA_LCDIF_D02 IMX8DXL_ENET1_RGMII_TX_CTL 2 +#define IMX8DXL_ENET1_RGMII_TX_CTL_CONN_EQOS_RGMII_TX_CTL IMX8DXL_ENET1_RGMII_TX_CTL 3 +#define IMX8DXL_ENET1_RGMII_TX_CTL_LSIO_GPIO0_IO02 IMX8DXL_ENET1_RGMII_TX_CTL 4 +#define IMX8DXL_ENET1_RGMII_TXD3_ADMA_LCDIF_D03 IMX8DXL_ENET1_RGMII_TXD3 2 +#define IMX8DXL_ENET1_RGMII_TXD3_CONN_EQOS_RGMII_TXD3 IMX8DXL_ENET1_RGMII_TXD3 3 +#define IMX8DXL_ENET1_RGMII_TXD3_LSIO_GPIO0_IO03 IMX8DXL_ENET1_RGMII_TXD3 4 +#define IMX8DXL_ENET1_RGMII_RXC_ADMA_LCDIF_D04 IMX8DXL_ENET1_RGMII_RXC 2 +#define IMX8DXL_ENET1_RGMII_RXC_CONN_EQOS_RGMII_RXC IMX8DXL_ENET1_RGMII_RXC 3 +#define IMX8DXL_ENET1_RGMII_RXC_LSIO_GPIO0_IO04 IMX8DXL_ENET1_RGMII_RXC 4 +#define IMX8DXL_ENET1_RGMII_RXD3_ADMA_LCDIF_D05 IMX8DXL_ENET1_RGMII_RXD3 2 +#define IMX8DXL_ENET1_RGMII_RXD3_CONN_EQOS_RGMII_RXD3 IMX8DXL_ENET1_RGMII_RXD3 3 +#define IMX8DXL_ENET1_RGMII_RXD3_LSIO_GPIO0_IO05 IMX8DXL_ENET1_RGMII_RXD3 4 +#define IMX8DXL_ENET1_RGMII_RXD2_ADMA_LCDIF_D06 IMX8DXL_ENET1_RGMII_RXD2 2 +#define IMX8DXL_ENET1_RGMII_RXD2_CONN_EQOS_RGMII_RXD2 IMX8DXL_ENET1_RGMII_RXD2 3 +#define IMX8DXL_ENET1_RGMII_RXD2_LSIO_GPIO0_IO06 IMX8DXL_ENET1_RGMII_RXD2 4 +#define IMX8DXL_ENET1_RGMII_RXD2_LSIO_GPIO6_IO00 IMX8DXL_ENET1_RGMII_RXD2 5 +#define IMX8DXL_ENET1_RGMII_RXD1_ADMA_LCDIF_D07 IMX8DXL_ENET1_RGMII_RXD1 2 +#define IMX8DXL_ENET1_RGMII_RXD1_CONN_EQOS_RGMII_RXD1 IMX8DXL_ENET1_RGMII_RXD1 3 +#define IMX8DXL_ENET1_RGMII_RXD1_LSIO_GPIO0_IO07 IMX8DXL_ENET1_RGMII_RXD1 4 +#define IMX8DXL_ENET1_RGMII_RXD1_LSIO_GPIO6_IO01 IMX8DXL_ENET1_RGMII_RXD1 5 +#define IMX8DXL_ENET1_RGMII_TXD0_ADMA_LCDIF_D08 IMX8DXL_ENET1_RGMII_TXD0 2 +#define IMX8DXL_ENET1_RGMII_TXD0_CONN_EQOS_RGMII_TXD0 IMX8DXL_ENET1_RGMII_TXD0 3 +#define IMX8DXL_ENET1_RGMII_TXD0_LSIO_GPIO0_IO08 IMX8DXL_ENET1_RGMII_TXD0 4 +#define 
IMX8DXL_ENET1_RGMII_TXD0_LSIO_GPIO6_IO02 IMX8DXL_ENET1_RGMII_TXD0 5 +#define IMX8DXL_ENET1_RGMII_TXD1_ADMA_LCDIF_D09 IMX8DXL_ENET1_RGMII_TXD1 2 +#define IMX8DXL_ENET1_RGMII_TXD1_CONN_EQOS_RGMII_TXD1 IMX8DXL_ENET1_RGMII_TXD1 3 +#define IMX8DXL_ENET1_RGMII_TXD1_LSIO_GPIO0_IO09 IMX8DXL_ENET1_RGMII_TXD1 4 +#define IMX8DXL_ENET1_RGMII_TXD1_LSIO_GPIO6_IO03 IMX8DXL_ENET1_RGMII_TXD1 5 +#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_SPDIF0_RX IMX8DXL_ENET1_RGMII_RXD0 0 +#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_MQS_R IMX8DXL_ENET1_RGMII_RXD0 1 +#define IMX8DXL_ENET1_RGMII_RXD0_ADMA_LCDIF_D10 IMX8DXL_ENET1_RGMII_RXD0 2 +#define IMX8DXL_ENET1_RGMII_RXD0_CONN_EQOS_RGMII_RXD0 IMX8DXL_ENET1_RGMII_RXD0 3 +#define IMX8DXL_ENET1_RGMII_RXD0_LSIO_GPIO0_IO10 IMX8DXL_ENET1_RGMII_RXD0 4 +#define IMX8DXL_ENET1_RGMII_RXD0_LSIO_GPIO6_IO04 IMX8DXL_ENET1_RGMII_RXD0 5 +#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_SPDIF0_TX IMX8DXL_ENET1_RGMII_RX_CTL 0 +#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_MQS_L IMX8DXL_ENET1_RGMII_RX_CTL 1 +#define IMX8DXL_ENET1_RGMII_RX_CTL_ADMA_LCDIF_D11 IMX8DXL_ENET1_RGMII_RX_CTL 2 +#define IMX8DXL_ENET1_RGMII_RX_CTL_CONN_EQOS_RGMII_RX_CTL IMX8DXL_ENET1_RGMII_RX_CTL 3 +#define IMX8DXL_ENET1_RGMII_RX_CTL_LSIO_GPIO0_IO11 IMX8DXL_ENET1_RGMII_RX_CTL 4 +#define IMX8DXL_ENET1_RGMII_RX_CTL_LSIO_GPIO6_IO05 IMX8DXL_ENET1_RGMII_RX_CTL 5 +#define IMX8DXL_ENET1_REFCLK_125M_25M_ADMA_SPDIF0_EXT_CLK IMX8DXL_ENET1_REFCLK_125M_25M 0 +#define IMX8DXL_ENET1_REFCLK_125M_25M_ADMA_LCDIF_D12 IMX8DXL_ENET1_REFCLK_125M_25M 2 +#define IMX8DXL_ENET1_REFCLK_125M_25M_CONN_EQOS_REFCLK_125M_25M IMX8DXL_ENET1_REFCLK_125M_25M 3 +#define IMX8DXL_ENET1_REFCLK_125M_25M_LSIO_GPIO0_IO12 IMX8DXL_ENET1_REFCLK_125M_25M 4 +#define IMX8DXL_ENET1_REFCLK_125M_25M_LSIO_GPIO6_IO06 IMX8DXL_ENET1_REFCLK_125M_25M 5 +#define IMX8DXL_SPI3_SCK_ADMA_SPI3_SCK IMX8DXL_SPI3_SCK 0 +#define IMX8DXL_SPI3_SCK_ADMA_LCDIF_D13 IMX8DXL_SPI3_SCK 2 +#define IMX8DXL_SPI3_SCK_LSIO_GPIO0_IO13 IMX8DXL_SPI3_SCK 4 +#define IMX8DXL_SPI3_SCK_ADMA_LCDIF_D00 IMX8DXL_SPI3_SCK 5 +#define IMX8DXL_SPI3_SDO_ADMA_SPI3_SDO IMX8DXL_SPI3_SDO 0 +#define IMX8DXL_SPI3_SDO_ADMA_LCDIF_D14 IMX8DXL_SPI3_SDO 2 +#define IMX8DXL_SPI3_SDO_LSIO_GPIO0_IO14 IMX8DXL_SPI3_SDO 4 +#define IMX8DXL_SPI3_SDO_ADMA_LCDIF_D01 IMX8DXL_SPI3_SDO 5 +#define IMX8DXL_SPI3_SDI_ADMA_SPI3_SDI IMX8DXL_SPI3_SDI 0 +#define IMX8DXL_SPI3_SDI_ADMA_LCDIF_D15 IMX8DXL_SPI3_SDI 2 +#define IMX8DXL_SPI3_SDI_LSIO_GPIO0_IO15 IMX8DXL_SPI3_SDI 4 +#define IMX8DXL_SPI3_SDI_ADMA_LCDIF_D02 IMX8DXL_SPI3_SDI 5 +#define IMX8DXL_SPI3_CS0_ADMA_SPI3_CS0 IMX8DXL_SPI3_CS0 0 +#define IMX8DXL_SPI3_CS0_ADMA_ACM_MCLK_OUT1 IMX8DXL_SPI3_CS0 1 +#define IMX8DXL_SPI3_CS0_ADMA_LCDIF_HSYNC IMX8DXL_SPI3_CS0 2 +#define IMX8DXL_SPI3_CS0_LSIO_GPIO0_IO16 IMX8DXL_SPI3_CS0 4 +#define IMX8DXL_SPI3_CS0_ADMA_LCDIF_CS IMX8DXL_SPI3_CS0 5 +#define IMX8DXL_SPI3_CS1_ADMA_SPI3_CS1 IMX8DXL_SPI3_CS1 0 +#define IMX8DXL_SPI3_CS1_ADMA_I2C3_SCL IMX8DXL_SPI3_CS1 1 +#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_RESET IMX8DXL_SPI3_CS1 2 +#define IMX8DXL_SPI3_CS1_ADMA_SPI2_CS0 IMX8DXL_SPI3_CS1 3 +#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_D16 IMX8DXL_SPI3_CS1 4 +#define IMX8DXL_SPI3_CS1_ADMA_LCDIF_RD_E IMX8DXL_SPI3_CS1 5 +#define IMX8DXL_MCLK_IN1_ADMA_ACM_MCLK_IN1 IMX8DXL_MCLK_IN1 0 +#define IMX8DXL_MCLK_IN1_ADMA_I2C3_SDA IMX8DXL_MCLK_IN1 1 +#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_EN IMX8DXL_MCLK_IN1 2 +#define IMX8DXL_MCLK_IN1_ADMA_SPI2_SCK IMX8DXL_MCLK_IN1 3 +#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_D17 IMX8DXL_MCLK_IN1 4 +#define IMX8DXL_MCLK_IN1_ADMA_LCDIF_D03 IMX8DXL_MCLK_IN1 5 +#define 
IMX8DXL_MCLK_IN0_ADMA_ACM_MCLK_IN0 IMX8DXL_MCLK_IN0 0 +#define IMX8DXL_MCLK_IN0_ADMA_LCDIF_VSYNC IMX8DXL_MCLK_IN0 2 +#define IMX8DXL_MCLK_IN0_ADMA_SPI2_SDI IMX8DXL_MCLK_IN0 3 +#define IMX8DXL_MCLK_IN0_LSIO_GPIO0_IO19 IMX8DXL_MCLK_IN0 4 +#define IMX8DXL_MCLK_IN0_ADMA_LCDIF_RS IMX8DXL_MCLK_IN0 5 +#define IMX8DXL_MCLK_OUT0_ADMA_ACM_MCLK_OUT0 IMX8DXL_MCLK_OUT0 0 +#define IMX8DXL_MCLK_OUT0_ADMA_LCDIF_CLK IMX8DXL_MCLK_OUT0 2 +#define IMX8DXL_MCLK_OUT0_ADMA_SPI2_SDO IMX8DXL_MCLK_OUT0 3 +#define IMX8DXL_MCLK_OUT0_LSIO_GPIO0_IO20 IMX8DXL_MCLK_OUT0 4 +#define IMX8DXL_MCLK_OUT0_ADMA_LCDIF_WR_RWN IMX8DXL_MCLK_OUT0 5 +#define IMX8DXL_UART1_TX_ADMA_UART1_TX IMX8DXL_UART1_TX 0 +#define IMX8DXL_UART1_TX_LSIO_PWM0_OUT IMX8DXL_UART1_TX 1 +#define IMX8DXL_UART1_TX_LSIO_GPT0_CAPTURE IMX8DXL_UART1_TX 2 +#define IMX8DXL_UART1_TX_LSIO_GPIO0_IO21 IMX8DXL_UART1_TX 4 +#define IMX8DXL_UART1_TX_ADMA_LCDIF_D04 IMX8DXL_UART1_TX 5 +#define IMX8DXL_UART1_RX_ADMA_UART1_RX IMX8DXL_UART1_RX 0 +#define IMX8DXL_UART1_RX_LSIO_PWM1_OUT IMX8DXL_UART1_RX 1 +#define IMX8DXL_UART1_RX_LSIO_GPT0_COMPARE IMX8DXL_UART1_RX 2 +#define IMX8DXL_UART1_RX_LSIO_GPT1_CLK IMX8DXL_UART1_RX 3 +#define IMX8DXL_UART1_RX_LSIO_GPIO0_IO22 IMX8DXL_UART1_RX 4 +#define IMX8DXL_UART1_RX_ADMA_LCDIF_D05 IMX8DXL_UART1_RX 5 +#define IMX8DXL_UART1_RTS_B_ADMA_UART1_RTS_B IMX8DXL_UART1_RTS_B 0 +#define IMX8DXL_UART1_RTS_B_LSIO_PWM2_OUT IMX8DXL_UART1_RTS_B 1 +#define IMX8DXL_UART1_RTS_B_ADMA_LCDIF_D16 IMX8DXL_UART1_RTS_B 2 +#define IMX8DXL_UART1_RTS_B_LSIO_GPT1_CAPTURE IMX8DXL_UART1_RTS_B 3 +#define IMX8DXL_UART1_RTS_B_LSIO_GPT0_CLK IMX8DXL_UART1_RTS_B 4 +#define IMX8DXL_UART1_RTS_B_ADMA_LCDIF_D06 IMX8DXL_UART1_RTS_B 5 +#define IMX8DXL_UART1_CTS_B_ADMA_UART1_CTS_B IMX8DXL_UART1_CTS_B 0 +#define IMX8DXL_UART1_CTS_B_LSIO_PWM3_OUT IMX8DXL_UART1_CTS_B 1 +#define IMX8DXL_UART1_CTS_B_ADMA_LCDIF_D17 IMX8DXL_UART1_CTS_B 2 +#define IMX8DXL_UART1_CTS_B_LSIO_GPT1_COMPARE IMX8DXL_UART1_CTS_B 3 +#define IMX8DXL_UART1_CTS_B_LSIO_GPIO0_IO24 IMX8DXL_UART1_CTS_B 4 +#define IMX8DXL_UART1_CTS_B_ADMA_LCDIF_D07 IMX8DXL_UART1_CTS_B 5 +#define IMX8DXL_SPI0_SCK_ADMA_SPI0_SCK IMX8DXL_SPI0_SCK 0 +#define IMX8DXL_SPI0_SCK_ADMA_SAI0_TXC IMX8DXL_SPI0_SCK 1 +#define IMX8DXL_SPI0_SCK_M40_I2C0_SCL IMX8DXL_SPI0_SCK 2 +#define IMX8DXL_SPI0_SCK_M40_GPIO0_IO00 IMX8DXL_SPI0_SCK 3 +#define IMX8DXL_SPI0_SCK_LSIO_GPIO1_IO04 IMX8DXL_SPI0_SCK 4 +#define IMX8DXL_SPI0_SCK_ADMA_LCDIF_D08 IMX8DXL_SPI0_SCK 5 +#define IMX8DXL_SPI0_SDI_ADMA_SPI0_SDI IMX8DXL_SPI0_SDI 0 +#define IMX8DXL_SPI0_SDI_ADMA_SAI0_TXD IMX8DXL_SPI0_SDI 1 +#define IMX8DXL_SPI0_SDI_M40_TPM0_CH0 IMX8DXL_SPI0_SDI 2 +#define IMX8DXL_SPI0_SDI_M40_GPIO0_IO02 IMX8DXL_SPI0_SDI 3 +#define IMX8DXL_SPI0_SDI_LSIO_GPIO1_IO05 IMX8DXL_SPI0_SDI 4 +#define IMX8DXL_SPI0_SDI_ADMA_LCDIF_D09 IMX8DXL_SPI0_SDI 5 +#define IMX8DXL_SPI0_SDO_ADMA_SPI0_SDO IMX8DXL_SPI0_SDO 0 +#define IMX8DXL_SPI0_SDO_ADMA_SAI0_TXFS IMX8DXL_SPI0_SDO 1 +#define IMX8DXL_SPI0_SDO_M40_I2C0_SDA IMX8DXL_SPI0_SDO 2 +#define IMX8DXL_SPI0_SDO_M40_GPIO0_IO01 IMX8DXL_SPI0_SDO 3 +#define IMX8DXL_SPI0_SDO_LSIO_GPIO1_IO06 IMX8DXL_SPI0_SDO 4 +#define IMX8DXL_SPI0_SDO_ADMA_LCDIF_D10 IMX8DXL_SPI0_SDO 5 +#define IMX8DXL_SPI0_CS1_ADMA_SPI0_CS1 IMX8DXL_SPI0_CS1 0 +#define IMX8DXL_SPI0_CS1_ADMA_SAI0_RXC IMX8DXL_SPI0_CS1 1 +#define IMX8DXL_SPI0_CS1_ADMA_SAI1_TXD IMX8DXL_SPI0_CS1 2 +#define IMX8DXL_SPI0_CS1_ADMA_LCD_PWM0_OUT IMX8DXL_SPI0_CS1 3 +#define IMX8DXL_SPI0_CS1_LSIO_GPIO1_IO07 IMX8DXL_SPI0_CS1 4 +#define IMX8DXL_SPI0_CS1_ADMA_LCDIF_D11 IMX8DXL_SPI0_CS1 5 +#define IMX8DXL_SPI0_CS0_ADMA_SPI0_CS0 
IMX8DXL_SPI0_CS0 0 +#define IMX8DXL_SPI0_CS0_ADMA_SAI0_RXD IMX8DXL_SPI0_CS0 1 +#define IMX8DXL_SPI0_CS0_M40_TPM0_CH1 IMX8DXL_SPI0_CS0 2 +#define IMX8DXL_SPI0_CS0_M40_GPIO0_IO03 IMX8DXL_SPI0_CS0 3 +#define IMX8DXL_SPI0_CS0_LSIO_GPIO1_IO08 IMX8DXL_SPI0_CS0 4 +#define IMX8DXL_SPI0_CS0_ADMA_LCDIF_D12 IMX8DXL_SPI0_CS0 5 +#define IMX8DXL_ADC_IN1_ADMA_ADC_IN1 IMX8DXL_ADC_IN1 0 +#define IMX8DXL_ADC_IN1_M40_I2C0_SDA IMX8DXL_ADC_IN1 1 +#define IMX8DXL_ADC_IN1_M40_GPIO0_IO01 IMX8DXL_ADC_IN1 2 +#define IMX8DXL_ADC_IN1_ADMA_I2C0_SDA IMX8DXL_ADC_IN1 3 +#define IMX8DXL_ADC_IN1_LSIO_GPIO1_IO09 IMX8DXL_ADC_IN1 4 +#define IMX8DXL_ADC_IN1_ADMA_LCDIF_D13 IMX8DXL_ADC_IN1 5 +#define IMX8DXL_ADC_IN0_ADMA_ADC_IN0 IMX8DXL_ADC_IN0 0 +#define IMX8DXL_ADC_IN0_M40_I2C0_SCL IMX8DXL_ADC_IN0 1 +#define IMX8DXL_ADC_IN0_M40_GPIO0_IO00 IMX8DXL_ADC_IN0 2 +#define IMX8DXL_ADC_IN0_ADMA_I2C0_SCL IMX8DXL_ADC_IN0 3 +#define IMX8DXL_ADC_IN0_LSIO_GPIO1_IO10 IMX8DXL_ADC_IN0 4 +#define IMX8DXL_ADC_IN0_ADMA_LCDIF_D14 IMX8DXL_ADC_IN0 5 +#define IMX8DXL_ADC_IN3_ADMA_ADC_IN3 IMX8DXL_ADC_IN3 0 +#define IMX8DXL_ADC_IN3_M40_UART0_TX IMX8DXL_ADC_IN3 1 +#define IMX8DXL_ADC_IN3_M40_GPIO0_IO03 IMX8DXL_ADC_IN3 2 +#define IMX8DXL_ADC_IN3_ADMA_ACM_MCLK_OUT0 IMX8DXL_ADC_IN3 3 +#define IMX8DXL_ADC_IN3_LSIO_GPIO1_IO11 IMX8DXL_ADC_IN3 4 +#define IMX8DXL_ADC_IN3_ADMA_LCDIF_D15 IMX8DXL_ADC_IN3 5 +#define IMX8DXL_ADC_IN2_ADMA_ADC_IN2 IMX8DXL_ADC_IN2 0 +#define IMX8DXL_ADC_IN2_M40_UART0_RX IMX8DXL_ADC_IN2 1 +#define IMX8DXL_ADC_IN2_M40_GPIO0_IO02 IMX8DXL_ADC_IN2 2 +#define IMX8DXL_ADC_IN2_ADMA_ACM_MCLK_IN0 IMX8DXL_ADC_IN2 3 +#define IMX8DXL_ADC_IN2_LSIO_GPIO1_IO12 IMX8DXL_ADC_IN2 4 +#define IMX8DXL_ADC_IN2_ADMA_LCDIF_D16 IMX8DXL_ADC_IN2 5 +#define IMX8DXL_ADC_IN5_ADMA_ADC_IN5 IMX8DXL_ADC_IN5 0 +#define IMX8DXL_ADC_IN5_M40_TPM0_CH1 IMX8DXL_ADC_IN5 1 +#define IMX8DXL_ADC_IN5_M40_GPIO0_IO05 IMX8DXL_ADC_IN5 2 +#define IMX8DXL_ADC_IN5_ADMA_LCDIF_LCDBUSY IMX8DXL_ADC_IN5 3 +#define IMX8DXL_ADC_IN5_LSIO_GPIO1_IO13 IMX8DXL_ADC_IN5 4 +#define IMX8DXL_ADC_IN5_ADMA_LCDIF_D17 IMX8DXL_ADC_IN5 5 +#define IMX8DXL_ADC_IN4_ADMA_ADC_IN4 IMX8DXL_ADC_IN4 0 +#define IMX8DXL_ADC_IN4_M40_TPM0_CH0 IMX8DXL_ADC_IN4 1 +#define IMX8DXL_ADC_IN4_M40_GPIO0_IO04 IMX8DXL_ADC_IN4 2 +#define IMX8DXL_ADC_IN4_ADMA_LCDIF_LCDRESET IMX8DXL_ADC_IN4 3 +#define IMX8DXL_ADC_IN4_LSIO_GPIO1_IO14 IMX8DXL_ADC_IN4 4 +#define IMX8DXL_FLEXCAN0_RX_ADMA_FLEXCAN0_RX IMX8DXL_FLEXCAN0_RX 0 +#define IMX8DXL_FLEXCAN0_RX_ADMA_SAI2_RXC IMX8DXL_FLEXCAN0_RX 1 +#define IMX8DXL_FLEXCAN0_RX_ADMA_UART0_RTS_B IMX8DXL_FLEXCAN0_RX 2 +#define IMX8DXL_FLEXCAN0_RX_ADMA_SAI1_TXC IMX8DXL_FLEXCAN0_RX 3 +#define IMX8DXL_FLEXCAN0_RX_LSIO_GPIO1_IO15 IMX8DXL_FLEXCAN0_RX 4 +#define IMX8DXL_FLEXCAN0_RX_LSIO_GPIO6_IO08 IMX8DXL_FLEXCAN0_RX 5 +#define IMX8DXL_FLEXCAN0_TX_ADMA_FLEXCAN0_TX IMX8DXL_FLEXCAN0_TX 0 +#define IMX8DXL_FLEXCAN0_TX_ADMA_SAI2_RXD IMX8DXL_FLEXCAN0_TX 1 +#define IMX8DXL_FLEXCAN0_TX_ADMA_UART0_CTS_B IMX8DXL_FLEXCAN0_TX 2 +#define IMX8DXL_FLEXCAN0_TX_ADMA_SAI1_TXFS IMX8DXL_FLEXCAN0_TX 3 +#define IMX8DXL_FLEXCAN0_TX_LSIO_GPIO1_IO16 IMX8DXL_FLEXCAN0_TX 4 +#define IMX8DXL_FLEXCAN0_TX_LSIO_GPIO6_IO09 IMX8DXL_FLEXCAN0_TX 5 +#define IMX8DXL_FLEXCAN1_RX_ADMA_FLEXCAN1_RX IMX8DXL_FLEXCAN1_RX 0 +#define IMX8DXL_FLEXCAN1_RX_ADMA_SAI2_RXFS IMX8DXL_FLEXCAN1_RX 1 +#define IMX8DXL_FLEXCAN1_RX_ADMA_FTM_CH2 IMX8DXL_FLEXCAN1_RX 2 +#define IMX8DXL_FLEXCAN1_RX_ADMA_SAI1_TXD IMX8DXL_FLEXCAN1_RX 3 +#define IMX8DXL_FLEXCAN1_RX_LSIO_GPIO1_IO17 IMX8DXL_FLEXCAN1_RX 4 +#define IMX8DXL_FLEXCAN1_RX_LSIO_GPIO6_IO10 IMX8DXL_FLEXCAN1_RX 5 +#define 
IMX8DXL_FLEXCAN1_TX_ADMA_FLEXCAN1_TX IMX8DXL_FLEXCAN1_TX 0 +#define IMX8DXL_FLEXCAN1_TX_ADMA_SAI3_RXC IMX8DXL_FLEXCAN1_TX 1 +#define IMX8DXL_FLEXCAN1_TX_ADMA_DMA0_REQ_IN0 IMX8DXL_FLEXCAN1_TX 2 +#define IMX8DXL_FLEXCAN1_TX_ADMA_SAI1_RXD IMX8DXL_FLEXCAN1_TX 3 +#define IMX8DXL_FLEXCAN1_TX_LSIO_GPIO1_IO18 IMX8DXL_FLEXCAN1_TX 4 +#define IMX8DXL_FLEXCAN1_TX_LSIO_GPIO6_IO11 IMX8DXL_FLEXCAN1_TX 5 +#define IMX8DXL_FLEXCAN2_RX_ADMA_FLEXCAN2_RX IMX8DXL_FLEXCAN2_RX 0 +#define IMX8DXL_FLEXCAN2_RX_ADMA_SAI3_RXD IMX8DXL_FLEXCAN2_RX 1 +#define IMX8DXL_FLEXCAN2_RX_ADMA_UART3_RX IMX8DXL_FLEXCAN2_RX 2 +#define IMX8DXL_FLEXCAN2_RX_ADMA_SAI1_RXFS IMX8DXL_FLEXCAN2_RX 3 +#define IMX8DXL_FLEXCAN2_RX_LSIO_GPIO1_IO19 IMX8DXL_FLEXCAN2_RX 4 +#define IMX8DXL_FLEXCAN2_RX_LSIO_GPIO6_IO12 IMX8DXL_FLEXCAN2_RX 5 +#define IMX8DXL_FLEXCAN2_TX_ADMA_FLEXCAN2_TX IMX8DXL_FLEXCAN2_TX 0 +#define IMX8DXL_FLEXCAN2_TX_ADMA_SAI3_RXFS IMX8DXL_FLEXCAN2_TX 1 +#define IMX8DXL_FLEXCAN2_TX_ADMA_UART3_TX IMX8DXL_FLEXCAN2_TX 2 +#define IMX8DXL_FLEXCAN2_TX_ADMA_SAI1_RXC IMX8DXL_FLEXCAN2_TX 3 +#define IMX8DXL_FLEXCAN2_TX_LSIO_GPIO1_IO20 IMX8DXL_FLEXCAN2_TX 4 +#define IMX8DXL_FLEXCAN2_TX_LSIO_GPIO6_IO13 IMX8DXL_FLEXCAN2_TX 5 +#define IMX8DXL_UART0_RX_ADMA_UART0_RX IMX8DXL_UART0_RX 0 +#define IMX8DXL_UART0_RX_ADMA_MQS_R IMX8DXL_UART0_RX 1 +#define IMX8DXL_UART0_RX_ADMA_FLEXCAN0_RX IMX8DXL_UART0_RX 2 +#define IMX8DXL_UART0_RX_SCU_UART0_RX IMX8DXL_UART0_RX 3 +#define IMX8DXL_UART0_RX_LSIO_GPIO1_IO21 IMX8DXL_UART0_RX 4 +#define IMX8DXL_UART0_RX_LSIO_GPIO6_IO14 IMX8DXL_UART0_RX 5 +#define IMX8DXL_UART0_TX_ADMA_UART0_TX IMX8DXL_UART0_TX 0 +#define IMX8DXL_UART0_TX_ADMA_MQS_L IMX8DXL_UART0_TX 1 +#define IMX8DXL_UART0_TX_ADMA_FLEXCAN0_TX IMX8DXL_UART0_TX 2 +#define IMX8DXL_UART0_TX_SCU_UART0_TX IMX8DXL_UART0_TX 3 +#define IMX8DXL_UART0_TX_LSIO_GPIO1_IO22 IMX8DXL_UART0_TX 4 +#define IMX8DXL_UART0_TX_LSIO_GPIO6_IO15 IMX8DXL_UART0_TX 5 +#define IMX8DXL_UART2_TX_ADMA_UART2_TX IMX8DXL_UART2_TX 0 +#define IMX8DXL_UART2_TX_ADMA_FTM_CH1 IMX8DXL_UART2_TX 1 +#define IMX8DXL_UART2_TX_ADMA_FLEXCAN1_TX IMX8DXL_UART2_TX 2 +#define IMX8DXL_UART2_TX_LSIO_GPIO1_IO23 IMX8DXL_UART2_TX 4 +#define IMX8DXL_UART2_TX_LSIO_GPIO6_IO16 IMX8DXL_UART2_TX 5 +#define IMX8DXL_UART2_RX_ADMA_UART2_RX IMX8DXL_UART2_RX 0 +#define IMX8DXL_UART2_RX_ADMA_FTM_CH0 IMX8DXL_UART2_RX 1 +#define IMX8DXL_UART2_RX_ADMA_FLEXCAN1_RX IMX8DXL_UART2_RX 2 +#define IMX8DXL_UART2_RX_LSIO_GPIO1_IO24 IMX8DXL_UART2_RX 4 +#define IMX8DXL_UART2_RX_LSIO_GPIO6_IO17 IMX8DXL_UART2_RX 5 +#define IMX8DXL_JTAG_TRST_B_SCU_JTAG_TRST_B IMX8DXL_JTAG_TRST_B 0 +#define IMX8DXL_JTAG_TRST_B_SCU_WDOG0_WDOG_OUT IMX8DXL_JTAG_TRST_B 1 +#define IMX8DXL_PMIC_I2C_SCL_SCU_PMIC_I2C_SCL IMX8DXL_PMIC_I2C_SCL 0 +#define IMX8DXL_PMIC_I2C_SCL_SCU_GPIO0_IOXX_PMIC_A35_ON IMX8DXL_PMIC_I2C_SCL 1 +#define IMX8DXL_PMIC_I2C_SCL_LSIO_GPIO2_IO01 IMX8DXL_PMIC_I2C_SCL 4 +#define IMX8DXL_PMIC_I2C_SDA_SCU_PMIC_I2C_SDA IMX8DXL_PMIC_I2C_SDA 0 +#define IMX8DXL_PMIC_I2C_SDA_SCU_GPIO0_IOXX_PMIC_GPU_ON IMX8DXL_PMIC_I2C_SDA 1 +#define IMX8DXL_PMIC_I2C_SDA_LSIO_GPIO2_IO02 IMX8DXL_PMIC_I2C_SDA 4 +#define IMX8DXL_PMIC_INT_B_SCU_DSC_PMIC_INT_B IMX8DXL_PMIC_INT_B 0 +#define IMX8DXL_SCU_GPIO0_00_SCU_GPIO0_IO00 IMX8DXL_SCU_GPIO0_00 0 +#define IMX8DXL_SCU_GPIO0_00_SCU_UART0_RX IMX8DXL_SCU_GPIO0_00 1 +#define IMX8DXL_SCU_GPIO0_00_M40_UART0_RX IMX8DXL_SCU_GPIO0_00 2 +#define IMX8DXL_SCU_GPIO0_00_ADMA_UART3_RX IMX8DXL_SCU_GPIO0_00 3 +#define IMX8DXL_SCU_GPIO0_00_LSIO_GPIO2_IO03 IMX8DXL_SCU_GPIO0_00 4 +#define IMX8DXL_SCU_GPIO0_01_SCU_GPIO0_IO01 IMX8DXL_SCU_GPIO0_01 0 
+#define IMX8DXL_SCU_GPIO0_01_SCU_UART0_TX IMX8DXL_SCU_GPIO0_01 1 +#define IMX8DXL_SCU_GPIO0_01_M40_UART0_TX IMX8DXL_SCU_GPIO0_01 2 +#define IMX8DXL_SCU_GPIO0_01_ADMA_UART3_TX IMX8DXL_SCU_GPIO0_01 3 +#define IMX8DXL_SCU_GPIO0_01_SCU_WDOG0_WDOG_OUT IMX8DXL_SCU_GPIO0_01 4 +#define IMX8DXL_SCU_PMIC_STANDBY_SCU_DSC_PMIC_STANDBY IMX8DXL_SCU_PMIC_STANDBY 0 +#define IMX8DXL_SCU_BOOT_MODE1_SCU_DSC_BOOT_MODE1 IMX8DXL_SCU_BOOT_MODE1 0 +#define IMX8DXL_SCU_BOOT_MODE0_SCU_DSC_BOOT_MODE0 IMX8DXL_SCU_BOOT_MODE0 0 +#define IMX8DXL_SCU_BOOT_MODE2_SCU_DSC_BOOT_MODE2 IMX8DXL_SCU_BOOT_MODE2 0 +#define IMX8DXL_SCU_BOOT_MODE2_SCU_DSC_RTC_CLOCK_OUTPUT_32K IMX8DXL_SCU_BOOT_MODE2 1 +#define IMX8DXL_SNVS_TAMPER_OUT1_LSIO_GPIO2_IO05_IN IMX8DXL_SNVS_TAMPER_OUT1 4 +#define IMX8DXL_SNVS_TAMPER_OUT1_LSIO_GPIO6_IO19_IN IMX8DXL_SNVS_TAMPER_OUT1 5 +#define IMX8DXL_SNVS_TAMPER_OUT2_LSIO_GPIO2_IO06_IN IMX8DXL_SNVS_TAMPER_OUT2 4 +#define IMX8DXL_SNVS_TAMPER_OUT2_LSIO_GPIO6_IO20_IN IMX8DXL_SNVS_TAMPER_OUT2 5 +#define IMX8DXL_SNVS_TAMPER_OUT3_ADMA_SAI2_RXC IMX8DXL_SNVS_TAMPER_OUT3 2 +#define IMX8DXL_SNVS_TAMPER_OUT3_LSIO_GPIO2_IO07_IN IMX8DXL_SNVS_TAMPER_OUT3 4 +#define IMX8DXL_SNVS_TAMPER_OUT3_LSIO_GPIO6_IO21_IN IMX8DXL_SNVS_TAMPER_OUT3 5 +#define IMX8DXL_SNVS_TAMPER_OUT4_ADMA_SAI2_RXD IMX8DXL_SNVS_TAMPER_OUT4 2 +#define IMX8DXL_SNVS_TAMPER_OUT4_LSIO_GPIO2_IO08_IN IMX8DXL_SNVS_TAMPER_OUT4 4 +#define IMX8DXL_SNVS_TAMPER_OUT4_LSIO_GPIO6_IO22_IN IMX8DXL_SNVS_TAMPER_OUT4 5 +#define IMX8DXL_SNVS_TAMPER_IN0_ADMA_SAI2_RXFS IMX8DXL_SNVS_TAMPER_IN0 2 +#define IMX8DXL_SNVS_TAMPER_IN0_LSIO_GPIO2_IO09_IN IMX8DXL_SNVS_TAMPER_IN0 4 +#define IMX8DXL_SNVS_TAMPER_IN0_LSIO_GPIO6_IO23_IN IMX8DXL_SNVS_TAMPER_IN0 5 +#define IMX8DXL_SNVS_TAMPER_IN1_ADMA_SAI3_RXC IMX8DXL_SNVS_TAMPER_IN1 2 +#define IMX8DXL_SNVS_TAMPER_IN1_LSIO_GPIO2_IO10_IN IMX8DXL_SNVS_TAMPER_IN1 4 +#define IMX8DXL_SNVS_TAMPER_IN1_LSIO_GPIO6_IO24_IN IMX8DXL_SNVS_TAMPER_IN1 5 +#define IMX8DXL_SNVS_TAMPER_IN2_ADMA_SAI3_RXD IMX8DXL_SNVS_TAMPER_IN2 2 +#define IMX8DXL_SNVS_TAMPER_IN2_LSIO_GPIO2_IO11_IN IMX8DXL_SNVS_TAMPER_IN2 4 +#define IMX8DXL_SNVS_TAMPER_IN2_LSIO_GPIO6_IO25_IN IMX8DXL_SNVS_TAMPER_IN2 5 +#define IMX8DXL_SNVS_TAMPER_IN3_ADMA_SAI3_RXFS IMX8DXL_SNVS_TAMPER_IN3 2 +#define IMX8DXL_SNVS_TAMPER_IN3_LSIO_GPIO2_IO12_IN IMX8DXL_SNVS_TAMPER_IN3 4 +#define IMX8DXL_SNVS_TAMPER_IN3_LSIO_GPIO6_IO26_IN IMX8DXL_SNVS_TAMPER_IN3 5 +#define IMX8DXL_SPI1_SCK_ADMA_I2C2_SDA IMX8DXL_SPI1_SCK 2 +#define IMX8DXL_SPI1_SCK_ADMA_SPI1_SCK IMX8DXL_SPI1_SCK 3 +#define IMX8DXL_SPI1_SCK_LSIO_GPIO3_IO00 IMX8DXL_SPI1_SCK 4 +#define IMX8DXL_SPI1_SDO_ADMA_I2C2_SCL IMX8DXL_SPI1_SDO 2 +#define IMX8DXL_SPI1_SDO_ADMA_SPI1_SDO IMX8DXL_SPI1_SDO 3 +#define IMX8DXL_SPI1_SDO_LSIO_GPIO3_IO01 IMX8DXL_SPI1_SDO 4 +#define IMX8DXL_SPI1_SDI_ADMA_I2C3_SCL IMX8DXL_SPI1_SDI 2 +#define IMX8DXL_SPI1_SDI_ADMA_SPI1_SDI IMX8DXL_SPI1_SDI 3 +#define IMX8DXL_SPI1_SDI_LSIO_GPIO3_IO02 IMX8DXL_SPI1_SDI 4 +#define IMX8DXL_SPI1_CS0_ADMA_I2C3_SDA IMX8DXL_SPI1_CS0 2 +#define IMX8DXL_SPI1_CS0_ADMA_SPI1_CS0 IMX8DXL_SPI1_CS0 3 +#define IMX8DXL_SPI1_CS0_LSIO_GPIO3_IO03 IMX8DXL_SPI1_CS0 4 +#define IMX8DXL_QSPI0A_DATA1_LSIO_QSPI0A_DATA1 IMX8DXL_QSPI0A_DATA1 0 +#define IMX8DXL_QSPI0A_DATA1_LSIO_GPIO3_IO10 IMX8DXL_QSPI0A_DATA1 4 +#define IMX8DXL_QSPI0A_DATA0_LSIO_QSPI0A_DATA0 IMX8DXL_QSPI0A_DATA0 0 +#define IMX8DXL_QSPI0A_DATA0_LSIO_GPIO3_IO09 IMX8DXL_QSPI0A_DATA0 4 +#define IMX8DXL_QSPI0A_DATA3_LSIO_QSPI0A_DATA3 IMX8DXL_QSPI0A_DATA3 0 +#define IMX8DXL_QSPI0A_DATA3_LSIO_GPIO3_IO12 IMX8DXL_QSPI0A_DATA3 4 +#define 
IMX8DXL_QSPI0A_DATA2_LSIO_QSPI0A_DATA2 IMX8DXL_QSPI0A_DATA2 0 +#define IMX8DXL_QSPI0A_DATA2_LSIO_GPIO3_IO11 IMX8DXL_QSPI0A_DATA2 4 +#define IMX8DXL_QSPI0A_SS0_B_LSIO_QSPI0A_SS0_B IMX8DXL_QSPI0A_SS0_B 0 +#define IMX8DXL_QSPI0A_SS0_B_LSIO_GPIO3_IO14 IMX8DXL_QSPI0A_SS0_B 4 +#define IMX8DXL_QSPI0A_DQS_LSIO_QSPI0A_DQS IMX8DXL_QSPI0A_DQS 0 +#define IMX8DXL_QSPI0A_DQS_LSIO_GPIO3_IO13 IMX8DXL_QSPI0A_DQS 4 +#define IMX8DXL_QSPI0A_SCLK_LSIO_QSPI0A_SCLK IMX8DXL_QSPI0A_SCLK 0 +#define IMX8DXL_QSPI0A_SCLK_LSIO_GPIO3_IO16 IMX8DXL_QSPI0A_SCLK 4 +#define IMX8DXL_QSPI0B_SCLK_LSIO_QSPI0B_SCLK IMX8DXL_QSPI0B_SCLK 0 +#define IMX8DXL_QSPI0B_SCLK_LSIO_GPIO3_IO17 IMX8DXL_QSPI0B_SCLK 4 +#define IMX8DXL_QSPI0B_DQS_LSIO_QSPI0B_DQS IMX8DXL_QSPI0B_DQS 0 +#define IMX8DXL_QSPI0B_DQS_LSIO_GPIO3_IO22 IMX8DXL_QSPI0B_DQS 4 +#define IMX8DXL_QSPI0B_DATA1_LSIO_QSPI0B_DATA1 IMX8DXL_QSPI0B_DATA1 0 +#define IMX8DXL_QSPI0B_DATA1_LSIO_GPIO3_IO19 IMX8DXL_QSPI0B_DATA1 4 +#define IMX8DXL_QSPI0B_DATA0_LSIO_QSPI0B_DATA0 IMX8DXL_QSPI0B_DATA0 0 +#define IMX8DXL_QSPI0B_DATA0_LSIO_GPIO3_IO18 IMX8DXL_QSPI0B_DATA0 4 +#define IMX8DXL_QSPI0B_DATA3_LSIO_QSPI0B_DATA3 IMX8DXL_QSPI0B_DATA3 0 +#define IMX8DXL_QSPI0B_DATA3_LSIO_GPIO3_IO21 IMX8DXL_QSPI0B_DATA3 4 +#define IMX8DXL_QSPI0B_DATA2_LSIO_QSPI0B_DATA2 IMX8DXL_QSPI0B_DATA2 0 +#define IMX8DXL_QSPI0B_DATA2_LSIO_GPIO3_IO20 IMX8DXL_QSPI0B_DATA2 4 +#define IMX8DXL_QSPI0B_SS0_B_LSIO_QSPI0B_SS0_B IMX8DXL_QSPI0B_SS0_B 0 +#define IMX8DXL_QSPI0B_SS0_B_LSIO_GPIO3_IO23 IMX8DXL_QSPI0B_SS0_B 4 +#define IMX8DXL_QSPI0B_SS0_B_LSIO_QSPI0A_SS1_B IMX8DXL_QSPI0B_SS0_B 5 + +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_PCIESEP 0 +#define IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO_PAD IMX8DXL_COMP_CTL_GPIO_3V3_USB3IO 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_SD1FIX0 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_VSELSEP 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB0 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_ENET_ENETB1 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOCT 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHB 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHK 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHT 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIOLH 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_GPIORHD 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0A 0 +#define IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B_PAD IMX8DXL_COMP_CTL_GPIO_1V8_3V3_QSPI0B 0 + +#endif diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h index 6d6bac1c26d7..5f291045e8fd 100644 --- a/include/dt-bindings/pinctrl/rockchip.h +++ b/include/dt-bindings/pinctrl/rockchip.h @@ -9,13 +9,6 @@ #ifndef __DT_BINDINGS_ROCKCHIP_PINCTRL_H__ #define __DT_BINDINGS_ROCKCHIP_PINCTRL_H__ -#define RK_GPIO0 0 -#define RK_GPIO1 1 -#define RK_GPIO2 2 -#define RK_GPIO3 3 -#define RK_GPIO4 4 -#define RK_GPIO6 6 - #define RK_PA0 0 #define RK_PA1 1 #define RK_PA2 2 @@ -50,9 +43,5 @@ #define RK_PD7 31 #define RK_FUNC_GPIO 0 -#define RK_FUNC_1 1 /* deprecated */ -#define RK_FUNC_2 2 /* deprecated */ -#define RK_FUNC_3 3 /* deprecated */ -#define RK_FUNC_4 4 /* 
deprecated */
#endif
diff --git a/include/dt-bindings/power/marvell,mmp2.h b/include/dt-bindings/power/marvell,mmp2.h
new file mode 100644
index 000000000000..c53d2b3e1057
--- /dev/null
+++ b/include/dt-bindings/power/marvell,mmp2.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DTS_MARVELL_MMP2_POWER_H
+#define __DTS_MARVELL_MMP2_POWER_H
+
+#define MMP2_POWER_DOMAIN_GPU 0
+#define MMP2_POWER_DOMAIN_AUDIO 1
+#define MMP3_POWER_DOMAIN_CAMERA 2
+
+#define MMP2_NR_POWER_DOMAINS 3
+
+#endif
diff --git a/include/dt-bindings/power/meson-gxbb-power.h b/include/dt-bindings/power/meson-gxbb-power.h
new file mode 100644
index 000000000000..1262dac696c0
--- /dev/null
+++ b/include/dt-bindings/power/meson-gxbb-power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_GXBB_POWER_H
+#define _DT_BINDINGS_MESON_GXBB_POWER_H
+
+#define PWRC_GXBB_VPU_ID 0
+#define PWRC_GXBB_ETHERNET_MEM_ID 1
+
+#endif
diff --git a/include/dt-bindings/power/meson8-power.h b/include/dt-bindings/power/meson8-power.h
new file mode 100644
index 000000000000..dd8b2ddb82a7
--- /dev/null
+++ b/include/dt-bindings/power/meson8-power.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON8_POWER_H
+#define _DT_BINDINGS_MESON8_POWER_H
+
+#define PWRC_MESON8_VPU_ID 0
+#define PWRC_MESON8_ETHERNET_MEM_ID 1
+#define PWRC_MESON8_AUDIO_DSP_MEM_ID 2
+
+#endif /* _DT_BINDINGS_MESON8_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 3f74096d5a7c..dc146e44228b 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -28,6 +28,18 @@
#define SM8150_MMCX 9
#define SM8150_MMCX_AO 10
+/* SM8250 Power Domain Indexes */
+#define SM8250_CX 0
+#define SM8250_CX_AO 1
+#define SM8250_EBI 2
+#define SM8250_GFX 3
+#define SM8250_LCX 4
+#define SM8250_LMX 5
+#define SM8250_MMCX 6
+#define SM8250_MMCX_AO 7
+#define SM8250_MX 8
+#define SM8250_MX_AO 9
+
/* SC7180 Power Domain Indexes */
#define SC7180_CX 0
#define SC7180_CX_AO 1
diff --git a/include/dt-bindings/power/r8a7742-sysc.h b/include/dt-bindings/power/r8a7742-sysc.h
new file mode 100644
index 000000000000..1b1bd3cf95db
--- /dev/null
+++ b/include/dt-bindings/power/r8a7742-sysc.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A7742_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A7742_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A7742_PD_CA15_CPU0 0
+#define R8A7742_PD_CA15_CPU1 1
+#define R8A7742_PD_CA15_CPU2 2
+#define R8A7742_PD_CA15_CPU3 3
+#define R8A7742_PD_CA7_CPU0 5
+#define R8A7742_PD_CA7_CPU1 6
+#define R8A7742_PD_CA7_CPU2 7
+#define R8A7742_PD_CA7_CPU3 8
+#define R8A7742_PD_CA15_SCU 12
+#define R8A7742_PD_RGX 20
+#define R8A7742_PD_CA7_SCU 21
+
+/* Always-on power area */
+#define R8A7742_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A7742_SYSC_H__ */
diff --git a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
index ea5058618863..883bfd3bcbad 100644
--- a/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-gxbb-reset.h
@@ -69,7 +69,7 @@
#define RESET_SYS_CPU_L2 58
#define RESET_SYS_CPU_P 59
#define RESET_SYS_CPU_MBIST 60
-/* 61 */
+#define RESET_ACODEC 61
/* 62 */
/* 63 */
/* RESET2 */
diff --git a/include/dt-bindings/reset/bt1-ccu.h b/include/dt-bindings/reset/bt1-ccu.h
new file mode 100644
index 000000000000..3578e83026bc
--- /dev/null
+++ b/include/dt-bindings/reset/bt1-ccu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
+ *
+ * Baikal-T1 CCU reset indices
+ */
+#ifndef __DT_BINDINGS_RESET_BT1_CCU_H
+#define __DT_BINDINGS_RESET_BT1_CCU_H
+
+#define CCU_AXI_MAIN_RST 0
+#define CCU_AXI_DDR_RST 1
+#define CCU_AXI_SATA_RST 2
+#define CCU_AXI_GMAC0_RST 3
+#define CCU_AXI_GMAC1_RST 4
+#define CCU_AXI_XGMAC_RST 5
+#define CCU_AXI_PCIE_M_RST 6
+#define CCU_AXI_PCIE_S_RST 7
+#define CCU_AXI_USB_RST 8
+#define CCU_AXI_HWA_RST 9
+#define CCU_AXI_SRAM_RST 10
+
+#define CCU_SYS_SATA_REF_RST 0
+#define CCU_SYS_APB_RST 1
+
+#endif /* __DT_BINDINGS_RESET_BT1_CCU_H */
diff --git a/include/dt-bindings/reset/imx8mp-reset.h b/include/dt-bindings/reset/imx8mp-reset.h
new file mode 100644
index 000000000000..2e8c9104b666
--- /dev/null
+++ b/include/dt-bindings/reset/imx8mp-reset.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 NXP
+ */
+
+#ifndef DT_BINDING_RESET_IMX8MP_H
+#define DT_BINDING_RESET_IMX8MP_H
+
+#define IMX8MP_RESET_A53_CORE_POR_RESET0 0
+#define IMX8MP_RESET_A53_CORE_POR_RESET1 1
+#define IMX8MP_RESET_A53_CORE_POR_RESET2 2
+#define IMX8MP_RESET_A53_CORE_POR_RESET3 3
+#define IMX8MP_RESET_A53_CORE_RESET0 4
+#define IMX8MP_RESET_A53_CORE_RESET1 5
+#define IMX8MP_RESET_A53_CORE_RESET2 6
+#define IMX8MP_RESET_A53_CORE_RESET3 7
+#define IMX8MP_RESET_A53_DBG_RESET0 8
+#define IMX8MP_RESET_A53_DBG_RESET1 9
+#define IMX8MP_RESET_A53_DBG_RESET2 10
+#define IMX8MP_RESET_A53_DBG_RESET3 11
+#define IMX8MP_RESET_A53_ETM_RESET0 12
+#define IMX8MP_RESET_A53_ETM_RESET1 13
+#define IMX8MP_RESET_A53_ETM_RESET2 14
+#define IMX8MP_RESET_A53_ETM_RESET3 15
+#define IMX8MP_RESET_A53_SOC_DBG_RESET 16
+#define IMX8MP_RESET_A53_L2RESET 17
+#define IMX8MP_RESET_SW_NON_SCLR_M7C_RST 18
+#define IMX8MP_RESET_OTG1_PHY_RESET 19
+#define IMX8MP_RESET_OTG2_PHY_RESET 20
+#define IMX8MP_RESET_SUPERMIX_RESET 21
+#define IMX8MP_RESET_AUDIOMIX_RESET 22
+#define IMX8MP_RESET_MLMIX_RESET 23
+#define IMX8MP_RESET_PCIEPHY 24
+#define IMX8MP_RESET_PCIEPHY_PERST 25
+#define IMX8MP_RESET_PCIE_CTRL_APPS_EN 26
+#define IMX8MP_RESET_PCIE_CTRL_APPS_TURNOFF 27
+#define IMX8MP_RESET_HDMI_PHY_APB_RESET 28
+#define IMX8MP_RESET_MEDIA_RESET 29
+#define IMX8MP_RESET_GPU2D_RESET 30
+#define IMX8MP_RESET_GPU3D_RESET 31
+#define IMX8MP_RESET_GPU_RESET 32
+#define IMX8MP_RESET_VPU_RESET 33
+#define
IMX8MP_RESET_VPU_G1_RESET 34 +#define IMX8MP_RESET_VPU_G2_RESET 35 +#define IMX8MP_RESET_VPUVC8KE_RESET 36 +#define IMX8MP_RESET_NOC_RESET 37 + +#define IMX8MP_RESET_NUM 38 + +#endif diff --git a/include/dt-bindings/reset/imx8mq-reset.h b/include/dt-bindings/reset/imx8mq-reset.h index 9a301082d361..a5b570737582 100644 --- a/include/dt-bindings/reset/imx8mq-reset.h +++ b/include/dt-bindings/reset/imx8mq-reset.h @@ -28,36 +28,36 @@ #define IMX8MQ_RESET_A53_L2RESET 17 #define IMX8MQ_RESET_SW_NON_SCLR_M4C_RST 18 #define IMX8MQ_RESET_OTG1_PHY_RESET 19 -#define IMX8MQ_RESET_OTG2_PHY_RESET 20 -#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 -#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 -#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23 -#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24 -#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25 -#define IMX8MQ_RESET_PCIEPHY 26 -#define IMX8MQ_RESET_PCIEPHY_PERST 27 -#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 -#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 -#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_OTG2_PHY_RESET 20 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_DSI_RESET_BYTE_N 21 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_DSI_RESET_N 22 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_DSI_DPI_RESET_N 23 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_DSI_ESC_RESET_N 24 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_DSI_PCLK_RESET_N 25 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIEPHY 26 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIEPHY_PERST 27 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIE_CTRL_APPS_EN 28 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIE_CTRL_APPS_TURNOFF 29 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_HDMI_PHY_APB_RESET 30 /* i.MX8MM/i.MX8MN does NOT support */ #define IMX8MQ_RESET_DISP_RESET 31 #define IMX8MQ_RESET_GPU_RESET 32 -#define IMX8MQ_RESET_VPU_RESET 33 -#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_DDRC1_PRST 44 -#define IMX8MQ_RESET_DDRC1_CORE_RESET 45 -#define IMX8MQ_RESET_DDRC1_PHY_RESET 46 -#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM does NOT support */ -#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM does NOT support */ +#define IMX8MQ_RESET_VPU_RESET 33 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIEPHY2 34 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIEPHY2_PERST 35 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_EN 36 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_PCIE2_CTRL_APPS_TURNOFF 37 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI1_CORE_RESET 38 /* i.MX8MM/i.MX8MN does NOT support */ 
+#define IMX8MQ_RESET_MIPI_CSI1_PHY_REF_RESET 39 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI1_ESC_RESET 40 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_CORE_RESET 41 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_PHY_REF_RESET 42 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_MIPI_CSI2_ESC_RESET 43 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_DDRC1_PRST 44 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_DDRC1_CORE_RESET 45 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_DDRC1_PHY_RESET 46 /* i.MX8MN does NOT support */ +#define IMX8MQ_RESET_DDRC2_PRST 47 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_DDRC2_CORE_RESET 48 /* i.MX8MM/i.MX8MN does NOT support */ +#define IMX8MQ_RESET_DDRC2_PHY_RESET 49 /* i.MX8MM/i.MX8MN does NOT support */ #define IMX8MQ_RESET_NUM 50 diff --git a/include/dt-bindings/reset/qcom,gcc-msm8939.h b/include/dt-bindings/reset/qcom,gcc-msm8939.h new file mode 100644 index 000000000000..fa41ffeae7a2 --- /dev/null +++ b/include/dt-bindings/reset/qcom,gcc-msm8939.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2020 Linaro Limited + */ + +#ifndef _DT_BINDINGS_RESET_MSM_GCC_8939_H +#define _DT_BINDINGS_RESET_MSM_GCC_8939_H + +#define GCC_BLSP1_BCR 0 +#define GCC_BLSP1_QUP1_BCR 1 +#define GCC_BLSP1_UART1_BCR 2 +#define GCC_BLSP1_QUP2_BCR 3 +#define GCC_BLSP1_UART2_BCR 4 +#define GCC_BLSP1_QUP3_BCR 5 +#define GCC_BLSP1_QUP4_BCR 6 +#define GCC_BLSP1_QUP5_BCR 7 +#define GCC_BLSP1_QUP6_BCR 8 +#define GCC_IMEM_BCR 9 +#define GCC_SMMU_BCR 10 +#define GCC_APSS_TCU_BCR 11 +#define GCC_SMMU_XPU_BCR 12 +#define GCC_PCNOC_TBU_BCR 13 +#define GCC_PRNG_BCR 14 +#define GCC_BOOT_ROM_BCR 15 +#define GCC_CRYPTO_BCR 16 +#define GCC_SEC_CTRL_BCR 17 +#define GCC_AUDIO_CORE_BCR 18 +#define GCC_ULT_AUDIO_BCR 19 +#define GCC_DEHR_BCR 20 +#define GCC_SYSTEM_NOC_BCR 21 +#define GCC_PCNOC_BCR 22 +#define GCC_TCSR_BCR 23 +#define GCC_QDSS_BCR 24 +#define GCC_DCD_BCR 25 +#define GCC_MSG_RAM_BCR 26 +#define GCC_MPM_BCR 27 +#define GCC_SPMI_BCR 28 +#define GCC_SPDM_BCR 29 +#define GCC_MM_SPDM_BCR 30 +#define GCC_BIMC_BCR 31 +#define GCC_RBCPR_BCR 32 +#define GCC_TLMM_BCR 33 +#define GCC_USB_HS_BCR 34 +#define GCC_USB2A_PHY_BCR 35 +#define GCC_SDCC1_BCR 36 +#define GCC_SDCC2_BCR 37 +#define GCC_PDM_BCR 38 +#define GCC_SNOC_BUS_TIMEOUT0_BCR 39 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 40 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 41 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 42 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 43 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 44 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 45 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 46 +#define GCC_PCNOC_BUS_TIMEOUT7_BCR 47 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 48 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 49 +#define GCC_MMSS_BCR 50 +#define GCC_VENUS0_BCR 51 +#define GCC_MDSS_BCR 52 +#define GCC_CAMSS_PHY0_BCR 53 +#define GCC_CAMSS_CSI0_BCR 54 +#define GCC_CAMSS_CSI0PHY_BCR 55 +#define GCC_CAMSS_CSI0RDI_BCR 56 +#define GCC_CAMSS_CSI0PIX_BCR 57 +#define GCC_CAMSS_PHY1_BCR 58 +#define GCC_CAMSS_CSI1_BCR 59 +#define GCC_CAMSS_CSI1PHY_BCR 60 +#define GCC_CAMSS_CSI1RDI_BCR 61 +#define GCC_CAMSS_CSI1PIX_BCR 62 +#define GCC_CAMSS_ISPIF_BCR 63 +#define GCC_CAMSS_CCI_BCR 64 +#define GCC_CAMSS_MCLK0_BCR 65 +#define GCC_CAMSS_MCLK1_BCR 66 +#define GCC_CAMSS_GP0_BCR 67 +#define GCC_CAMSS_GP1_BCR 68 +#define GCC_CAMSS_TOP_BCR 69 +#define GCC_CAMSS_MICRO_BCR 70 +#define GCC_CAMSS_JPEG_BCR 71 +#define GCC_CAMSS_VFE_BCR 72 +#define 
GCC_CAMSS_CSI_VFE0_BCR 73 +#define GCC_OXILI_BCR 74 +#define GCC_GMEM_BCR 75 +#define GCC_CAMSS_AHB_BCR 76 +#define GCC_MDP_TBU_BCR 77 +#define GCC_GFX_TBU_BCR 78 +#define GCC_GFX_TCU_BCR 79 +#define GCC_MSS_TBU_AXI_BCR 80 +#define GCC_MSS_TBU_GSS_AXI_BCR 81 +#define GCC_MSS_TBU_Q6_AXI_BCR 82 +#define GCC_GTCU_AHB_BCR 83 +#define GCC_SMMU_CFG_BCR 84 +#define GCC_VFE_TBU_BCR 85 +#define GCC_VENUS_TBU_BCR 86 +#define GCC_JPEG_TBU_BCR 87 +#define GCC_PRONTO_TBU_BCR 88 +#define GCC_SMMU_CATS_BCR 89 +#define GCC_BLSP1_UART3_BCR 90 +#define GCC_CAMSS_CSI2_BCR 91 +#define GCC_CAMSS_CSI2PHY_BCR 92 +#define GCC_CAMSS_CSI2RDI_BCR 93 +#define GCC_CAMSS_CSI2PIX_BCR 94 +#define GCC_USB_FS_BCR 95 +#define GCC_BLSP1_QUP4_SPI_APPS_CBCR 96 +#define GCC_CAMSS_MCLK2_BCR 97 +#define GCC_CPP_TBU_BCR 98 +#define GCC_MDP_RT_TBU_BCR 99 + +#endif diff --git a/include/dt-bindings/reset/realtek,rtd1195.h b/include/dt-bindings/reset/realtek,rtd1195.h new file mode 100644 index 000000000000..27902abf935b --- /dev/null +++ b/include/dt-bindings/reset/realtek,rtd1195.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */ +/* + * Realtek RTD1195 reset controllers + * + * Copyright (c) 2017 Andreas Färber + */ +#ifndef DT_BINDINGS_RESET_RTD1195_H +#define DT_BINDINGS_RESET_RTD1195_H + +/* soft reset 1 */ +#define RTD1195_RSTN_MISC 0 +#define RTD1195_RSTN_RNG 1 +#define RTD1195_RSTN_USB3_POW 2 +#define RTD1195_RSTN_GSPI 3 +#define RTD1195_RSTN_USB3_P0_MDIO 4 +#define RTD1195_RSTN_VE_H265 5 +#define RTD1195_RSTN_USB 6 +#define RTD1195_RSTN_USB_PHY0 8 +#define RTD1195_RSTN_USB_PHY1 9 +#define RTD1195_RSTN_HDMIRX 11 +#define RTD1195_RSTN_HDMI 12 +#define RTD1195_RSTN_ETN 14 +#define RTD1195_RSTN_AIO 15 +#define RTD1195_RSTN_GPU 16 +#define RTD1195_RSTN_VE_H264 17 +#define RTD1195_RSTN_VE_JPEG 18 +#define RTD1195_RSTN_TVE 19 +#define RTD1195_RSTN_VO 20 +#define RTD1195_RSTN_LVDS 21 +#define RTD1195_RSTN_SE 22 +#define RTD1195_RSTN_DCU 23 +#define RTD1195_RSTN_DC_PHY 24 +#define RTD1195_RSTN_CP 25 +#define RTD1195_RSTN_MD 26 +#define RTD1195_RSTN_TP 27 +#define RTD1195_RSTN_AE 28 +#define RTD1195_RSTN_NF 29 +#define RTD1195_RSTN_MIPI 30 + +/* soft reset 2 */ +#define RTD1195_RSTN_ACPU 0 +#define RTD1195_RSTN_VCPU 1 +#define RTD1195_RSTN_PCR 9 +#define RTD1195_RSTN_CR 10 +#define RTD1195_RSTN_EMMC 11 +#define RTD1195_RSTN_SDIO 12 +#define RTD1195_RSTN_I2C_5 18 +#define RTD1195_RSTN_RTC 20 +#define RTD1195_RSTN_I2C_4 23 +#define RTD1195_RSTN_I2C_3 24 +#define RTD1195_RSTN_I2C_2 25 +#define RTD1195_RSTN_I2C_1 26 +#define RTD1195_RSTN_UR1 28 + +/* soft reset 3 */ +#define RTD1195_RSTN_SB2 0 + +/* iso soft reset */ +#define RTD1195_ISO_RSTN_VFD 0 +#define RTD1195_ISO_RSTN_IR 1 +#define RTD1195_ISO_RSTN_CEC0 2 +#define RTD1195_ISO_RSTN_CEC1 3 +#define RTD1195_ISO_RSTN_DP 4 +#define RTD1195_ISO_RSTN_CBUSTX 5 +#define RTD1195_ISO_RSTN_CBUSRX 6 +#define RTD1195_ISO_RSTN_EFUSE 7 +#define RTD1195_ISO_RSTN_UR0 8 +#define RTD1195_ISO_RSTN_GMAC 9 +#define RTD1195_ISO_RSTN_GPHY 10 +#define RTD1195_ISO_RSTN_I2C_0 11 +#define RTD1195_ISO_RSTN_I2C_6 12 +#define RTD1195_ISO_RSTN_CBUS 13 + +#endif diff --git a/include/dt-bindings/reset/realtek,rtd1295.h b/include/dt-bindings/reset/realtek,rtd1295.h index 2c0cb6afe816..dd89e4c80264 100644 --- a/include/dt-bindings/reset/realtek,rtd1295.h +++ b/include/dt-bindings/reset/realtek,rtd1295.h @@ -75,6 +75,9 @@ #define RTD1295_RSTN_CBUS_TX 30 #define RTD1295_RSTN_SDS_PHY 31 +/* soft reset 3 */ +#define RTD1295_RSTN_SB2 0 + /* soft reset 4 */ #define RTD1295_RSTN_DCPHY_CRT 0 
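The keys/*.h hunks just below convert zero-length arrays (ticket[0], data[0]) to C99 flexible array members. A minimal sketch of how such a trailing array is sized and allocated; struct and function names here are illustrative, not from the patch:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_payload {
	unsigned short datalen;		/* length of data[] */
	char data[];			/* flexible array member, formerly data[0] */
};

static struct example_payload *example_alloc(unsigned short len)
{
	/* struct_size() safely computes sizeof(header) + len bytes of data[] */
	struct example_payload *p = kmalloc(struct_size(p, data, len), GFP_KERNEL);

	if (p)
		p->datalen = len;
	return p;
}

The key payload structures changed below follow the same pattern, keeping the declared length field in sync with the allocation.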
#define RTD1295_RSTN_DCPHY_ALERT_RX 1 diff --git a/include/keys/big_key-type.h b/include/keys/big_key-type.h index 3fee04f81439..988d90d77f53 100644 --- a/include/keys/big_key-type.h +++ b/include/keys/big_key-type.h @@ -18,5 +18,6 @@ extern void big_key_revoke(struct key *key); extern void big_key_destroy(struct key *key); extern void big_key_describe(const struct key *big_key, struct seq_file *m); extern long big_key_read(const struct key *key, char *buffer, size_t buflen); +extern int big_key_update(struct key *key, struct key_preparsed_payload *prep); #endif /* _KEYS_BIG_KEY_TYPE_H */ diff --git a/include/keys/encrypted-type.h b/include/keys/encrypted-type.h index 9e9ccb20d586..38afb341c3f2 100644 --- a/include/keys/encrypted-type.h +++ b/include/keys/encrypted-type.h @@ -27,7 +27,7 @@ struct encrypted_key_payload { unsigned short payload_datalen; /* payload data length */ unsigned short encrypted_key_format; /* encrypted key format */ u8 *decrypted_data; /* decrypted data */ - u8 payload_data[0]; /* payload data + datablob + hmac */ + u8 payload_data[]; /* payload data + datablob + hmac */ }; extern struct key_type key_type_encrypted; diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index a183278c3e9e..2b0b15a71228 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h @@ -28,7 +28,7 @@ struct rxkad_key { u8 primary_flag; /* T if key for primary cell for this user */ u16 ticket_len; /* length of ticket[] */ u8 session_key[8]; /* DES session key */ - u8 ticket[0]; /* the encrypted ticket */ + u8 ticket[]; /* the encrypted ticket */ }; /* @@ -100,7 +100,7 @@ struct rxrpc_key_data_v1 { u32 expiry; /* time_t */ u32 kvno; u8 session_key[8]; - u8 ticket[0]; + u8 ticket[]; }; /* diff --git a/include/keys/user-type.h b/include/keys/user-type.h index be61fcddc02a..386c31432789 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h @@ -27,7 +27,7 @@ struct user_key_payload { struct rcu_head rcu; /* RCU destructor */ unsigned short datalen; /* length of this data */ - char data[0] __aligned(__alignof__(u64)); /* actual data */ + char data[] __aligned(__alignof__(u64)); /* actual data */ }; extern struct key_type key_type_user; diff --git a/include/kunit/test.h b/include/kunit/test.h index 9b0c46a6ca1f..47e61e1d5337 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -175,7 +175,7 @@ struct kunit_suite { void (*exit)(struct kunit *test); struct kunit_case *test_cases; - /* private - internal use only */ + /* private: internal use only */ struct dentry *debugfs; char *log; }; @@ -232,12 +232,12 @@ void __kunit_test_suites_exit(struct kunit_suite **suites); * kunit_test_suites() - used to register one or more &struct kunit_suite * with KUnit. * - * @suites: a statically allocated list of &struct kunit_suite. + * @suites_list...: a statically allocated list of &struct kunit_suite. * - * Registers @suites with the test framework. See &struct kunit_suite for + * Registers @suites_list with the test framework. See &struct kunit_suite for * more information. * - * When builtin, KUnit tests are all run as late_initcalls; this means + * When builtin, KUnit tests are all run as late_initcalls; this means * that they cannot test anything where tests must run at a different init * phase. 
One significant restriction resulting from this is that KUnit * cannot reliably test anything that is initialized in the late_init phase; @@ -253,8 +253,8 @@ void __kunit_test_suites_exit(struct kunit_suite **suites); * tests from the same place, and at the very least to do so after * everything else is definitely initialized. */ -#define kunit_test_suites(...) \ - static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \ +#define kunit_test_suites(suites_list...) \ + static struct kunit_suite *suites[] = {suites_list, NULL}; \ static int kunit_test_suites_init(void) \ { \ return __kunit_test_suites_init(suites); \ diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 69f4164d6477..a8d8fdcd3723 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -274,6 +274,8 @@ struct vgic_v2_cpu_if { u32 vgic_vmcr; u32 vgic_apr; u32 vgic_lr[VGIC_V2_MAX_LRS]; + + unsigned int used_lrs; }; struct vgic_v3_cpu_if { @@ -291,6 +293,8 @@ struct vgic_v3_cpu_if { * linking the Linux IRQ subsystem and the ITS together. */ struct its_vpe its_vpe; + + unsigned int used_lrs; }; struct vgic_cpu { @@ -300,7 +304,6 @@ struct vgic_cpu { struct vgic_v3_cpu_if vgic_v3; }; - unsigned int used_lrs; struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; raw_spinlock_t ap_list_lock; /* Protects the ap_list */ diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 26f0ecf401ea..0bbfd647f5c6 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -65,6 +65,7 @@ struct amba_device { struct device dev; struct resource res; struct clk *pclk; + struct device_dma_parameters dma_parms; unsigned int periphid; unsigned int cid; struct amba_cs_uci_id uci; diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 59494df0f55b..56d6a5c6e353 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -5,12 +5,15 @@ #ifndef __LINUX_ARM_SMCCC_H #define __LINUX_ARM_SMCCC_H +#include <linux/init.h> #include <uapi/linux/const.h> /* * This file provides common defines for ARM SMC Calling Convention as * specified in - * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + * https://developer.arm.com/docs/den0028/latest + * + * This code is up-to-date with version DEN 0028 C */ #define ARM_SMCCC_STD_CALL _AC(0,U) @@ -56,6 +59,7 @@ #define ARM_SMCCC_VERSION_1_0 0x10000 #define ARM_SMCCC_VERSION_1_1 0x10001 +#define ARM_SMCCC_VERSION_1_2 0x10002 #define ARM_SMCCC_VERSION_FUNC_ID \ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ @@ -98,6 +102,19 @@ enum arm_smccc_conduit { enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void); /** + * arm_smccc_get_version() + * + * Returns the version to be used for SMCCCv1.1 or later. + * + * When SMCCCv1.1 or above is not present, returns SMCCCv1.0, but this + * does not imply the presence of firmware or a valid conduit. Caller + * handling SMCCCv1.0 must determine the conduit by other means. + */ +u32 arm_smccc_get_version(void); + +void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit); + +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 */ #define arm_smccc_1_1_hvc(...)
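The kunit_test_suites() rework above only renames the variadic parameter, so registration is unchanged. A minimal sketch of declaring and registering a suite this way; the suite, case, and test names are illustrative:

#include <kunit/test.h>

static void example_add_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 4, 2 + 2);
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_add_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};

kunit_test_suites(&example_test_suite);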
__arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) -/* Return codes defined in ARM DEN 0070A */ +/* + * Return codes defined in ARM DEN 0070A + * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C + */ #define SMCCC_RET_SUCCESS 0 #define SMCCC_RET_NOT_SUPPORTED -1 #define SMCCC_RET_NOT_REQUIRED -2 +#define SMCCC_RET_INVALID_PARAMETER -3 /* * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED. diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic-arch-fallback.h new file mode 100644 index 000000000000..bcb6aa27cfa6 --- /dev/null +++ b/include/linux/atomic-arch-fallback.h @@ -0,0 +1,2291 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-fallback.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_FALLBACK_H +#define _LINUX_ATOMIC_FALLBACK_H + +#include <linux/compiler.h> + +#ifndef arch_xchg_relaxed +#define arch_xchg_relaxed arch_xchg +#define arch_xchg_acquire arch_xchg +#define arch_xchg_release arch_xchg +#else /* arch_xchg_relaxed */ + +#ifndef arch_xchg_acquire +#define arch_xchg_acquire(...) \ + __atomic_op_acquire(arch_xchg, __VA_ARGS__) +#endif + +#ifndef arch_xchg_release +#define arch_xchg_release(...) \ + __atomic_op_release(arch_xchg, __VA_ARGS__) +#endif + +#ifndef arch_xchg +#define arch_xchg(...) \ + __atomic_op_fence(arch_xchg, __VA_ARGS__) +#endif + +#endif /* arch_xchg_relaxed */ + +#ifndef arch_cmpxchg_relaxed +#define arch_cmpxchg_relaxed arch_cmpxchg +#define arch_cmpxchg_acquire arch_cmpxchg +#define arch_cmpxchg_release arch_cmpxchg +#else /* arch_cmpxchg_relaxed */ + +#ifndef arch_cmpxchg_acquire +#define arch_cmpxchg_acquire(...) \ + __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg_release +#define arch_cmpxchg_release(...) \ + __atomic_op_release(arch_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg +#define arch_cmpxchg(...) \ + __atomic_op_fence(arch_cmpxchg, __VA_ARGS__) +#endif + +#endif /* arch_cmpxchg_relaxed */ + +#ifndef arch_cmpxchg64_relaxed +#define arch_cmpxchg64_relaxed arch_cmpxchg64 +#define arch_cmpxchg64_acquire arch_cmpxchg64 +#define arch_cmpxchg64_release arch_cmpxchg64 +#else /* arch_cmpxchg64_relaxed */ + +#ifndef arch_cmpxchg64_acquire +#define arch_cmpxchg64_acquire(...) \ + __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64_release +#define arch_cmpxchg64_release(...) \ + __atomic_op_release(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64 +#define arch_cmpxchg64(...) 
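With ARM_SMCCC_VERSION_1_2 and SMCCC_RET_INVALID_PARAMETER introduced above, a caller can gate newer firmware calls on the reported version. A hedged sketch using the existing conduit-aware arm_smccc_1_1_invoke() wrapper; the function ID and wrapper name are hypothetical:

#include <linux/arm-smccc.h>
#include <linux/errno.h>

static int example_invoke_v12_call(unsigned long fid)
{
	struct arm_smccc_res res;

	/* arm_smccc_get_version() falls back to v1.0 when 1.1+ is absent */
	if (arm_smccc_get_version() < ARM_SMCCC_VERSION_1_2)
		return -EOPNOTSUPP;

	arm_smccc_1_1_invoke(fid, 0, 0, 0, 0, 0, 0, 0, &res);
	if ((long)res.a0 == SMCCC_RET_INVALID_PARAMETER)
		return -EINVAL;

	return 0;
}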
\ + __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__) +#endif + +#endif /* arch_cmpxchg64_relaxed */ + +#ifndef arch_atomic_read_acquire +static __always_inline int +arch_atomic_read_acquire(const atomic_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define arch_atomic_read_acquire arch_atomic_read_acquire +#endif + +#ifndef arch_atomic_set_release +static __always_inline void +arch_atomic_set_release(atomic_t *v, int i) +{ + smp_store_release(&(v)->counter, i); +} +#define arch_atomic_set_release arch_atomic_set_release +#endif + +#ifndef arch_atomic_add_return_relaxed +#define arch_atomic_add_return_acquire arch_atomic_add_return +#define arch_atomic_add_return_release arch_atomic_add_return +#define arch_atomic_add_return_relaxed arch_atomic_add_return +#else /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_add_return_acquire +static __always_inline int +arch_atomic_add_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire +#endif + +#ifndef arch_atomic_add_return_release +static __always_inline int +arch_atomic_add_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_add_return_relaxed(i, v); +} +#define arch_atomic_add_return_release arch_atomic_add_return_release +#endif + +#ifndef arch_atomic_add_return +static __always_inline int +arch_atomic_add_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_add_return arch_atomic_add_return +#endif + +#endif /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add +#define arch_atomic_fetch_add_release arch_atomic_fetch_add +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add +#else /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_fetch_add_acquire +static __always_inline int +arch_atomic_fetch_add_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire +#endif + +#ifndef arch_atomic_fetch_add_release +static __always_inline int +arch_atomic_fetch_add_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_add_relaxed(i, v); +} +#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release +#endif + +#ifndef arch_atomic_fetch_add +static __always_inline int +arch_atomic_fetch_add(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_add arch_atomic_fetch_add +#endif + +#endif /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_sub_return_relaxed +#define arch_atomic_sub_return_acquire arch_atomic_sub_return +#define arch_atomic_sub_return_release arch_atomic_sub_return +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return +#else /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_sub_return_acquire +static __always_inline int +arch_atomic_sub_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire +#endif + +#ifndef arch_atomic_sub_return_release 
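The arch_atomic_read_acquire()/arch_atomic_set_release() fallbacks above reduce to smp_load_acquire()/smp_store_release() on the counter. A sketch of the message-passing pattern these orderings make safe; variable and function names are illustrative:

#include <linux/atomic.h>

static int example_data;
static atomic_t example_ready = ATOMIC_INIT(0);

static void example_publish(int val)
{
	example_data = val;			/* A: plain store */
	atomic_set_release(&example_ready, 1);	/* B: release orders A before B */
}

static int example_consume(void)
{
	if (atomic_read_acquire(&example_ready))	/* C: acquire load */
		return example_data;	/* D: guaranteed to see A if C saw B */
	return -1;
}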
+static __always_inline int +arch_atomic_sub_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_sub_return_relaxed(i, v); +} +#define arch_atomic_sub_return_release arch_atomic_sub_return_release +#endif + +#ifndef arch_atomic_sub_return +static __always_inline int +arch_atomic_sub_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_sub_return arch_atomic_sub_return +#endif + +#endif /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_fetch_sub_relaxed +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub +#else /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_fetch_sub_acquire +static __always_inline int +arch_atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire +#endif + +#ifndef arch_atomic_fetch_sub_release +static __always_inline int +arch_atomic_fetch_sub_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_sub_relaxed(i, v); +} +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release +#endif + +#ifndef arch_atomic_fetch_sub +static __always_inline int +arch_atomic_fetch_sub(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#endif + +#endif /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_inc +static __always_inline void +arch_atomic_inc(atomic_t *v) +{ + arch_atomic_add(1, v); +} +#define arch_atomic_inc arch_atomic_inc +#endif + +#ifndef arch_atomic_inc_return_relaxed +#ifdef arch_atomic_inc_return +#define arch_atomic_inc_return_acquire arch_atomic_inc_return +#define arch_atomic_inc_return_release arch_atomic_inc_return +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return +#endif /* arch_atomic_inc_return */ + +#ifndef arch_atomic_inc_return +static __always_inline int +arch_atomic_inc_return(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + return arch_atomic_add_return_acquire(1, v); +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + return arch_atomic_add_return_release(1, v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return_relaxed +static __always_inline int +arch_atomic_inc_return_relaxed(atomic_t *v) +{ + return arch_atomic_add_return_relaxed(1, v); +} +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed +#endif + +#else /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef 
arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_inc_return_relaxed(v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return +static __always_inline int +arch_atomic_inc_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#endif /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_fetch_inc_relaxed +#ifdef arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc +#endif /* arch_atomic_fetch_inc */ + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + return arch_atomic_fetch_add_acquire(1, v); +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + return arch_atomic_fetch_add_release(1, v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc_relaxed +static __always_inline int +arch_atomic_fetch_inc_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_add_relaxed(1, v); +} +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed +#endif + +#else /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_inc_relaxed(v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#endif /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_dec +static __always_inline void +arch_atomic_dec(atomic_t *v) +{ + arch_atomic_sub(1, v); +} +#define arch_atomic_dec arch_atomic_dec +#endif + +#ifndef arch_atomic_dec_return_relaxed +#ifdef arch_atomic_dec_return +#define arch_atomic_dec_return_acquire arch_atomic_dec_return +#define arch_atomic_dec_return_release arch_atomic_dec_return +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return +#endif /* arch_atomic_dec_return */ + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + return 
arch_atomic_sub_return_acquire(1, v); +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + return arch_atomic_sub_return_release(1, v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return_relaxed +static __always_inline int +arch_atomic_dec_return_relaxed(atomic_t *v) +{ + return arch_atomic_sub_return_relaxed(1, v); +} +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed +#endif + +#else /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_dec_return_relaxed(v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#endif /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_fetch_dec_relaxed +#ifdef arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec +#endif /* arch_atomic_fetch_dec */ + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + return arch_atomic_fetch_sub_acquire(1, v); +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + +#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + return arch_atomic_fetch_sub_release(1, v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec_relaxed +static __always_inline int +arch_atomic_fetch_dec_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_sub_relaxed(1, v); +} +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed +#endif + +#else /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + +#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_dec_relaxed(v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = 
arch_atomic_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#endif /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and +#define arch_atomic_fetch_and_release arch_atomic_fetch_and +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and +#else /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_fetch_and_acquire +static __always_inline int +arch_atomic_fetch_and_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire +#endif + +#ifndef arch_atomic_fetch_and_release +static __always_inline int +arch_atomic_fetch_and_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_and_relaxed(i, v); +} +#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release +#endif + +#ifndef arch_atomic_fetch_and +static __always_inline int +arch_atomic_fetch_and(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_and arch_atomic_fetch_and +#endif + +#endif /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_andnot +static __always_inline void +arch_atomic_andnot(int i, atomic_t *v) +{ + arch_atomic_and(~i, v); +} +#define arch_atomic_andnot arch_atomic_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +#ifdef arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot +#endif /* arch_atomic_fetch_andnot */ + +#ifndef arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_acquire(~i, v); +} +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_release(~i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +static __always_inline int +arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_relaxed(~i, v); +} +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#endif + +#else /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_andnot_relaxed(i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef 
arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#endif /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or +#define arch_atomic_fetch_or_release arch_atomic_fetch_or +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or +#else /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_or_acquire +static __always_inline int +arch_atomic_fetch_or_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire +#endif + +#ifndef arch_atomic_fetch_or_release +static __always_inline int +arch_atomic_fetch_or_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_or_relaxed(i, v); +} +#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release +#endif + +#ifndef arch_atomic_fetch_or +static __always_inline int +arch_atomic_fetch_or(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_or arch_atomic_fetch_or +#endif + +#endif /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_xor_relaxed +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor +#else /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_fetch_xor_acquire +static __always_inline int +arch_atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire +#endif + +#ifndef arch_atomic_fetch_xor_release +static __always_inline int +arch_atomic_fetch_xor_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_xor_relaxed(i, v); +} +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor_release +#endif + +#ifndef arch_atomic_fetch_xor +static __always_inline int +arch_atomic_fetch_xor(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#endif + +#endif /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_xchg_relaxed +#define arch_atomic_xchg_acquire arch_atomic_xchg +#define arch_atomic_xchg_release arch_atomic_xchg +#define arch_atomic_xchg_relaxed arch_atomic_xchg +#else /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_xchg_acquire +static __always_inline int +arch_atomic_xchg_acquire(atomic_t *v, int i) +{ + int ret = arch_atomic_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire +#endif + +#ifndef arch_atomic_xchg_release +static __always_inline int +arch_atomic_xchg_release(atomic_t *v, int i) +{ + __atomic_release_fence(); + return arch_atomic_xchg_relaxed(v, i); +} +#define arch_atomic_xchg_release arch_atomic_xchg_release +#endif + +#ifndef arch_atomic_xchg +static __always_inline int +arch_atomic_xchg(atomic_t *v, int i) +{ + int ret; + 
__atomic_pre_full_fence(); + ret = arch_atomic_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_xchg arch_atomic_xchg +#endif + +#endif /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_relaxed +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg +#else /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_acquire +static __always_inline int +arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + int ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#endif + +#ifndef arch_atomic_cmpxchg_release +static __always_inline int +arch_atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + __atomic_release_fence(); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release +#endif + +#ifndef arch_atomic_cmpxchg +static __always_inline int +arch_atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_cmpxchg arch_atomic_cmpxchg +#endif + +#endif /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_relaxed +#ifdef arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg +#endif /* arch_atomic_try_cmpxchg */ + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, 
int *old, int new) +{ + __atomic_release_fence(); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#endif /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_sub_and_test +/** + * arch_atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_sub_and_test(int i, atomic_t *v) +{ + return arch_atomic_sub_return(i, v) == 0; +} +#define arch_atomic_sub_and_test arch_atomic_sub_and_test +#endif + +#ifndef arch_atomic_dec_and_test +/** + * arch_atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic_dec_and_test(atomic_t *v) +{ + return arch_atomic_dec_return(v) == 0; +} +#define arch_atomic_dec_and_test arch_atomic_dec_and_test +#endif + +#ifndef arch_atomic_inc_and_test +/** + * arch_atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_inc_and_test(atomic_t *v) +{ + return arch_atomic_inc_return(v) == 0; +} +#define arch_atomic_inc_and_test arch_atomic_inc_and_test +#endif + +#ifndef arch_atomic_add_negative +/** + * arch_atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic_add_negative(int i, atomic_t *v) +{ + return arch_atomic_add_return(i, v) < 0; +} +#define arch_atomic_add_negative arch_atomic_add_negative +#endif + +#ifndef arch_atomic_fetch_add_unless +/** + * arch_atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static __always_inline int +arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#endif + +#ifndef arch_atomic_add_unless +/** + * arch_atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. 
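The arch_atomic_fetch_add_unless() fallback above is the canonical try_cmpxchg() read-modify-write loop: re-read on failure, bail out on a terminal value. The same idiom in caller code, here as a saturating increment (the helper name is illustrative):

#include <linux/atomic.h>
#include <linux/limits.h>

static void example_inc_saturating(atomic_t *v)
{
	int old = atomic_read(v);

	do {
		if (old == INT_MAX)
			return;		/* already saturated, leave as-is */
	} while (!atomic_try_cmpxchg(v, &old, old + 1));
}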
+ */ +static __always_inline bool +arch_atomic_add_unless(atomic_t *v, int a, int u) +{ + return arch_atomic_fetch_add_unless(v, a, u) != u; +} +#define arch_atomic_add_unless arch_atomic_add_unless +#endif + +#ifndef arch_atomic_inc_not_zero +/** + * arch_atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +static __always_inline bool +arch_atomic_inc_not_zero(atomic_t *v) +{ + return arch_atomic_add_unless(v, 1, 0); +} +#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero +#endif + +#ifndef arch_atomic_inc_unless_negative +static __always_inline bool +arch_atomic_inc_unless_negative(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative +#endif + +#ifndef arch_atomic_dec_unless_positive +static __always_inline bool +arch_atomic_dec_unless_positive(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive +#endif + +#ifndef arch_atomic_dec_if_positive +static __always_inline int +arch_atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = arch_atomic_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive +#endif + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif + +#ifndef arch_atomic64_read_acquire +static __always_inline s64 +arch_atomic64_read_acquire(const atomic64_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define arch_atomic64_read_acquire arch_atomic64_read_acquire +#endif + +#ifndef arch_atomic64_set_release +static __always_inline void +arch_atomic64_set_release(atomic64_t *v, s64 i) +{ + smp_store_release(&(v)->counter, i); +} +#define arch_atomic64_set_release arch_atomic64_set_release +#endif + +#ifndef arch_atomic64_add_return_relaxed +#define arch_atomic64_add_return_acquire arch_atomic64_add_return +#define arch_atomic64_add_return_release arch_atomic64_add_return +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return +#else /* arch_atomic64_add_return_relaxed */ + +#ifndef arch_atomic64_add_return_acquire +static __always_inline s64 +arch_atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire +#endif + +#ifndef arch_atomic64_add_return_release +static __always_inline s64 +arch_atomic64_add_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_add_return_relaxed(i, v); +} +#define arch_atomic64_add_return_release arch_atomic64_add_return_release +#endif + +#ifndef arch_atomic64_add_return +static __always_inline s64 +arch_atomic64_add_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_add_return arch_atomic64_add_return +#endif + +#endif /* arch_atomic64_add_return_relaxed */ + +#ifndef arch_atomic64_fetch_add_relaxed +#define 
arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add +#else /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_fetch_add_acquire +static __always_inline s64 +arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire +#endif + +#ifndef arch_atomic64_fetch_add_release +static __always_inline s64 +arch_atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_add_relaxed(i, v); +} +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release +#endif + +#ifndef arch_atomic64_fetch_add +static __always_inline s64 +arch_atomic64_fetch_add(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#endif + +#endif /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_sub_return_relaxed +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return +#define arch_atomic64_sub_return_release arch_atomic64_sub_return +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return +#else /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_sub_return_acquire +static __always_inline s64 +arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire +#endif + +#ifndef arch_atomic64_sub_return_release +static __always_inline s64 +arch_atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_sub_return_relaxed(i, v); +} +#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release +#endif + +#ifndef arch_atomic64_sub_return +static __always_inline s64 +arch_atomic64_sub_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_sub_return arch_atomic64_sub_return +#endif + +#endif /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_fetch_sub_relaxed +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub +#else /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_fetch_sub_acquire +static __always_inline s64 +arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire +#endif + +#ifndef arch_atomic64_fetch_sub_release +static __always_inline s64 +arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_sub_relaxed(i, v); +} +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release +#endif + +#ifndef arch_atomic64_fetch_sub +static __always_inline s64 +arch_atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_sub_relaxed(i, v); + 
__atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#endif + +#endif /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_inc +static __always_inline void +arch_atomic64_inc(atomic64_t *v) +{ + arch_atomic64_add(1, v); +} +#define arch_atomic64_inc arch_atomic64_inc +#endif + +#ifndef arch_atomic64_inc_return_relaxed +#ifdef arch_atomic64_inc_return +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return +#define arch_atomic64_inc_return_release arch_atomic64_inc_return +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return +#endif /* arch_atomic64_inc_return */ + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + return arch_atomic64_add_return_acquire(1, v); +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + return arch_atomic64_add_return_release(1, v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return_relaxed +static __always_inline s64 +arch_atomic64_inc_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_add_return_relaxed(1, v); +} +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed +#endif + +#else /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_inc_return_relaxed(v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#endif /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_fetch_inc_relaxed +#ifdef arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc +#endif /* arch_atomic64_fetch_inc */ + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_add_acquire(1, v); +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + return 
arch_atomic64_fetch_add_release(1, v); +} +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc_relaxed +static __always_inline s64 +arch_atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_add_relaxed(1, v); +} +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed +#endif + +#else /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_inc_relaxed(v); +} +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#endif /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_dec +static __always_inline void +arch_atomic64_dec(atomic64_t *v) +{ + arch_atomic64_sub(1, v); +} +#define arch_atomic64_dec arch_atomic64_dec +#endif + +#ifndef arch_atomic64_dec_return_relaxed +#ifdef arch_atomic64_dec_return +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return +#define arch_atomic64_dec_return_release arch_atomic64_dec_return +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return +#endif /* arch_atomic64_dec_return */ + +#ifndef arch_atomic64_dec_return +static __always_inline s64 +arch_atomic64_dec_return(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + return arch_atomic64_sub_return_acquire(1, v); +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + return arch_atomic64_sub_return_release(1, v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return_relaxed +static __always_inline s64 +arch_atomic64_dec_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_sub_return_relaxed(1, v); +} +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed +#endif + +#else /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_dec_return_relaxed(v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return +static __always_inline s64 
+arch_atomic64_dec_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#endif /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_fetch_dec_relaxed +#ifdef arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec +#endif /* arch_atomic64_fetch_dec */ + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_acquire(1, v); +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_release(1, v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec_relaxed +static __always_inline s64 +arch_atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(1, v); +} +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed +#endif + +#else /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_dec_relaxed(v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#endif /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and +#else /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_fetch_and_acquire +static __always_inline s64 +arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire +#endif + +#ifndef arch_atomic64_fetch_and_release +static __always_inline s64 +arch_atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_and_relaxed(i, v); +} +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release +#endif + +#ifndef arch_atomic64_fetch_and +static __always_inline s64 +arch_atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 ret; + 
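+	/* Fully ordered form: bracket the relaxed op with full fences. */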
__atomic_pre_full_fence(); + ret = arch_atomic64_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#endif + +#endif /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_andnot +static __always_inline void +arch_atomic64_andnot(s64 i, atomic64_t *v) +{ + arch_atomic64_and(~i, v); +} +#define arch_atomic64_andnot arch_atomic64_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +#ifdef arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot +#endif /* arch_atomic64_fetch_andnot */ + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_acquire(~i, v); +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_release(~i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +static __always_inline s64 +arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_relaxed(~i, v); +} +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#endif + +#else /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#endif /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or +#else /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_or_acquire +static __always_inline s64 +arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire +#endif + +#ifndef arch_atomic64_fetch_or_release +static __always_inline s64 
+arch_atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_or_relaxed(i, v); +} +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release +#endif + +#ifndef arch_atomic64_fetch_or +static __always_inline s64 +arch_atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#endif + +#endif /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor +#else /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_fetch_xor_acquire +static __always_inline s64 +arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire +#endif + +#ifndef arch_atomic64_fetch_xor_release +static __always_inline s64 +arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_xor_relaxed(i, v); +} +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release +#endif + +#ifndef arch_atomic64_fetch_xor +static __always_inline s64 +arch_atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#endif + +#endif /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_xchg_relaxed +#define arch_atomic64_xchg_acquire arch_atomic64_xchg +#define arch_atomic64_xchg_release arch_atomic64_xchg +#define arch_atomic64_xchg_relaxed arch_atomic64_xchg +#else /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_xchg_acquire +static __always_inline s64 +arch_atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + s64 ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire +#endif + +#ifndef arch_atomic64_xchg_release +static __always_inline s64 +arch_atomic64_xchg_release(atomic64_t *v, s64 i) +{ + __atomic_release_fence(); + return arch_atomic64_xchg_relaxed(v, i); +} +#define arch_atomic64_xchg_release arch_atomic64_xchg_release +#endif + +#ifndef arch_atomic64_xchg +static __always_inline s64 +arch_atomic64_xchg(atomic64_t *v, s64 i) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_xchg arch_atomic64_xchg +#endif + +#endif /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_relaxed +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg +#else /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_acquire +static __always_inline s64 +arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_cmpxchg_release +static 
__always_inline s64 +arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release +#endif + +#ifndef arch_atomic64_cmpxchg +static __always_inline s64 +arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg +#endif + +#endif /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_try_cmpxchg_relaxed +#ifdef arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg +#endif /* arch_atomic64_try_cmpxchg */ + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic64_try_cmpxchg_relaxed */ + +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#endif /* arch_atomic64_try_cmpxchg_relaxed */ + +#ifndef arch_atomic64_sub_and_test +/** + * arch_atomic64_sub_and_test - subtract value from variable and 
test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return arch_atomic64_sub_return(i, v) == 0; +} +#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test +#endif + +#ifndef arch_atomic64_dec_and_test +/** + * arch_atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic64_dec_and_test(atomic64_t *v) +{ + return arch_atomic64_dec_return(v) == 0; +} +#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test +#endif + +#ifndef arch_atomic64_inc_and_test +/** + * arch_atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_inc_and_test(atomic64_t *v) +{ + return arch_atomic64_inc_return(v) == 0; +} +#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test +#endif + +#ifndef arch_atomic64_add_negative +/** + * arch_atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic64_add_negative(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return(i, v) < 0; +} +#define arch_atomic64_add_negative arch_atomic64_add_negative +#endif + +#ifndef arch_atomic64_fetch_add_unless +/** + * arch_atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static __always_inline s64 +arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#endif + +#ifndef arch_atomic64_add_unless +/** + * arch_atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static __always_inline bool +arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return arch_atomic64_fetch_add_unless(v, a, u) != u; +} +#define arch_atomic64_add_unless arch_atomic64_add_unless +#endif + +#ifndef arch_atomic64_inc_not_zero +/** + * arch_atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. 
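+ *
+ * Illustrative sketch (not part of this file; @obj and its @refs field
+ * are made-up names) of the lookup idiom this enables:
+ *
+ *	if (!arch_atomic64_inc_not_zero(&obj->refs))
+ *		return NULL;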
+ */ +static __always_inline bool +arch_atomic64_inc_not_zero(atomic64_t *v) +{ + return arch_atomic64_add_unless(v, 1, 0); +} +#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero +#endif + +#ifndef arch_atomic64_inc_unless_negative +static __always_inline bool +arch_atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative +#endif + +#ifndef arch_atomic64_dec_unless_positive +static __always_inline bool +arch_atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive +#endif + +#ifndef arch_atomic64_dec_if_positive +static __always_inline s64 +arch_atomic64_dec_if_positive(atomic64_t *v) +{ + s64 dec, c = arch_atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive +#endif + +#endif /* _LINUX_ATOMIC_FALLBACK_H */ +// 90cd26cfd69d2250303d654955a0cc12620fb91b diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index a7d240e465c0..2c4927bf7b8d 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -6,6 +6,8 @@ #ifndef _LINUX_ATOMIC_FALLBACK_H #define _LINUX_ATOMIC_FALLBACK_H +#include <linux/compiler.h> + #ifndef xchg_relaxed #define xchg_relaxed xchg #define xchg_acquire xchg @@ -76,7 +78,7 @@ #endif /* cmpxchg64_relaxed */ #ifndef atomic_read_acquire -static inline int +static __always_inline int atomic_read_acquire(const atomic_t *v) { return smp_load_acquire(&(v)->counter); @@ -85,7 +87,7 @@ atomic_read_acquire(const atomic_t *v) #endif #ifndef atomic_set_release -static inline void +static __always_inline void atomic_set_release(atomic_t *v, int i) { smp_store_release(&(v)->counter, i); @@ -100,7 +102,7 @@ atomic_set_release(atomic_t *v, int i) #else /* atomic_add_return_relaxed */ #ifndef atomic_add_return_acquire -static inline int +static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { int ret = atomic_add_return_relaxed(i, v); @@ -111,7 +113,7 @@ atomic_add_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_add_return_release -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -121,7 +123,7 @@ atomic_add_return_release(int i, atomic_t *v) #endif #ifndef atomic_add_return -static inline int +static __always_inline int atomic_add_return(int i, atomic_t *v) { int ret; @@ -142,7 +144,7 @@ atomic_add_return(int i, atomic_t *v) #else /* atomic_fetch_add_relaxed */ #ifndef atomic_fetch_add_acquire -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { int ret = atomic_fetch_add_relaxed(i, v); @@ -153,7 +155,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_add_release -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -163,7 +165,7 @@ atomic_fetch_add_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_add -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { int 
ret; @@ -184,7 +186,7 @@ atomic_fetch_add(int i, atomic_t *v) #else /* atomic_sub_return_relaxed */ #ifndef atomic_sub_return_acquire -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { int ret = atomic_sub_return_relaxed(i, v); @@ -195,7 +197,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_sub_return_release -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -205,7 +207,7 @@ atomic_sub_return_release(int i, atomic_t *v) #endif #ifndef atomic_sub_return -static inline int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { int ret; @@ -226,7 +228,7 @@ atomic_sub_return(int i, atomic_t *v) #else /* atomic_fetch_sub_relaxed */ #ifndef atomic_fetch_sub_acquire -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { int ret = atomic_fetch_sub_relaxed(i, v); @@ -237,7 +239,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub_release -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -247,7 +249,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { int ret; @@ -262,7 +264,7 @@ atomic_fetch_sub(int i, atomic_t *v) #endif /* atomic_fetch_sub_relaxed */ #ifndef atomic_inc -static inline void +static __always_inline void atomic_inc(atomic_t *v) { atomic_add(1, v); @@ -278,7 +280,7 @@ atomic_inc(atomic_t *v) #endif /* atomic_inc_return */ #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { return atomic_add_return(1, v); @@ -287,7 +289,7 @@ atomic_inc_return(atomic_t *v) #endif #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { return atomic_add_return_acquire(1, v); @@ -296,7 +298,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { return atomic_add_return_release(1, v); @@ -305,7 +307,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return_relaxed -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { return atomic_add_return_relaxed(1, v); @@ -316,7 +318,7 @@ atomic_inc_return_relaxed(atomic_t *v) #else /* atomic_inc_return_relaxed */ #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { int ret = atomic_inc_return_relaxed(v); @@ -327,7 +329,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { __atomic_release_fence(); @@ -337,7 +339,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { int ret; @@ -359,7 +361,7 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_fetch_inc */ #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { return atomic_fetch_add(1, v); @@ -368,7 +370,7 @@ atomic_fetch_inc(atomic_t *v) #endif #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { return atomic_fetch_add_acquire(1, 
v); @@ -377,7 +379,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { return atomic_fetch_add_release(1, v); @@ -386,7 +388,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc_relaxed -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { return atomic_fetch_add_relaxed(1, v); @@ -397,7 +399,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) #else /* atomic_fetch_inc_relaxed */ #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { int ret = atomic_fetch_inc_relaxed(v); @@ -408,7 +410,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { __atomic_release_fence(); @@ -418,7 +420,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { int ret; @@ -433,7 +435,7 @@ atomic_fetch_inc(atomic_t *v) #endif /* atomic_fetch_inc_relaxed */ #ifndef atomic_dec -static inline void +static __always_inline void atomic_dec(atomic_t *v) { atomic_sub(1, v); @@ -449,7 +451,7 @@ atomic_dec(atomic_t *v) #endif /* atomic_dec_return */ #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { return atomic_sub_return(1, v); @@ -458,7 +460,7 @@ atomic_dec_return(atomic_t *v) #endif #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { return atomic_sub_return_acquire(1, v); @@ -467,7 +469,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { return atomic_sub_return_release(1, v); @@ -476,7 +478,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return_relaxed -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { return atomic_sub_return_relaxed(1, v); @@ -487,7 +489,7 @@ atomic_dec_return_relaxed(atomic_t *v) #else /* atomic_dec_return_relaxed */ #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { int ret = atomic_dec_return_relaxed(v); @@ -498,7 +500,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { __atomic_release_fence(); @@ -508,7 +510,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { int ret; @@ -530,7 +532,7 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_fetch_dec */ #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { return atomic_fetch_sub(1, v); @@ -539,7 +541,7 @@ atomic_fetch_dec(atomic_t *v) #endif #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { return atomic_fetch_sub_acquire(1, v); @@ -548,7 +550,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { return atomic_fetch_sub_release(1, v); @@ -557,7 +559,7 @@ atomic_fetch_dec_release(atomic_t *v) 
#endif #ifndef atomic_fetch_dec_relaxed -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { return atomic_fetch_sub_relaxed(1, v); @@ -568,7 +570,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) #else /* atomic_fetch_dec_relaxed */ #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { int ret = atomic_fetch_dec_relaxed(v); @@ -579,7 +581,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { __atomic_release_fence(); @@ -589,7 +591,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { int ret; @@ -610,7 +612,7 @@ atomic_fetch_dec(atomic_t *v) #else /* atomic_fetch_and_relaxed */ #ifndef atomic_fetch_and_acquire -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { int ret = atomic_fetch_and_relaxed(i, v); @@ -621,7 +623,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_and_release -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -631,7 +633,7 @@ atomic_fetch_and_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_and -static inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { int ret; @@ -646,7 +648,7 @@ atomic_fetch_and(int i, atomic_t *v) #endif /* atomic_fetch_and_relaxed */ #ifndef atomic_andnot -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); @@ -662,7 +664,7 @@ atomic_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot */ #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { return atomic_fetch_and(~i, v); @@ -671,7 +673,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { return atomic_fetch_and_acquire(~i, v); @@ -680,7 +682,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { return atomic_fetch_and_release(~i, v); @@ -689,7 +691,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_relaxed -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { return atomic_fetch_and_relaxed(~i, v); @@ -700,7 +702,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) #else /* atomic_fetch_andnot_relaxed */ #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { int ret = atomic_fetch_andnot_relaxed(i, v); @@ -711,7 +713,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -721,7 +723,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { int ret; @@ -742,7 +744,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #else /* atomic_fetch_or_relaxed */ #ifndef atomic_fetch_or_acquire -static inline int +static 
__always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { int ret = atomic_fetch_or_relaxed(i, v); @@ -753,7 +755,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_or_release -static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -763,7 +765,7 @@ atomic_fetch_or_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_or -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { int ret; @@ -784,7 +786,7 @@ atomic_fetch_or(int i, atomic_t *v) #else /* atomic_fetch_xor_relaxed */ #ifndef atomic_fetch_xor_acquire -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { int ret = atomic_fetch_xor_relaxed(i, v); @@ -795,7 +797,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor_release -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -805,7 +807,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { int ret; @@ -826,7 +828,7 @@ atomic_fetch_xor(int i, atomic_t *v) #else /* atomic_xchg_relaxed */ #ifndef atomic_xchg_acquire -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { int ret = atomic_xchg_relaxed(v, i); @@ -837,7 +839,7 @@ atomic_xchg_acquire(atomic_t *v, int i) #endif #ifndef atomic_xchg_release -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { __atomic_release_fence(); @@ -847,7 +849,7 @@ atomic_xchg_release(atomic_t *v, int i) #endif #ifndef atomic_xchg -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { int ret; @@ -868,7 +870,7 @@ atomic_xchg(atomic_t *v, int i) #else /* atomic_cmpxchg_relaxed */ #ifndef atomic_cmpxchg_acquire -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { int ret = atomic_cmpxchg_relaxed(v, old, new); @@ -879,7 +881,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg_release -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { __atomic_release_fence(); @@ -889,7 +891,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg -static inline int +static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -911,7 +913,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_try_cmpxchg */ #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { int r, o = *old; @@ -924,7 +926,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { int r, o = *old; @@ -937,7 +939,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { int r, o = *old; @@ -950,7 +952,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { int r, o = *old; @@ -965,7 +967,7 @@ 
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) #else /* atomic_try_cmpxchg_relaxed */ #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { bool ret = atomic_try_cmpxchg_relaxed(v, old, new); @@ -976,7 +978,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { __atomic_release_fence(); @@ -986,7 +988,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { bool ret; @@ -1010,7 +1012,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { return atomic_sub_return(i, v) == 0; @@ -1027,7 +1029,7 @@ atomic_sub_and_test(int i, atomic_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { return atomic_dec_return(v) == 0; @@ -1044,7 +1046,7 @@ atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { return atomic_inc_return(v) == 0; @@ -1062,7 +1064,7 @@ atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { return atomic_add_return(i, v) < 0; @@ -1080,7 +1082,7 @@ atomic_add_negative(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int c = atomic_read(v); @@ -1105,7 +1107,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { return atomic_fetch_add_unless(v, a, u) != u; @@ -1121,7 +1123,7 @@ atomic_add_unless(atomic_t *v, int a, int u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
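 *
 * Illustrative get/put pairing built from this and atomic_dec_and_test()
 * above (sketch only; struct thing and its atomic_t refcnt field are
 * made-up names):
 *
 *	static bool thing_get(struct thing *t)
 *	{
 *		return atomic_inc_not_zero(&t->refcnt);
 *	}
 *
 *	static void thing_put(struct thing *t)
 *	{
 *		if (atomic_dec_and_test(&t->refcnt))
 *			kfree(t);
 *	}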
*/ -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { return atomic_add_unless(v, 1, 0); @@ -1130,7 +1132,7 @@ atomic_inc_not_zero(atomic_t *v) #endif #ifndef atomic_inc_unless_negative -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { int c = atomic_read(v); @@ -1146,7 +1148,7 @@ atomic_inc_unless_negative(atomic_t *v) #endif #ifndef atomic_dec_unless_positive -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { int c = atomic_read(v); @@ -1162,7 +1164,7 @@ atomic_dec_unless_positive(atomic_t *v) #endif #ifndef atomic_dec_if_positive -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { int dec, c = atomic_read(v); @@ -1178,15 +1180,12 @@ atomic_dec_if_positive(atomic_t *v) #define atomic_dec_if_positive atomic_dec_if_positive #endif -#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) - #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif #ifndef atomic64_read_acquire -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { return smp_load_acquire(&(v)->counter); @@ -1195,7 +1194,7 @@ atomic64_read_acquire(const atomic64_t *v) #endif #ifndef atomic64_set_release -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { smp_store_release(&(v)->counter, i); @@ -1210,7 +1209,7 @@ atomic64_set_release(atomic64_t *v, s64 i) #else /* atomic64_add_return_relaxed */ #ifndef atomic64_add_return_acquire -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_add_return_relaxed(i, v); @@ -1221,7 +1220,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return_release -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1231,7 +1230,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { s64 ret; @@ -1252,7 +1251,7 @@ atomic64_add_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_add_relaxed */ #ifndef atomic64_fetch_add_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_add_relaxed(i, v); @@ -1263,7 +1262,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add_release -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1273,7 +1272,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { s64 ret; @@ -1294,7 +1293,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #else /* atomic64_sub_return_relaxed */ #ifndef atomic64_sub_return_acquire -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_sub_return_relaxed(i, v); @@ -1305,7 +1304,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return_release -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1315,7 +1314,7 @@ atomic64_sub_return_release(s64 i, 
atomic64_t *v) #endif #ifndef atomic64_sub_return -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { s64 ret; @@ -1336,7 +1335,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_fetch_sub_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_sub_relaxed(i, v); @@ -1347,7 +1346,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub_release -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1357,7 +1356,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { s64 ret; @@ -1372,7 +1371,7 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_inc -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { atomic64_add(1, v); @@ -1388,7 +1387,7 @@ atomic64_inc(atomic64_t *v) #endif /* atomic64_inc_return */ #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { return atomic64_add_return(1, v); @@ -1397,7 +1396,7 @@ atomic64_inc_return(atomic64_t *v) #endif #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { return atomic64_add_return_acquire(1, v); @@ -1406,7 +1405,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { return atomic64_add_return_release(1, v); @@ -1415,7 +1414,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return_relaxed -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { return atomic64_add_return_relaxed(1, v); @@ -1426,7 +1425,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) #else /* atomic64_inc_return_relaxed */ #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { s64 ret = atomic64_inc_return_relaxed(v); @@ -1437,7 +1436,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1447,7 +1446,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { s64 ret; @@ -1469,7 +1468,7 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_fetch_inc */ #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { return atomic64_fetch_add(1, v); @@ -1478,7 +1477,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { return atomic64_fetch_add_acquire(1, v); @@ -1487,7 +1486,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { return atomic64_fetch_add_release(1, v); @@ -1496,7 +1495,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_relaxed -static 
inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { return atomic64_fetch_add_relaxed(1, v); @@ -1507,7 +1506,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) #else /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_inc_relaxed(v); @@ -1518,7 +1517,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { __atomic_release_fence(); @@ -1528,7 +1527,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { s64 ret; @@ -1543,7 +1542,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_dec -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { atomic64_sub(1, v); @@ -1559,7 +1558,7 @@ atomic64_dec(atomic64_t *v) #endif /* atomic64_dec_return */ #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { return atomic64_sub_return(1, v); @@ -1568,7 +1567,7 @@ atomic64_dec_return(atomic64_t *v) #endif #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { return atomic64_sub_return_acquire(1, v); @@ -1577,7 +1576,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { return atomic64_sub_return_release(1, v); @@ -1586,7 +1585,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return_relaxed -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { return atomic64_sub_return_relaxed(1, v); @@ -1597,7 +1596,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) #else /* atomic64_dec_return_relaxed */ #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { s64 ret = atomic64_dec_return_relaxed(v); @@ -1608,7 +1607,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1618,7 +1617,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { s64 ret; @@ -1640,7 +1639,7 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_fetch_dec */ #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { return atomic64_fetch_sub(1, v); @@ -1649,7 +1648,7 @@ atomic64_fetch_dec(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { return atomic64_fetch_sub_acquire(1, v); @@ -1658,7 +1657,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { return atomic64_fetch_sub_release(1, v); @@ -1667,7 +1666,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t 
*v) { return atomic64_fetch_sub_relaxed(1, v); @@ -1678,7 +1677,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) #else /* atomic64_fetch_dec_relaxed */ #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_dec_relaxed(v); @@ -1689,7 +1688,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { __atomic_release_fence(); @@ -1699,7 +1698,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { s64 ret; @@ -1720,7 +1719,7 @@ atomic64_fetch_dec(atomic64_t *v) #else /* atomic64_fetch_and_relaxed */ #ifndef atomic64_fetch_and_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_and_relaxed(i, v); @@ -1731,7 +1730,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and_release -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1741,7 +1740,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { s64 ret; @@ -1756,7 +1755,7 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif /* atomic64_fetch_and_relaxed */ #ifndef atomic64_andnot -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { atomic64_and(~i, v); @@ -1772,7 +1771,7 @@ atomic64_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot */ #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { return atomic64_fetch_and(~i, v); @@ -1781,7 +1780,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { return atomic64_fetch_and_acquire(~i, v); @@ -1790,7 +1789,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { return atomic64_fetch_and_release(~i, v); @@ -1799,7 +1798,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { return atomic64_fetch_and_relaxed(~i, v); @@ -1810,7 +1809,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) #else /* atomic64_fetch_andnot_relaxed */ #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_andnot_relaxed(i, v); @@ -1821,7 +1820,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1831,7 +1830,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { s64 ret; @@ -1852,7 +1851,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #else /* 
atomic64_fetch_or_relaxed */ #ifndef atomic64_fetch_or_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_or_relaxed(i, v); @@ -1863,7 +1862,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or_release -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1873,7 +1872,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { s64 ret; @@ -1894,7 +1893,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #else /* atomic64_fetch_xor_relaxed */ #ifndef atomic64_fetch_xor_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_xor_relaxed(i, v); @@ -1905,7 +1904,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor_release -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1915,7 +1914,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { s64 ret; @@ -1936,7 +1935,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #else /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_acquire -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { s64 ret = atomic64_xchg_relaxed(v, i); @@ -1947,7 +1946,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg_release -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { __atomic_release_fence(); @@ -1957,7 +1956,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { s64 ret; @@ -1978,7 +1977,7 @@ atomic64_xchg(atomic64_t *v, s64 i) #else /* atomic64_cmpxchg_relaxed */ #ifndef atomic64_cmpxchg_acquire -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { s64 ret = atomic64_cmpxchg_relaxed(v, old, new); @@ -1989,7 +1988,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg_release -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { __atomic_release_fence(); @@ -1999,7 +1998,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) { s64 ret; @@ -2021,7 +2020,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_try_cmpxchg */ #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2034,7 +2033,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2047,7 +2046,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; 
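Aside (illustrative, not part of the patch): the try_cmpxchg() convention
above - on failure the observed value is written back into *old - is what
keeps the conditional loops elsewhere in this file down to a single read.
A minimal sketch of the idiom, with THRESHOLD as a made-up bound:

	s64 c = atomic64_read(v);

	do {
		if (c >= THRESHOLD)
			return false;
	} while (!atomic64_try_cmpxchg(v, &c, c + 1));

	return true;

Because a failed atomic64_try_cmpxchg() refreshes c, the loop re-checks
its bound against the current value without re-reading the atomic.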
@@ -2060,7 +2059,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2075,7 +2074,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) #else /* atomic64_try_cmpxchg_relaxed */ #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); @@ -2086,7 +2085,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { __atomic_release_fence(); @@ -2096,7 +2095,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { bool ret; @@ -2120,7 +2119,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { return atomic64_sub_return(i, v) == 0; @@ -2137,7 +2136,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) * returns true if the result is 0, or false for all other * cases. */ -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { return atomic64_dec_return(v) == 0; @@ -2154,7 +2153,7 @@ atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { return atomic64_inc_return(v) == 0; @@ -2172,7 +2171,7 @@ atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { return atomic64_add_return(i, v) < 0; @@ -2190,7 +2189,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { s64 c = atomic64_read(v); @@ -2215,7 +2214,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { return atomic64_fetch_add_unless(v, a, u) != u; @@ -2231,7 +2230,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
*/ -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { return atomic64_add_unless(v, 1, 0); @@ -2240,7 +2239,7 @@ atomic64_inc_not_zero(atomic64_t *v) #endif #ifndef atomic64_inc_unless_negative -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2256,7 +2255,7 @@ atomic64_inc_unless_negative(atomic64_t *v) #endif #ifndef atomic64_dec_unless_positive -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2272,7 +2271,7 @@ atomic64_dec_unless_positive(atomic64_t *v) #endif #ifndef atomic64_dec_if_positive -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { s64 dec, c = atomic64_read(v); @@ -2288,8 +2287,5 @@ atomic64_dec_if_positive(atomic64_t *v) #define atomic64_dec_if_positive atomic64_dec_if_positive #endif -#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) - #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 25de4a2804d70f57e994fe3b419148658bb5378a +// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 4c0d009a46f0..571a11008ab5 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -25,6 +25,12 @@ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. */ +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. 
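
The atomic*_cond_read_{acquire,relaxed}() macros, which the hunk above moves out of the generated fallback header and into atomic.h proper, expand to smp_cond_load_*(): spin until a condition over the freshly loaded counter value holds, then (for the acquire flavour) order everything after the final load. A userspace C11 approximation, with a fixed "non-negative" condition standing in for the kernel's arbitrary VAL expression (the real macro also relaxes the CPU while spinning):

#include <stdatomic.h>

/* Roughly atomic_cond_read_acquire(v, VAL >= 0); illustrative only. */
static inline int cond_read_acquire_nonneg(_Atomic int *v)
{
	int val;

	while ((val = atomic_load_explicit(v, memory_order_relaxed)) < 0)
		;	/* spin; real code would cpu_relax() here */

	atomic_thread_fence(memory_order_acquire);
	return val;
}
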
In the case where the relaxed @@ -71,7 +77,12 @@ __ret; \ }) +#ifdef ARCH_ATOMIC +#include <linux/atomic-arch-fallback.h> +#include <asm-generic/atomic-instrumented.h> +#else #include <linux/atomic-fallback.h> +#endif #include <asm-generic/atomic-long.h> diff --git a/include/linux/audit.h b/include/linux/audit.h index f9ceae57ca8d..3fcd9ee49734 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -19,7 +19,7 @@ struct audit_sig_info { uid_t uid; pid_t pid; - char ctx[0]; + char ctx[]; }; struct audit_buffer; @@ -94,6 +94,12 @@ struct audit_ntp_data { struct audit_ntp_data {}; #endif +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER, + AUDIT_XT_OP_REPLACE, + AUDIT_XT_OP_UNREGISTER, +}; + extern int is_audit_feature_set(int which); extern int __init audit_register_class(int class, unsigned *list); @@ -379,6 +385,8 @@ extern void __audit_log_kern_module(char *name); extern void __audit_fanotify(unsigned int response); extern void __audit_tk_injoffset(struct timespec64 offset); extern void __audit_ntp_log(const struct audit_ntp_data *ad); +extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, + enum audit_nfcfgop op); static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { @@ -514,6 +522,14 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad) __audit_ntp_log(ad); } +static inline void audit_log_nfcfg(const char *name, u8 af, + unsigned int nentries, + enum audit_nfcfgop op) +{ + if (audit_enabled) + __audit_log_nfcfg(name, af, nentries, op); +} + extern int audit_n_rules; extern int audit_signals; #else /* CONFIG_AUDITSYSCALL */ @@ -646,6 +662,12 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad) static inline void audit_ptrace(struct task_struct *t) { } + +static inline void audit_log_nfcfg(const char *name, u8 af, + unsigned int nentries, + enum audit_nfcfgop op) +{ } + #define audit_n_rules 0 #define audit_signals 0 #endif /* CONFIG_AUDITSYSCALL */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index ca956b672ac0..40bad71865ea 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -476,6 +476,7 @@ struct virtchnl_rss_key { u16 vsi_id; u16 key_len; u8 key[1]; /* RSS hash key, packed bytes */ + u8 pad[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); @@ -484,6 +485,7 @@ struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; u8 lut[1]; /* RSS lookup table */ + u8 pad[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); @@ -572,6 +574,7 @@ struct virtchnl_filter { enum virtchnl_action action; u32 action_meta; u8 field_flags; + u8 pad[3]; }; VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); @@ -610,6 +613,7 @@ struct virtchnl_pf_event { /* link_speed provided in Mbps */ u32 link_speed; u8 link_status; + u8 pad[3]; } link_event_adv; } event_data; @@ -635,6 +639,7 @@ struct virtchnl_iwarp_qv_info { u16 ceq_idx; u16 aeq_idx; u8 itr_idx; + u8 pad[3]; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 4fc87dee005a..90a7e844a098 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -54,7 +54,6 @@ enum wb_reason { WB_REASON_SYNC, WB_REASON_PERIODIC, WB_REASON_LAPTOP_TIMER, - WB_REASON_FREE_MORE_MEM, WB_REASON_FS_FREE_SPACE, /* * There is no bdi forker thread any more and works are done @@ -194,8 +193,6 @@ struct backing_dev_info { congested_fn *congested_fn; /* Function pointer if device is md/dm */ void *congested_data; /* Pointer to aux data for 
congested func */ - const char *name; - struct kref refcnt; /* Reference counter for the structure */ unsigned int capabilities; /* Device capabilities */ unsigned int min_ratio; @@ -220,6 +217,7 @@ struct backing_dev_info { wait_queue_head_t wb_waitq; struct device *dev; + char dev_name[64]; struct device *owner; struct timer_list laptop_mode_wb_timer; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f88197c1ffc2..6b3504bf7a42 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -33,14 +33,10 @@ int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); __printf(2, 0) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args); -int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); +void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); -struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id); -static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask) -{ - return bdi_alloc_node(gfp_mask, NUMA_NO_NODE); -} +struct backing_dev_info *bdi_alloc(int node_id); void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); @@ -505,13 +501,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi) (1 << WB_async_congested)); } -extern const char *bdi_unknown_name; - -static inline const char *bdi_dev_name(struct backing_dev_info *bdi) -{ - if (!bdi || !bdi->dev) - return bdi_unknown_name; - return dev_name(bdi->dev); -} +const char *bdi_dev_name(struct backing_dev_info *bdi); #endif /* _LINUX_BACKING_DEV_H */ diff --git a/include/linux/backlight.h b/include/linux/backlight.h index c7d6b2e8c3b5..56e4580d4f55 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -190,6 +190,7 @@ extern void backlight_force_update(struct backlight_device *bd, extern int backlight_register_notifier(struct notifier_block *nb); extern int backlight_unregister_notifier(struct notifier_block *nb); extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +struct backlight_device *backlight_device_get_by_name(const char *name); extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) diff --git a/include/linux/bch.h b/include/linux/bch.h index aa765af85c38..85fdce83d4e2 100644 --- a/include/linux/bch.h +++ b/include/linux/bch.h @@ -33,6 +33,7 @@ * @cache: log-based polynomial representation buffer * @elp: error locator polynomial * @poly_2t: temporary polynomials of degree 2t + * @swap_bits: swap bits within data and syndrome bytes */ struct bch_control { unsigned int m; @@ -51,16 +52,18 @@ struct bch_control { int *cache; struct gf_poly *elp; struct gf_poly *poly_2t[4]; + bool swap_bits; }; -struct bch_control *init_bch(int m, int t, unsigned int prim_poly); +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits); -void free_bch(struct bch_control *bch); +void bch_free(struct bch_control *bch); -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc); -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int 
*syn, unsigned int *errloc); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index a345d9fed3d8..4a20b7517dd0 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -26,35 +26,27 @@ struct linux_binprm { unsigned long p; /* current top of mem */ unsigned long argmin; /* rlimit marker for copy_strings() */ unsigned int + /* Should an execfd be passed to userspace? */ + have_execfd:1, + + /* Use the creds of a script (see binfmt_misc) */ + execfd_creds:1, /* - * True after the bprm_set_creds hook has been called once - * (multiple calls can be made via prepare_binprm() for - * binfmt_script/misc). - */ - called_set_creds:1, - /* - * True if most recent call to the commoncaps bprm_set_creds - * hook (due to multiple prepare_binprm() calls from the - * binfmt_script/misc handlers) resulted in elevated - * privileges. - */ - cap_elevated:1, - /* - * Set by bprm_set_creds hook to indicate a privilege-gaining - * exec has happened. Used to sanitize execution environment - * and to set AT_SECURE auxv for glibc. + * Set by bprm_creds_for_exec hook to indicate a + * privilege-gaining exec has happened. Used to set + * AT_SECURE auxv for glibc. */ secureexec:1, /* - * Set by flush_old_exec, when exec_mmap has been called. - * This is past the point of no return, when the - * exec_update_mutex has been taken. + * Set when errors can no longer be returned to the + * original userspace. */ - called_exec_mmap:1; + point_of_no_return:1; #ifdef __alpha__ unsigned int taso:1; #endif - unsigned int recursion_depth; /* only for search_binary_handler() */ + struct file * executable; /* Executable to pass to the interpreter */ + struct file * interpreter; struct file * file; struct cred *cred; /* new credentials */ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ @@ -65,7 +57,7 @@ struct linux_binprm { of the time same as filename, but could be different for binfmt_{misc,script} */ unsigned interp_flags; - unsigned interp_data; + int execfd; /* File descriptor of the executable */ unsigned long loader, exec; struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. 
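
Looking back at the bch.h hunk above: the library's entry points gain a bch_ namespace prefix (init_bch() becomes bch_init(), and so on) and bch_init() grows a swap_bits argument for controllers that store bits reversed within data and syndrome bytes. A minimal in-kernel usage sketch under those declarations; the field order m, correction strength t, and the helper itself are illustrative:

#include <linux/bch.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical helper: protect 'data' with BCH(m=13, t=4) parity bytes.
 * 'ecc' must hold bch->ecc_bytes bytes and be zeroed by the caller. */
static int example_bch_protect(const u8 *data, unsigned int len, u8 *ecc)
{
	struct bch_control *bch;

	bch = bch_init(13, 4, 0, false);	/* prim_poly=0: use the default */
	if (!bch)
		return -EINVAL;

	bch_encode(bch, data, len, ecc);
	bch_free(bch);
	return 0;
}
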
*/ @@ -76,10 +68,6 @@ struct linux_binprm { #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) -/* fd of the binary should be passed to the interpreter */ -#define BINPRM_FLAGS_EXECFD_BIT 1 -#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) - /* filename of the binary will be inaccessible after exec */ #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) @@ -123,10 +111,8 @@ static inline void insert_binfmt(struct linux_binfmt *fmt) extern void unregister_binfmt(struct linux_binfmt *); -extern int prepare_binprm(struct linux_binprm *); extern int __must_check remove_arg_zero(struct linux_binprm *); -extern int search_binary_handler(struct linux_binprm *); -extern int flush_old_exec(struct linux_binprm * bprm); +extern int begin_new_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); extern void finalize_exec(struct linux_binprm *bprm); extern void would_dump(struct linux_binprm *, struct file *); @@ -144,9 +130,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, extern int transfer_args_to_stack(struct linux_binprm *bprm, unsigned long *sp_location); extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); -extern int copy_strings_kernel(int argc, const char *const *argv, - struct linux_binprm *bprm); -extern void install_exec_creds(struct linux_binprm *bprm); +int copy_string_kernel(const char *arg, struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); diff --git a/include/linux/bio.h b/include/linux/bio.h index c1c0f9ea4e63..91676d4b2dfe 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -70,7 +70,7 @@ static inline bool bio_has_data(struct bio *bio) return false; } -static inline bool bio_no_advance_iter(struct bio *bio) +static inline bool bio_no_advance_iter(const struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || @@ -138,8 +138,8 @@ static inline bool bio_next_segment(const struct bio *bio, #define bio_for_each_segment_all(bvl, bio, iter) \ for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) -static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, - unsigned bytes) +static inline void bio_advance_iter(const struct bio *bio, + struct bvec_iter *iter, unsigned int bytes) { iter->bi_sector += bytes >> 9; @@ -169,6 +169,14 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, #define bio_for_each_bvec(bvl, bio, iter) \ __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) +/* + * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the + * same reasons as bio_for_each_segment_all(). 
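
A recurring change in this section, first seen in audit_sig_info's ctx[] and repeated below for bio_integrity_payload, blk_mq_hw_ctx, struct bio, peak_canfd, can_skb_priv and cb710, is the conversion of GNU zero-length arrays ([0]) to C99 flexible array members ([]). The spelling matters because a flexible array member carries real type information that fortified memcpy()/memset() and compiler bounds checks can use, while [0] defeated them. A standalone sketch with an invented struct (in the kernel the allocation would be sized with struct_size()):

#include <stdlib.h>
#include <string.h>

struct msg {
	unsigned short len;
	unsigned char data[];	/* was: data[0] */
};

static struct msg *msg_alloc(const void *payload, unsigned short len)
{
	struct msg *m = malloc(sizeof(*m) + len);

	if (!m)
		return NULL;
	m->len = len;
	memcpy(m->data, payload, len);
	return m;
}
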
+ */ +#define bio_for_each_bvec_all(bvl, bio, i) \ + for (i = 0, bvl = bio_first_bvec_all(bio); \ + i < (bio)->bi_vcnt; i++, bvl++) \ + #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) static inline unsigned bio_segments(struct bio *bio) @@ -319,7 +327,7 @@ struct bio_integrity_payload { struct work_struct bip_work; /* I/O completion */ struct bio_vec *bip_vec; - struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ + struct bio_vec bip_inline_vecs[];/* embedded bvec array */ }; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -417,6 +425,7 @@ static inline void bio_io_error(struct bio *bio) static inline void bio_wouldblock_error(struct bio *bio) { + bio_set_flag(bio, BIO_QUIET); bio->bi_status = BLK_STS_AGAIN; bio_endio(bio); } @@ -444,12 +453,6 @@ void bio_release_pages(struct bio *bio, bool mark_dirty); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); -void generic_start_io_acct(struct request_queue *q, int op, - unsigned long sectors, struct hd_struct *part); -void generic_end_io_acct(struct request_queue *q, int op, - struct hd_struct *part, - unsigned long start_time); - extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 9acf654f0b19..99f2ac30b1d9 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -72,7 +72,7 @@ static inline int get_bitmask_order(unsigned int count) static __always_inline unsigned long hweight_long(unsigned long w) { - return sizeof(w) == 4 ? hweight32(w) : hweight64(w); + return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); } /** diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 35f8ffe92b70..a57ebe2f00ab 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -607,12 +607,14 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, u64_stats_update_begin(&bis->sync); /* - * If the bio is flagged with BIO_QUEUE_ENTERED it means this - * is a split bio and we would have already accounted for the - * size of the bio. + * If the bio is flagged with BIO_CGROUP_ACCT it means this is a + * split bio and we would have already accounted for the size of + * the bio. */ - if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) + if (!bio_flagged(bio, BIO_CGROUP_ACCT)) { + bio_set_flag(bio, BIO_CGROUP_ACCT); bis->cur.bytes[rwd] += bio->bi_iter.bi_size; + } bis->cur.ios[rwd]++; u64_stats_update_end(&bis->sync); @@ -629,6 +631,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q, static inline void blkcg_use_delay(struct blkcg_gq *blkg) { + if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) + return; if (atomic_add_return(1, &blkg->use_delay) == 1) atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); } @@ -637,6 +641,8 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) { int old = atomic_read(&blkg->use_delay); + if (WARN_ON_ONCE(old < 0)) + return 0; if (old == 0) return 0; @@ -661,20 +667,39 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) return 1; } +/** + * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount + * @blkg: target blkg + * @delay: delay duration in nsecs + * + * When enabled with this function, the delay is not decayed and must be + * explicitly cleared with blkcg_clear_delay(). Must not be mixed with + * blkcg_[un]use_delay() and blkcg_add_delay() usages. 
+ */ +static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) +{ + int old = atomic_read(&blkg->use_delay); + + /* We only want 1 person setting the congestion count for this blkg. */ + if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); + + atomic64_set(&blkg->delay_nsec, delay); +} + +/** + * blkcg_clear_delay - Disable allocator delay mechanism + * @blkg: target blkg + * + * Disable use_delay mechanism. See blkcg_set_delay(). + */ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) { int old = atomic_read(&blkg->use_delay); - if (!old) - return; + /* We only want 1 person clearing the congestion count for this blkg. */ - while (old) { - int cur = atomic_cmpxchg(&blkg->use_delay, old, 0); - if (cur == old) { - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); - break; - } - old = cur; - } + if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); } void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h new file mode 100644 index 000000000000..e82342907f2b --- /dev/null +++ b/include/linux/blk-crypto.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef __LINUX_BLK_CRYPTO_H +#define __LINUX_BLK_CRYPTO_H + +#include <linux/types.h> + +enum blk_crypto_mode_num { + BLK_ENCRYPTION_MODE_INVALID, + BLK_ENCRYPTION_MODE_AES_256_XTS, + BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, + BLK_ENCRYPTION_MODE_ADIANTUM, + BLK_ENCRYPTION_MODE_MAX, +}; + +#define BLK_CRYPTO_MAX_KEY_SIZE 64 +/** + * struct blk_crypto_config - an inline encryption key's crypto configuration + * @crypto_mode: encryption algorithm this key is for + * @data_unit_size: the data unit size for all encryption/decryptions with this + * key. This is the size in bytes of each individual plaintext and + * ciphertext. This is always a power of 2. It might be e.g. the + * filesystem block size or the disk sector size. + * @dun_bytes: the maximum number of bytes of DUN used when using this key + */ +struct blk_crypto_config { + enum blk_crypto_mode_num crypto_mode; + unsigned int data_unit_size; + unsigned int dun_bytes; +}; + +/** + * struct blk_crypto_key - an inline encryption key + * @crypto_cfg: the crypto configuration (like crypto_mode, key size) for this + * key + * @data_unit_size_bits: log2 of data_unit_size + * @size: size of this key in bytes (determined by @crypto_cfg.crypto_mode) + * @raw: the raw bytes of this key. Only the first @size bytes are used. + * + * A blk_crypto_key is immutable once created, and many bios can reference it at + * the same time. It must not be freed until all bios using it have completed + * and it has been evicted from all devices on which it may have been used. + */ +struct blk_crypto_key { + struct blk_crypto_config crypto_cfg; + unsigned int data_unit_size_bits; + unsigned int size; + u8 raw[BLK_CRYPTO_MAX_KEY_SIZE]; +}; + +#define BLK_CRYPTO_MAX_IV_SIZE 32 +#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE / sizeof(u64)) + +/** + * struct bio_crypt_ctx - an inline encryption context + * @bc_key: the key, algorithm, and data unit size to use + * @bc_dun: the data unit number (starting IV) to use + * + * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for + * write requests) or decrypted (for read requests) inline by the storage device + * or controller, or by the crypto API fallback. 
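
The rewritten blkcg_set_delay()/blkcg_clear_delay() above hinge on a single winning cmpxchg: only the caller that actually flips use_delay may touch the cgroup's shared congestion count, so a set is balanced by exactly one clear even under races. A userspace C11 analogue of the idiom (illustrative; the kernel likewise encodes "explicitly set" as -1):

#include <stdatomic.h>

static _Atomic int use_delay;
static _Atomic int congestion_count;

static void set_delay(void)
{
	int old = atomic_load(&use_delay);

	/* only the winner of the 0 -> -1 transition bumps the count */
	if (old == 0 && atomic_compare_exchange_strong(&use_delay, &old, -1))
		atomic_fetch_add(&congestion_count, 1);
}

static void clear_delay(void)
{
	int old = atomic_load(&use_delay);

	/* only the winner of the non-zero -> 0 transition drops it */
	if (old != 0 && atomic_compare_exchange_strong(&use_delay, &old, 0))
		atomic_fetch_sub(&congestion_count, 1);
}
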
+ */ +struct bio_crypt_ctx { + const struct blk_crypto_key *bc_key; + u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; +}; + +#include <linux/blk_types.h> +#include <linux/blkdev.h> + +struct request; +struct request_queue; + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +static inline bool bio_has_crypt_ctx(struct bio *bio) +{ + return bio->bi_crypt_context; +} + +void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key, + const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], + gfp_t gfp_mask); + +bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc, + unsigned int bytes, + const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]); + +int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, + enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, + unsigned int data_unit_size); + +int blk_crypto_start_using_key(const struct blk_crypto_key *key, + struct request_queue *q); + +int blk_crypto_evict_key(struct request_queue *q, + const struct blk_crypto_key *key); + +bool blk_crypto_config_supported(struct request_queue *q, + const struct blk_crypto_config *cfg); + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline bool bio_has_crypt_ctx(struct bio *bio) +{ + return false; +} + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ + +void __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask); +static inline void bio_crypt_clone(struct bio *dst, struct bio *src, + gfp_t gfp_mask) +{ + if (bio_has_crypt_ctx(src)) + __bio_crypt_clone(dst, src, gfp_mask); +} + +#endif /* __LINUX_BLK_CRYPTO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index f389d7c724bd..d6fcae17da5a 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -140,6 +140,8 @@ struct blk_mq_hw_ctx { */ atomic_t nr_active; + /** @cpuhp_online: List to store request if CPU is going to die */ + struct hlist_node cpuhp_online; /** @cpuhp_dead: List to store request if some CPU die. */ struct hlist_node cpuhp_dead; /** @kobj: Kernel object for sysfs. */ @@ -173,7 +175,7 @@ struct blk_mq_hw_ctx { * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also * blk_mq_hw_ctx_size(). 
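
Tying the new blk-crypto declarations together: a hedged sketch of how a filesystem might program an inline-encryption key and attach a context to a bio. The mode, 4096-byte data unit, 8-byte DUN and starting DUN of 0 are placeholder choices, and error unwinding is elided:

#include <linux/blk-crypto.h>
#include <linux/gfp.h>

static int example_encrypt_bio(struct bio *bio, struct request_queue *q,
			       struct blk_crypto_key *key, const u8 *raw_key)
{
	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
	int err;

	err = blk_crypto_init_key(key, raw_key, BLK_ENCRYPTION_MODE_AES_256_XTS,
				  8, 4096);
	if (err)
		return err;

	/* may set up the crypto API fallback if q lacks inline-crypto support */
	err = blk_crypto_start_using_key(key, q);
	if (err)
		return err;

	bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
	return 0;
}
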
*/ - struct srcu_struct srcu[0]; + struct srcu_struct srcu[]; }; /** @@ -391,6 +393,11 @@ struct blk_mq_ops { enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, + /* + * Set when this device requires underlying blk-mq device for + * completing IO: + */ + BLK_MQ_F_STACKING = 1 << 2, BLK_MQ_F_BLOCKING = 1 << 5, BLK_MQ_F_NO_SCHED = 1 << 6, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, @@ -400,6 +407,9 @@ enum { BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, + /* hw queue is inactive after all its CPUs become offline */ + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_MAX_DEPTH = 10240, BLK_MQ_CPU_WORK_BATCH = 8, @@ -494,6 +504,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); bool blk_mq_complete_request(struct request *rq); +void blk_mq_force_complete_rq(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); @@ -508,6 +519,7 @@ void blk_mq_unquiesce_queue(struct request_queue *q); void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv); void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); @@ -577,4 +589,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq) rq->q->mq_ops->cleanup_rq(rq); } +blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio); + #endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 70254ae11769..ccb895f911b1 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -18,6 +18,7 @@ struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); +struct bio_crypt_ctx; /* * Block error status values. See block/blk-core:blk_errors for the details. @@ -63,6 +64,18 @@ typedef u8 __bitwise blk_status_t; */ #define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) +/* + * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone + * related resources are unavailable, but the driver can guarantee the queue + * will be rerun in the future once the resources become available again. + * + * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references + * a zone specific resource and IO to a different zone on the same device could + * still be served. Examples of that are zones that are write-locked, but a read + * to the same zone could be served. + */ +#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14) + /** * blk_path_error - returns true if error may be path related * @error: status the request was completed with @@ -173,6 +186,11 @@ struct bio { u64 bi_iocost_cost; #endif #endif + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct bio_crypt_ctx *bi_crypt_context; +#endif + union { #if defined(CONFIG_BLK_DEV_INTEGRITY) struct bio_integrity_payload *bi_integrity; /* data integrity */ @@ -198,7 +216,7 @@ struct bio { * double allocations for a small number of bio_vecs. This member * MUST obviously be kept at the very end of the bio. 
*/ - struct bio_vec bi_inline_vecs[0]; + struct bio_vec bi_inline_vecs[]; }; #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) @@ -220,7 +238,7 @@ enum { * throttling rules. Don't do it again. */ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion * of this bio. */ - BIO_QUEUE_ENTERED, /* can use blk_queue_enter_live() */ + BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ BIO_TRACKED, /* set if bio goes through the rq_qos path */ BIO_FLAG_LAST }; @@ -296,6 +314,8 @@ enum req_opf { REQ_OP_ZONE_CLOSE = 11, /* Transition a zone to full */ REQ_OP_ZONE_FINISH = 12, + /* write data at the current zone write pointer */ + REQ_OP_ZONE_APPEND = 13, /* SCSI passthrough using struct scsi_request */ REQ_OP_SCSI_IN = 32, @@ -323,7 +343,6 @@ enum req_flag_bits { __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ - __REQ_NOWAIT_INLINE, /* Return would-block error inline */ /* * When a shared kthread needs to issue a bio for a cgroup, doing * so synchronously can lead to priority inversions as the kthread @@ -358,7 +377,6 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) -#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 32868fbedc9e..8fd900998b4e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -43,6 +43,7 @@ struct pr_ops; struct rq_qos; struct blk_queue_stats; struct blk_stat_callback; +struct blk_keyslot_manager; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -82,8 +83,6 @@ typedef __u32 __bitwise req_flags_t; /* set for "ide_preempt" requests and also for requests for which the SCSI "quiesce" state must be ignored. */ #define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) -/* contains copies of user pages */ -#define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) /* vaguely specified driver internal error. 
Ignored by the block layer */ #define RQF_FAILED ((__force req_flags_t)(1 << 10)) /* don't warn about errors */ @@ -223,11 +222,14 @@ struct request { unsigned short nr_integrity_segments; #endif +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct bio_crypt_ctx *crypt_ctx; + struct blk_ksm_keyslot *crypt_keyslot; +#endif + unsigned short write_hint; unsigned short ioprio; - unsigned int extra_len; /* length of alignment and padding */ - enum mq_rq_state state; refcount_t ref; @@ -290,7 +292,6 @@ struct blk_queue_ctx; typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); struct bio_vec; -typedef int (dma_drain_needed_fn)(struct request *); enum blk_eh_timer_return { BLK_EH_DONE, /* drivers has completed the command */ @@ -336,6 +337,7 @@ struct queue_limits { unsigned int max_hw_discard_sectors; unsigned int max_write_same_sectors; unsigned int max_write_zeroes_sectors; + unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; @@ -361,7 +363,8 @@ unsigned int blkdev_nr_zones(struct gendisk *disk); extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); -extern int blk_revalidate_disk_zones(struct gendisk *disk); +int blk_revalidate_disk_zones(struct gendisk *disk, + void (*update_driver_data)(struct gendisk *disk)); extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); @@ -399,7 +402,6 @@ struct request_queue { struct rq_qos *rq_qos; make_request_fn *make_request_fn; - dma_drain_needed_fn *dma_drain_needed; const struct blk_mq_ops *mq_ops; @@ -469,11 +471,14 @@ struct request_queue { */ unsigned long nr_requests; /* Max # of requests */ - unsigned int dma_drain_size; - void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + /* Inline crypto capabilities */ + struct blk_keyslot_manager *ksm; +#endif + unsigned int rq_timeout; int poll_nsec; @@ -729,6 +734,16 @@ static inline unsigned int blk_queue_nr_zones(struct request_queue *q) { return 0; } +static inline bool blk_queue_zone_is_seq(struct request_queue *q, + sector_t sector) +{ + return false; +} +static inline unsigned int blk_queue_zone_no(struct request_queue *q, + sector_t sector) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline bool rq_is_sync(struct request *rq) @@ -747,6 +762,9 @@ static inline bool rq_mergeable(struct request *rq) if (req_op(rq) == REQ_OP_WRITE_ZEROES) return false; + if (req_op(rq) == REQ_OP_ZONE_APPEND) + return false; + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; if (rq->rq_flags & RQF_NOMERGE_FLAGS) @@ -1081,6 +1099,8 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q, extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_max_zone_append_sectors(struct request_queue *q, + unsigned int max_zone_append_sectors); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); @@ -1099,9 +1119,6 @@ extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset); extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned 
int); -extern int blk_queue_dma_drain(struct request_queue *q, - dma_drain_needed_fn *dma_drain_needed, - void *buf, unsigned int size); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); extern void blk_queue_dma_alignment(struct request_queue *, int); @@ -1138,7 +1155,15 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) return max_t(unsigned short, rq->nr_phys_segments, 1); } -extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +int __blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist, struct scatterlist **last_sg); +static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *last_sg = NULL; + + return __blk_rq_map_sg(q, rq, sglist, &last_sg); +} extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); @@ -1206,7 +1231,9 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) !list_empty(&plug->cb_list)); } -extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); +extern void blk_io_schedule(void); + +int blkdev_issue_flush(struct block_device *, gfp_t); extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); @@ -1293,6 +1320,11 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } +static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) +{ + return q->limits.max_zone_append_sectors; +} + static inline unsigned queue_logical_block_size(const struct request_queue *q) { int retval = 512; @@ -1551,6 +1583,12 @@ struct blk_integrity *bdev_get_integrity(struct block_device *bdev) return blk_get_integrity(bdev->bd_disk); } +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return q->integrity.profile; +} + static inline bool blk_integrity_rq(struct request *rq) { return rq->cmd_flags & REQ_INTEGRITY; @@ -1631,6 +1669,11 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { return NULL; } +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return false; +} static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) { return 0; @@ -1682,6 +1725,25 @@ static inline struct bio_vec *rq_integrity_vec(struct request *rq) #endif /* CONFIG_BLK_DEV_INTEGRITY */ +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); + +void blk_ksm_unregister(struct request_queue *q); + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm, + struct request_queue *q) +{ + return true; +} + +static inline void blk_ksm_unregister(struct request_queue *q) { } + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ + + struct block_device_operations { int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); @@ -1719,6 +1781,7 @@ extern int bdev_write_page(struct block_device *, sector_t, struct page *, #ifdef CONFIG_BLK_DEV_ZONED bool blk_req_needs_zone_write_lock(struct request *rq); +bool blk_req_zone_write_trylock(struct request *rq); void __blk_req_zone_write_lock(struct request *rq); void __blk_req_zone_write_unlock(struct request *rq); @@ 
-1809,8 +1872,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) return false; } -static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, - sector_t *error_sector) +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) { return 0; } @@ -1830,4 +1892,32 @@ static inline void blk_wake_io_task(struct task_struct *waiter) wake_up_process(waiter); } +#ifdef CONFIG_BLOCK +unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, + unsigned int op); +void disk_end_io_acct(struct gendisk *disk, unsigned int op, + unsigned long start_time); + +/** + * bio_start_io_acct - start I/O accounting for bio based drivers + * @bio: bio to start account for + * + * Returns the start time that should be passed back to bio_end_io_acct(). + */ +static inline unsigned long bio_start_io_acct(struct bio *bio) +{ + return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio)); +} + +/** + * bio_end_io_acct - end I/O accounting for bio based drivers + * @bio: bio to end account for + * @start: start time returned by bio_start_io_acct() + */ +static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) +{ + return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time); +} +#endif /* CONFIG_BLOCK */ + #endif diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index c11b413d5b1a..c66c545e161a 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -57,8 +57,6 @@ struct bpf_cgroup_link { enum bpf_attach_type type; }; -extern const struct bpf_link_ops bpf_cgroup_link_lops; - struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; @@ -100,8 +98,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_cgroup_link *link, enum bpf_attach_type type); -int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link, - struct bpf_prog *new_prog); int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -112,8 +108,6 @@ int cgroup_bpf_attach(struct cgroup *cgrp, u32 flags); int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type); -int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog, - struct bpf_prog *new_prog); int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -138,8 +132,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, - void __user *buf, size_t *pcount, - loff_t *ppos, void **new_buf, + void **buf, size_t *pcount, loff_t *ppos, enum bpf_attach_type type); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, @@ -302,12 +295,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, }) -#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ +#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ - buf, count, pos, nbuf, \ + buf, count, pos, \ BPF_CGROUP_SYSCTL); \ __ret; \ }) @@ -354,7 +347,6 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, #else struct bpf_prog; -struct bpf_link; struct cgroup_bpf {}; static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } static inline void 
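
The bio_start_io_acct()/bio_end_io_acct() pair above replaces the generic_start_io_acct()/generic_end_io_acct() interface that an earlier hunk removed from bio.h. A hedged sketch of the intended shape in a bio-based driver, with the per-I/O context invented for illustration (real drivers stash the start time somewhere until their endio path runs):

#include <linux/bio.h>
#include <linux/blkdev.h>

struct example_io {
	struct bio	*bio;
	unsigned long	start_time;
};

static void example_submit(struct example_io *io)
{
	io->start_time = bio_start_io_acct(io->bio);
	/* ... queue io->bio to the backing device ... */
}

static void example_endio(struct example_io *io)
{
	bio_end_io_acct(io->bio, io->start_time);
}
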
cgroup_bpf_offline(struct cgroup *cgrp) {} @@ -378,13 +370,6 @@ static inline int cgroup_bpf_link_attach(const union bpf_attr *attr, return -EINVAL; } -static inline int cgroup_bpf_replace(struct bpf_link *link, - struct bpf_prog *old_prog, - struct bpf_prog *new_prog) -{ - return -EINVAL; -} - static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -411,6 +396,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, } #define cgroup_bpf_enabled (0) +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) @@ -429,7 +415,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ optlen, max_optlen, retval) ({ retval; }) diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h new file mode 100644 index 000000000000..4052d649f36d --- /dev/null +++ b/include/linux/bpf-netns.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BPF_NETNS_H +#define _BPF_NETNS_H + +#include <linux/mutex.h> +#include <uapi/linux/bpf.h> + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + MAX_NETNS_BPF_ATTACH_TYPE +}; + +static inline enum netns_bpf_attach_type +to_netns_bpf_attach_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + case BPF_FLOW_DISSECTOR: + return NETNS_BPF_FLOW_DISSECTOR; + default: + return NETNS_BPF_INVALID; + } +} + +/* Protects updates to netns_bpf */ +extern struct mutex netns_bpf_mutex; + +union bpf_attr; +struct bpf_prog; + +#ifdef CONFIG_NET +int netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); +int netns_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog); +int netns_bpf_prog_detach(const union bpf_attr *attr); +int netns_bpf_link_create(const union bpf_attr *attr, + struct bpf_prog *prog); +#else +static inline int netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_prog_detach(const union bpf_attr *attr) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_link_create(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif + +#endif /* _BPF_NETNS_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index fd2b2322412d..07052d44bca1 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -19,6 +19,7 @@ #include <linux/mutex.h> #include <linux/module.h> #include <linux/kallsyms.h> +#include <linux/capability.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -31,6 +32,7 @@ struct seq_file; struct btf; struct btf_type; struct exception_table_entry; +struct seq_operations; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; @@ -88,6 +90,8 @@ struct 
bpf_map_ops { int (*map_direct_value_meta)(const struct bpf_map *map, u64 imm, u32 *off); int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); + __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, + struct poll_table_struct *pts); }; struct bpf_map_memory { @@ -118,7 +122,7 @@ struct bpf_map { struct bpf_map_memory memory; char name[BPF_OBJ_NAME_LEN]; u32 btf_vmlinux_value_type_id; - bool unpriv_array; + bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ /* 22 bytes hole */ @@ -242,6 +246,9 @@ enum bpf_arg_type { ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ + ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ + ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ + ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ }; /* type of values returned from helper functions */ @@ -253,6 +260,7 @@ enum bpf_return_type { RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ + RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -319,6 +327,9 @@ enum bpf_reg_type { PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ PTR_TO_BTF_ID, /* reg points to kernel struct */ + PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */ + PTR_TO_MEM, /* reg points to valid memory region */ + PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ }; /* The information passed from prog-specific *_is_valid_access @@ -641,6 +652,12 @@ struct bpf_jit_poke_descriptor { u16 reason; }; +/* reg_type info for ctx arguments */ +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; +}; + struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; @@ -652,6 +669,8 @@ struct bpf_prog_aux { u32 func_cnt; /* used by non-func prog as the number of func progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + u32 ctx_arg_info_size; + const struct bpf_ctx_arg_aux *ctx_arg_info; struct bpf_prog *linked_prog; bool verifier_zext; /* Zero extensions has been inserted by verifier. 
*/ bool offload_requested; @@ -987,6 +1006,7 @@ _out: \ #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); +extern struct mutex bpf_stats_enabled_mutex; /* * Block execution of BPF programs attached to instrumentation (perf, @@ -1020,15 +1040,18 @@ static inline void bpf_enable_instrumentation(void) extern const struct file_operations bpf_map_fops; extern const struct file_operations bpf_prog_fops; +extern const struct file_operations bpf_iter_fops; #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ extern const struct bpf_prog_ops _name ## _prog_ops; \ extern const struct bpf_verifier_ops _name ## _verifier_ops; #define BPF_MAP_TYPE(_id, _ops) \ extern const struct bpf_map_ops _ops; +#define BPF_LINK_TYPE(_id, _name) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE extern const struct bpf_prog_ops bpf_offload_prog_ops; extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; @@ -1077,29 +1100,59 @@ int generic_map_update_batch(struct bpf_map *map, int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); +struct bpf_map *bpf_map_get_curr_or_next(u32 *id); extern int sysctl_unprivileged_bpf_disabled; +static inline bool bpf_allow_ptr_leaks(void) +{ + return perfmon_capable(); +} + +static inline bool bpf_bypass_spec_v1(void) +{ + return perfmon_capable(); +} + +static inline bool bpf_bypass_spec_v4(void) +{ + return perfmon_capable(); +} + int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); struct bpf_link { atomic64_t refcnt; + u32 id; + enum bpf_link_type type; const struct bpf_link_ops *ops; struct bpf_prog *prog; struct work_struct work; }; +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + struct bpf_link_ops { void (*release)(struct bpf_link *link); void (*dealloc)(struct bpf_link *link); - + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog); + void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); + int (*fill_link_info)(const struct bpf_link *link, + struct bpf_link_info *info); }; -void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops, - struct bpf_prog *prog); -void bpf_link_cleanup(struct bpf_link *link, struct file *link_file, - int link_fd); +void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog); +int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); +int bpf_link_settle(struct bpf_link_primer *primer); +void bpf_link_cleanup(struct bpf_link_primer *primer); void bpf_link_inc(struct bpf_link *link); void bpf_link_put(struct bpf_link *link); int bpf_link_new_fd(struct bpf_link *link); @@ -1109,6 +1162,40 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd); int bpf_obj_pin_user(u32 ufd, const char __user *pathname); int bpf_obj_get_user(const char __user *pathname, int flags); +#define BPF_ITER_FUNC_PREFIX "bpf_iter_" +#define DEFINE_BPF_ITER_FUNC(target, args...) 
\ + extern int bpf_iter_ ## target(args); \ + int __init bpf_iter_ ## target(args) { return 0; } + +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data); +typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); + +#define BPF_ITER_CTX_ARG_MAX 2 +struct bpf_iter_reg { + const char *target; + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; + u32 ctx_arg_info_size; + struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; +}; + +struct bpf_iter_meta { + __bpf_md_ptr(struct seq_file *, seq); + u64 session_id; + u64 seq_num; +}; + +int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); +void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); +bool bpf_iter_prog_supported(struct bpf_prog *prog); +int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_iter_new_fd(struct bpf_link *link); +bool bpf_link_is_iter(struct bpf_link *link); +struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); +int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); + int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, @@ -1163,6 +1250,7 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog); +bool dev_map_can_have_prog(struct bpf_map *map); struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); void __cpu_map_flush(void); @@ -1215,6 +1303,7 @@ int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog, struct bpf_prog *bpf_prog_by_id(u32 id); +const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -1275,6 +1364,10 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map { return NULL; } +static inline bool dev_map_can_have_prog(struct bpf_map *map) +{ + return false; +} static inline void __dev_flush(void) { @@ -1365,6 +1458,12 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id) { return ERR_PTR(-ENOTSUPP); } + +static inline const struct bpf_func_proto * +bpf_base_func_proto(enum bpf_func_id func_id) +{ + return NULL; +} #endif /* CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, @@ -1502,6 +1601,7 @@ extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_get_numa_node_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; extern const struct bpf_func_proto bpf_ktime_get_ns_proto; +extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; @@ -1523,13 +1623,23 @@ extern const struct bpf_func_proto bpf_strtoul_proto; extern const struct bpf_func_proto bpf_tcp_sock_proto; extern const struct bpf_func_proto bpf_jiffies64_proto; extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; +extern const struct bpf_func_proto bpf_event_output_data_proto; +extern const struct bpf_func_proto bpf_ringbuf_output_proto; +extern const struct bpf_func_proto 
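
The bpf_link_primer API declared a little above (bpf_link_init(), bpf_link_prime(), bpf_link_settle(), bpf_link_cleanup()) splits link creation into a reserve/publish pair so an anonymous fd never escapes to userspace before the attach has succeeded. A hypothetical attach path under those declarations; the ops table and the attach step itself are placeholders:

#include <linux/bpf.h>

static int example_link_attach(struct bpf_link *link, struct bpf_prog *prog,
			       const struct bpf_link_ops *example_link_ops)
{
	struct bpf_link_primer primer;
	int err;

	bpf_link_init(link, BPF_LINK_TYPE_UNSPEC, example_link_ops, prog);

	err = bpf_link_prime(link, &primer);	/* reserves fd, file and ID */
	if (err)
		return err;	/* link was never primed; caller may free it */

	err = 0;		/* ... the actual attach work goes here ... */
	if (err) {
		bpf_link_cleanup(&primer);	/* undoes the reservation */
		return err;
	}

	return bpf_link_settle(&primer);	/* publishes and returns the fd */
}
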
bpf_ringbuf_reserve_proto; +extern const struct bpf_func_proto bpf_ringbuf_submit_proto; +extern const struct bpf_func_proto bpf_ringbuf_discard_proto; +extern const struct bpf_func_proto bpf_ringbuf_query_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); +const struct bpf_func_proto *tracing_prog_func_proto( + enum bpf_func_id func_id, const struct bpf_prog *prog); + /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); #if defined(CONFIG_NET) bool bpf_sock_common_is_valid_access(int off, int size, diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index ba0c2d56f8a3..a18ae82a298a 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -118,3 +118,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) #if defined(CONFIG_BPF_JIT) BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops) #endif +BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops) + +BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint) +BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing) +#ifdef CONFIG_CGROUP_BPF +BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup) +#endif +BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) +#ifdef CONFIG_NET +BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) +#endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6abd5a778fcd..ca08db4ffb5f 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -54,6 +54,8 @@ struct bpf_reg_state { u32 btf_id; /* for PTR_TO_BTF_ID */ + u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ + /* Max size from any of the above. */ unsigned long raw; }; @@ -63,6 +65,8 @@ struct bpf_reg_state { * offset, so they can share range knowledge. * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we * came from, when one is tested for != NULL. + * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation + * for the purpose of tracking that it's freed. * For PTR_TO_SOCKET this is used to share which pointers retain the * same reference to the socket, to determine proper reference freeing. 
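
The bpf_ringbuf_* prototypes exported above back the new BPF_MAP_TYPE_RINGBUF registered in the bpf_types.h hunk. A hedged BPF-program-side sketch in libbpf style; the section name, map size and payload are all illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);	/* must be a page-aligned power of two */
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_execve")
int log_execve(void *ctx)
{
	__u64 *slot = bpf_ringbuf_reserve(&rb, sizeof(*slot), 0);

	if (!slot)
		return 0;	/* ring full: reservation failed */

	*slot = bpf_ktime_get_ns();
	bpf_ringbuf_submit(slot, 0);	/* or bpf_ringbuf_discard() to drop */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
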
*/ @@ -375,6 +379,9 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo;
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 6462c5447872..6ad4c000661a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -15,7 +15,9 @@ #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 #define PHY_ID_BCM5395 0x0143bcf0 +#define PHY_ID_BCM53125 0x03625f20 #define PHY_ID_BCM54810 0x03625d00 +#define PHY_ID_BCM54811 0x03625cc0 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 @@ -24,6 +26,7 @@ #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM54612E 0x03625e60 #define PHY_ID_BCM54616S 0x03625d10 +#define PHY_ID_BCM54140 0xae025009 #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM89610 0x03625cd0 @@ -114,6 +117,14 @@ #define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10) #define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0) +#define MII_BCM54XX_RDB_ADDR 0x1e +#define MII_BCM54XX_RDB_DATA 0x1f + +/* legacy access control via rdb/expansion register */ +#define BCM54XX_RDB_REG0087 0x0087 +#define BCM54XX_EXP_REG7E (MII_BCM54XX_EXP_SEL_ER + 0x7E) +#define BCM54XX_ACCESS_MODE_LEGACY_EN BIT(15) + /* * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18) */ @@ -245,6 +256,7 @@ #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) #define BCM54810_SHD_CLK_CTL 0x3 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) +#define BCM54810_SHD_SCR3_TRDDAPD 0x0100 /* BCM54612E Registers */ #define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) @@ -289,4 +301,47 @@ #define MII_BRCM_CORE_EXPB0 0xB0 #define MII_BRCM_CORE_EXPB1 0xB1 +/* Enhanced Cable Diagnostics */ +#define BCM54XX_RDB_ECD_CTRL 0x2a0 +#define BCM54XX_EXP_ECD_CTRL (MII_BCM54XX_EXP_SEL_ER + 0xc0) + +#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT3 1 /* CAT3 or worse */ +#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT5 0 /* CAT5 or better */ +#define BCM54XX_ECD_CTRL_CABLE_TYPE_MASK BIT(0) /* cable type */ +#define BCM54XX_ECD_CTRL_INVALID BIT(3) /* invalid result */ +#define BCM54XX_ECD_CTRL_UNIT_CM 0 /* centimeters */ +#define BCM54XX_ECD_CTRL_UNIT_M 1 /* meters */ +#define BCM54XX_ECD_CTRL_UNIT_MASK BIT(10) /* cable length unit */ +#define BCM54XX_ECD_CTRL_IN_PROGRESS BIT(11) /* test in progress */ +#define BCM54XX_ECD_CTRL_BREAK_LINK BIT(12) /* unconnect link + * during test + */ +#define BCM54XX_ECD_CTRL_CROSS_SHORT_DIS BIT(13) /* disable inter-pair + * short check + */ +#define BCM54XX_ECD_CTRL_RUN BIT(15) /* run immediate */ + +#define BCM54XX_RDB_ECD_FAULT_TYPE 0x2a1 +#define BCM54XX_EXP_ECD_FAULT_TYPE (MII_BCM54XX_EXP_SEL_ER + 0xc1) +#define BCM54XX_ECD_FAULT_TYPE_INVALID 0x0 +#define BCM54XX_ECD_FAULT_TYPE_OK 0x1 +#define BCM54XX_ECD_FAULT_TYPE_OPEN 0x2 +#define BCM54XX_ECD_FAULT_TYPE_SAME_SHORT 0x3 /* short same pair */ +#define BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT 0x4 /* short different pairs */ +#define BCM54XX_ECD_FAULT_TYPE_BUSY 0x9 +#define BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK GENMASK(3, 0) +#define BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK GENMASK(7, 4) +#define BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK GENMASK(11, 8) +#define BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK GENMASK(15, 12) + +#define BCM54XX_RDB_ECD_PAIR_A_LENGTH_RESULTS 0x2a2 +#define BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc2) +#define BCM54XX_RDB_ECD_PAIR_B_LENGTH_RESULTS 0x2a3 +#define BCM54XX_EXP_ECD_PAIR_B_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc3) +#define BCM54XX_RDB_ECD_PAIR_C_LENGTH_RESULTS 0x2a4 +#define BCM54XX_EXP_ECD_PAIR_C_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc4) +#define BCM54XX_RDB_ECD_PAIR_D_LENGTH_RESULTS 0x2a5 +#define BCM54XX_EXP_ECD_PAIR_D_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc5) +#define BCM54XX_ECD_LENGTH_RESULTS_INVALID 0xffff + #endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h index 8ed53d7524ea..e66b711d091e 100644 --- a/include/linux/bsearch.h +++ b/include/linux/bsearch.h @@ -4,7 +4,29 @@ #include <linux/types.h> -void *bsearch(const void *key, const void *base, size_t num, size_t size, - cmp_func_t cmp); +static __always_inline +void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp) +{ + const char *pivot; + int result; + + while (num > 0) { + pivot = base + (num >> 1) * size; + result = cmp(key, pivot); + + if (result == 0) + return (void *)pivot; + + if (result > 0) { + base = pivot + size; + num--; + } + num >>= 1; + } + + return NULL; +} + +extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp); #endif /* _LINUX_BSEARCH_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index e0b020eaf32e..22fb11e2d2e0 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -189,6 +189,8 @@ struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block, void __brelse(struct buffer_head *); void __bforget(struct buffer_head *); void __breadahead(struct block_device *, sector_t block, unsigned int size); +void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size, + gfp_t gfp); struct buffer_head *__bread_gfp(struct block_device *, sector_t block, unsigned size, gfp_t gfp); void invalidate_bh_lrus(void); @@ -270,14 +272,6 @@ void buffer_init(void); * inline definitions */ -static inline void attach_page_buffers(struct page *page, - struct buffer_head *head) -{ - get_page(page); - SetPagePrivate(page); - set_page_private(page, (unsigned long)head); -} - static inline void get_bh(struct buffer_head *bh) { atomic_inc(&bh->b_count); @@ -319,6 +313,12 @@ sb_breadahead(struct super_block *sb, sector_t block) __breadahead(sb->s_bdev, block, sb->s_blocksize); } +static inline void +sb_breadahead_unmovable(struct super_block *sb, sector_t block) +{ + __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0); +} + static inline struct buffer_head * sb_getblk(struct super_block *sb, sector_t block) { diff --git a/include/linux/bvec.h b/include/linux/bvec.h index a81c13ac1972..ac0c7299d5b8 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -12,8 +12,17 @@ #include <linux/errno.h> #include <linux/mm.h> -/* - * was unsigned short, but we might as well be ready for > 64kB I/O pages +/** + * struct bio_vec - a contiguous range of physical memory addresses + * @bv_page: First page associated with the address range. + * @bv_len: Number of bytes in the address range. + * @bv_offset: Start of the address range relative to the start of @bv_page.
+ * + * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len: + * + * nth_page(@bv_page, n) == @bv_page + n + * + * This holds because page_is_mergeable() checks the above property. */ struct bio_vec { struct page *bv_page; diff --git a/include/linux/cache.h b/include/linux/cache.h index 750621e41d1c..1aa8009f6d06 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -15,8 +15,14 @@ /* * __read_mostly is used to keep rarely changing variables out of frequently - * updated cachelines. If an architecture doesn't support it, ignore the - * hint. + * updated cachelines. Its use should be reserved for data that is used + * frequently in hot paths. Performance traces can help decide when to use + * this. You want __read_mostly data to be tightly packed, so that in the + * best case multiple frequently read variables for a hot path will be next + * to each other in order to reduce the number of cachelines needed to + * execute a critical path. We should be mindful and selective of its use. + * ie: if you're going to use it please supply a *good* justification in your + * commit log */ #ifndef __read_mostly #define __read_mostly diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h index 511a37302fea..5fd627e9da19 100644 --- a/include/linux/can/dev/peak_canfd.h +++ b/include/linux/can/dev/peak_canfd.h @@ -189,7 +189,7 @@ struct __packed pucan_rx_msg { u8 client; __le16 flags; __le32 can_id; - u8 d[0]; + u8 d[]; }; /* uCAN error types */ @@ -266,7 +266,7 @@ struct __packed pucan_tx_msg { u8 client; __le16 flags; __le32 can_id; - u8 d[0]; + u8 d[]; }; /* build the cmd opcode_channel field with respect to the correct endianness */ diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index a954def26c0d..900b9f4e0605 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -34,7 +34,7 @@ struct can_skb_priv { int ifindex; int skbcnt; - struct can_frame cf[0]; + struct can_frame cf[]; }; static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) diff --git a/include/linux/capability.h b/include/linux/capability.h index ecce0f43c73a..b4345b38a6be 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -251,6 +251,15 @@ extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); +static inline bool perfmon_capable(void) +{ + return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN); +} + +static inline bool bpf_capable(void) +{ + return capable(CAP_BPF) || capable(CAP_SYS_ADMIN); +} /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/cb710.h b/include/linux/cb710.h index 60de3fedd3a7..405657a9a0d5 100644 --- a/include/linux/cb710.h +++ b/include/linux/cb710.h @@ -36,7 +36,7 @@ struct cb710_chip { unsigned slot_mask; unsigned slots; spinlock_t irq_lock; - struct cb710_slot slot[0]; + struct cb710_slot slot[]; }; /* NOTE: cb710_chip.slots is modified only during device init/exit and diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 528271c60018..8543fa59da72 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -94,6 +94,11 @@ struct cdrom_device_ops { struct packet_command *); }; 
+int cdrom_multisession(struct cdrom_device_info *cdi, + struct cdrom_multisession *info); +int cdrom_read_tocentry(struct cdrom_device_info *cdi, + struct cdrom_tocentry *entry); + /* the general block_device operations structure: */ extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode); @@ -104,7 +109,7 @@ extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, unsigned int clearing); extern int cdrom_media_changed(struct cdrom_device_info *); -extern int register_cdrom(struct cdrom_device_info *cdi); +extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi); extern void unregister_cdrom(struct cdrom_device_info *cdi); typedef struct { diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 525b7c3f1c81..e5ed1c541e7f 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -52,6 +52,7 @@ struct ceph_options { unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ unsigned long osd_request_timeout; /* jiffies */ + u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */ /* * any type that can't be simply compared or doesn't need @@ -64,6 +65,7 @@ struct ceph_options { int num_mon; char *name; struct ceph_crypto_key *key; + struct rb_root crush_locs; }; /* @@ -73,6 +75,7 @@ struct ceph_options { #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ +#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */ #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) @@ -188,7 +191,7 @@ static inline int calc_pages_for(u64 off, u64 len) #define RB_CMP3WAY(a, b) ((a) < (b) ? 
-1 : (a) > (b)) #define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ -static void insert_##name(struct rb_root *root, type *t) \ +static bool __insert_##name(struct rb_root *root, type *t) \ { \ struct rb_node **n = &root->rb_node; \ struct rb_node *parent = NULL; \ @@ -206,11 +209,17 @@ static void insert_##name(struct rb_root *root, type *t) \ else if (cmp > 0) \ n = &(*n)->rb_right; \ else \ - BUG(); \ + return false; \ } \ \ rb_link_node(&t->nodefld, parent, n); \ rb_insert_color(&t->nodefld, root); \ + return true; \ +} \ +static void __maybe_unused insert_##name(struct rb_root *root, type *t) \ +{ \ + if (!__insert_##name(root, t)) \ + BUG(); \ } \ static void erase_##name(struct rb_root *root, type *t) \ { \ diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index dbb8a6959a73..ce4ffeb384d7 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -19,7 +19,7 @@ struct ceph_monmap { struct ceph_fsid fsid; u32 epoch; u32 num_mon; - struct ceph_entity_inst mon_inst[0]; + struct ceph_entity_inst mon_inst[]; }; struct ceph_mon_client; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 9d9f745b98a1..c60b59e9291b 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -8,6 +8,7 @@ #include <linux/mempool.h> #include <linux/rbtree.h> #include <linux/refcount.h> +#include <linux/ktime.h> #include <linux/ceph/types.h> #include <linux/ceph/osdmap.h> @@ -135,6 +136,7 @@ struct ceph_osd_req_op { struct { u64 expected_object_size; u64 expected_write_size; + u32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ } alloc_hint; struct { u64 snapid; @@ -164,6 +166,7 @@ struct ceph_osd_request_target { bool recovery_deletes; unsigned int flags; /* CEPH_OSD_FLAG_* */ + bool used_replica; bool paused; u32 epoch; @@ -213,6 +216,8 @@ struct ceph_osd_request { /* internal */ unsigned long r_stamp; /* jiffies, send or check time */ unsigned long r_start_stamp; /* jiffies */ + ktime_t r_start_latency; /* ktime_t */ + ktime_t r_end_latency; /* ktime_t */ int r_attempts; u32 r_map_dne_bound; @@ -468,7 +473,8 @@ extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, unsigned int which, u64 expected_object_size, - u64 expected_write_size); + u64 expected_write_size, + u32 flags); extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 5e601975745f..3f4498fef6ad 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -302,9 +302,26 @@ bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, const struct ceph_pg *raw_pgid); +struct crush_loc { + char *cl_type_name; + char *cl_name; +}; + +struct crush_loc_node { + struct rb_node cl_node; + struct crush_loc cl_loc; /* pointers into cl_data */ + char cl_data[]; +}; + +int ceph_parse_crush_location(char *crush_location, struct rb_root *locs); +int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2); +void ceph_clear_crush_locs(struct rb_root *locs); + +int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id, + struct rb_root *locs); + extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id); - extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 
id); extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 88ed3c5c04c5..3a518fd0eaad 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -465,6 +465,19 @@ enum { const char *ceph_osd_watch_op_name(int o); enum { + CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1, + CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2, + CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4, + CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8, + CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16, + CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32, + CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64, + CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128, + CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256, + CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512, +}; + +enum { CEPH_OSD_BACKOFF_OP_BLOCK = 1, CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2, CEPH_OSD_BACKOFF_OP_UNBLOCK = 3, @@ -517,6 +530,7 @@ struct ceph_osd_op { struct { __le64 expected_object_size; __le64 expected_write_size; + __le32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ } __attribute__ ((packed)) alloc_hint; struct { __le64 snapid; diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 2b1b35240074..3f01d43f0598 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -131,6 +131,9 @@ extern void tegra210_set_sata_pll_seq_sw(bool state); extern void tegra210_put_utmipll_in_iddq(void); extern void tegra210_put_utmipll_out_iddq(void); extern int tegra210_clk_handle_mbist_war(unsigned int id); +extern void tegra210_clk_emc_dll_enable(bool flag); +extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); +extern void tegra210_clk_emc_update_setting(u32 emc_src_value); struct clk; @@ -143,4 +146,28 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, void *cb_arg); int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); +struct tegra210_clk_emc_config { + unsigned long rate; + bool same_freq; + u32 value; + + unsigned long parent_rate; + u8 parent; +}; + +struct tegra210_clk_emc_provider { + struct module *owner; + struct device *dev; + + struct tegra210_clk_emc_config *configs; + unsigned int num_configs; + + int (*set_rate)(struct device *dev, + const struct tegra210_clk_emc_config *config); +}; + +int tegra210_clk_emc_attach(struct clk *clk, + struct tegra210_clk_emc_provider *provider); +void tegra210_clk_emc_detach(struct clk *clk); + #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 4b898cdbdf05..6fa0eea3f530 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -86,7 +86,7 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos); + void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; @@ -97,7 +97,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int classzone_idx); + unsigned int alloc_flags, int highest_zoneidx); extern void defer_compaction(struct zone *zone, int order); extern bool compaction_deferred(struct zone *zone, 
int order); @@ -182,7 +182,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); -extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); +extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); #else static inline void reset_isolation_suitable(pg_data_t *pgdat) @@ -190,7 +190,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) } static inline enum compact_result compaction_suitable(struct zone *zone, int order, - int alloc_flags, int classzone_idx) + int alloc_flags, int highest_zoneidx) { return COMPACT_SKIPPED; } @@ -232,7 +232,8 @@ static inline void kcompactd_stop(int nid) { } -static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) +static inline void wakeup_kcompactd(pg_data_t *pgdat, + int order, int highest_zoneidx) { }
diff --git a/include/linux/compat.h b/include/linux/compat.h index 0480ba4db592..e90100c0de72 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -402,8 +402,15 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size); long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size); -int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from); -int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from); +void copy_siginfo_to_external32(struct compat_siginfo *to, + const struct kernel_siginfo *from); +int copy_siginfo_from_user32(kernel_siginfo_t *to, + const struct compat_siginfo __user *from); +int __copy_siginfo_to_user32(struct compat_siginfo __user *to, + const kernel_siginfo_t *from); +#ifndef copy_siginfo_to_user32 +#define copy_siginfo_to_user32 __copy_siginfo_to_user32 +#endif int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event);
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 333a6695a918..ee37256ec8bd 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -16,7 +16,7 @@ #define KASAN_ABI_VERSION 5 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) -/* emulate gcc's __SANITIZE_ADDRESS__ flag */ +/* Emulate GCC's __SANITIZE_ADDRESS__ flag */ #define __SANITIZE_ADDRESS__ #define __no_sanitize_address \ __attribute__((no_sanitize("address", "hwaddress"))) @@ -24,6 +24,15 @@ #define __no_sanitize_address #endif +#if __has_feature(thread_sanitizer) +/* emulate gcc's __SANITIZE_THREAD__ flag */ +#define __SANITIZE_THREAD__ +#define __no_sanitize_thread \ + __attribute__((no_sanitize("thread"))) +#else +#define __no_sanitize_thread +#endif + /* * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements @@ -42,3 +51,7 @@ * compilers, like ICC.
*/ #define barrier() __asm__ __volatile__("" : : : "memory") + +#if __has_feature(shadow_call_stack) +# define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) +#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d7ee4c6bad48..7dd4e0349ef3 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -10,7 +10,8 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) -#if GCC_VERSION < 40600 +/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ +#if GCC_VERSION < 40800 # error Sorry, your compiler is too old - please upgrade it. #endif @@ -126,9 +127,7 @@ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ -#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ -#endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ #if GCC_VERSION >= 70000 @@ -145,6 +144,12 @@ #define __no_sanitize_address #endif +#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__) +#define __no_sanitize_thread __attribute__((no_sanitize_thread)) +#else +#define __no_sanitize_thread +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 034b0a644efc..30827f82ad62 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -120,12 +120,65 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, /* Annotate a C jump table to allow objtool to follow the code flow */ #define __annotate_jump_table __section(.rodata..c_jump_table) +#ifdef CONFIG_DEBUG_ENTRY +/* Begin/end of an instrumentation safe region */ +#define instrumentation_begin() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.instr_begin\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) + +/* + * Because instrumentation_{begin,end}() can nest, objtool validation considers + * _begin() a +1 and _end() a -1 and computes a sum over the instructions. + * When the value is greater than 0, we consider instrumentation allowed. + * + * There is a problem with code like: + * + * noinstr void foo() + * { + * instrumentation_begin(); + * ... + * if (cond) { + * instrumentation_begin(); + * ... + * instrumentation_end(); + * } + * bar(); + * instrumentation_end(); + * } + * + * If instrumentation_end() were an empty label, like all the other + * annotations, the inner _end(), which is at the end of a conditional block, + * would land on the instruction after the block. + * + * If we then consider the sum of the !cond path, we'll see that the call to + * bar() is made with a 0-value, even though we meant it to happen with a positive + * value. + * + * To avoid this, have _end() be a NOP instruction; this ensures it will be + * part of the condition block and does not escape.
+ */ +#define instrumentation_end() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_end\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#endif /* CONFIG_DEBUG_ENTRY */ + #else #define annotate_reachable() #define annotate_unreachable() #define __annotate_jump_table #endif +#ifndef instrumentation_begin +#define instrumentation_begin() do { } while(0) +#define instrumentation_end() do { } while(0) +#endif + #ifndef ASM_UNREACHABLE # define ASM_UNREACHABLE #endif @@ -177,60 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) #endif -#include <uapi/linux/types.h> - -#define __READ_ONCE_SIZE \ -({ \ - switch (size) { \ - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ - default: \ - barrier(); \ - __builtin_memcpy((void *)res, (const void *)p, size); \ - barrier(); \ - } \ -}) - -static __always_inline -void __read_once_size(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -#ifdef CONFIG_KASAN -/* - * We can't declare function 'inline' because __no_sanitize_address confilcts - * with inlining. Attempt to inline it may cause a build failure. - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 - * '__maybe_unused' allows us to avoid defined-but-not-used warnings. - */ -# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused -#else -# define __no_kasan_or_inline __always_inline -#endif - -static __no_kasan_or_inline -void __read_once_size_nocheck(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -static __always_inline void __write_once_size(volatile void *p, void *res, int size) -{ - switch (size) { - case 1: *(volatile __u8 *)p = *(__u8 *)res; break; - case 2: *(volatile __u16 *)p = *(__u16 *)res; break; - case 4: *(volatile __u32 *)p = *(__u32 *)res; break; - case 8: *(volatile __u64 *)p = *(__u64 *)res; break; - default: - barrier(); - __builtin_memcpy((void *)p, (const void *)res, size); - barrier(); - } -} - /* * Prevent the compiler from merging or refetching reads or writes. The * compiler is also forbidden from reordering successive instances of @@ -240,11 +239,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * statements. * * These two macros will also work on aggregate data types like structs or - * unions. If the size of the accessed data type exceeds the word size of - * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will - * fall back to memcpy(). There's at least two memcpy()s: one for the - * __builtin_memcpy() and then one for the macro doing the copy of variable - * - '__u' allocated on the stack. + * unions. * * Their two major use cases are: (1) Mediating communication between * process-level code and irq/NMI handlers, all running on the same CPU, @@ -255,24 +250,79 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s */ #include <asm/barrier.h> #include <linux/kasan-checks.h> +#include <linux/kcsan-checks.h> -#define __READ_ONCE(x, check) \ +/** + * data_race - mark an expression as containing intentional data races + * + * This data_race() macro is useful for situations in which data races + * should be forgiven. 
One example is diagnostic code that accesses + * shared variables but is not a part of the core synchronization design. + * + * This macro *does not* affect normal code generation, but is a hint + * to tooling that data races here are to be ignored. + */ +#define data_race(expr) \ ({ \ - union { typeof(x) __val; char __c[1]; } __u; \ - if (check) \ - __read_once_size(&(x), __u.__c, sizeof(x)); \ - else \ - __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ - smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ - __u.__val; \ + __unqual_scalar_typeof(({ expr; })) __v = ({ \ + __kcsan_disable_current(); \ + expr; \ + }); \ + __kcsan_enable_current(); \ + __v; \ }) -#define READ_ONCE(x) __READ_ONCE(x, 1) /* - * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need - * to hide memory access from KASAN. + * Use __READ_ONCE() instead of READ_ONCE() if you do not require any + * atomicity or dependency ordering guarantees. Note that this may result + * in tears! */ -#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) +#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) + +#define __READ_ONCE_SCALAR(x) \ +({ \ + __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \ + smp_read_barrier_depends(); \ + (typeof(x))__x; \ +}) + +#define READ_ONCE(x) \ +({ \ + compiletime_assert_rwonce_type(x); \ + __READ_ONCE_SCALAR(x); \ +}) + +#define __WRITE_ONCE(x, val) \ +do { \ + *(volatile typeof(x) *)&(x) = (val); \ +} while (0) + +#define WRITE_ONCE(x, val) \ +do { \ + compiletime_assert_rwonce_type(x); \ + __WRITE_ONCE(x, val); \ +} while (0) + +static __no_sanitize_or_inline +unsigned long __read_once_word_nocheck(const void *addr) +{ + return __READ_ONCE(*(unsigned long *)addr); +} + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a + * word from memory atomically but without telling KASAN/KCSAN. This is + * usually used by unwinding code when walking the stack of a running process. + */ +#define READ_ONCE_NOCHECK(x) \ +({ \ + unsigned long __x; \ + compiletime_assert(sizeof(x) == sizeof(__x), \ + "Unsupported access size for READ_ONCE_NOCHECK()."); \ + __x = __read_once_word_nocheck(&(x)); \ + smp_read_barrier_depends(); \ + (typeof(x))__x; \ +}) static __no_kasan_or_inline unsigned long read_word_at_a_time(const void *addr) @@ -281,14 +331,6 @@ unsigned long read_word_at_a_time(const void *addr) return *(unsigned long *)addr; } -#define WRITE_ONCE(x, val) \ -({ \ - union { typeof(x) __val; char __c[1]; } __u = \ - { .__val = (__force typeof(x)) (val) }; \ - __write_once_size(&(x), __u.__c, sizeof(x)); \ - __u.__val; \ -}) - #endif /* __KERNEL__ */ /* @@ -353,7 +395,23 @@ static inline void *offset_to_ptr(const int *off) compiletime_assert(__native_word(t), \ "Need native word sized stores/loads for atomicity.") +/* + * Yes, this permits 64-bit accesses on 32-bit architectures. These will + * actually be atomic in some cases (namely Armv7 + LPAE), but for others we + * rely on the access being split into 2x32-bit accesses for a 32-bit quantity + * (e.g. a virtual address) and a strong prevailing wind. 
+ */ +#define compiletime_assert_rwonce_type(t) \ + compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ + "Unsupported access size for {READ,WRITE}_ONCE().") + /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +/* + * This is needed in functions which generate the stack canary, see + * arch/x86/kernel/smpboot.c::start_secondary() for an example. + */ +#define prevent_tail_call_optimization() mb() + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e970f97a7fcb..e368384445b6 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -5,20 +5,20 @@ #ifndef __ASSEMBLY__ #ifdef __CHECKER__ -# define __user __attribute__((noderef, address_space(1))) # define __kernel __attribute__((address_space(0))) +# define __user __attribute__((noderef, address_space(__user))) # define __safe __attribute__((safe)) # define __force __attribute__((force)) # define __nocast __attribute__((nocast)) -# define __iomem __attribute__((noderef, address_space(2))) +# define __iomem __attribute__((noderef, address_space(__iomem))) # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) -# define __rcu __attribute__((noderef, address_space(4))) +# define __percpu __attribute__((noderef, address_space(__percpu))) +# define __rcu __attribute__((noderef, address_space(__rcu))) # define __private __attribute__((noderef)) extern void __chk_user_ptr(const volatile void __user *); extern void __chk_io_ptr(const volatile void __iomem *); @@ -118,6 +118,10 @@ struct ftrace_likely_data { #define notrace __attribute__((__no_instrument_function__)) #endif +/* Section for code which can't be instrumented at all */ +#define noinstr \ + noinline notrace __attribute((__section__(".noinstr.text"))) + /* * it doesn't make sense on ARM (currently the only user of __naked) * to trace naked functions because then mcount is called without @@ -167,6 +171,38 @@ struct ftrace_likely_data { */ #define noinline_for_stack noinline +/* + * Sanitizer helper attributes: Because using __always_inline and + * __no_sanitize_* conflict, provide helper attributes that will either expand + * to __no_sanitize_* in compilation units where instrumentation is enabled + * (__SANITIZE_*__), or __always_inline in compilation units without + * instrumentation (__SANITIZE_*__ undefined). + */ +#ifdef __SANITIZE_ADDRESS__ +/* + * We can't declare function 'inline' because __no_sanitize_address conflicts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 
+ */ +# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused +# define __no_sanitize_or_inline __no_kasan_or_inline +#else +# define __no_kasan_or_inline __always_inline +#endif + +#define __no_kcsan __no_sanitize_thread +#ifdef __SANITIZE_THREAD__ +# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused +# define __no_sanitize_or_inline __no_kcsan_or_inline +#else +# define __no_kcsan_or_inline __always_inline +#endif + +#ifndef __no_sanitize_or_inline +#define __no_sanitize_or_inline __always_inline +#endif + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -193,6 +229,10 @@ struct ftrace_likely_data { # define randomized_struct_fields_end #endif +#ifndef __noscs +# define __noscs +#endif + #ifndef asm_volatile_goto #define asm_volatile_goto(x...) asm goto(x) #endif @@ -210,6 +250,53 @@ struct ftrace_likely_data { /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +/* + * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving + * non-scalar types unchanged. + */ +#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__) +/* + * We build this out of a couple of helper macros in a vain attempt to + * help you keep your lunch down while reading it. + */ +#define __pick_scalar_type(x, type, otherwise) \ + __builtin_choose_expr(__same_type(x, type), (type)0, otherwise) + +/* + * 'char' is not type-compatible with either 'signed char' or 'unsigned char', + * so we include the naked type here as well as the signed/unsigned variants. + */ +#define __pick_integer_type(x, type, otherwise) \ + __pick_scalar_type(x, type, \ + __pick_scalar_type(x, unsigned type, \ + __pick_scalar_type(x, signed type, otherwise))) + +#define __unqual_scalar_typeof(x) typeof( \ + __pick_integer_type(x, char, \ + __pick_integer_type(x, short, \ + __pick_integer_type(x, int, \ + __pick_integer_type(x, long, \ + __pick_integer_type(x, long long, x)))))) +#else +/* + * If supported, prefer C11 _Generic for better compile-times. As above, 'char' + * is not type-compatible with 'signed char', and we define a separate case. + */ +#define __scalar_type_to_expr_cases(type) \ + unsigned type: (unsigned type)0, \ + signed type: (signed type)0 + +#define __unqual_scalar_typeof(x) typeof( \ + _Generic((x), \ + char: (char)0, \ + __scalar_type_to_expr_cases(char), \ + __scalar_type_to_expr_cases(short), \ + __scalar_type_to_expr_cases(int), \ + __scalar_type_to_expr_cases(long), \ + __scalar_type_to_expr_cases(long long), \ + default: (x))) +#endif + /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ diff --git a/include/linux/configfs.h b/include/linux/configfs.h index fa9490a8874c..2e8c69b43c64 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -13,7 +13,7 @@ * * configfs Copyright (C) 2005 Oracle. All rights reserved. * - * Please read Documentation/filesystems/configfs/configfs.txt before using + * Please read Documentation/filesystems/configfs.rst before using * the configfs interface, ESPECIALLY the parts about reference counts and * item destructors. 
*/ diff --git a/include/linux/console.h b/include/linux/console.h index 7a140f4e5d0c..75dd20650fbe 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -134,7 +134,7 @@ static inline int con_debug_leave(void) */ #define CON_PRINTBUFFER (1) -#define CON_CONSDEV (2) /* Last on the command line */ +#define CON_CONSDEV (2) /* Preferred console, /dev/console */ #define CON_ENABLED (4) #define CON_BOOT (8) #define CON_ANYTIME (16) /* Safe to call when cpu is offline */
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 8150f5ac176c..981b880d5b60 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -33,13 +33,13 @@ static inline void user_exit(void) } /* Called with interrupts disabled. */ -static inline void user_enter_irqoff(void) +static __always_inline void user_enter_irqoff(void) { if (context_tracking_enabled()) __context_tracking_enter(CONTEXT_USER); } -static inline void user_exit_irqoff(void) +static __always_inline void user_exit_irqoff(void) { if (context_tracking_enabled()) __context_tracking_exit(CONTEXT_USER); @@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) * is enabled. If context tracking is disabled, returns * CONTEXT_DISABLED. This should be used primarily for debugging. */ -static inline enum ctx_state ct_state(void) +static __always_inline enum ctx_state ct_state(void) { return context_tracking_enabled() ? this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; @@ -101,12 +101,14 @@ static inline void context_tracking_init(void) { } #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN /* must be called with irqs disabled */ -static inline void guest_enter_irqoff(void) +static __always_inline void guest_enter_irqoff(void) { + instrumentation_begin(); if (vtime_accounting_enabled_this_cpu()) vtime_guest_enter(current); else current->flags |= PF_VCPU; + instrumentation_end(); if (context_tracking_enabled()) __context_tracking_enter(CONTEXT_GUEST); @@ -118,39 +120,48 @@ static inline void guest_enter_irqoff(void) * one time slice). Let's treat guest mode as a quiescent state, just like * we do with user-mode execution. */ - if (!context_tracking_enabled_this_cpu()) + if (!context_tracking_enabled_this_cpu()) { + instrumentation_begin(); rcu_virt_note_context_switch(smp_processor_id()); + instrumentation_end(); + } } -static inline void guest_exit_irqoff(void) +static __always_inline void guest_exit_irqoff(void) { if (context_tracking_enabled()) __context_tracking_exit(CONTEXT_GUEST); + instrumentation_begin(); if (vtime_accounting_enabled_this_cpu()) vtime_guest_exit(current); else current->flags &= ~PF_VCPU; + instrumentation_end(); } #else -static inline void guest_enter_irqoff(void) +static __always_inline void guest_enter_irqoff(void) { /* * This is running in ioctl context so it's safe * to assume that it's the stime pending cputime * to flush.
*/ + instrumentation_begin(); vtime_account_kernel(current); current->flags |= PF_VCPU; rcu_virt_note_context_switch(smp_processor_id()); + instrumentation_end(); } -static inline void guest_exit_irqoff(void) +static __always_inline void guest_exit_irqoff(void) { + instrumentation_begin(); /* Flush the guest cputime we spent on the guest */ vtime_account_kernel(current); current->flags &= ~PF_VCPU; + instrumentation_end(); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index e7fe6678b7ad..65a60d3313b0 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -26,12 +26,12 @@ struct context_tracking { extern struct static_key_false context_tracking_key; DECLARE_PER_CPU(struct context_tracking, context_tracking); -static inline bool context_tracking_enabled(void) +static __always_inline bool context_tracking_enabled(void) { return static_branch_unlikely(&context_tracking_key); } -static inline bool context_tracking_enabled_cpu(int cpu) +static __always_inline bool context_tracking_enabled_cpu(int cpu) { return context_tracking_enabled() && per_cpu(context_tracking.active, cpu); } @@ -41,7 +41,7 @@ static inline bool context_tracking_enabled_this_cpu(void) return context_tracking_enabled() && __this_cpu_read(context_tracking.active); } -static inline bool context_tracking_in_user(void) +static __always_inline bool context_tracking_in_user(void) { return __this_cpu_read(context_tracking.state) == CONTEXT_USER; }
diff --git a/include/linux/coredump.h b/include/linux/coredump.h index abf4b4e65dbb..7a899e83835d 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -22,4 +22,8 @@ extern void do_coredump(const kernel_siginfo_t *siginfo); static inline void do_coredump(const kernel_siginfo_t *siginfo) {} #endif +extern int core_uses_pid; +extern char core_pattern[]; +extern unsigned int core_pipe_limit; + #endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 193cc9dbf448..e3e9f0e3a878 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -100,10 +100,12 @@ union coresight_dev_subtype { }; /** - * struct coresight_platform_data - data harvested from the DT specification - * @nr_inport: number of input ports for this component. - * @nr_outport: number of output ports for this component. - * @conns: Array of nr_outport connections from this component + * struct coresight_platform_data - data harvested from the firmware + * specification. + * + * @nr_inport: Number of elements for the input connections. + * @nr_outport: Number of elements for the output connections. + * @conns: Sparse array of nr_outport connections from this component. */ struct coresight_platform_data { int nr_inport; @@ -140,12 +142,28 @@ struct coresight_desc { * @child_fwnode: remote component's fwnode handle. * @child_dev: a @coresight_device representation of the component connected to @outport. + * @link: Representation of the connection as a sysfs link. */ struct coresight_connection { int outport; int child_port; struct fwnode_handle *child_fwnode; struct coresight_device *child_dev; + struct coresight_sysfs_link *link; +}; + +/** + * struct coresight_sysfs_link - representation of a connection in sysfs. + * @orig: Originating (master) coresight device for the link. + * @orig_name: Name to use for the link orig->target. + * @target: Target (slave) coresight device for the link.
+ * @target_name: Name to use for the link target->orig. + */ +struct coresight_sysfs_link { + struct coresight_device *orig; + const char *orig_name; + struct coresight_device *target; + const char *target_name; }; /** @@ -165,6 +183,9 @@ struct coresight_connection { * @ea: Device attribute for sink representation under PMU directory. * @ect_dev: Associated cross trigger device. Not part of the trace data * path or connections. + * @nr_links: number of sysfs links created to other components from this + * device. These will appear in the "connections" group. + * @has_conns_grp: Have added a "connections" group for sysfs links. */ struct coresight_device { struct coresight_platform_data *pdata; @@ -180,6 +201,9 @@ struct coresight_device { struct dev_ext_attribute *ea; /* cross trigger handling */ struct coresight_device *ect_dev; + /* sysfs links between components */ + int nr_links; + bool has_conns_grp; }; /* diff --git a/include/linux/cper.h b/include/linux/cper.h index 4f005d95ce88..8537e9282a65 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -521,6 +521,15 @@ struct cper_sec_pcie { u8 aer_info[96]; }; +/* Firmware Error Record Reference, UEFI v2.7 sec N.2.10 */ +struct cper_sec_fw_err_rec_ref { + u8 record_type; + u8 revision; + u8 reserved[6]; + u64 record_identifier; + guid_t record_identifier_guid; +}; + /* Reset to default packing */ #pragma pack() diff --git a/include/linux/cpu.h b/include/linux/cpu.h index beaed2dc269e..52692587f7fe 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -144,18 +144,8 @@ static inline void get_online_cpus(void) { cpus_read_lock(); } static inline void put_online_cpus(void) { cpus_read_unlock(); } #ifdef CONFIG_PM_SLEEP_SMP -int __freeze_secondary_cpus(int primary, bool suspend); -static inline int freeze_secondary_cpus(int primary) -{ - return __freeze_secondary_cpus(primary, true); -} - -static inline int disable_nonboot_cpus(void) -{ - return __freeze_secondary_cpus(0, false); -} - -void enable_nonboot_cpus(void); +extern int freeze_secondary_cpus(int primary); +extern void thaw_secondary_cpus(void); static inline int suspend_disable_secondary_cpus(void) { @@ -168,12 +158,11 @@ static inline int suspend_disable_secondary_cpus(void) } static inline void suspend_enable_secondary_cpus(void) { - return enable_nonboot_cpus(); + return thaw_secondary_cpus(); } #else /* !CONFIG_PM_SLEEP_SMP */ -static inline int disable_nonboot_cpus(void) { return 0; } -static inline void enable_nonboot_cpus(void) {} +static inline void thaw_secondary_cpus(void) {} static inline int suspend_disable_secondary_cpus(void) { return 0; } static inline void suspend_enable_secondary_cpus(void) { } #endif /* !CONFIG_PM_SLEEP_SMP */ diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index 65501d8f9778..a3bdc8a98f2c 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -63,18 +63,10 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) struct cpuidle_driver; #ifdef CONFIG_CPU_IDLE_THERMAL -int cpuidle_cooling_register(struct cpuidle_driver *drv); -int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv); +void cpuidle_cooling_register(struct cpuidle_driver *drv); #else /* CONFIG_CPU_IDLE_THERMAL */ -static inline int cpuidle_cooling_register(struct cpuidle_driver *drv) +static inline void cpuidle_cooling_register(struct cpuidle_driver *drv) { - return 0; -} -static inline int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv) -{ - return 0; } 
#endif /* CONFIG_CPU_IDLE_THERMAL */ diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h index 02edeafcb2bf..be8aea04d023 100644 --- a/include/linux/cpu_rmap.h +++ b/include/linux/cpu_rmap.h @@ -28,7 +28,7 @@ struct cpu_rmap { struct { u16 index; u16 dist; - } near[0]; + } near[]; }; #define CPU_RMAP_DIST_INF 0xffff diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index f7240251a949..3494f6763597 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -330,7 +330,7 @@ struct cpufreq_driver { * * get_intermediate should return a stable intermediate frequency * platform wants to switch to and target_intermediate() should set CPU - * to to that frequency, before jumping to the frequency corresponding + * to that frequency, before jumping to the frequency corresponding * to 'index'. Core will take care of sending notifications and driver * doesn't have to handle them in target_intermediate() or * target_index(). @@ -367,7 +367,7 @@ struct cpufreq_driver { /* platform specific boost support code */ bool boost_enabled; - int (*set_boost)(int state); + int (*set_boost)(struct cpufreq_policy *policy, int state); }; /* flags */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 77d70b633531..191772d4a4d7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -102,6 +102,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, + CPUHP_AP_IRQ_RISCV_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, @@ -142,6 +143,7 @@ enum cpuhp_state { CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_ARM_KVMPV_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, @@ -152,6 +154,7 @@ enum cpuhp_state { CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, + CPUHP_AP_BLK_MQ_ONLINE, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, CPUHP_AP_X86_INTEL_EPB_ONLINE, CPUHP_AP_PERF_ONLINE, diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 4664fc1871de..a5192b718dbe 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -5,9 +5,10 @@ #include <linux/kexec.h> #include <linux/proc_fs.h> #include <linux/elf.h> +#include <linux/pgtable.h> #include <uapi/linux/vmcore.h> -#include <asm/pgtable.h> /* for pgprot_t */ +#include <linux/pgtable.h> /* for pgprot_t */ #ifdef CONFIG_CRASH_DUMP #define ELFCORE_ADDR_MAX (-1ULL) @@ -97,8 +98,6 @@ extern void unregister_oldmem_pfn_is_ram(void); static inline bool is_kdump_kernel(void) { return 0; } #endif /* CONFIG_CRASH_DUMP */ -extern unsigned long saved_max_pfn; - /* Device Dump information to be filled by drivers */ struct vmcoredd_data { char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */ diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 54741295c70b..33c16f2de7f6 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -87,7 +87,7 @@ struct crush_rule_mask { struct crush_rule { __u32 len; struct crush_rule_mask mask; - struct crush_rule_step steps[0]; + struct crush_rule_step steps[]; }; #define crush_rule_size(len) (sizeof(struct crush_rule) + \ @@ -301,6 +301,12 @@ struct crush_map { __u32 *choose_tries; #else + /* device/bucket type id -> type name (CrushWrapper::type_map) */ + struct rb_root type_names; + + /* device/bucket id -> name (CrushWrapper::name_map) */ + 
struct rb_root names; + /* CrushWrapper::choose_args */ struct rb_root choose_args; #endif @@ -342,4 +348,10 @@ struct crush_work { struct crush_work_bucket **work; /* Per-bucket working store */ }; +#ifdef __KERNEL__ +/* osdmap.c */ +void clear_crush_names(struct rb_root *root); +void clear_choose_args(struct crush_map *c); +#endif + #endif diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h deleted file mode 100644 index f6ba4c3e60d7..000000000000 --- a/include/linux/cryptohash.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __CRYPTOHASH_H -#define __CRYPTOHASH_H - -#include <uapi/linux/types.h> - -#define SHA_DIGEST_WORDS 5 -#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) -#define SHA_WORKSPACE_WORDS 16 - -void sha_init(__u32 *buf); -void sha_transform(__u32 *digest, const char *data, __u32 *W); - -#endif diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h new file mode 100644 index 000000000000..d39abad2ff6e --- /dev/null +++ b/include/linux/dasd_mod.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DASD_MOD_H +#define DASD_MOD_H + +#include <asm/dasd.h> + +extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info); + +#endif diff --git a/include/linux/dax.h b/include/linux/dax.h index d7af5d243f24..6904d4e0b2e0 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -5,7 +5,6 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/radix-tree.h> -#include <asm/pgtable.h> /* Flag for synchronous flush */ #define DAXDEV_F_SYNC (1UL << 0) diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c1488cc84fd9..a81f0c3cf352 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -177,6 +177,8 @@ struct dentry_operations { #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. 
*/ +#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */ + #define DCACHE_CANT_MOUNT 0x00000100 #define DCACHE_GENOCIDE 0x00000200 #define DCACHE_SHRINK_LIST 0x00000400 diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 257ab3c92cb8..e7e45f0cc7da 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -12,7 +12,7 @@ extern int debug_locks __read_mostly; extern int debug_locks_silent __read_mostly; -static inline int __debug_locks_off(void) +static __always_inline int __debug_locks_off(void) { return xchg(&debug_locks, 0); } diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index a274d95fa66e..63cb3606dea7 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -103,8 +103,8 @@ void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent, u8 *value); void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, u16 *value); -struct dentry *debugfs_create_u32(const char *name, umode_t mode, - struct dentry *parent, u32 *value); +void debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, + u32 *value); void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value); struct dentry *debugfs_create_ulong(const char *name, umode_t mode, @@ -250,12 +250,8 @@ static inline void debugfs_create_u8(const char *name, umode_t mode, static inline void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, u16 *value) { } -static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode, - struct dentry *parent, - u32 *value) -{ - return ERR_PTR(-ENODEV); -} +static inline void debugfs_create_u32(const char *name, umode_t mode, + struct dentry *parent, u32 *value) { } static inline void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value) { } diff --git a/include/linux/delay.h b/include/linux/delay.h index 8e6828094c1e..5e016a4029d9 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -65,4 +65,15 @@ static inline void ssleep(unsigned int seconds) msleep(seconds * 1000); } +/* see Documentation/timers/timers-howto.rst for the thresholds */ +static inline void fsleep(unsigned long usecs) +{ + if (usecs <= 10) + udelay(usecs); + else if (usecs <= 20000) + usleep_range(usecs, 2 * usecs); + else + msleep(DIV_ROUND_UP(usecs, 1000)); +} + #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index 5aad06b4ca7b..3028b644b4fb 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -109,7 +109,8 @@ void _dev_info(const struct device *dev, const char *fmt, ...) #define dev_info(dev, fmt, ...) \ _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define dev_dbg(dev, fmt, ...) \ dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) #elif defined(DEBUG) @@ -181,7 +182,8 @@ do { \ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) #define dev_info_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define dev_dbg_ratelimited(dev, fmt, ...) 
\ do { \ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index af48d9da3916..8750f2dc5613 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -332,6 +332,8 @@ void *dm_per_bio_data(struct bio *bio, size_t data_size); struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); unsigned dm_bio_get_target_bio_nr(const struct bio *bio); +u64 dm_start_time_ns_from_clone(struct bio *bio); + int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); @@ -557,13 +559,8 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) #define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) -#ifdef CONFIG_DM_DEBUG -#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) +#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__) #define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) -#else -#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) -#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__) -#endif #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) diff --git a/include/linux/device.h b/include/linux/device.h index ac8e37cd716a..15460a5ac024 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -884,10 +884,6 @@ extern bool device_is_bound(struct device *dev); /* * Easy functions for dynamically creating devices on the fly */ -extern __printf(5, 0) -struct device *device_create_vargs(struct class *cls, struct device *parent, - dev_t devt, void *drvdata, - const char *fmt, va_list vargs); extern __printf(5, 6) struct device *device_create(struct class *cls, struct device *parent, dev_t devt, void *drvdata, diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h index fa35b52e0002..d02f32b7514e 100644 --- a/include/linux/device_cgroup.h +++ b/include/linux/device_cgroup.h @@ -1,6 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ #include <linux/fs.h> -#include <linux/bpf-cgroup.h> #define DEVCG_ACC_MKNOD 1 #define DEVCG_ACC_READ 2 @@ -11,16 +10,10 @@ #define DEVCG_DEV_CHAR 2 #define DEVCG_DEV_ALL 4 /* this represents all devices */ -#ifdef CONFIG_CGROUP_DEVICE -int devcgroup_check_permission(short type, u32 major, u32 minor, - short access); -#else -static inline int devcgroup_check_permission(short type, u32 major, u32 minor, - short access) -{ return 0; } -#endif #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) +int devcgroup_check_permission(short type, u32 major, u32 minor, + short access); static inline int devcgroup_inode_permission(struct inode *inode, int mask) { short type, access = 0; @@ -51,6 +44,9 @@ static inline int devcgroup_inode_mknod(int mode, dev_t dev) if (!S_ISBLK(mode) && !S_ISCHR(mode)) return 0; + if (S_ISCHR(mode) && dev == WHITEOUT_DEV) + return 0; + if (S_ISBLK(mode)) type = DEVCG_DEV_BLOCK; else @@ -61,6 +57,9 @@ static inline int devcgroup_inode_mknod(int mode, dev_t dev) } #else +static inline int devcgroup_check_permission(short type, u32 major, u32 minor, + short access) +{ return 0; } static inline int devcgroup_inode_permission(struct inode *inode, int mask) { return 0; } static inline int devcgroup_inode_mknod(int mode, dev_t dev) diff --git a/include/linux/digsig.h b/include/linux/digsig.h index 594fc66a395a..2ace69e41088 100644 --- a/include/linux/digsig.h +++ b/include/linux/digsig.h @@ -29,7 +29,7 @@ struct pubkey_hdr { uint32_t 
timestamp; /* key made, always 0 for now */ uint8_t algo; uint8_t nmpi; - char mpi[0]; + char mpi[]; } __packed; struct signature_hdr { @@ -39,7 +39,7 @@ struct signature_hdr { uint8_t hash; uint8_t keyid[8]; uint8_t nmpi; - char mpi[0]; + char mpi[]; } __packed; #if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE) diff --git a/include/linux/dirent.h b/include/linux/dirent.h index fc61f3cff72f..99002220cd45 100644 --- a/include/linux/dirent.h +++ b/include/linux/dirent.h @@ -7,7 +7,7 @@ struct linux_dirent64 { s64 d_off; unsigned short d_reclen; unsigned char d_type; - char d_name[0]; + char d_name[]; }; #endif diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h index 3c8b7d274bd9..29d255fdd5d6 100644 --- a/include/linux/dm-bufio.h +++ b/include/linux/dm-bufio.h @@ -119,6 +119,11 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c); int dm_bufio_issue_flush(struct dm_bufio_client *c); /* + * Send a discard request to the underlying device. + */ +int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count); + +/* * Like dm_bufio_release but also move the buffer to the new * block. dm_bufio_write_dirty_buffers is needed to commit the new block. */ @@ -132,6 +137,13 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block); void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); /* + * Free the given range of buffers. + * This is just a hint, if the buffer is in use or dirty, this function + * does nothing. + */ +void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks); + +/* * Set the minimum number of buffers before cleanup happens. */ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 82e0a4a64601..ab0c156abee6 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -329,7 +329,6 @@ struct dma_buf { /** * struct dma_buf_attach_ops - importer operations for an attachment - * @move_notify: [optional] notification that the DMA-buf is moving * * Attachment operations implemented by the importer. */ @@ -343,7 +342,7 @@ struct dma_buf_attach_ops { bool allow_peer2peer; /** - * @move_notify + * @move_notify: [optional] notification that the DMA-buf is moving * * If this callback is provided the framework can avoid pinning the * backing store while mappings exists. 
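The two dm-bufio additions above, dm_bufio_issue_discard() and dm_bufio_forget_buffers(), pair naturally when a target invalidates a range of blocks: forget drops any clean, unused buffers for the range, and the discard is then passed down to the underlying device. A minimal sketch of that pattern, assuming a client created earlier with dm_bufio_client_create(); demo_discard_range() is a hypothetical helper, not part of the patch:

#include <linux/dm-bufio.h>

static int demo_discard_range(struct dm_bufio_client *c, sector_t block,
                              sector_t n_blocks)
{
        /* Hint only: buffers that are dirty or still in use are left alone. */
        dm_bufio_forget_buffers(c, block, n_blocks);

        /* Forward a discard for the same range to the underlying device. */
        return dm_bufio_issue_discard(c, block, n_blocks);
}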
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 24b8684aa21d..136f984df0d9 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -67,6 +67,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size, } u64 dma_direct_get_required_mask(struct device *dev); +gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, + u64 *phys_mask); void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 330ad58fbf4d..78f677cf45ab 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -609,6 +609,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev, return dma_sync_single_for_device(dev, addr + offset, size, dir); } +/** + * dma_map_sgtable - Map the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * @attrs: Optional DMA attributes for the map operation + * + * Maps a buffer described by a scatterlist stored in the given sg_table + * object for the @dir DMA operation by the @dev device. After success the + * ownership for the buffer is transferred to the DMA domain. One has to + * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the + * ownership of the buffer back to the CPU domain before touching the + * buffer by the CPU. + * + * Returns 0 on success or -EINVAL on error during mapping the buffer. + */ +static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + int nents; + + nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); + if (nents <= 0) + return -EINVAL; + sgt->nents = nents; + return 0; +} + +/** + * dma_unmap_sgtable - Unmap the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * @attrs: Optional DMA attributes for the unmap operation + * + * Unmaps a buffer described by a scatterlist stored in the given sg_table + * object for the @dir DMA operation by the @dev device. After this function + * the ownership of the buffer is transferred back to the CPU domain. + */ +static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); +} + +/** + * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * + * Performs the needed cache synchronization and moves the ownership of the + * buffer back to the CPU domain, so it is safe to perform any access to it + * by the CPU. Before doing any further DMA operations, one has to transfer + * the ownership of the buffer back to the DMA domain by calling the + * dma_sync_sgtable_for_device(). 
+ */ +static inline void dma_sync_sgtable_for_cpu(struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir); +} + +/** + * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * + * Performs the needed cache synchronization and moves the ownership of the + * buffer back to the DMA domain, so it is safe to perform the DMA operation. + * Once finished, one has to call dma_sync_sgtable_for_cpu() or + * dma_unmap_sgtable(). + */ +static inline void dma_sync_sgtable_for_device(struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir); +} + #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) @@ -630,9 +710,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot, const void *caller); void dma_common_free_remap(void *cpu_addr, size_t size); -bool dma_in_atomic_pool(void *start, size_t size); -void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags); -bool dma_free_from_pool(void *start, size_t size); +void *dma_alloc_from_pool(struct device *dev, size_t size, + struct page **ret_page, gfp_t flags); +bool dma_free_from_pool(struct device *dev, void *start, size_t size); int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index b59f1b6be3e9..ca09a4e07d2d 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -3,7 +3,7 @@ #define _LINUX_DMA_NONCOHERENT_H 1 #include <linux/dma-mapping.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H #include <asm/dma-coherence.h> diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 21065c04c4ac..6283917edd90 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -83,9 +83,9 @@ enum dma_transfer_direction { /** * Interleaved Transfer Request * ---------------------------- - * A chunk is collection of contiguous bytes to be transfered. + * A chunk is a collection of contiguous bytes to be transferred. * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG). - * ICGs may or maynot change between chunks. + * ICGs may or may not change between chunks. * A FRAME is the smallest series of contiguous {chunk,icg} pairs, * that when repeated an integral number of times, specifies the transfer.
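The sg_table wrappers added in the dma-mapping.h hunk above replace the open-coded dma_map_sg()/dma_unmap_sg() pattern and keep the nents/orig_nents bookkeeping inside the helpers. A minimal sketch of the intended calling sequence, assuming dev and an sg_table already populated via sg_alloc_table(); demo_dma_to_device() is illustrative only:

#include <linux/dma-mapping.h>

static int demo_dma_to_device(struct device *dev, struct sg_table *sgt)
{
        int ret;

        /* On success, ownership of the buffer moves to the DMA domain. */
        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret)
                return ret;     /* -EINVAL if the mapping failed */

        /* ... program the hardware and wait for the transfer here ... */

        /* Transfer ownership back to the CPU before touching the data. */
        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        return 0;
}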
* A transfer template is specification of a Frame, the number of times @@ -153,7 +153,7 @@ struct dma_interleaved_template { bool dst_sgl; size_t numf; size_t frame_size; - struct data_chunk sgl[0]; + struct data_chunk sgl[]; }; /** @@ -341,13 +341,11 @@ struct dma_chan { * @chan: driver channel device * @device: sysfs device * @dev_id: parent dma_device dev_id - * @idr_ref: reference count to gate release of dma_device dev_id */ struct dma_chan_dev { struct dma_chan *chan; struct device device; int dev_id; - atomic_t *idr_ref; }; /** @@ -537,7 +535,7 @@ struct dmaengine_unmap_data { struct device *dev; struct kref kref; size_t len; - dma_addr_t addr[0]; + dma_addr_t addr[]; }; struct dma_async_tx_descriptor; @@ -835,6 +833,8 @@ struct dma_device { int dev_id; struct device *dev; struct module *owner; + struct ida chan_ida; + struct mutex chan_mutex; /* to protect chan_ida */ u32 src_addr_widths; u32 dst_addr_widths; @@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) * dmaengine_synchronize() needs to be called before it is safe to free * any memory that is accessed by previously submitted descriptors or before * freeing any resources accessed from within the completion callback of any - * perviously submitted descriptors. + * previously submitted descriptors. * * This function can be called from atomic context as well as from within a * complete callback of a descriptor submitted on the same channel. @@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan) * * Synchronizes to the DMA channel termination to the current context. When this * function returns it is guaranteed that all transfers for previously issued - * descriptors have stopped and and it is safe to free the memory assoicated + * descriptors have stopped and it is safe to free the memory associated * with them. Furthermore it is guaranteed that all complete callback functions * for a previously submitted descriptor have finished running and it is safe to * free resources accessed from within the complete callbacks. 
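The corrected dmaengine comments above describe a two-step teardown: dmaengine_terminate_async() may be issued even from atomic context, but dmaengine_synchronize() must follow from process context before any descriptor memory or callback resources are freed. A short sketch of that pattern, with chan assumed to be a previously requested channel:

#include <linux/dmaengine.h>

static void demo_channel_teardown(struct dma_chan *chan)
{
        /* Safe in atomic context; in-flight transfers are aborted. */
        dmaengine_terminate_async(chan);

        /*
         * Process context only: once this returns, freeing memory used by
         * submitted descriptors and their callbacks is safe.
         */
        dmaengine_synchronize(chan);
}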
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index c620d9139c28..311aa04e7520 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -12,11 +12,31 @@ struct sk_buff; struct net_device; struct packet_type; +struct dsa_8021q_crosschip_link { + struct list_head list; + int port; + struct dsa_switch *other_ds; + int other_port; + refcount_t refcount; +}; + +#define DSA_8021Q_N_SUBVLAN 8 + #if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, bool enabled); +int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int other_port, + struct list_head *crosschip_links); + +int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int other_port, + struct list_head *crosschip_links); + struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); @@ -24,10 +44,16 @@ u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); +u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan); + int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); +u16 dsa_8021q_rx_subvlan(u16 vid); + +bool vid_is_dsa_8021q(u16 vid); + #else int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, @@ -36,6 +62,22 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, return 0; } +int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int other_port, + struct list_head *crosschip_links) +{ + return 0; +} + +int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int other_port, + struct list_head *crosschip_links) +{ + return 0; +} + struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci) { @@ -52,6 +94,11 @@ u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port) return 0; } +u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan) +{ + return 0; +} + int dsa_8021q_rx_switch_id(u16 vid) { return 0; @@ -62,6 +109,16 @@ int dsa_8021q_rx_source_port(u16 vid) return 0; } +u16 dsa_8021q_rx_subvlan(u16 vid) +{ + return 0; +} + +bool vid_is_dsa_8021q(u16 vid) +{ + return false; +} + #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index fa5735c353cd..dd93735ae228 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -9,6 +9,7 @@ #include <linux/skbuff.h> #include <linux/etherdevice.h> +#include <linux/dsa/8021q.h> #include <net/dsa.h> #define ETH_P_SJA1105 ETH_P_DSA_8021Q @@ -53,12 +54,14 @@ struct sja1105_skb_cb { ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb)) struct sja1105_port { + u16 subvlan_map[DSA_8021Q_N_SUBVLAN]; struct kthread_worker *xmit_worker; struct kthread_work xmit_work; struct sk_buff_head xmit_queue; struct sja1105_tagger_data *data; struct dsa_port *dp; bool hwts_tx_en; + u16 xmit_tpid; }; #endif /* _NET_DSA_SJA1105_H */ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 4cf02ecd67de..abcd5fde30eb 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -48,7 +48,7 @@ struct _ddebug { -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG_CORE) int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); extern int 
ddebug_remove_module(const char *mod_name); diff --git a/include/linux/edac.h b/include/linux/edac.h index 0f20b986b0ab..6eb7d55d7c3d 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -31,14 +31,6 @@ struct device; extern int edac_op_state; struct bus_type *edac_get_sysfs_subsys(void); -int edac_get_report_status(void); -void edac_set_report_status(int new); - -enum { - EDAC_REPORTING_ENABLED, - EDAC_REPORTING_DISABLED, - EDAC_REPORTING_FORCE -}; static inline void opstate_init(void) { diff --git a/include/linux/efi.h b/include/linux/efi.h index 251f1f783cdf..2c6495f72f79 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -39,6 +39,7 @@ #define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1))) #define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) +#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1))) #define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) #define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) @@ -379,8 +380,8 @@ typedef union { typedef struct { efi_guid_t guid; - const char *name; unsigned long *ptr; + const char name[16]; } efi_config_table_type_t; #define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) @@ -426,6 +427,7 @@ typedef struct { u32 tables; } efi_system_table_32_t; +typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; typedef union { @@ -434,7 +436,7 @@ typedef union { unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ u32 fw_revision; unsigned long con_in_handle; - unsigned long con_in; + efi_simple_text_input_protocol_t *con_in; unsigned long con_out_handle; efi_simple_text_output_protocol_t *con_out; unsigned long stderr_handle; @@ -1245,4 +1247,6 @@ struct linux_efi_memreserve { void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); +char *efi_systab_show_arch(char *str); + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 901bda352dcb..bacc40a0bdf3 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -39,7 +39,7 @@ struct elevator_mq_ops { void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); - void (*prepare_request)(struct request *, struct bio *bio); + void (*prepare_request)(struct request *); void (*finish_request)(struct request *); void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); diff --git a/include/linux/elf.h b/include/linux/elf.h index e3649b3e970e..5d5b0321da0b 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -2,6 +2,7 @@ #ifndef _LINUX_ELF_H #define _LINUX_ELF_H +#include <linux/types.h> #include <asm/elf.h> #include <uapi/linux/elf.h> @@ -21,6 +22,9 @@ SET_PERSONALITY(ex) #endif +#define ELF32_GNU_PROPERTY_ALIGN 4 +#define ELF64_GNU_PROPERTY_ALIGN 8 + #if ELF_CLASS == ELFCLASS32 extern Elf32_Dyn _DYNAMIC []; @@ -31,6 +35,7 @@ extern Elf32_Dyn _DYNAMIC []; #define elf_addr_t Elf32_Off #define Elf_Half Elf32_Half #define Elf_Word Elf32_Word +#define ELF_GNU_PROPERTY_ALIGN ELF32_GNU_PROPERTY_ALIGN #else @@ -42,6 +47,7 @@ extern Elf64_Dyn _DYNAMIC []; #define elf_addr_t Elf64_Off #define Elf_Half Elf64_Half #define Elf_Word Elf64_Word +#define ELF_GNU_PROPERTY_ALIGN 
ELF64_GNU_PROPERTY_ALIGN #endif @@ -56,4 +62,41 @@ static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { extern int elf_coredump_extra_notes_size(void); extern int elf_coredump_extra_notes_write(struct coredump_params *cprm); #endif + +/* + * NT_GNU_PROPERTY_TYPE_0 header: + * Keep this internal until/unless there is an agreed UAPI definition. + * pr_type values (GNU_PROPERTY_*) are public and defined in the UAPI header. + */ +struct gnu_property { + u32 pr_type; + u32 pr_datasz; +}; + +struct arch_elf_state; + +#ifndef CONFIG_ARCH_USE_GNU_PROPERTY +static inline int arch_parse_elf_property(u32 type, const void *data, + size_t datasz, bool compat, + struct arch_elf_state *arch) +{ + return 0; +} +#else +extern int arch_parse_elf_property(u32 type, const void *data, size_t datasz, + bool compat, struct arch_elf_state *arch); +#endif + +#ifdef CONFIG_ARCH_HAVE_ELF_PROT +int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, + bool has_interp, bool is_interp); +#else +static inline int arch_elf_adjust_prot(int prot, + const struct arch_elf_state *state, + bool has_interp, bool is_interp) +{ + return prot; +} +#endif + #endif /* _LINUX_ELF_H */ diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index 594d4e78654f..69b136e4dd2b 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -54,7 +54,7 @@ .popsection ; #define ELFNOTE(name, type, desc) \ - ELFNOTE_START(name, type, "") \ + ELFNOTE_START(name, type, "a") \ desc ; \ ELFNOTE_END diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 564e96f625ff..1c630e2c2756 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h @@ -101,7 +101,7 @@ struct enclosure_device { struct device edev; struct enclosure_component_callbacks *cb; int components; - struct enclosure_component component[0]; + struct enclosure_component component[]; }; static inline struct enclosure_device * diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index d249b88a4d5a..ade6486a3382 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -36,7 +36,7 @@ struct em_cap_state { struct em_perf_domain { struct em_cap_state *table; int nr_cap_states; - unsigned long cpus[0]; + unsigned long cpus[]; }; #ifdef CONFIG_ENERGY_MODEL diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 8801f1f986e5..2e5debc0373c 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -20,6 +20,7 @@ #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/random.h> +#include <linux/crc32.h> #include <asm/unaligned.h> #include <asm/bitsperlong.h> @@ -266,6 +267,17 @@ static inline void eth_hw_addr_random(struct net_device *dev) } /** + * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr + * @ha: pointer to hardware address + * + * Calculate CRC from a hardware address as basis for filter hashes. 
+ */ +static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha) +{ + return ether_crc(ETH_ALEN, ha->addr); +} + +/** * ether_addr_copy - Copy an Ethernet address * @dst: Pointer to a six-byte array Ethernet address destination * @src: Pointer to a six-byte array Ethernet address source diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index c1d379bf6ee1..a23b26eab479 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -35,7 +35,7 @@ struct compat_ethtool_rxnfc { compat_u64 data; struct compat_ethtool_rx_flow_spec fs; u32 rule_cnt; - u32 rule_locs[0]; + u32 rule_locs[]; }; #endif /* CONFIG_COMPAT */ @@ -462,7 +462,7 @@ int ethtool_check_ops(const struct ethtool_ops *ops); struct ethtool_rx_flow_rule { struct flow_rule *rule; - unsigned long priv[0]; + unsigned long priv[]; }; struct ethtool_rx_flow_spec_input { diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h index d01b77887f82..1e7bf78cb382 100644 --- a/include/linux/ethtool_netlink.h +++ b/include/linux/ethtool_netlink.h @@ -14,4 +14,58 @@ enum ethtool_multicast_groups { ETHNL_MCGRP_MONITOR, }; +struct phy_device; + +#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) +int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd); +void ethnl_cable_test_free(struct phy_device *phydev); +void ethnl_cable_test_finished(struct phy_device *phydev); +int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result); +int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm); +int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV); +int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV); +int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last, + u32 step); +#else +static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd) +{ + return -EOPNOTSUPP; +} + +static inline void ethnl_cable_test_free(struct phy_device *phydev) +{ +} + +static inline void ethnl_cable_test_finished(struct phy_device *phydev) +{ +} +static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, + u8 result) +{ + return -EOPNOTSUPP; +} + +static inline int ethnl_cable_test_fault_length(struct phy_device *phydev, + u8 pair, u32 cm) +{ + return -EOPNOTSUPP; +} + +static inline int ethnl_cable_test_amplitude(struct phy_device *phydev, + u8 pair, s16 mV) +{ + return -EOPNOTSUPP; +} + +static inline int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV) +{ + return -EOPNOTSUPP; +} + +static inline int ethnl_cable_test_step(struct phy_device *phydev, u32 first, + u32 last, u32 step) +{ + return -EOPNOTSUPP; +} +#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */ #endif /* _LINUX_ETHTOOL_NETLINK_H_ */ diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index 3049a6c06d9e..b79fa9bb7359 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -47,8 +47,7 @@ * Directory entry modification events - reported only to directory * where entry is modified and not to a watching parent. 
*/ -#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \ - FAN_DIR_MODIFY) +#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ #define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \ diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h new file mode 100644 index 000000000000..4e624c466583 --- /dev/null +++ b/include/linux/fiemap.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FIEMAP_H +#define _LINUX_FIEMAP_H 1 + +#include <uapi/linux/fiemap.h> +#include <linux/fs.h> + +struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ + struct fiemap_extent __user *fi_extents_start; /* Start of + fiemap_extent array */ +}; + +int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 *len, u32 supported_flags); +int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, + u64 phys, u64 len, u32 flags); + +int generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, u64 len, + get_block_t *get_block); + +#endif /* _LINUX_FIEMAP_H 1 */ diff --git a/include/linux/file.h b/include/linux/file.h index 142d102f285e..122f80084a3e 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -94,4 +94,6 @@ extern void fd_install(unsigned int fd, struct file *file); extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); +extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max; + #endif /* __LINUX_FILE_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 9b5aa5c483cc..259377723603 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -16,11 +16,11 @@ #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/capability.h> -#include <linux/cryptohash.h> #include <linux/set_memory.h> #include <linux/kallsyms.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> +#include <crypto/sha.h> #include <net/sch_generic.h> @@ -545,10 +545,8 @@ struct bpf_prog { unsigned int (*bpf_func)(const void *ctx, const struct bpf_insn *insn); /* Instructions for interpreter */ - union { - struct sock_filter insns[0]; - struct bpf_insn insnsi[0]; - }; + struct sock_filter insns[0]; + struct bpf_insn insnsi[]; }; struct sk_filter { @@ -746,7 +744,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) { return round_up(bpf_prog_insn_size(prog) + - sizeof(__be64) + 1, SHA_MESSAGE_BYTES); + sizeof(__be64) + 1, SHA1_BLOCK_SIZE); } static inline unsigned int bpf_prog_size(unsigned int proglen) @@ -863,8 +861,6 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, bpf_aux_classic_check_t trans, bool save_orig); void bpf_prog_destroy(struct bpf_prog *fp); -const struct bpf_func_proto * -bpf_base_func_proto(enum bpf_func_id func_id); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 4bbd0afd91b7..cb3e2c06ed8a 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -12,7 +12,6 @@ struct firmware { size_t size; const u8 *data; - struct page **pages; /* 
firmware loader private fields */ void *priv; diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 17ba4e405129..3fa418a4ca67 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -11,7 +11,6 @@ #define _SC_SCI_H #include <linux/firmware/imx/ipc.h> -#include <linux/firmware/imx/types.h> #include <linux/firmware/imx/svc/misc.h> #include <linux/firmware/imx/svc/pm.h> diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h deleted file mode 100644 index 80821100e85f..000000000000 --- a/include/linux/firmware/imx/types.h +++ /dev/null @@ -1,65 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (C) 2016 Freescale Semiconductor, Inc. - * Copyright 2017~2018 NXP - * - * Header file containing types used across multiple service APIs. - */ - -#ifndef _SC_TYPES_H -#define _SC_TYPES_H - -/* - * This type is used to indicate a control. - */ -enum imx_sc_ctrl { - IMX_SC_C_TEMP = 0, - IMX_SC_C_TEMP_HI = 1, - IMX_SC_C_TEMP_LOW = 2, - IMX_SC_C_PXL_LINK_MST1_ADDR = 3, - IMX_SC_C_PXL_LINK_MST2_ADDR = 4, - IMX_SC_C_PXL_LINK_MST_ENB = 5, - IMX_SC_C_PXL_LINK_MST1_ENB = 6, - IMX_SC_C_PXL_LINK_MST2_ENB = 7, - IMX_SC_C_PXL_LINK_SLV1_ADDR = 8, - IMX_SC_C_PXL_LINK_SLV2_ADDR = 9, - IMX_SC_C_PXL_LINK_MST_VLD = 10, - IMX_SC_C_PXL_LINK_MST1_VLD = 11, - IMX_SC_C_PXL_LINK_MST2_VLD = 12, - IMX_SC_C_SINGLE_MODE = 13, - IMX_SC_C_ID = 14, - IMX_SC_C_PXL_CLK_POLARITY = 15, - IMX_SC_C_LINESTATE = 16, - IMX_SC_C_PCIE_G_RST = 17, - IMX_SC_C_PCIE_BUTTON_RST = 18, - IMX_SC_C_PCIE_PERST = 19, - IMX_SC_C_PHY_RESET = 20, - IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21, - IMX_SC_C_PANIC = 22, - IMX_SC_C_PRIORITY_GROUP = 23, - IMX_SC_C_TXCLK = 24, - IMX_SC_C_CLKDIV = 25, - IMX_SC_C_DISABLE_50 = 26, - IMX_SC_C_DISABLE_125 = 27, - IMX_SC_C_SEL_125 = 28, - IMX_SC_C_MODE = 29, - IMX_SC_C_SYNC_CTRL0 = 30, - IMX_SC_C_KACHUNK_CNT = 31, - IMX_SC_C_KACHUNK_SEL = 32, - IMX_SC_C_SYNC_CTRL1 = 33, - IMX_SC_C_DPI_RESET = 34, - IMX_SC_C_MIPI_RESET = 35, - IMX_SC_C_DUAL_MODE = 36, - IMX_SC_C_VOLTAGE = 37, - IMX_SC_C_PXL_LINK_SEL = 38, - IMX_SC_C_OFS_SEL = 39, - IMX_SC_C_OFS_AUDIO = 40, - IMX_SC_C_OFS_PERIPH = 41, - IMX_SC_C_OFS_IRQ = 42, - IMX_SC_C_RST0 = 43, - IMX_SC_C_RST1 = 44, - IMX_SC_C_SEL0 = 45, - IMX_SC_C_LAST -}; - -#endif /* _SC_TYPES_H */ diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index 013ae4819deb..682dbf694007 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -54,32 +54,25 @@ * Secure monitor software doesn't recognize the request. * * INTEL_SIP_SMC_STATUS_OK: - * FPGA configuration completed successfully, - * In case of FPGA configuration write operation, it means secure monitor - * software can accept the next chunk of FPGA configuration data. + * Secure monitor software accepts the service client's request. * - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY: - * In case of FPGA configuration write operation, it means secure monitor - * software is still processing previous data & can't accept the next chunk - * of data. Service driver needs to issue - * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the - * completed block(s). + * INTEL_SIP_SMC_STATUS_BUSY: + * Secure monitor software is still processing service client's request. * - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR: - * There is error during the FPGA configuration process. 
+ * INTEL_SIP_SMC_STATUS_REJECTED: + * Secure monitor software rejects the service client's request. * - * INTEL_SIP_SMC_REG_ERROR: - * There is error during a read or write operation of the protected registers. + * INTEL_SIP_SMC_STATUS_ERROR: + * There is an error during the process of the service request. * * INTEL_SIP_SMC_RSU_ERROR: - * There is error during a remote status update. + * There is an error during the process of a remote status update request. */ #define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF #define INTEL_SIP_SMC_STATUS_OK 0x0 -#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1 -#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2 -#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4 -#define INTEL_SIP_SMC_REG_ERROR 0x5 +#define INTEL_SIP_SMC_STATUS_BUSY 0x1 +#define INTEL_SIP_SMC_STATUS_REJECTED 0x2 +#define INTEL_SIP_SMC_STATUS_ERROR 0x4 #define INTEL_SIP_SMC_RSU_ERROR 0x7 /** @@ -95,7 +88,7 @@ * a2-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR. * a1-3: not used. */ #define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1 @@ -115,8 +108,8 @@ * a3-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or + * INTEL_SIP_SMC_STATUS_ERROR. * a1: 64bit physical address of 1st completed memory block if any completed * block, otherwise zero value. * a2: 64bit physical address of 2nd completed memory block if any completed @@ -133,15 +126,15 @@ * * Sync call used by service driver at EL1 to track the completed write * transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE - * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY. + * call returns INTEL_SIP_SMC_STATUS_BUSY. * * Call register usage: * a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE. * a1-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or + * INTEL_SIP_SMC_STATUS_ERROR. * a1: 64bit physical address of 1st completed memory block. * a2: 64bit physical address of 2nd completed memory block if * any completed block, otherwise zero value. @@ -164,8 +157,8 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) * a1-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or + * INTEL_SIP_SMC_STATUS_ERROR. * a1-3: not used. */ #define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_ISDONE 4 @@ -183,7 +176,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) * a1-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR. * a1: start of physical address of reserved memory block. * a2: size of reserved memory block. * a3: not used. @@ -203,7 +196,7 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) * a1-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK or INTEL_SIP_SMC_STATUS_ERROR. * a1-3: not used.
*/ #define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6 diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 59bc6e2af693..64213c3e82f5 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -18,45 +18,37 @@ /** * Status of the sent command, in bit number * - * SVC_COMMAND_STATUS_RECONFIG_REQUEST_OK: - * Secure firmware accepts the request of FPGA reconfiguration. + * SVC_STATUS_OK: + * Secure firmware accepts the request issued by one of the service clients. * - * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED: - * Service client successfully submits FPGA configuration - * data buffer to secure firmware. + * SVC_STATUS_BUFFER_SUBMITTED: + * Service client successfully submits data buffer to secure firmware. * - * SVC_COMMAND_STATUS_RECONFIG_BUFFER_DONE: + * SVC_STATUS_BUFFER_DONE: * Secure firmware completes data process, ready to accept the * next WRITE transaction. * - * SVC_COMMAND_STATUS_RECONFIG_COMPLETED: - * Secure firmware completes FPGA configuration successfully, FPGA should - * be in user mode. + * SVC_STATUS_COMPLETED: * Secure firmware completes service request successfully. In case of + * FPGA configuration, FPGA should be in user mode. * - * SVC_COMMAND_STATUS_RECONFIG_BUSY: - * FPGA configuration is still in process. + * SVC_STATUS_BUSY: + * Service request is still in process. * - * SVC_COMMAND_STATUS_RECONFIG_ERROR: - * Error encountered during FPGA configuration. + * SVC_STATUS_ERROR: + * Error encountered during the process of the service request. * - * SVC_STATUS_RSU_OK: - * Secure firmware accepts the request of remote status update (RSU). - * - * SVC_STATUS_RSU_ERROR: - * Error encountered during remote system update. - * - * SVC_STATUS_RSU_NO_SUPPORT: - * Secure firmware doesn't support RSU retry or notify feature. + * SVC_STATUS_NO_SUPPORT: + * Secure firmware doesn't support requested features such as RSU retry + * or RSU notify.
*/ -#define SVC_STATUS_RECONFIG_REQUEST_OK 0 -#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1 -#define SVC_STATUS_RECONFIG_BUFFER_DONE 2 -#define SVC_STATUS_RECONFIG_COMPLETED 3 -#define SVC_STATUS_RECONFIG_BUSY 4 -#define SVC_STATUS_RECONFIG_ERROR 5 -#define SVC_STATUS_RSU_OK 6 -#define SVC_STATUS_RSU_ERROR 7 -#define SVC_STATUS_RSU_NO_SUPPORT 8 +#define SVC_STATUS_OK 0 +#define SVC_STATUS_BUFFER_SUBMITTED 1 +#define SVC_STATUS_BUFFER_DONE 2 +#define SVC_STATUS_COMPLETED 3 +#define SVC_STATUS_BUSY 4 +#define SVC_STATUS_ERROR 5 +#define SVC_STATUS_NO_SUPPORT 6 /** * Flag bit for COMMAND_RECONFIG @@ -84,32 +76,29 @@ struct stratix10_svc_chan; * @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting * * @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status - * is SVC_STATUS_RECONFIG_REQUEST_OK + * is SVC_STATUS_OK * * @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the - * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED, - * or SVC_STATUS_RECONFIG_ERROR + * FPGA configuration, return status is SVC_STATUS_BUFFER_SUBMITTED or SVC_STATUS_ERROR * * @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return - * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or - * SVC_STATUS_RECONFIG_ERROR + * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR * * @COMMAND_RECONFIG_STATUS: check the status of the configuration, return - * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or - * SVC_STATUS_RECONFIG_ERROR + * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR * * @COMMAND_RSU_STATUS: request remote system update boot log, return status * is log data or SVC_STATUS_RSU_ERROR * * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot, - * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR * * @COMMAND_RSU_NOTIFY: report the status of hard processor system - * software to firmware, return status is SVC_STATUS_RSU_OK or - * SVC_STATUS_RSU_ERROR + * software to firmware, return status is SVC_STATUS_OK or + * SVC_STATUS_ERROR * * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter, - * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR */ enum stratix10_svc_command_code { COMMAND_NOOP = 0, diff --git a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h index 2549a2db56aa..be5984bda592 100644 --- a/include/linux/firmware/trusted_foundations.h +++ b/include/linux/firmware/trusted_foundations.h @@ -32,6 +32,7 @@ #define TF_PM_MODE_LP1_NO_MC_CLK 2 #define TF_PM_MODE_LP2 3 #define TF_PM_MODE_LP2_NOFLUSH_L2 4 +#define TF_PM_MODE_NONE 5 struct trusted_foundations_platform_data { unsigned int version_major; diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 8efa5ac22d7e..5968df82b991 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -42,6 +42,8 @@ #define ZYNQMP_PM_MAX_QOS 100U +#define GSS_NUM_REGS (4) + /* Node capabilities */ #define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U #define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U @@ -62,6 +64,7 @@ enum pm_api_id { PM_GET_API_VERSION = 1, + PM_SYSTEM_SHUTDOWN = 12, PM_REQUEST_NODE = 13, PM_RELEASE_NODE, PM_SET_REQUIREMENT, @@ -107,6 +110,12 @@ enum pm_ioctl_id { IOCTL_GET_PLL_FRAC_MODE, IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA, + IOCTL_WRITE_GGS = 12, + IOCTL_READ_GGS = 13, + IOCTL_WRITE_PGGS = 14, + IOCTL_READ_PGGS = 15, + /* Set healthy bit value */ + IOCTL_SET_BOOT_HEALTH_STATUS = 17, }; enum pm_query_id { @@ -279,6 +288,18 @@ enum dll_reset_type { PM_DLL_RESET_PULSE, }; +enum zynqmp_pm_shutdown_type { + ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN, + ZYNQMP_PM_SHUTDOWN_TYPE_RESET, + ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY, +}; + +enum zynqmp_pm_shutdown_subtype { + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM, +}; + /** * struct zynqmp_pm_query_data - PM query data * @qid: query ID @@ -293,49 +314,199 @@ struct zynqmp_pm_query_data { u32 arg3; }; -struct zynqmp_eemi_ops { - int (*get_api_version)(u32 *version); - int (*get_chipid)(u32 *idcode, u32 *version); - int (*fpga_load)(const u64 address, const u32 size, const u32 flags); - int (*fpga_get_status)(u32 *value); - int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); - int (*clock_enable)(u32 clock_id); - int (*clock_disable)(u32 clock_id); - int (*clock_getstate)(u32 clock_id, u32 *state); - int (*clock_setdivider)(u32 clock_id, u32 divider); - int (*clock_getdivider)(u32 clock_id, u32 *divider); - int (*clock_setrate)(u32 clock_id, u64 rate); - int (*clock_getrate)(u32 clock_id, u64 *rate); - int (*clock_setparent)(u32 clock_id, u32 parent_id); - int (*clock_getparent)(u32 clock_id, u32 *parent_id); - int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out); - int (*reset_assert)(const enum zynqmp_pm_reset reset, - const enum zynqmp_pm_reset_action assert_flag); - int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status); - int (*init_finalize)(void); - int (*set_suspend_mode)(u32 mode); - int (*request_node)(const u32 node, - const u32 capabilities, - const u32 qos, - const enum zynqmp_pm_request_ack ack); - int (*release_node)(const u32 node); - int (*set_requirement)(const u32 node, - const u32 capabilities, - const u32 qos, - const enum zynqmp_pm_request_ack ack); - int (*aes)(const u64 address, u32 *out); -}; int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 *ret_payload); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) -const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); +int zynqmp_pm_get_api_version(u32 *version); +int zynqmp_pm_get_chipid(u32 *idcode, u32 *version); +int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out); +int zynqmp_pm_clock_enable(u32 clock_id); +int zynqmp_pm_clock_disable(u32 clock_id); +int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state); +int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider); +int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider); +int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate); +int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate); +int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id); +int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id); +int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode); +int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode); +int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data); +int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data); +int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value); +int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type); +int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, + const enum zynqmp_pm_reset_action assert_flag); +int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status); +int zynqmp_pm_init_finalize(void); +int 
zynqmp_pm_set_suspend_mode(u32 mode); +int zynqmp_pm_request_node(const u32 node, const u32 capabilities, + const u32 qos, const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_release_node(const u32 node); +int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_aes_engine(const u64 address, u32 *out); +int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags); +int zynqmp_pm_fpga_get_status(u32 *value); +int zynqmp_pm_write_ggs(u32 index, u32 value); +int zynqmp_pm_read_ggs(u32 index, u32 *value); +int zynqmp_pm_write_pggs(u32 index, u32 value); +int zynqmp_pm_read_pggs(u32 index, u32 *value); +int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype); +int zynqmp_pm_set_boot_health_status(u32 value); #else static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) { return ERR_PTR(-ENODEV); } +static inline int zynqmp_pm_get_api_version(u32 *version) +{ + return -ENODEV; +} +static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) +{ + return -ENODEV; +} +static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, + u32 *out) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_enable(u32 clock_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_disable(u32 clock_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode) +{ + return -ENODEV; +} +static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data) +{ + return -ENODEV; +} +static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type) +{ + return -ENODEV; +} +static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, + const enum zynqmp_pm_reset_action assert_flag) +{ + return -ENODEV; +} +static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, + u32 *status) +{ + return -ENODEV; +} +static inline int zynqmp_pm_init_finalize(void) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_suspend_mode(u32 mode) +{ + return -ENODEV; +} +static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} +static inline int zynqmp_pm_release_node(const u32 node) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_requirement(const u32 node, + const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} +static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out) +{ + return -ENODEV; +} +static inline int 
zynqmp_pm_fpga_load(const u64 address, const u32 size, + const u32 flags) +{ + return -ENODEV; +} +static inline int zynqmp_pm_fpga_get_status(u32 *value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_write_ggs(u32 index, u32 value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_read_ggs(u32 index, u32 *value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_write_pggs(u32 index, u32 value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_read_pggs(u32 index, u32 *value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_boot_health_status(u32 value) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h index 7fc95d5c95bb..141ac3f251e6 100644 --- a/include/linux/fpga/adi-axi-common.h +++ b/include/linux/fpga/adi-axi-common.h @@ -11,9 +11,13 @@ #ifndef ADI_AXI_COMMON_H_ #define ADI_AXI_COMMON_H_ -#define ADI_AXI_REG_VERSION 0x0000 +#define ADI_AXI_REG_VERSION 0x0000 #define ADI_AXI_PCORE_VER(major, minor, patch) \ (((major) << 16) | ((minor) << 8) | (patch)) +#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff) +#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff) +#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff) + #endif /* ADI_AXI_COMMON_H_ */ diff --git a/include/linux/frame.h b/include/linux/frame.h index 02d3ca2d9598..303cda600e56 100644 --- a/include/linux/frame.h +++ b/include/linux/frame.h @@ -15,9 +15,20 @@ static void __used __section(.discard.func_stack_frame_non_standard) \ *__func_stack_frame_non_standard_##func = func +/* + * This macro indicates that the following intra-function call is valid. + * Any non-annotated intra-function call will cause objtool to issue a warning. + */ +#define ANNOTATE_INTRA_FUNCTION_CALL \ + 999: \ + .pushsection .discard.intra_function_calls; \ + .long 999b; \ + .popsection; + #else /* !CONFIG_STACK_VALIDATION */ #define STACK_FRAME_NON_STANDARD(func) +#define ANNOTATE_INTRA_FUNCTION_CALL #endif /* CONFIG_STACK_VALIDATION */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 4f6f59b4f22a..3f881a892ea7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -24,7 +24,6 @@ #include <linux/capability.h> #include <linux/semaphore.h> #include <linux/fcntl.h> -#include <linux/fiemap.h> #include <linux/rculist_bl.h> #include <linux/atomic.h> #include <linux/shrinker.h> @@ -48,6 +47,7 @@ struct backing_dev_info; struct bdi_writeback; struct bio; struct export_operations; +struct fiemap_extent_info; struct hd_geometry; struct iovec; struct kiocb; @@ -292,6 +292,7 @@ enum positive_aop_returns { struct page; struct address_space; struct writeback_control; +struct readahead_control; /* * Write life time hint values. 
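Returning to the xlnx-zynqmp.h conversion above: with the zynqmp_eemi_ops table removed, drivers now call the exported zynqmp_pm_*() helpers directly, and each helper has a stub returning -ENODEV when CONFIG_ZYNQMP_FIRMWARE is not reachable. A minimal sketch using two of the new entry points; demo_zynqmp_clk_on() and clk_id are illustrative, not part of the patch:

#include <linux/firmware/xlnx-zynqmp.h>

static int demo_zynqmp_clk_on(u32 clk_id)
{
        u32 version;
        int ret;

        /* Fails with -ENODEV when the firmware interface is absent. */
        ret = zynqmp_pm_get_api_version(&version);
        if (ret)
                return ret;

        return zynqmp_pm_clock_enable(clk_id);
}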
@@ -375,6 +376,7 @@ struct address_space_operations { */ int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); + void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, @@ -976,6 +978,7 @@ struct file { #endif /* #ifdef CONFIG_EPOLL */ struct address_space *f_mapping; errseq_t f_wb_err; + errseq_t f_sb_err; /* for syncfs */ } __randomize_layout __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ @@ -983,7 +986,7 @@ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[0]; + unsigned char f_handle[]; }; static inline struct file *get_file(struct file *f) @@ -1045,6 +1048,7 @@ struct lock_manager_operations { bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); }; struct lock_manager { @@ -1409,6 +1413,8 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 +#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ + /* Possible states of 'frozen' field */ enum { SB_UNFROZEN = 0, /* FS is unfrozen */ @@ -1520,6 +1526,9 @@ struct super_block { /* Being remounted read-only */ int s_readonly_remount; + /* per-sb errseq_t for reporting writeback errors via syncfs */ + errseq_t s_wb_err; + /* AIO completions deferred from interrupt context */ struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; @@ -1673,10 +1682,10 @@ static inline int sb_start_write_trylock(struct super_block *sb) * * Since page fault freeze protection behaves as a lock, users have to preserve * ordering of freeze protection and other filesystem locks. It is advised to - * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault + * put sb_start_pagefault() close to mmap_lock in lock ordering. Page fault * handling code implies lock dependency: * - * mmap_sem + * mmap_lock * -> sb_start_pagefault */ static inline void sb_start_pagefault(struct super_block *sb) @@ -1721,7 +1730,11 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino extern int vfs_rmdir(struct inode *, struct dentry *); extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); -extern int vfs_whiteout(struct inode *, struct dentry *); + +static inline int vfs_whiteout(struct inode *dir, struct dentry *dentry) +{ + return vfs_mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); +} extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag); @@ -1745,19 +1758,6 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, extern void inode_init_owner(struct inode *inode, const struct inode *dir, umode_t mode); extern bool may_open_dev(const struct path *path); -/* - * VFS FS_IOC_FIEMAP helper definitions. 
- */ -struct fiemap_extent_info { - unsigned int fi_flags; /* Flags as passed from user */ - unsigned int fi_extents_mapped; /* Number of mapped extents */ - unsigned int fi_extents_max; /* Size of fiemap_extent array */ - struct fiemap_extent __user *fi_extents_start; /* Start of - fiemap_extent array */ -}; -int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, - u64 phys, u64 len, u32 flags); -int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); /* * This is the "filldir" function type, used by readdir() to let @@ -2156,6 +2156,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * * I_CREATING New object's inode in the middle of setting up. * + * I_DONTCACHE Evict inode as soon as it is not used anymore. + * * Q: What is the difference between I_WILL_FREE and I_FREEING? */ #define I_DIRTY_SYNC (1 << 0) @@ -2178,6 +2180,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) +#define I_DONTCACHE (1 << 16) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) @@ -2581,7 +2584,6 @@ extern struct kmem_cache *names_cachep; #ifdef CONFIG_BLOCK extern int register_blkdev(unsigned int, const char *); extern void unregister_blkdev(unsigned int, const char *); -extern void bdev_unhash_inode(dev_t dev); extern struct block_device *bdget(dev_t); extern struct block_device *bdgrab(struct block_device *bdev); extern void bd_set_size(struct block_device *, loff_t size); @@ -2590,7 +2592,6 @@ extern void bdput(struct block_device *); extern void invalidate_bdev(struct block_device *); extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); extern int sync_blockdev(struct block_device *bdev); -extern void kill_bdev(struct block_device *); extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern void emergency_thaw_bdev(struct super_block *sb); @@ -2606,7 +2607,6 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb) #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } -static inline void kill_bdev(struct block_device *bdev) {} static inline void invalidate_bdev(struct block_device *bdev) {} static inline struct super_block *freeze_bdev(struct block_device *sb) @@ -2637,7 +2637,6 @@ extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; #ifdef CONFIG_BLOCK -extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); @@ -2723,7 +2722,6 @@ extern bool is_bad_inode(struct inode *); extern int revalidate_disk(struct gendisk *); extern int check_disk_change(struct block_device *); extern int __invalidate_device(struct block_device *, bool); -extern int invalidate_partition(struct gendisk *, int); #endif unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -2827,6 +2825,18 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) return errseq_sample(&mapping->wb_err); } +/** + * file_sample_sb_err - sample the current 
errseq_t to test for later errors + * @file: file pointer to be sampled + * + * Grab the most current superblock-level errseq_t value for the given + * struct file. + */ +static inline errseq_t file_sample_sb_err(struct file *file) +{ + return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); +} + static inline int filemap_nr_thps(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS @@ -3049,8 +3059,10 @@ extern int inode_needs_sync(struct inode *inode); extern int generic_delete_inode(struct inode *inode); static inline int generic_drop_inode(struct inode *inode) { - return !inode->i_nlink || inode_unhashed(inode); + return !inode->i_nlink || inode_unhashed(inode) || + (inode->i_state & I_DONTCACHE); } +extern void d_mark_dontcache(struct inode *inode); extern struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), @@ -3070,6 +3082,9 @@ extern struct inode *find_inode_nowait(struct super_block *, int (*match)(struct inode *, unsigned long, void *), void *data); +extern struct inode *find_inode_rcu(struct super_block *, unsigned long, + int (*)(struct inode *, void *), void *); +extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -3124,6 +3139,8 @@ extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t *count, unsigned int flags); +extern ssize_t generic_file_buffered_read(struct kiocb *iocb, + struct iov_iter *to, ssize_t already_read); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); @@ -3299,14 +3316,6 @@ static inline int vfs_fstat(int fd, struct kstat *stat) extern const char *vfs_get_link(struct dentry *, struct delayed_call *); extern int vfs_readlink(struct dentry *, char __user *, int); -extern int __generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, - loff_t start, loff_t len, - get_block_t *get_block); -extern int generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, u64 start, - u64 len, get_block_t *get_block); - extern struct file_system_type *get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); @@ -3394,11 +3403,6 @@ extern void setattr_copy(struct inode *inode, const struct iattr *attr); extern int file_update_time(struct file *file); -static inline bool io_is_direct(struct file *filp) -{ - return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); -} - static inline bool vma_is_dax(const struct vm_area_struct *vma) { return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); @@ -3423,7 +3427,7 @@ static inline int iocb_flags(struct file *file) int res = 0; if (file->f_flags & O_APPEND) res |= IOCB_APPEND; - if (io_is_direct(file)) + if (file->f_flags & O_DIRECT) res |= IOCB_DIRECT; if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) res |= IOCB_DSYNC; @@ -3536,11 +3540,11 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, struct ctl_table; int
proc_nr_files(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int proc_nr_dentry(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int proc_nr_inodes(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int __init get_filesystem_list(char *buf); #define __FMODE_EXEC ((__force int) FMODE_EXEC) diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index e6c3e4c61dad..5f24fcbfbfb4 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -85,7 +85,7 @@ struct p_log { * Superblock creation fills in ->root whereas reconfiguration begins with this * already set. * - * See Documentation/filesystems/mount_api.txt + * See Documentation/filesystems/mount_api.rst */ struct fs_context { const struct fs_context_operations *ops; diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index d5ba431b5d63..3f0b19dcfae7 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -6,7 +6,7 @@ * * NOTE!!! See: * - * Documentation/filesystems/caching/backend-api.txt + * Documentation/filesystems/caching/backend-api.rst * * for a description of the cache backend interface declared here. */ @@ -46,7 +46,7 @@ struct fscache_cache_tag { unsigned long flags; #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ atomic_t usage; - char name[0]; /* tag name */ + char name[]; /* tag name */ }; /* @@ -454,7 +454,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object) * Set the maximum size an object is permitted to reach, implying the highest * byte that may be written. Intended to be called by the attr_changed() op. * - * See Documentation/filesystems/caching/backend-api.txt for a complete + * See Documentation/filesystems/caching/backend-api.rst for a complete * description. */ static inline diff --git a/include/linux/fscache.h b/include/linux/fscache.h index ad044c0cb1f3..a1c928fe98e7 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -6,7 +6,7 @@ * * NOTE!!! See: * - * Documentation/filesystems/caching/netfs-api.txt + * Documentation/filesystems/caching/netfs-api.rst * * for a description of the network filesystem interface declared here. */ @@ -233,7 +233,7 @@ extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_ * * Register a filesystem as desiring caching services if they're available. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -253,7 +253,7 @@ int fscache_register_netfs(struct fscache_netfs *netfs) * Indicate that a filesystem no longer desires caching services for the * moment. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -270,7 +270,7 @@ void fscache_unregister_netfs(struct fscache_netfs *netfs) * Acquire a specific cache referral tag that can be used to select a specific * cache in which to cache an index. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. 
*/ static inline @@ -288,7 +288,7 @@ struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) * * Release a reference to a cache referral tag previously looked up. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -315,7 +315,7 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag) * that can be used to locate files. This is done by requesting a cookie for * each index in the path to the file. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -351,7 +351,7 @@ struct fscache_cookie *fscache_acquire_cookie( * provided to update the auxiliary data in the cache before the object is * disconnected. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -394,7 +394,7 @@ int fscache_check_consistency(struct fscache_cookie *cookie, * cookie. The auxiliary data on the cookie will be updated first if @aux_data * is set. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -410,7 +410,7 @@ void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) * * Permit data-storage cache objects to be pinned in the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -425,7 +425,7 @@ int fscache_pin_cookie(struct fscache_cookie *cookie) * * Permit data-storage cache objects to be unpinned from the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -441,7 +441,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie) * changed. This includes the data size. These attributes will be obtained * through the get_attr() cookie definition op. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -463,7 +463,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie) * * This can be called with spinlocks held. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -479,7 +479,7 @@ void fscache_invalidate(struct fscache_cookie *cookie) * * Wait for the invalidation of an object to complete. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -498,7 +498,7 @@ void fscache_wait_on_invalidate(struct fscache_cookie *cookie) * cookie so that a write to that object within the space can always be * honoured. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. 
*/ static inline @@ -533,7 +533,7 @@ int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) * Else, if the page is unbacked, -ENODATA is returned and a block may have * been allocated in the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -582,7 +582,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie, * regard to different pages, the return values are prioritised in that order. * Any pages submitted for reading are removed from the pages list. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -617,7 +617,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, * Else, a block will be allocated if one wasn't already, and 0 will be * returned * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -667,7 +667,7 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, * be cleared at the completion of the write to indicate the success or failure * of the operation. Note that the completion may happen before the return. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -693,7 +693,7 @@ int fscache_write_page(struct fscache_cookie *cookie, * Note that this cannot cancel any outstanding I/O operations between this * page and the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -711,7 +711,7 @@ void fscache_uncache_page(struct fscache_cookie *cookie, * * Ask the cache if a page is being written to the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -731,7 +731,7 @@ bool fscache_check_page_write(struct fscache_cookie *cookie, * Ask the cache to wake us up when a page is no longer being written to the * cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. 
*/ static inline diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index e3c2d2a15525..2862ca5fea33 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -15,12 +15,15 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/parser.h> #include <linux/slab.h> #include <uapi/linux/fscrypt.h> #define FS_CRYPTO_BLOCK_SIZE 16 +union fscrypt_context; struct fscrypt_info; +struct seq_file; struct fscrypt_str { unsigned char *name; @@ -56,10 +59,12 @@ struct fscrypt_name { struct fscrypt_operations { unsigned int flags; const char *key_prefix; - int (*get_context)(struct inode *, void *, size_t); - int (*set_context)(struct inode *, const void *, size_t, void *); - bool (*dummy_context)(struct inode *); - bool (*empty_dir)(struct inode *); + int (*get_context)(struct inode *inode, void *ctx, size_t len); + int (*set_context)(struct inode *inode, const void *ctx, size_t len, + void *fs_data); + const union fscrypt_context *(*get_dummy_context)( + struct super_block *sb); + bool (*empty_dir)(struct inode *inode); unsigned int max_namelen; bool (*has_stable_inodes)(struct super_block *sb); void (*get_ino_and_lblk_bits)(struct super_block *sb, @@ -75,6 +80,7 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode) /** * fscrypt_needs_contents_encryption() - check whether an inode needs * contents encryption + * @inode: the inode to check * * Return: %true iff the inode is an encrypted regular file and the kernel was * built with fscrypt support. @@ -87,10 +93,12 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); } -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +static inline const union fscrypt_context * +fscrypt_get_dummy_context(struct super_block *sb) { - return inode->i_sb->s_cop->dummy_context && - inode->i_sb->s_cop->dummy_context(inode); + if (!sb->s_cop->get_dummy_context) + return NULL; + return sb->s_cop->get_dummy_context(sb); } /* @@ -106,22 +114,21 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) } /* crypto.c */ -extern void fscrypt_enqueue_decrypt_work(struct work_struct *); - -extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, - unsigned int len, - unsigned int offs, - gfp_t gfp_flags); -extern int fscrypt_encrypt_block_inplace(const struct inode *inode, - struct page *page, unsigned int len, - unsigned int offs, u64 lblk_num, - gfp_t gfp_flags); - -extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, - unsigned int offs); -extern int fscrypt_decrypt_block_inplace(const struct inode *inode, - struct page *page, unsigned int len, - unsigned int offs, u64 lblk_num); +void fscrypt_enqueue_decrypt_work(struct work_struct *); + +struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags); +int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num, gfp_t gfp_flags); + +int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, + unsigned int offs); +int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num); static inline bool fscrypt_is_bounce_page(struct page *page) { @@ -133,78 +140,90 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) return (struct page *)page_private(bounce_page); } -extern void 
fscrypt_free_bounce_page(struct page *bounce_page); +void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ -extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); -extern int fscrypt_ioctl_get_policy(struct file *, void __user *); -extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); -extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); -extern int fscrypt_has_permitted_context(struct inode *, struct inode *); -extern int fscrypt_inherit_context(struct inode *, struct inode *, - void *, bool); +int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg); +int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); +int fscrypt_has_permitted_context(struct inode *parent, struct inode *child); +int fscrypt_inherit_context(struct inode *parent, struct inode *child, + void *fs_data, bool preload); + +struct fscrypt_dummy_context { + const union fscrypt_context *ctx; +}; + +int fscrypt_set_test_dummy_encryption(struct super_block *sb, + const substring_t *arg, + struct fscrypt_dummy_context *dummy_ctx); +void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, + struct super_block *sb); +static inline void +fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx) +{ + kfree(dummy_ctx->ctx); + dummy_ctx->ctx = NULL; +} + /* keyring.c */ -extern void fscrypt_sb_free(struct super_block *sb); -extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); -extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); -extern int fscrypt_ioctl_remove_key_all_users(struct file *filp, - void __user *arg); -extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); +void fscrypt_sb_free(struct super_block *sb); +int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); +int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); +int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); /* keysetup.c */ -extern int fscrypt_get_encryption_info(struct inode *); -extern void fscrypt_put_encryption_info(struct inode *); -extern void fscrypt_free_inode(struct inode *); -extern int fscrypt_drop_inode(struct inode *inode); +int fscrypt_get_encryption_info(struct inode *inode); +void fscrypt_put_encryption_info(struct inode *inode); +void fscrypt_free_inode(struct inode *inode); +int fscrypt_drop_inode(struct inode *inode); /* fname.c */ -extern int fscrypt_setup_filename(struct inode *, const struct qstr *, - int lookup, struct fscrypt_name *); +int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname, + int lookup, struct fscrypt_name *fname); static inline void fscrypt_free_filename(struct fscrypt_name *fname) { kfree(fname->crypto_buf.name); } -extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, - struct fscrypt_str *); -extern void fscrypt_fname_free_buffer(struct fscrypt_str *); -extern int fscrypt_fname_disk_to_usr(const struct inode *inode, - u32 hash, u32 minor_hash, - const struct fscrypt_str *iname, - struct fscrypt_str *oname); -extern bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len); -extern u64 fscrypt_fname_siphash(const struct inode *dir, - const struct qstr *name); +int fscrypt_fname_alloc_buffer(const struct inode *inode, u32 
max_encrypted_len, + struct fscrypt_str *crypto_str); +void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str); +int fscrypt_fname_disk_to_usr(const struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname); +bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len); +u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name); /* bio.c */ -extern void fscrypt_decrypt_bio(struct bio *); -extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, - unsigned int); +void fscrypt_decrypt_bio(struct bio *bio); +int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, + sector_t pblk, unsigned int len); /* hooks.c */ -extern int fscrypt_file_open(struct inode *inode, struct file *filp); -extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, - struct dentry *dentry); -extern int __fscrypt_prepare_rename(struct inode *old_dir, - struct dentry *old_dentry, - struct inode *new_dir, - struct dentry *new_dentry, - unsigned int flags); -extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, - struct fscrypt_name *fname); -extern int fscrypt_prepare_setflags(struct inode *inode, - unsigned int oldflags, unsigned int flags); -extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, - unsigned int max_len, - struct fscrypt_str *disk_link); -extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, - unsigned int len, - struct fscrypt_str *disk_link); -extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, - unsigned int max_size, - struct delayed_call *done); +int fscrypt_file_open(struct inode *inode, struct file *filp); +int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, + struct dentry *dentry); +int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags); +int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct fscrypt_name *fname); +int fscrypt_prepare_setflags(struct inode *inode, + unsigned int oldflags, unsigned int flags); +int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, struct fscrypt_str *disk_link); +const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done); static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { @@ -222,9 +241,10 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) return false; } -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +static inline const union fscrypt_context * +fscrypt_get_dummy_context(struct super_block *sb) { - return false; + return NULL; } static inline void fscrypt_handle_d_move(struct dentry *dentry) @@ -319,6 +339,20 @@ static inline int fscrypt_inherit_context(struct inode *parent, return -EOPNOTSUPP; } +struct fscrypt_dummy_context { +}; + +static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq, + char sep, + struct super_block *sb) +{ +} + +static inline void +fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx) +{ +} + /* keyring.c */ static inline void fscrypt_sb_free(struct super_block *sb) { @@ -504,7 +538,7 @@ 
static inline void fscrypt_set_ops(struct super_block *sb, #endif /* !CONFIG_FS_ENCRYPTION */ /** - * fscrypt_require_key - require an inode's encryption key + * fscrypt_require_key() - require an inode's encryption key * @inode: the inode we need the key for * * If the inode is encrypted, set up its encryption key if not already done. @@ -530,7 +564,8 @@ static inline int fscrypt_require_key(struct inode *inode) } /** - * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory + * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted + * directory * @old_dentry: an existing dentry for the inode being linked * @dir: the target directory * @dentry: negative dentry for the target filename @@ -557,7 +592,8 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry, } /** - * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories + * fscrypt_prepare_rename() - prepare for a rename between possibly-encrypted + * directories * @old_dir: source directory * @old_dentry: dentry for source file * @new_dir: target directory @@ -590,7 +626,8 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir, } /** - * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory + * fscrypt_prepare_lookup() - prepare to lookup a name in a possibly-encrypted + * directory * @dir: directory being searched * @dentry: filename being looked up * @fname: (output) the name to use to search the on-disk directory @@ -623,7 +660,8 @@ static inline int fscrypt_prepare_lookup(struct inode *dir, } /** - * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes + * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's + * attributes * @dentry: dentry through which the inode is being changed * @attr: attributes to change * @@ -648,7 +686,7 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry, } /** - * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink + * fscrypt_prepare_symlink() - prepare to create a possibly-encrypted symlink * @dir: directory in which the symlink is being created * @target: plaintext symlink target * @len: length of @target excluding null terminator @@ -676,7 +714,7 @@ static inline int fscrypt_prepare_symlink(struct inode *dir, unsigned int max_len, struct fscrypt_str *disk_link) { - if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) + if (IS_ENCRYPTED(dir) || fscrypt_get_dummy_context(dir->i_sb) != NULL) return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); disk_link->name = (unsigned char *)target; @@ -687,7 +725,7 @@ static inline int fscrypt_prepare_symlink(struct inode *dir, } /** - * fscrypt_encrypt_symlink - encrypt the symlink target if needed + * fscrypt_encrypt_symlink() - encrypt the symlink target if needed * @inode: symlink inode * @target: plaintext symlink target * @len: length of @target excluding null terminator diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h index a0e2e6b19b57..154e541ce57e 100644 --- a/include/linux/fsl/bestcomm/bestcomm.h +++ b/include/linux/fsl/bestcomm/bestcomm.h @@ -27,7 +27,7 @@ */ struct bcom_bd { u32 status; - u32 data[0]; /* variable payload size */ + u32 data[]; /* variable payload size */ }; /* ======================================================================== */ diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index 75884563059f..884b8f8ca06d 100644 --- 
a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -135,7 +135,7 @@ struct ptp_qoriq_registers { #define DEFAULT_CKSEL 1 #define DEFAULT_TMR_PRSC 2 #define DEFAULT_FIPER1_PERIOD 1000000000 -#define DEFAULT_FIPER2_PERIOD 100000 +#define DEFAULT_FIPER2_PERIOD 1000000000 struct ptp_qoriq { void __iomem *base; diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index ecc604e61d61..78201a6d35f6 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -121,23 +121,23 @@ static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) /* enable.c */ -extern int fsverity_ioctl_enable(struct file *filp, const void __user *arg); +int fsverity_ioctl_enable(struct file *filp, const void __user *arg); /* measure.c */ -extern int fsverity_ioctl_measure(struct file *filp, void __user *arg); +int fsverity_ioctl_measure(struct file *filp, void __user *arg); /* open.c */ -extern int fsverity_file_open(struct inode *inode, struct file *filp); -extern int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); -extern void fsverity_cleanup_inode(struct inode *inode); +int fsverity_file_open(struct inode *inode, struct file *filp); +int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); +void fsverity_cleanup_inode(struct inode *inode); /* verify.c */ -extern bool fsverity_verify_page(struct page *page); -extern void fsverity_verify_bio(struct bio *bio); -extern void fsverity_enqueue_verify_work(struct work_struct *work); +bool fsverity_verify_page(struct page *page); +void fsverity_verify_bio(struct bio *bio); +void fsverity_enqueue_verify_work(struct work_struct *work); #else /* !CONFIG_FS_VERITY */ @@ -200,6 +200,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) /** * fsverity_active() - do reads from the inode need to go through fs-verity? + * @inode: inode to check * * This checks whether ->i_verity_info has been set. * @@ -207,6 +208,8 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) + * + * Return: true if reads need to go through fs-verity, otherwise false */ static inline bool fsverity_active(const struct inode *inode) { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index db95244a62d4..e339dac91ee6 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -210,6 +210,29 @@ struct ftrace_ops { #endif }; +extern struct ftrace_ops __rcu *ftrace_ops_list; +extern struct ftrace_ops ftrace_list_end; + +/* + * Traverse the ftrace_global_list, invoking all entries. The reason that we + * can use rcu_dereference_raw_check() is that elements removed from this list + * are simply leaked, so there is no need to interact with a grace-period + * mechanism. The rcu_dereference_raw_check() calls are needed to handle + * concurrent insertions into the ftrace_global_list. + * + * Silly Alpha and silly pointer-speculation compiler optimizations! + */ +#define do_for_each_ftrace_op(op, list) \ + op = rcu_dereference_raw_check(list); \ + do + +/* + * Optimized for just a single item in the list (as that is the normal case). + */ +#define while_for_each_ftrace_op(op) \ + while (likely(op = rcu_dereference_raw_check((op)->next)) && \ + unlikely((op) != &ftrace_list_end)) + /* * Type of the current tracing. 
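The do_for_each_ftrace_op()/while_for_each_ftrace_op() pair moved into the header above brackets a loop body. A minimal sketch of the calling pattern (the walker itself is hypothetical; the shape follows the list-walking loop in kernel/trace/ftrace.c):

static void example_walk_ftrace_ops(unsigned long ip, unsigned long parent_ip,
				    struct pt_regs *regs)
{
	struct ftrace_ops *op;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* The list is terminated by the ftrace_list_end stub entry. */
		if (op->flags & FTRACE_OPS_FL_STUB)
			continue;
		op->func(ip, parent_ip, op, regs);
	} while_for_each_ftrace_op(op);
}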
*/ @@ -319,9 +342,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, extern int stack_tracer_enabled; -int stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ DECLARE_PER_CPU(int, disable_stack_tracer); @@ -1005,8 +1027,7 @@ extern void disable_trace_on_warning(void); extern int __disable_trace_on_warning; int tracepoint_printk_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); #else /* CONFIG_TRACING */ static inline void disable_trace_on_warning(void) { } diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index ccda97dc7f8b..0abd9a1d2852 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -2,15 +2,6 @@ #ifndef _LINUX_FTRACE_IRQ_H #define _LINUX_FTRACE_IRQ_H - -#ifdef CONFIG_FTRACE_NMI_ENTER -extern void arch_ftrace_nmi_enter(void); -extern void arch_ftrace_nmi_exit(void); -#else -static inline void arch_ftrace_nmi_enter(void) { } -static inline void arch_ftrace_nmi_exit(void) { } -#endif - #ifdef CONFIG_HWLAT_TRACER extern bool trace_hwlat_callback_enabled; extern void trace_hwlat_callback(bool enter); @@ -22,12 +13,10 @@ static inline void ftrace_nmi_enter(void) if (trace_hwlat_callback_enabled) trace_hwlat_callback(true); #endif - arch_ftrace_nmi_enter(); } static inline void ftrace_nmi_exit(void) { - arch_ftrace_nmi_exit(); #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(false); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index e0abafbb17f8..9506f8ec0974 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -171,5 +171,7 @@ struct fwnode_operations { #define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) extern u32 fw_devlink_get_flags(void); +void fw_devlink_pause(void); +void fw_devlink_resume(void); #endif diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 5b14a0f38124..0bd581003cd5 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -76,7 +76,7 @@ struct gen_pool_chunk { void *owner; /* private data to retrieve at alloc time */ unsigned long start_addr; /* start address of memory chunk */ unsigned long end_addr; /* end address of memory chunk (inclusive) */ - unsigned long bits[0]; /* bitmap for allocating memory chunk */ + unsigned long bits[]; /* bitmap for allocating memory chunk */ }; /* diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 9b3fffdf4011..392aad5e29a2 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -39,15 +39,6 @@ extern struct class block_class; #include <linux/fs.h> #include <linux/workqueue.h> -struct disk_stats { - u64 nsecs[NR_STAT_GROUPS]; - unsigned long sectors[NR_STAT_GROUPS]; - unsigned long ios[NR_STAT_GROUPS]; - unsigned long merges[NR_STAT_GROUPS]; - unsigned long io_ticks; - local_t in_flight[2]; -}; - #define PARTITION_META_INFO_VOLNAMELTH 64 /* * Enough for the string representation of any kind of UUID plus NULL. @@ -68,7 +59,13 @@ struct hd_struct { * can be non-atomic on 32bit machines with 64bit sector_t. 
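The [0]-to-[] conversions in this series (gen_pool_chunk above, fscache_cache_tag and bcom_bd earlier, the greybus structures below) replace zero-length arrays with C99 flexible array members. A sketch of how such a trailing-payload structure is typically sized at allocation, using a hypothetical struct and struct_size() from linux/overflow.h:

struct example_tlv {
	__le16 len;
	u8 data[];		/* flexible array member, must come last */
};

static struct example_tlv *example_tlv_alloc(size_t payload, gfp_t gfp)
{
	/* struct_size() adds the header and payload sizes with overflow checking. */
	struct example_tlv *tlv = kzalloc(struct_size(tlv, data, payload), gfp);

	if (tlv)
		tlv->len = cpu_to_le16(payload);
	return tlv;
}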
*/ sector_t nr_sects; +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) seqcount_t nr_sects_seq; +#endif + unsigned long stamp; + struct disk_stats __percpu *dkstats; + struct percpu_ref ref; + sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; @@ -78,13 +75,6 @@ struct hd_struct { #ifdef CONFIG_FAIL_MAKE_REQUEST int make_it_fail; #endif - unsigned long stamp; -#ifdef CONFIG_SMP - struct disk_stats __percpu *dkstats; -#else - struct disk_stats dkstats; -#endif - struct percpu_ref ref; struct rcu_work rcu_work; }; @@ -169,8 +159,6 @@ struct disk_part_tbl { struct disk_events; struct badblocks; -#if defined(CONFIG_BLK_DEV_INTEGRITY) - struct blk_integrity { const struct blk_integrity_profile *profile; unsigned char flags; @@ -179,8 +167,6 @@ struct blk_integrity { unsigned char tag_size; }; -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - struct gendisk { /* major, first_minor and minors are input parameters only, * don't use directly. Use disk_devt() and disk_max_parts(). @@ -217,11 +203,20 @@ struct gendisk { #ifdef CONFIG_BLK_DEV_INTEGRITY struct kobject integrity_kobj; #endif /* CONFIG_BLK_DEV_INTEGRITY */ +#if IS_ENABLED(CONFIG_CDROM) + struct cdrom_device_info *cdi; +#endif int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; }; +#if IS_REACHABLE(CONFIG_CDROM) +#define disk_to_cdi(disk) ((disk)->cdi) +#else +#define disk_to_cdi(disk) NULL +#endif + static inline struct gendisk *part_to_disk(struct hd_struct *part) { if (likely(part)) { @@ -265,6 +260,13 @@ static inline void disk_put_part(struct hd_struct *part) put_device(part_to_dev(part)); } +static inline void hd_sects_seq_init(struct hd_struct *p) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + seqcount_init(&p->nr_sects_seq); +#endif +} + /* * Smarter partition iterator without context limits. */ @@ -339,7 +341,7 @@ extern dev_t blk_lookup_devt(const char *name, int partno); int bdev_disk_changed(struct block_device *bdev, bool invalidate); int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); -int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev); +int blk_drop_partitions(struct block_device *bdev); extern void printk_all_partitions(void); extern struct gendisk *__alloc_disk_node(int minors, int node_id); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 4aba4c86c626..67a0774e080b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -110,6 +110,11 @@ struct vm_area_struct; * the caller guarantees the allocation will allow more memory to be freed * very shortly e.g. process exiting or swapping. Users either should * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). + * Users of this flag have to be extremely careful to not deplete the reserve + * completely and implement a throttling mechanism which controls the + * consumption of the reserve based on the amount of freed memory. + * Usage of a pre-allocated pool (e.g. mempool) should be always considered + * before using this flag. * * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. * This takes precedence over the %__GFP_MEMALLOC flag if both are set. 
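The expanded %__GFP_MEMALLOC comment above points to pre-allocated pools as the safer alternative to the emergency reserves. A minimal sketch (hypothetical driver code, not part of this commit) of the recommended mempool approach:

static mempool_t *example_ctx_pool;

static int __init example_ctx_pool_init(void)
{
	/* Sixteen 256-byte objects reserved up front guarantee forward
	 * progress without dipping into the __GFP_MEMALLOC reserves. */
	example_ctx_pool = mempool_create_kmalloc_pool(16, 256);
	return example_ctx_pool ? 0 : -ENOMEM;
}

static void *example_ctx_alloc(void)
{
	/* Falls back to the reserved elements when reclaim stalls. */
	return mempool_alloc(example_ctx_pool, GFP_NOIO);
}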
@@ -307,7 +312,7 @@ struct vm_area_struct; #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) #define GFP_MOVABLE_SHIFT 3 -static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) +static inline int gfp_migratetype(const gfp_t gfp_flags) { VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index b8fc92c177eb..c4f272af7af5 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -253,6 +253,19 @@ struct gpio_irq_chip { * Store old irq_chip irq_disable callback */ void (*irq_disable)(struct irq_data *data); + /** + * @irq_unmask: + * + * Store old irq_chip irq_unmask callback + */ + void (*irq_unmask)(struct irq_data *data); + + /** + * @irq_mask: + * + * Store old irq_chip irq_mask callback + */ + void (*irq_mask)(struct irq_data *data); }; /** @@ -267,9 +280,9 @@ struct gpio_irq_chip { * @free: optional hook for chip-specific deactivation, such as * disabling module power and clock; may sleep * @get_direction: returns direction for signal "offset", 0=out, 1=in, - * (same as GPIOF_DIR_XXX), or negative error. - * It is recommended to always implement this function, even on - * input-only or output-only gpio chips. + * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN), + * or negative error. It is recommended to always implement this + * function, even on input-only or output-only gpio chips. * @direction_input: configures signal "offset" as input, or returns error * This can be omitted on input-only or output-only gpio chips. * @direction_output: configures signal "offset" as output, or returns error @@ -349,30 +362,30 @@ struct gpio_chip { struct module *owner; int (*request)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); void (*free)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*get_direction)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*direction_input)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*direction_output)(struct gpio_chip *gc, - unsigned offset, int value); + unsigned int offset, int value); int (*get)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*get_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); void (*set)(struct gpio_chip *gc, - unsigned offset, int value); + unsigned int offset, int value); void (*set_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); int (*set_config)(struct gpio_chip *gc, - unsigned offset, + unsigned int offset, unsigned long config); int (*to_irq)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); void (*dbg_show)(struct seq_file *s, struct gpio_chip *gc); @@ -459,7 +472,7 @@ struct gpio_chip { }; extern const char *gpiochip_is_requested(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); /* add/remove chips */ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, @@ -599,6 +612,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc, bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); +int gpiochip_irqchip_add_domain(struct gpio_chip *gc, + struct irq_domain *domain); + #ifdef CONFIG_LOCKDEP /* @@ -657,9 +673,9 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, } #endif /* CONFIG_LOCKDEP */ -int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset); -void gpiochip_generic_free(struct 
gpio_chip *gc, unsigned offset); -int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset, +int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset); +void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset); +int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset, unsigned long config); /** diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 1ebe5be05d5f..781a053abbb9 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -20,8 +20,11 @@ enum gpio_lookup_flags { /** * struct gpiod_lookup - lookup table - * @chip_label: name of the chip the GPIO belongs to - * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO + * @key: either the name of the chip the GPIO belongs to, or the GPIO line name + * Note that GPIO line names are not guaranteed to be globally unique, + * so this will use the first match found! + * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or + * U16_MAX to indicate that @key is a GPIO line name * @con_id: name of the GPIO from the device's point of view * @idx: index of the GPIO in case several GPIOs share the same name * @flags: bitmask of gpio_lookup_flags GPIO_* values @@ -30,7 +33,7 @@ enum gpio_lookup_flags { * functions using platform data. */ struct gpiod_lookup { - const char *chip_label; + const char *key; u16 chip_hwnum; const char *con_id; unsigned int idx; @@ -63,17 +66,17 @@ struct gpiod_hog { /* * Simple definition of a single GPIO under a con_id */ -#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \ - GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags) +#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \ + GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags) /* * Use this macro if you need to have several GPIOs under the same con_id. * Each GPIO needs to use a different index and can be accessed using * gpiod_get_index() */ -#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \ +#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \ { \ - .chip_label = _chip_label, \ + .key = _key, \ .chip_hwnum = _chip_hwnum, \ .con_id = _con_id, \ .idx = _idx, \ diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h new file mode 100644 index 000000000000..4c1e6b34e824 --- /dev/null +++ b/include/linux/gpio/regmap.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _LINUX_GPIO_REGMAP_H +#define _LINUX_GPIO_REGMAP_H + +struct device; +struct gpio_regmap; +struct irq_domain; +struct regmap; + +#define GPIO_REGMAP_ADDR_ZERO ((unsigned long)(-1)) +#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO) + +/** + * struct gpio_regmap_config - Description of a generic regmap gpio_chip. + * @parent: The parent device + * @regmap: The regmap used to access the registers + * given, the name of the device is used + * @label: (Optional) Descriptive name for GPIO controller. + * If not given, the name of the device is used. + * @ngpio: Number of GPIOs + * @names: (Optional) Array of names for gpios + * @reg_dat_base: (Optional) (in) register base address + * @reg_set_base: (Optional) set register base address + * @reg_clr_base: (Optional) clear register base address + * @reg_dir_in_base: (Optional) in setting register base address + * @reg_dir_out_base: (Optional) out setting register base address + * @reg_stride: (Optional) May be set if the registers (of the + * same type, dat, set, etc) are not consecutive. 
+ * @ngpio_per_reg: Number of GPIOs per register + * @irq_domain: (Optional) IRQ domain if the controller is + * interrupt-capable + * @reg_mask_xlate: (Optional) Translates base address and GPIO + * offset to a register/bitmask pair. If not + * given the default gpio_regmap_simple_xlate() + * is used. + * + * The ->reg_mask_xlate translates a given base address and GPIO offset to + * register and mask pair. The base address is one of the given register + * base addresses in this structure. + * + * Although all register base addresses are marked as optional, there are + * several rules: + * 1. if you only have @reg_dat_base set, then it is input-only + * 2. if you only have @reg_set_base set, then it is output-only + * 3. if you have either @reg_dir_in_base or @reg_dir_out_base set, then + * you have to set both @reg_dat_base and @reg_set_base + * 4. if you have @reg_set_base set, you may also set @reg_clr_base to have + * two different registers for setting and clearing the output. This is + * also valid for the output-only case. + * 5. @reg_dir_in_base and @reg_dir_out_base are exclusive; is there really + * hardware which has redundant registers? + * + * Note: All base addresses may have the special value %GPIO_REGMAP_ADDR_ZERO + * which forces the address to the value 0. + */ +struct gpio_regmap_config { + struct device *parent; + struct regmap *regmap; + + const char *label; + int ngpio; + const char *const *names; + + unsigned int reg_dat_base; + unsigned int reg_set_base; + unsigned int reg_clr_base; + unsigned int reg_dir_in_base; + unsigned int reg_dir_out_base; + int reg_stride; + int ngpio_per_reg; + struct irq_domain *irq_domain; + + int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, + unsigned int offset, unsigned int *reg, + unsigned int *mask); +}; + +struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config); +void gpio_regmap_unregister(struct gpio_regmap *gpio); +struct gpio_regmap *devm_gpio_regmap_register(struct device *dev, + const struct gpio_regmap_config *config); +void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data); +void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio); + +#endif /* _LINUX_GPIO_REGMAP_H */ diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h index dfbc6c39a74b..aeb8f9243545 100644 --- a/include/linux/greybus/greybus_protocols.h +++ b/include/linux/greybus/greybus_protocols.h @@ -345,7 +345,7 @@ struct gb_cap_get_ims_certificate_request { struct gb_cap_get_ims_certificate_response { __u8 result_code; - __u8 certificate[0]; + __u8 certificate[]; } __packed; /* CAP authenticate request/response */ @@ -358,7 +358,7 @@ struct gb_cap_authenticate_request { struct gb_cap_authenticate_response { __u8 result_code; __u8 response[64]; - __u8 signature[0]; + __u8 signature[]; } __packed; @@ -642,7 +642,7 @@ struct gb_hid_get_report_request { struct gb_hid_set_report_request { __u8 report_type; __u8 report_id; - __u8 report[0]; + __u8 report[]; } __packed; /* HID input report request, via interrupt pipe */ @@ -680,7 +680,7 @@ struct gb_i2c_transfer_op { struct gb_i2c_transfer_request { __le16 op_count; - struct gb_i2c_transfer_op ops[0]; /* op_count of these */ + struct gb_i2c_transfer_op ops[]; /* op_count of these */ } __packed; struct gb_i2c_transfer_response { __u8 data[0]; /* inbound data */ @@ -908,7 +908,7 @@ struct gb_spi_transfer_request { __u8 chip_select; /* of the spi device */ __u8 mode; /* of the spi device */ __le16 count; - struct 
gb_spi_transfer transfers[0]; /* count of these */ + struct gb_spi_transfer transfers[]; /* count of these */ } __packed; struct gb_spi_transfer_response { @@ -1188,7 +1188,7 @@ struct gb_svc_pwrmon_rail_count_get_response { struct gb_svc_pwrmon_rail_names_get_response { __u8 status; - __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; + __u8 name[][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; } __packed; #define GB_SVC_PWRMON_TYPE_CURR 0x01 @@ -1281,7 +1281,7 @@ struct gb_svc_intf_oops_request { struct gb_raw_send_request { __le32 len; - __u8 data[0]; + __u8 data[]; } __packed; @@ -1300,7 +1300,7 @@ struct gb_raw_send_request { /* Represents data from AP -> Module */ struct gb_uart_send_data_request { __le16 size; - __u8 data[0]; + __u8 data[]; } __packed; /* recv-data-request flags */ @@ -1313,7 +1313,7 @@ struct gb_uart_send_data_request { struct gb_uart_recv_data_request { __le16 size; __u8 flags; - __u8 data[0]; + __u8 data[]; } __packed; struct gb_uart_receive_credits_request { @@ -1382,14 +1382,14 @@ struct gb_loopback_transfer_request { __le32 len; __le32 reserved0; __le32 reserved1; - __u8 data[0]; + __u8 data[]; } __packed; struct gb_loopback_transfer_response { __le32 len; __le32 reserved0; __le32 reserved1; - __u8 data[0]; + __u8 data[]; } __packed; /* SDIO */ @@ -1530,13 +1530,13 @@ struct gb_sdio_transfer_request { __le16 data_blocks; __le16 data_blksz; - __u8 data[0]; + __u8 data[]; } __packed; struct gb_sdio_transfer_response { __le16 data_blocks; __le16 data_blksz; - __u8 data[0]; + __u8 data[]; } __packed; /* event request: generated by module and is defined as unidirectional */ @@ -1572,7 +1572,7 @@ struct gb_camera_configure_streams_request { __u8 flags; #define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01 __le16 padding; - struct gb_camera_stream_config_request config[0]; + struct gb_camera_stream_config_request config[]; } __packed; /* Greybus Camera Configure Streams response payload */ @@ -1593,7 +1593,7 @@ struct gb_camera_configure_streams_response { __u8 flags; __u8 padding[2]; __le32 data_rate; - struct gb_camera_stream_config_response config[0]; + struct gb_camera_stream_config_response config[]; }; /* Greybus Camera Capture request payload - response has no payload */ @@ -1602,7 +1602,7 @@ struct gb_camera_capture_request { __u8 streams; __u8 padding; __le16 num_frames; - __u8 settings[0]; + __u8 settings[]; } __packed; /* Greybus Camera Flush response payload - request has no payload */ @@ -1616,7 +1616,7 @@ struct gb_camera_metadata_request { __le16 frame_number; __u8 stream; __u8 padding; - __u8 metadata[0]; + __u8 metadata[]; } __packed; /* Lights */ @@ -1993,7 +1993,7 @@ struct gb_audio_integer64 { struct gb_audio_enumerated { __le32 items; __le16 names_length; - __u8 names[0]; + __u8 names[]; } __packed; struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */ @@ -2033,7 +2033,7 @@ struct gb_audio_widget { __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */ __u8 state; /* GB_AUDIO_WIDGET_STATE_* */ __u8 ncontrols; - struct gb_audio_control ctl[0]; /* 'ncontrols' entries */ + struct gb_audio_control ctl[]; /* 'ncontrols' entries */ } __packed; struct gb_audio_route { @@ -2059,7 +2059,7 @@ struct gb_audio_topology { * struct gb_audio_widget widgets[num_widgets]; * struct gb_audio_route routes[num_routes]; */ - __u8 data[0]; + __u8 data[]; } __packed; struct gb_audio_get_topology_size_response { @@ -2157,7 +2157,7 @@ struct gb_audio_streaming_event_request { struct gb_audio_send_data_request { __le64 timestamp; - __u8 data[0]; + __u8 data[]; } __packed; @@ -2171,7 
+2171,7 @@ struct gb_audio_send_data_request { struct gb_log_send_log_request { __le16 len; - __u8 msg[0]; + __u8 msg[]; } __packed; #endif /* __GREYBUS_PROTOCOLS_H */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 7c8b82f69288..03c9fece7d43 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -2,31 +2,28 @@ #ifndef LINUX_HARDIRQ_H #define LINUX_HARDIRQ_H +#include <linux/context_tracking_state.h> #include <linux/preempt.h> #include <linux/lockdep.h> #include <linux/ftrace_irq.h> #include <linux/vtime.h> #include <asm/hardirq.h> - extern void synchronize_irq(unsigned int irq); extern bool synchronize_hardirq(unsigned int irq); -#if defined(CONFIG_TINY_RCU) - -static inline void rcu_nmi_enter(void) -{ -} +#ifdef CONFIG_NO_HZ_FULL +void __rcu_irq_enter_check_tick(void); +#else +static inline void __rcu_irq_enter_check_tick(void) { } +#endif -static inline void rcu_nmi_exit(void) +static __always_inline void rcu_irq_enter_check_tick(void) { + if (context_tracking_enabled()) + __rcu_irq_enter_check_tick(); } -#else -extern void rcu_nmi_enter(void); -extern void rcu_nmi_exit(void); -#endif - /* * It is safe to do non-atomic ops on ->hardirq_context, * because NMI handlers may not preempt and the ops are @@ -41,9 +38,24 @@ extern void rcu_nmi_exit(void); } while (0) /* + * Like __irq_enter() without time accounting for fast + * interrupts, e.g. reschedule IPI where time accounting + * is more expensive than the actual interrupt. + */ +#define __irq_enter_raw() \ + do { \ + preempt_count_add(HARDIRQ_OFFSET); \ + lockdep_hardirq_enter(); \ + } while (0) + +/* * Enter irq context (on NO_HZ, update jiffies): */ -extern void irq_enter(void); +void irq_enter(void); +/* + * Like irq_enter(), but RCU is already watching. + */ +void irq_enter_rcu(void); /* * Exit irq context without processing softirqs: @@ -56,34 +68,72 @@ extern void irq_enter(void); } while (0) /* + * Like __irq_exit() without time accounting + */ +#define __irq_exit_raw() \ + do { \ + lockdep_hardirq_exit(); \ + preempt_count_sub(HARDIRQ_OFFSET); \ + } while (0) + +/* * Exit irq context and process softirqs if needed: */ -extern void irq_exit(void); +void irq_exit(void); + +/* + * Like irq_exit(), but return with RCU watching. + */ +void irq_exit_rcu(void); #ifndef arch_nmi_enter #define arch_nmi_enter() do { } while (0) #define arch_nmi_exit() do { } while (0) #endif +#ifdef CONFIG_TINY_RCU +static inline void rcu_nmi_enter(void) { } +static inline void rcu_nmi_exit(void) { } +#else +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); +#endif + +/* + * NMI vs Tracing + * -------------- + * + * We must not land in a tracer until (or after) we've changed preempt_count + * such that in_nmi() becomes true. To that effect all NMI C entry points must + * be marked 'notrace' and call nmi_enter() as soon as possible. + */ + +/* + * nmi_enter() can nest up to 15 times; see NMI_BITS. 
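The arithmetic behind that limit, assuming the NMI_BITS/NMI_OFFSET/NMI_MASK definitions in linux/preempt.h: the NMI field of preempt_count is four bits wide, each nmi_enter() below adds NMI_OFFSET to it, and the reworked BUG_ON(in_nmi() == NMI_MASK) fires only when a further entry finds all four bits already set, so 15 nested entries succeed and the 16th traps.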
+ */ #define nmi_enter() \ do { \ arch_nmi_enter(); \ printk_nmi_enter(); \ lockdep_off(); \ - ftrace_nmi_enter(); \ - BUG_ON(in_nmi()); \ - preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ + BUG_ON(in_nmi() == NMI_MASK); \ + __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ rcu_nmi_enter(); \ lockdep_hardirq_enter(); \ + instrumentation_begin(); \ + ftrace_nmi_enter(); \ + instrumentation_end(); \ } while (0) #define nmi_exit() \ do { \ + instrumentation_begin(); \ + ftrace_nmi_exit(); \ + instrumentation_end(); \ lockdep_hardirq_exit(); \ rcu_nmi_exit(); \ BUG_ON(!in_nmi()); \ - preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - ftrace_nmi_exit(); \ + __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ lockdep_on(); \ printk_nmi_exit(); \ arch_nmi_exit(); \ diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index ff25aeb95ee4..9850d59d6f1c 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -220,6 +220,8 @@ ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer, ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame, void *buffer, size_t size); int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame); +int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame, + const void *buffer, size_t size); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, diff --git a/include/linux/highmem.h b/include/linux/highmem.h index ea5cdbd8c2c3..d6e82e3de027 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -32,8 +32,65 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size) #include <asm/kmap_types.h> #ifdef CONFIG_HIGHMEM +extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot); +extern void kunmap_atomic_high(void *kvaddr); #include <asm/highmem.h> +#ifndef ARCH_HAS_KMAP_FLUSH_TLB +static inline void kmap_flush_tlb(unsigned long addr) { } +#endif + +#ifndef kmap_prot +#define kmap_prot PAGE_KERNEL +#endif + +void *kmap_high(struct page *page); +static inline void *kmap(struct page *page) +{ + void *addr; + + might_sleep(); + if (!PageHighMem(page)) + addr = page_address(page); + else + addr = kmap_high(page); + kmap_flush_tlb((unsigned long)addr); + return addr; +} + +void kunmap_high(struct page *page); + +static inline void kunmap(struct page *page) +{ + might_sleep(); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} + +/* + * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because + * no global lock is needed and because the kmap code must perform a global TLB + * invalidation when the kmap pool wraps. + * + * However when holding an atomic kmap it is not legal to sleep, so atomic + * kmaps are appropriate for short, tight code paths only. + * + * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap + * gives a more generic (and caching) interface. But kmap_atomic can + * be used in IRQ contexts, so in some (very limited) cases we need + * it.
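A sketch of the short, tight code path this comment prescribes (the helper is hypothetical):

static void example_zero_page_chunk(struct page *page, size_t off, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* disables pagefaults and preemption */

	memset(vaddr + off, 0, len);		/* no sleeping allowed before unmap */
	kunmap_atomic(vaddr);
}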
+ */ +static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + preempt_disable(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + return kmap_atomic_high_prot(page, prot); +} +#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot) + /* declarations for linux/mm/highmem.c */ unsigned int nr_free_highpages(void); extern atomic_long_t _totalhigh_pages; @@ -77,15 +134,21 @@ static inline struct page *kmap_to_page(void *addr) static inline unsigned long totalhigh_pages(void) { return 0UL; } -#ifndef ARCH_HAS_KMAP static inline void *kmap(struct page *page) { might_sleep(); return page_address(page); } +static inline void kunmap_high(struct page *page) +{ +} + static inline void kunmap(struct page *page) { +#ifdef ARCH_HAS_FLUSH_ON_KUNMAP + kunmap_flush_on_unmap(page_address(page)); +#endif } static inline void *kmap_atomic(struct page *page) @@ -96,16 +159,20 @@ static inline void *kmap_atomic(struct page *page) } #define kmap_atomic_prot(page, prot) kmap_atomic(page) -static inline void __kunmap_atomic(void *addr) +static inline void kunmap_atomic_high(void *addr) { - pagefault_enable(); - preempt_enable(); + /* + * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic() + * handles re-enabling faults + preemption + */ +#ifdef ARCH_HAS_FLUSH_ON_KUNMAP + kunmap_flush_on_unmap(addr); +#endif } #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) #define kmap_flush_unused() do {} while(0) -#endif #endif /* CONFIG_HIGHMEM */ @@ -149,7 +216,9 @@ static inline void kmap_atomic_idx_pop(void) #define kunmap_atomic(addr) \ do { \ BUILD_BUG_ON(__same_type((addr), struct page *)); \ - __kunmap_atomic(addr); \ + kunmap_atomic_high(addr); \ + pagefault_enable(); \ + preempt_enable(); \ } while (0) diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 7475051100c7..f4a09ed223ac 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -10,7 +10,7 @@ #define LINUX_HMM_H #include <linux/kconfig.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> #include <linux/device.h> #include <linux/migrate.h> @@ -19,51 +19,47 @@ #include <linux/mmu_notifier.h> /* - * hmm_pfn_flag_e - HMM flag enums + * On output: + * 0 - The page is faultable and a future call with + * HMM_PFN_REQ_FAULT could succeed. + * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at + * least readable. If dev_private_owner is !NULL then this could + * point at a DEVICE_PRIVATE page. + * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID) + * HMM_PFN_ERROR - accessing the pfn is impossible and the device should + * fail. ie poisoned memory, special pages, no vma, etc * - * Flags: - * HMM_PFN_VALID: pfn is valid. It has, at least, read permission. - * HMM_PFN_WRITE: CPU page table has write permission set - * - * The driver provides a flags array for mapping page protections to device - * PTE bits. If the driver valid bit for an entry is bit 3, - * i.e., (entry & (1 << 3)), then the driver must provide - * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3. - * Same logic apply to all flags. This is the same idea as vm_page_prot in vma - * except that this is per device driver rather than per architecture. + * On input: + * 0 - Return the current state of the page, do not fault it. + * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault() + * will fail + * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault() + * will fail. 
Must be combined with HMM_PFN_REQ_FAULT. */ -enum hmm_pfn_flag_e { - HMM_PFN_VALID = 0, - HMM_PFN_WRITE, - HMM_PFN_FLAG_MAX +enum hmm_pfn_flags { + /* Output flags */ + HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1), + HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2), + HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3), + + /* Input flags */ + HMM_PFN_REQ_FAULT = HMM_PFN_VALID, + HMM_PFN_REQ_WRITE = HMM_PFN_WRITE, + + HMM_PFN_FLAGS = HMM_PFN_VALID | HMM_PFN_WRITE | HMM_PFN_ERROR, }; /* - * hmm_pfn_value_e - HMM pfn special value - * - * Flags: - * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory - * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() - * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the - * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not - * be mirrored by a device, because the entry will never have HMM_PFN_VALID - * set and the pfn value is undefined. + * hmm_pfn_to_page() - return struct page pointed to by a device entry * - * Driver provides values for none entry, error entry, and special entry. - * Driver can alias (i.e., use same value) error and special, but - * it should not alias none with error or special. - * - * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: - * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, - * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry, - * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one + * This must be called under the caller 'user_lock' after a successful + * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID + * already. */ -enum hmm_pfn_value_e { - HMM_PFN_ERROR, - HMM_PFN_NONE, - HMM_PFN_SPECIAL, - HMM_PFN_VALUE_MAX -}; +static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn) +{ + return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS); +} /* * struct hmm_range - track invalidation lock on virtual address range @@ -72,12 +68,9 @@ enum hmm_pfn_value_e { * @notifier_seq: result of mmu_interval_read_begin() * @start: range virtual start address (inclusive) * @end: range virtual end address (exclusive) - * @pfns: array of pfns (big enough for the range) - * @flags: pfn flags to match device driver page table - * @values: pfn value for some special case (none, special, error, ...) + * @hmm_pfns: array of pfns (big enough for the range) * @default_flags: default flags for the range (write, read, ... see hmm doc) * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter - * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT) * @dev_private_owner: owner of device private pages */ struct hmm_range { @@ -85,42 +78,16 @@ struct hmm_range { unsigned long notifier_seq; unsigned long start; unsigned long end; - uint64_t *pfns; - const uint64_t *flags; - const uint64_t *values; - uint64_t default_flags; - uint64_t pfn_flags_mask; - uint8_t pfn_shift; + unsigned long *hmm_pfns; + unsigned long default_flags; + unsigned long pfn_flags_mask; void *dev_private_owner; }; /* - * hmm_device_entry_to_page() - return struct page pointed to by a device entry - * @range: range use to decode device entry value - * @entry: device entry value to get corresponding struct page from - * Return: struct page pointer if entry is a valid, NULL otherwise - * - * If the device entry is valid (ie valid flag set) then return the struct page - * matching the entry value. Otherwise return NULL. 
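A minimal sketch of a consumer of the reworked interface: hmm_range_fault() now returns 0 or a negative errno, the entries live in hmm_pfns[] as plain unsigned longs, and hmm_pfn_to_page() replaces the old per-driver flag/value tables. Notifier registration and the mmu_interval_read_begin()/retry loop are elided, and fault_range_and_collect() is an illustrative name:

#include <linux/hmm.h>

static int fault_range_and_collect(struct hmm_range *range,
                                   struct page **pages)
{
        unsigned long i, npages = (range->end - range->start) >> PAGE_SHIFT;
        int ret;

        /* Input side: ask that every entry come back valid and writable. */
        range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;

        ret = hmm_range_fault(range);
        if (ret)
                return ret;

        /* Output side: decode the entries under the caller's lock. */
        for (i = 0; i < npages; i++)
                pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);

        return 0;
}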
- */ -static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range, - uint64_t entry) -{ - if (entry == range->values[HMM_PFN_NONE]) - return NULL; - if (entry == range->values[HMM_PFN_ERROR]) - return NULL; - if (entry == range->values[HMM_PFN_SPECIAL]) - return NULL; - if (!(entry & range->flags[HMM_PFN_VALID])) - return NULL; - return pfn_to_page(entry >> range->pfn_shift); -} - -/* * Please see Documentation/vm/hmm.rst for how to use the range API. */ -long hmm_range_fault(struct hmm_range *range); +int hmm_range_fault(struct hmm_range *range); /* * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 62d216ff1097..c230b4e70d75 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -17,9 +17,12 @@ enum host1x_class { HOST1X_CLASS_GR3D = 0x60, }; +struct host1x; struct host1x_client; struct iommu_group; +u64 host1x_get_dma_mask(struct host1x *host1x); + /** * struct host1x_client_ops - host1x client operations * @init: host1x client initialization code diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index cfbb0a87c5f0..71f20776b06c 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -248,7 +248,7 @@ static inline int is_swap_pmd(pmd_t pmd) return !pmd_none(pmd) && !pmd_present(pmd); } -/* mmap_sem must be held on entry */ +/* mmap_lock must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 43a1cef8f0f1..50650d0d01b9 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -9,7 +9,7 @@ #include <linux/cgroup.h> #include <linux/list.h> #include <linux/kref.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> struct ctl_table; struct user_struct; @@ -105,14 +105,13 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, void hugepage_put_subpool(struct hugepage_subpool *spool); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); -int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); - -#ifdef CONFIG_NUMA -int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -#endif +int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); +int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, @@ -519,8 +518,8 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, int __init __alloc_bootmem_huge_page(struct hstate *h); int __init alloc_bootmem_huge_page(struct hstate *h); -void __init hugetlb_bad_size(void); void __init hugetlb_add_hstate(unsigned order); +bool __init arch_hugetlb_valid_size(unsigned long size); struct hstate *size_to_hstate(unsigned long size); #ifndef HUGE_MAX_HSTATE @@ -591,6 +590,20 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h) #include 
<asm/hugetlb.h> +#ifndef is_hugepage_only_range +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, unsigned long len) +{ + return 0; +} +#define is_hugepage_only_range is_hugepage_only_range +#endif + +#ifndef arch_clear_hugepage_flags +static inline void arch_clear_hugepage_flags(struct page *page) { } +#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#endif + #ifndef arch_make_huge_pte static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, struct page *page, int writable) diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 6058c3844a76..d7d4250cd1e4 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -80,6 +80,10 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp); extern int dbg_release_bp_slot(struct perf_event *bp); extern int reserve_bp_slot(struct perf_event *bp); extern void release_bp_slot(struct perf_event *bp); +int hw_breakpoint_weight(struct perf_event *bp); +int arch_reserve_bp_slot(struct perf_event *bp); +void arch_release_bp_slot(struct perf_event *bp); +void arch_unregister_hw_breakpoint(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 5e609f25878c..363d4a814aa1 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -436,6 +436,9 @@ devm_hwmon_device_register_with_info(struct device *dev, void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); +int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel); + /** * hwmon_is_bad_char - Is the char invalid in a hwmon name * @ch: the char to be considered diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 692c89ccf5df..40df3103e890 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -117,7 +117,7 @@ struct hv_ring_buffer { * Ring data starts here + RingDataStartOffset * !!! DO NOT place any fields below this !!! */ - u8 buffer[0]; + u8 buffer[]; } __packed; struct hv_ring_buffer_info { @@ -313,7 +313,7 @@ struct vmadd_remove_transfer_page_set { struct gpa_range { u32 byte_count; u32 byte_offset; - u64 pfn_array[0]; + u64 pfn_array[]; }; /* @@ -425,7 +425,7 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, - CHANNELMSG_22 = 22, + CHANNELMSG_MODIFYCHANNEL = 22, CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; @@ -563,7 +563,7 @@ struct vmbus_channel_gpadl_header { u32 gpadl; u16 range_buflen; u16 rangecount; - struct gpa_range range[0]; + struct gpa_range range[]; } __packed; /* This is the followup packet that contains more PFNs. */ @@ -571,7 +571,7 @@ struct vmbus_channel_gpadl_body { struct vmbus_channel_message_header header; u32 msgnumber; u32 gpadl; - u64 pfn[0]; + u64 pfn[]; } __packed; struct vmbus_channel_gpadl_created { @@ -620,6 +620,13 @@ struct vmbus_channel_tl_connect_request { guid_t host_service_id; } __packed; +/* Modify Channel parameters, cf. vmbus_send_modifychannel() */ +struct vmbus_channel_modifychannel { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 target_vp; +} __packed; + struct vmbus_channel_version_response { struct vmbus_channel_message_header header; u8 version_supported; @@ -672,7 +679,7 @@ struct vmbus_channel_msginfo { * The channel message that goes out on the "wire". 
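The msg[0] member of vmbus_channel_msginfo is converted to a C99 flexible array member just below, one of many [0] to [] conversions in this merge. Allocation sites pair such members with struct_size(); a sketch, not taken from this patch:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Allocate a msginfo whose trailing msg[] carries msglen payload bytes.
 * struct_size() is the overflow-checked form of sizeof(*mi) + msglen. */
static struct vmbus_channel_msginfo *alloc_msginfo(u32 msglen)
{
        struct vmbus_channel_msginfo *mi;

        mi = kzalloc(struct_size(mi, msg, msglen), GFP_KERNEL);
        return mi;      /* NULL on allocation failure */
}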
* It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header */ - unsigned char msg[0]; + unsigned char msg[]; }; struct vmbus_close_msg { @@ -689,11 +696,6 @@ union hv_connection_id { } u; }; -enum hv_numa_policy { - HV_BALANCED = 0, - HV_LOCALIZED, -}; - enum vmbus_device_type { HV_IDE = 0, HV_SCSI, @@ -771,6 +773,15 @@ struct vmbus_channel { void (*onchannel_callback)(void *context); void *channel_callback_context; + void (*change_target_cpu_callback)(struct vmbus_channel *channel, + u32 old, u32 new); + + /* + * Synchronize channel scheduling and channel removal; see the inline + * comments in vmbus_chan_sched() and vmbus_reset_channel_cb(). + */ + spinlock_t sched_lock; + /* * A channel can be marked for one of three modes of reading: * BATCHED - callback called from tasklet and should read @@ -802,10 +813,6 @@ struct vmbus_channel { u32 target_vp; /* The corresponding CPUID in the guest */ u32 target_cpu; - /* - * State to manage the CPU affiliation of channels. - */ - struct cpumask alloced_cpus_in_node; int numa_node; /* * Support for sub-channels. For high performance devices, @@ -854,11 +861,6 @@ struct vmbus_channel { * Support per-channel state for use by vmbus drivers. */ void *per_channel_state; - /* - * To support per-cpu lookup mapping of relid to channel, - * link up channels based on their CPU affinity. - */ - struct list_head percpu_list; /* * Defer freeing channel until after all cpu's have @@ -897,19 +899,14 @@ struct vmbus_channel { */ bool low_latency; + bool probe_done; + /* - * NUMA distribution policy: - * We support two policies: - * 1) Balanced: Here all performance critical channels are - * distributed evenly amongst all the NUMA nodes. - * This policy will be the default policy. - * 2) Localized: All channels of a given instance of a - * performance critical service will be assigned CPUs - * within a selected NUMA node. + * Cache the device ID here for easy access; this is useful, in + * particular, in situations where the channel's device_obj has + * not been allocated/initialized yet. */ - enum hv_numa_policy affinity_policy; - - bool probe_done; + u16 device_id; /* * We must offload the handling of the primary/sub channels @@ -964,12 +961,6 @@ static inline bool is_sub_channel(const struct vmbus_channel *c) return c->offermsg.offer.sub_channel_index != 0; } -static inline void set_channel_affinity_state(struct vmbus_channel *c, - enum hv_numa_policy policy) -{ - c->affinity_policy = policy; -} - static inline void set_channel_read_mode(struct vmbus_channel *c, enum hv_callback_mode mode) { @@ -1017,7 +1008,7 @@ static inline void clear_low_latency_mode(struct vmbus_channel *c) { c->low_latency = false; } -void vmbus_onmessage(void *context); +void vmbus_onmessage(struct vmbus_channel_message_header *hdr); int vmbus_request_offers(void); @@ -1531,6 +1522,7 @@ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, const guid_t *shv_host_servie_id); +int vmbus_send_modifychannel(u32 child_relid, u32 target_vp); void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. 
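The new vmbus_send_modifychannel() declared above is the transport for CHANNELMSG_MODIFYCHANNEL. A hedged sketch of a caller retargeting a channel's interrupts; move_channel_to_cpu() is an illustrative name, hv_cpu_number_to_vp_number() is assumed from asm/mshyperv.h, and locking against channel removal (the new sched_lock) is elided:

static int move_channel_to_cpu(struct vmbus_channel *chan, u32 new_cpu)
{
        int ret;

        ret = vmbus_send_modifychannel(chan->offermsg.child_relid,
                                       hv_cpu_number_to_vp_number(new_cpu));
        if (ret == 0)
                chan->target_cpu = new_cpu;     /* mirror the host-side change */
        return ret;
}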
*/ diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h index c5a977320f82..98ef73b7c8fd 100644 --- a/include/linux/i2c-mux.h +++ b/include/linux/i2c-mux.h @@ -29,7 +29,7 @@ struct i2c_mux_core { int num_adapters; int max_adapters; - struct i2c_adapter *adapter[0]; + struct i2c_adapter *adapter[]; }; struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent, diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 8c5459034f92..1e4e0de4ef8b 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -2,7 +2,7 @@ /* * i2c-smbus.h - SMBus extensions to the I2C protocol * - * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de> + * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de> */ #ifndef _LINUX_I2C_SMBUS_H @@ -39,4 +39,10 @@ static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) } #endif +#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI) +void i2c_register_spd(struct i2c_adapter *adap); +#else +static inline void i2c_register_spd(struct i2c_adapter *adap) { } +#endif + #endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 456fc17ecb1c..b8b8963f8bb9 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -2,7 +2,7 @@ /* * i2c.h - definitions for the Linux i2c bus interface * Copyright (C) 1995-2000 Simon G. Vogl - * Copyright (C) 2013-2019 Wolfram Sang <wsa@the-dreams.de> + * Copyright (C) 2013-2019 Wolfram Sang <wsa@kernel.org> * * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and * Frodo Looijaard <frodol@dds.nl> @@ -351,14 +351,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) return to_i2c_client(dev); } -static inline void *i2c_get_clientdata(const struct i2c_client *dev) +static inline void *i2c_get_clientdata(const struct i2c_client *client) { - return dev_get_drvdata(&dev->dev); + return dev_get_drvdata(&client->dev); } -static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) +static inline void i2c_set_clientdata(struct i2c_client *client, void *data) { - dev_set_drvdata(&dev->dev, data); + dev_set_drvdata(&client->dev, data); } /* I2C slave support */ @@ -408,7 +408,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * that are present. This information is used to grow the driver model tree. * For mainboards this is done statically using i2c_register_board_info(); * bus numbers identify adapters that aren't yet available. For add-on boards, - * i2c_new_device() does this dynamically with the adapter already known. + * i2c_new_client_device() does this dynamically with the adapter already known. */ struct i2c_board_info { char type[I2C_NAME_SIZE]; @@ -439,14 +439,12 @@ struct i2c_board_info { #if IS_ENABLED(CONFIG_I2C) -/* Add-on boards should register/unregister their devices; e.g. a board +/* + * Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. 
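The i2c_get_clientdata()/i2c_set_clientdata() hunk above is a pure parameter rename (dev becomes client); behaviour is unchanged. The usual pairing in a driver looks roughly like this, with my_chip as an illustrative type and the two-argument probe style of this era:

static int my_chip_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
{
        struct my_chip *chip;

        chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
        if (!chip)
                return -ENOMEM;

        i2c_set_clientdata(client, chip);       /* thin dev_set_drvdata() wrapper */
        return 0;
}

static int my_chip_remove(struct i2c_client *client)
{
        struct my_chip *chip = i2c_get_clientdata(client);

        /* tear down using chip ... */
        return 0;
}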
*/ struct i2c_client * -i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); - -struct i2c_client * i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info); /* If you don't know the exact address of an I2C device, use this variant @@ -461,12 +459,6 @@ i2c_new_scanned_device(struct i2c_adapter *adap, unsigned short const *addr_list, int (*probe)(struct i2c_adapter *adap, unsigned short addr)); -struct i2c_client * -i2c_new_probed_device(struct i2c_adapter *adap, - struct i2c_board_info *info, - unsigned short const *addr_list, - int (*probe)(struct i2c_adapter *adap, unsigned short addr)); - /* Common custom probe functions */ int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr); diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h index a445cd1a36c5..91a8612b8bf9 100644 --- a/include/linux/idle_inject.h +++ b/include/linux/idle_inject.h @@ -26,4 +26,8 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev, void idle_inject_get_duration(struct idle_inject_device *ii_dev, unsigned int *run_duration_us, unsigned int *idle_duration_us); + +void idle_inject_set_latency(struct idle_inject_device *ii_dev, + unsigned int latency_ns); + #endif /* __IDLE_INJECT_H__ */ diff --git a/include/linux/idr.h b/include/linux/idr.h index ac6e946b6767..3ade03e5c7af 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -171,7 +171,7 @@ static inline bool idr_is_empty(const struct idr *idr) */ static inline void idr_preload_end(void) { - preempt_enable(); + local_unlock(&radix_tree_preloads.lock); } /** diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 16268ef1cbcc..fe15f831841b 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -9,7 +9,7 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2019 Intel Corporation + * Copyright (c) 2018 - 2020 Intel Corporation */ #ifndef LINUX_IEEE80211_H @@ -105,6 +105,51 @@ /* extension, added by 802.11ad */ #define IEEE80211_STYPE_DMG_BEACON 0x0000 +#define IEEE80211_STYPE_S1G_BEACON 0x0010 + +/* bits unique to S1G beacon */ +#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100 + +/* see 802.11ah-2016 9.9 NDP CMAC frames */ +#define IEEE80211_S1G_1MHZ_NDP_BITS 25 +#define IEEE80211_S1G_1MHZ_NDP_BYTES 4 +#define IEEE80211_S1G_2MHZ_NDP_BITS 37 +#define IEEE80211_S1G_2MHZ_NDP_BYTES 5 + +#define IEEE80211_NDP_FTYPE_CTS 0 +#define IEEE80211_NDP_FTYPE_CF_END 0 +#define IEEE80211_NDP_FTYPE_PS_POLL 1 +#define IEEE80211_NDP_FTYPE_ACK 2 +#define IEEE80211_NDP_FTYPE_PS_POLL_ACK 3 +#define IEEE80211_NDP_FTYPE_BA 4 +#define IEEE80211_NDP_FTYPE_BF_REPORT_POLL 5 +#define IEEE80211_NDP_FTYPE_PAGING 6 +#define IEEE80211_NDP_FTYPE_PREQ 7 + +#define SM64(f, v) ((((u64)v) << f##_S) & f) + +/* NDP CMAC frame fields */ +#define IEEE80211_NDP_FTYPE 0x0000000000000007 +#define IEEE80211_NDP_FTYPE_S 0x0000000000000000 + +/* 1M Probe Request 11ah 9.9.3.1.1 */ +#define IEEE80211_NDP_1M_PREQ_ANO 0x0000000000000008 +#define IEEE80211_NDP_1M_PREQ_ANO_S 3 +#define IEEE80211_NDP_1M_PREQ_CSSID 0x00000000000FFFF0 +#define IEEE80211_NDP_1M_PREQ_CSSID_S 4 +#define IEEE80211_NDP_1M_PREQ_RTYPE 0x0000000000100000 +#define IEEE80211_NDP_1M_PREQ_RTYPE_S 20 +#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000 +#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000 +/* 2M Probe Request 11ah 9.9.3.1.2 */ +#define 
IEEE80211_NDP_2M_PREQ_ANO 0x0000000000000008 +#define IEEE80211_NDP_2M_PREQ_ANO_S 3 +#define IEEE80211_NDP_2M_PREQ_CSSID 0x0000000FFFFFFFF0 +#define IEEE80211_NDP_2M_PREQ_CSSID_S 4 +#define IEEE80211_NDP_2M_PREQ_RTYPE 0x0000001000000000 +#define IEEE80211_NDP_2M_PREQ_RTYPE_S 36 + +#define IEEE80211_ANO_NETTYPE_WILD 15 /* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */ #define IEEE80211_CTL_EXT_POLL 0x2000 @@ -121,6 +166,21 @@ #define IEEE80211_MAX_SN IEEE80211_SN_MASK #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) + +/* PV1 Layout 11ah 9.8.3.1 */ +#define IEEE80211_PV1_FCTL_VERS 0x0003 +#define IEEE80211_PV1_FCTL_FTYPE 0x001c +#define IEEE80211_PV1_FCTL_STYPE 0x00e0 +#define IEEE80211_PV1_FCTL_TODS 0x0100 +#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200 +#define IEEE80211_PV1_FCTL_PM 0x0400 +#define IEEE80211_PV1_FCTL_MOREDATA 0x0800 +#define IEEE80211_PV1_FCTL_PROTECTED 0x1000 +#define IEEE80211_PV1_FCTL_END_SP 0x2000 +#define IEEE80211_PV1_FCTL_RELAYED 0x4000 +#define IEEE80211_PV1_FCTL_ACK_POLICY 0x8000 +#define IEEE80211_PV1_FCTL_CTL_EXT 0x0f00 + static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) { return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); @@ -148,6 +208,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_FRAG_THRESHOLD 2352 #define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 +#define IEEE80211_MAX_AID_S1G 8191 #define IEEE80211_MAX_TIM_LEN 251 #define IEEE80211_MAX_MESH_PEERINGS 63 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section @@ -372,6 +433,17 @@ static inline bool ieee80211_is_data(__le16 fc) } /** + * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_ext(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_EXT); +} + + +/** * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in little-endian byteorder */ @@ -470,6 +542,18 @@ static inline bool ieee80211_is_beacon(__le16 fc) } /** + * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT && + * IEEE80211_STYPE_S1G_BEACON + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_s1g_beacon(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | + IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON); +} + +/** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder */ @@ -716,7 +800,7 @@ struct ieee80211_msrment_ie { u8 token; u8 mode; u8 type; - u8 request[0]; + u8 request[]; } __packed; /** @@ -859,6 +943,7 @@ enum ieee80211_ht_chanwidth_values { * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width + * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask * (the NSS value is the value of this field + 1) * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift @@ -866,11 +951,12 @@ enum ieee80211_ht_chanwidth_values { * using a beamforming steering matrix */ enum ieee80211_vht_opmode_bits { - IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03, 
IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0, IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1, IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2, IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3, + IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04, IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70, IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4, IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80, @@ -898,6 +984,59 @@ struct ieee80211_addba_ext_ie { u8 data; } __packed; +/** + * struct ieee80211_s1g_bcn_compat_ie + * + * S1G Beacon Compatibility element + */ +struct ieee80211_s1g_bcn_compat_ie { + __le16 compat_info; + __le16 beacon_int; + __le32 tsf_completion; +} __packed; + +/** + * struct ieee80211_s1g_oper_ie + * + * S1G Operation element + */ +struct ieee80211_s1g_oper_ie { + u8 ch_width; + u8 oper_class; + u8 primary_ch; + u8 oper_ch; + __le16 basic_mcs_nss; +} __packed; + +/** + * struct ieee80211_aid_response_ie + * + * AID Response element + */ +struct ieee80211_aid_response_ie { + __le16 aid; + u8 switch_count; + __le16 response_int; +} __packed; + +struct ieee80211_s1g_cap { + u8 capab_info[10]; + u8 supp_mcs_nss[5]; +} __packed; + +struct ieee80211_ext { + __le16 frame_control; + __le16 duration; + union { + struct { + u8 sa[ETH_ALEN]; + __le32 timestamp; + u8 change_seq; + u8 variable[0]; + } __packed s1g_beacon; + } u; +} __packed __aligned(2); + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1065,6 +1204,7 @@ struct ieee80211_mgmt { /* Supported rates membership selectors */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 +#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -1641,7 +1781,7 @@ struct ieee80211_he_operation { __le32 he_oper_params; __le16 he_mcs_nss_set; /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */ - u8 optional[0]; + u8 optional[]; } __packed; /** @@ -1653,7 +1793,7 @@ struct ieee80211_he_operation { struct ieee80211_he_spr { u8 he_sr_control; /* Optional 0 to 19 bytes: depends on @he_sr_control */ - u8 optional[0]; + u8 optional[]; } __packed; /** @@ -1731,6 +1871,9 @@ struct ieee80211_mu_edca_param_set { * @ext_nss_bw_capable: indicates whether or not the local transmitter * (rate scaling algorithm) can deal with the new logic * (dot11VHTExtendedNSSBWCapable) + * @max_vht_nss: current maximum NSS as advertised by the STA in + * operating mode notification, can be 0 in which case the + * capability data will be used to derive this (from MCS support) * * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can * vary for a given BW/MCS. This function parses the data. 
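The next hunk adds a max_vht_nss parameter to ieee80211_get_vht_max_nss(), so callers can feed in the NSS advertised in an operating mode notification; passing 0 keeps the old behaviour of deriving the limit from the MCS data in @cap. A hedged call-site sketch (peer_max_nss() is an illustrative name):

static int peer_max_nss(struct ieee80211_vht_cap *cap)
{
        /* 80 MHz, MCS 9, ext-NSS-BW capable, no opmode-notif override */
        return ieee80211_get_vht_max_nss(cap, IEEE80211_VHT_CHANWIDTH_80MHZ,
                                         9, true, 0);
}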
@@ -1739,7 +1882,8 @@ struct ieee80211_mu_edca_param_set { */ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, enum ieee80211_vht_chanwidth bw, - int mcs, bool ext_nss_bw_capable); + int mcs, bool ext_nss_bw_capable, + unsigned int max_vht_nss); /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 @@ -1814,6 +1958,8 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 #define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT 3 + #define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 #define IEEE80211_HE_MAC_CAP4_QTP 0x02 #define IEEE80211_HE_MAC_CAP4_BQR 0x04 @@ -1835,6 +1981,9 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40 #define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80 +#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20 +#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16 + /* 802.11ax HE PHY capabilities */ #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 @@ -2047,7 +2196,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) } /* HE Operation defines */ -#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 @@ -2060,6 +2209,28 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 +/** + * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field + * @primary: primary channel + * @control: control flags + * @ccfs0: channel center frequency segment 0 + * @ccfs1: channel center frequency segment 1 + * @minrate: minimum rate (in 1 Mbps units) + */ +struct ieee80211_he_6ghz_oper { + u8 primary; +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 +#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 + u8 control; + u8 ccfs0; + u8 ccfs1; + u8 minrate; +} __packed; + /* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the byte @@ -2086,7 +2257,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) oper_len++; if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO) - oper_len += 4; + oper_len += sizeof(struct ieee80211_he_6ghz_oper); /* Add the first byte (extension ID) to the total length */ oper_len++; @@ -2094,6 +2265,34 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) return oper_len; } +/** + * ieee80211_he_6ghz_oper - obtain 6 GHz operation field + * @he_oper: HE operation element (must be pre-validated for size) + * but may be %NULL + * + * Return: a pointer to the 6 GHz operation field, or %NULL + */ +static inline const struct ieee80211_he_6ghz_oper * +ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) +{ + const u8 *ret = (void *)&he_oper->optional; + u32 he_oper_params; + + if (!he_oper) + return NULL; + + he_oper_params = 
le32_to_cpu(he_oper->he_oper_params); + + if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)) + return NULL; + if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) + ret += 3; + if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) + ret++; + + return (void *)ret; +} + /* HE Spatial Reuse defines */ #define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4 #define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8 @@ -2130,6 +2329,86 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) return spr_len; } +/* S1G Capabilities Information field */ +#define S1G_CAPAB_B0_S1G_LONG BIT(0) +#define S1G_CAPAB_B0_SGI_1MHZ BIT(1) +#define S1G_CAPAB_B0_SGI_2MHZ BIT(2) +#define S1G_CAPAB_B0_SGI_4MHZ BIT(3) +#define S1G_CAPAB_B0_SGI_8MHZ BIT(4) +#define S1G_CAPAB_B0_SGI_16MHZ BIT(5) +#define S1G_CAPAB_B0_SUPP_CH_WIDTH_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B0_SUPP_CH_WIDTH_SHIFT 6 + +#define S1G_CAPAB_B1_RX_LDPC BIT(0) +#define S1G_CAPAB_B1_TX_STBC BIT(1) +#define S1G_CAPAB_B1_RX_STBC BIT(2) +#define S1G_CAPAB_B1_SU_BFER BIT(3) +#define S1G_CAPAB_B1_SU_BFEE BIT(4) +#define S1G_CAPAB_B1_BFEE_STS_MASK (BIT(5) | BIT(6) | BIT(7)) +#define S1G_CAPAB_B1_BFEE_STS_SHIFT 5 + +#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_MASK (BIT(0) | BIT(1) | BIT(2)) +#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_SHIFT 0 +#define S1G_CAPAB_B2_MU_BFER BIT(3) +#define S1G_CAPAB_B2_MU_BFEE BIT(4) +#define S1G_CAPAB_B2_PLUS_HTC_VHT BIT(5) +#define S1G_CAPAB_B2_TRAVELING_PILOT_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B2_TRAVELING_PILOT_SHIFT 6 + +#define S1G_CAPAB_B3_RD_RESPONDER BIT(0) +#define S1G_CAPAB_B3_HT_DELAYED_BA BIT(1) +#define S1G_CAPAB_B3_MAX_MPDU_LEN BIT(2) +#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_MASK (BIT(3) | BIT(4)) +#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_SHIFT 3 +#define S1G_CAPAB_B3_MIN_MPDU_START_MASK (BIT(5) | BIT(6) | BIT(7)) +#define S1G_CAPAB_B3_MIN_MPDU_START_SHIFT 5 + +#define S1G_CAPAB_B4_UPLINK_SYNC BIT(0) +#define S1G_CAPAB_B4_DYNAMIC_AID BIT(1) +#define S1G_CAPAB_B4_BAT BIT(2) +#define S1G_CAPAB_B4_TIME_ADE BIT(3) +#define S1G_CAPAB_B4_NON_TIM BIT(4) +#define S1G_CAPAB_B4_GROUP_AID BIT(5) +#define S1G_CAPAB_B4_STA_TYPE_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B4_STA_TYPE_SHIFT 6 + +#define S1G_CAPAB_B5_CENT_AUTH_CONTROL BIT(0) +#define S1G_CAPAB_B5_DIST_AUTH_CONTROL BIT(1) +#define S1G_CAPAB_B5_AMSDU BIT(2) +#define S1G_CAPAB_B5_AMPDU BIT(3) +#define S1G_CAPAB_B5_ASYMMETRIC_BA BIT(4) +#define S1G_CAPAB_B5_FLOW_CONTROL BIT(5) +#define S1G_CAPAB_B5_SECTORIZED_BEAM_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B5_SECTORIZED_BEAM_SHIFT 6 + +#define S1G_CAPAB_B6_OBSS_MITIGATION BIT(0) +#define S1G_CAPAB_B6_FRAGMENT_BA BIT(1) +#define S1G_CAPAB_B6_NDP_PS_POLL BIT(2) +#define S1G_CAPAB_B6_RAW_OPERATION BIT(3) +#define S1G_CAPAB_B6_PAGE_SLICING BIT(4) +#define S1G_CAPAB_B6_TXOP_SHARING_IMP_ACK BIT(5) +#define S1G_CAPAB_B6_VHT_LINK_ADAPT_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B6_VHT_LINK_ADAPT_SHIFT 6 + +#define S1G_CAPAB_B7_TACK_AS_PS_POLL BIT(0) +#define S1G_CAPAB_B7_DUP_1MHZ BIT(1) +#define S1G_CAPAB_B7_MCS_NEGOTIATION BIT(2) +#define S1G_CAPAB_B7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3) +#define S1G_CAPAB_B7_NDP_BFING_REPORT_POLL BIT(4) +#define S1G_CAPAB_B7_UNSOLICITED_DYN_AID BIT(5) +#define S1G_CAPAB_B7_SECTOR_TRAINING_OPERATION BIT(6) +#define S1G_CAPAB_B7_TEMP_PS_MODE_SWITCH BIT(7) + +#define S1G_CAPAB_B8_TWT_GROUPING BIT(0) +#define S1G_CAPAB_B8_BDT BIT(1) +#define S1G_CAPAB_B8_COLOR_MASK (BIT(2) | BIT(3) | BIT(4)) +#define S1G_CAPAB_B8_COLOR_SHIFT 2 +#define S1G_CAPAB_B8_TWT_REQUEST BIT(5) +#define 
S1G_CAPAB_B8_TWT_RESPOND BIT(6) +#define S1G_CAPAB_B8_PV1_FRAME BIT(7) + +#define S1G_CAPAB_B9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0) + /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 @@ -2525,8 +2804,14 @@ enum ieee80211_eid { WLAN_EID_QUIET_CHANNEL = 198, WLAN_EID_OPMODE_NOTIF = 199, + WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201, + + WLAN_EID_S1G_BCN_COMPAT = 213, + WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, + WLAN_EID_S1G_CAPABILITIES = 217, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, + WLAN_EID_S1G_OPERATION = 232, WLAN_EID_CAG_NUMBER = 237, WLAN_EID_AP_CSN = 239, WLAN_EID_FILS_INDICATION = 240, @@ -2554,9 +2839,19 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_UORA = 37, WLAN_EID_EXT_HE_MU_EDCA = 38, WLAN_EID_EXT_HE_SPR = 39, + WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET = 41, + WLAN_EID_EXT_BSS_COLOR_CHG_ANN = 42, + WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP = 43, + WLAN_EID_EXT_ESS_REPORT = 45, + WLAN_EID_EXT_OPS = 46, + WLAN_EID_EXT_HE_BSS_LOAD = 47, WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, WLAN_EID_EXT_NON_INHERITANCE = 56, + WLAN_EID_EXT_KNOWN_BSSID = 57, + WLAN_EID_EXT_SHORT_SSID_LIST = 58, + WLAN_EID_EXT_HE_6GHZ_CAPA = 59, + WLAN_EID_EXT_UL_MU_POWER_CAPA = 60, }; /* Action category code */ @@ -2787,7 +3082,7 @@ enum ieee80211_tdls_actioncode { #define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7) /* Defines support for enhanced multi-bssid advertisement*/ -#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1) +#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3) /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 @@ -3099,6 +3394,24 @@ struct ieee80211_tspec_ie { __le16 medium_time; } __packed; +struct ieee80211_he_6ghz_capa { + /* uses IEEE80211_HE_6GHZ_CAP_* below */ + __le16 capa; +} __packed; + +/* HE 6 GHz band capabilities */ +/* uses enum ieee80211_min_mpdu_spacing values */ +#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007 +/* uses enum ieee80211_vht_max_ampdu_length_exp values */ +#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038 +/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */ +#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0 +/* WLAN_HT_CAP_SM_PS_* values */ +#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600 +#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800 +#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000 +#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000 + /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame @@ -3323,6 +3636,18 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) +/* convert frequencies */ +#define MHZ_TO_KHZ(freq) ((freq) * 1000) +#define KHZ_TO_MHZ(freq) ((freq) / 1000) +#define PR_KHZ(f) KHZ_TO_MHZ(f), f % 1000 +#define KHZ_F "%d.%03d" + +/* convert powers */ +#define DBI_TO_MBI(gain) ((gain) * 100) +#define MBI_TO_DBI(gain) ((gain) / 100) +#define DBM_TO_MBM(gain) ((gain) * 100) +#define MBM_TO_DBM(gain) ((gain) / 100) + /** * ieee80211_action_contains_tpc - checks if the frame contains TPC element * @skb: the skb containing the frame, length will be checked @@ -3430,4 +3755,30 @@ static inline bool for_each_element_completed(const struct element *element, #define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4) #define WLAN_RSNX_CAPA_SAE_H2E BIT(5) +/* + * reduced neighbor report, based on Draft P802.11ax_D5.0, + * section 9.4.2.170 + */ +#define 
IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03 +#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04 +#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08 +#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 8 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 12 + +#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01 +#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02 +#define IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID 0x04 +#define IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID 0x08 +#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10 +#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20 +#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40 + +struct ieee80211_neighbor_ap_info { + u8 tbtt_info_hdr; + u8 tbtt_info_len; + u8 op_class; + u8 channel; +} __packed; + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 9e57c4411734..b3a8d3054af0 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -47,6 +47,8 @@ struct br_ip_list { #define BR_BCAST_FLOOD BIT(14) #define BR_NEIGH_SUPPRESS BIT(15) #define BR_ISOLATED BIT(16) +#define BR_MRP_AWARE BIT(17) +#define BR_MRP_LOST_CONT BIT(18) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/linux/if_team.h b/include/linux/if_team.h index ec7e4bd07f82..add607943c95 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -67,7 +67,7 @@ struct team_port { u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ struct rcu_head rcu; - long mode_priv[0]; + long mode_priv[]; }; static inline struct team_port *team_port_get_rcu(const struct net_device *dev) @@ -102,10 +102,7 @@ static inline bool team_port_dev_txable(const struct net_device *port_dev) static inline void team_netpoll_send_skb(struct team_port *port, struct sk_buff *skb) { - struct netpoll *np = port->np; - - if (np) - netpoll_send_skb(np, skb); + netpoll_send_skb(port->np, skb); } #else static inline void team_netpoll_send_skb(struct team_port *port, diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 463047d0190b..64ce8cd1cfaf 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -38,7 +38,7 @@ struct ip_sf_socklist { unsigned int sl_max; unsigned int sl_count; struct rcu_head rcu; - __be32 sl_addr[0]; + __be32 sl_addr[]; }; #define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ @@ -123,7 +123,7 @@ extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, struct ip_msfilter __user *optval, int __user *optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, - struct group_filter __user *optval, int __user *optlen); + struct sockaddr_storage __user *p); extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif, int sdif); extern void ip_mc_init_dev(struct in_device *); diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 98cb5ce0b0a0..b824877e6d1b 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h @@ -18,7 +18,7 @@ struct ihex_binrec { __be32 addr; __be16 len; - uint8_t data[0]; + uint8_t data[]; } __attribute__((packed)); static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p) diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 5a127c0ed200..a3a838dcf8e4 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -133,62 +133,4 @@ void ad_sd_cleanup_buffer_and_trigger(struct iio_dev 
*indio_dev); int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); -#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ - _storagebits, _shift, _extend_name, _type, _mask_all) \ - { \ - .type = (_type), \ - .differential = (_channel2 == -1 ? 0 : 1), \ - .indexed = 1, \ - .channel = (_channel1), \ - .channel2 = (_channel2), \ - .address = (_address), \ - .extend_name = (_extend_name), \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - BIT(IIO_CHAN_INFO_OFFSET), \ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ - .info_mask_shared_by_all = _mask_all, \ - .scan_index = (_si), \ - .scan_type = { \ - .sign = 'u', \ - .realbits = (_bits), \ - .storagebits = (_storagebits), \ - .shift = (_shift), \ - .endianness = IIO_BE, \ - }, \ - } - -#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ - _storagebits, _shift, NULL, IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \ - _storagebits, _shift, "shorted", IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ - _storagebits, _shift, NULL, IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ - _storagebits, _shift, NULL, IIO_VOLTAGE, 0) - -#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \ - _storagebits, _shift, NULL, IIO_TEMP, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \ - _shift) \ - __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ - _storagebits, _shift, "supply", IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - #endif diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h new file mode 100644 index 000000000000..c5d48e1c2d36 --- /dev/null +++ b/include/linux/iio/adc/adi-axi-adc.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Analog Devices Generic AXI ADC IP core driver/library + * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip + * + * Copyright 2012-2020 Analog Devices Inc. 
+ */ +#ifndef __ADI_AXI_ADC_H__ +#define __ADI_AXI_ADC_H__ + +struct device; +struct iio_chan_spec; + +/** + * struct adi_axi_adc_chip_info - Chip specific information + * @name Chip name + * @id Chip ID (usually product ID) + * @channels Channel specifications of type @struct axi_adc_chan_spec + * @num_channels Number of @channels + * @scale_table Supported scales by the chip; tuples of 2 ints + * @num_scales Number of scales in the table + * @max_rate Maximum sampling rate supported by the device + */ +struct adi_axi_adc_chip_info { + const char *name; + unsigned int id; + + const struct iio_chan_spec *channels; + unsigned int num_channels; + + const unsigned int (*scale_table)[2]; + int num_scales; + + unsigned long max_rate; +}; + +/** + * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC + * @chip_info chip info details for the client ADC + * @preenable_setup op to run in the client before enabling the AXI ADC + * @reg_access IIO debugfs_reg_access hook for the client ADC + * @read_raw IIO read_raw hook for the client ADC + * @write_raw IIO write_raw hook for the client ADC + */ +struct adi_axi_adc_conv { + const struct adi_axi_adc_chip_info *chip_info; + + int (*preenable_setup)(struct adi_axi_adc_conv *conv); + int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg, + unsigned int writeval, unsigned int *readval); + int (*read_raw)(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask); + int (*write_raw)(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + int val, int val2, long mask); +}; + +struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev, + size_t sizeof_priv); + +void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv); + +#endif diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 016d8a068353..ff15c61bf319 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -11,7 +11,7 @@ #include <linux/kref.h> #include <linux/spinlock.h> #include <linux/mutex.h> -#include <linux/iio/buffer.h> +#include <linux/iio/buffer_impl.h> struct iio_dma_buffer_queue; struct iio_dma_buffer_ops; diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index b3a57444a886..0e503db71289 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -14,4 +14,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, const char *channel); void iio_dmaengine_buffer_free(struct iio_buffer *buffer); +struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); + #endif diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index a4d2d8061ef6..a63dc07b7350 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -94,12 +94,6 @@ struct iio_buffer { unsigned int watermark; /* private: */ - /* - * @scan_el_attrs: Control of scan elements if that scan mode - * control method is used. - */ - struct attribute_group *scan_el_attrs; - /* @scan_timestamp: Does the scan mode include a timestamp. */ bool scan_timestamp; @@ -115,9 +109,6 @@ struct iio_buffer { */ struct attribute_group scan_el_group; - /* @stufftoread: Flag to indicate new data. */ - bool stufftoread; - /* @attrs: Standard attributes of the buffer. 
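Returning to the new adi-axi-adc library introduced above: a client converter driver registers an adi_axi_adc_conv and receives a private state area back. A minimal probe sketch; my_chip_info, my_adc_read_raw and struct my_adc_state are illustrative stand-ins assumed to exist elsewhere in the client driver:

static int my_adc_probe(struct spi_device *spi)
{
        struct adi_axi_adc_conv *conv;
        struct my_adc_state *st;

        conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
        if (IS_ERR(conv))
                return PTR_ERR(conv);

        st = adi_axi_adc_conv_priv(conv);       /* the sizeof_priv area */
        conv->chip_info = &my_chip_info;
        conv->read_raw = my_adc_read_raw;       /* IIO hooks routed via the core */
        return 0;
}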
*/ const struct attribute **attrs; diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 2bde8c912d4d..c4118dcb8e05 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -64,15 +64,6 @@ void iio_channel_release(struct iio_channel *chan); struct iio_channel *devm_iio_channel_get(struct device *dev, const char *consumer_channel); /** - * devm_iio_channel_release() - Resource managed version of - * iio_channel_release(). - * @dev: Pointer to consumer device for which resource - * is allocared. - * @chan: The channel to be released. - */ -void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); - -/** * iio_channel_get_all() - get all channels associated with a client * @dev: Pointer to consumer device. * @@ -106,15 +97,6 @@ void iio_channel_release_all(struct iio_channel *chan); */ struct iio_channel *devm_iio_channel_get_all(struct device *dev); -/** - * devm_iio_channel_release_all() - Resource managed version of - * iio_channel_release_all(). - * @dev: Pointer to consumer device for which resource - * is allocared. - * @chan: Array channel to be released. - */ -void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); - struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h index 44d48bb1d39f..e8255c2e33bc 100644 --- a/include/linux/iio/hw-consumer.h +++ b/include/linux/iio/hw-consumer.h @@ -14,7 +14,6 @@ struct iio_hw_consumer; struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev); void iio_hw_consumer_free(struct iio_hw_consumer *hwc); struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev); -void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc); int iio_hw_consumer_enable(struct iio_hw_consumer *hwc); void iio_hw_consumer_disable(struct iio_hw_consumer *hwc); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 17f56a070b20..a1be82e74c93 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -492,7 +492,7 @@ struct iio_buffer_setup_ops { * @buffer: [DRIVER] any buffer present * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @mlock: [DRIVER] lock used to prevent simultaneous device state + * @mlock: [INTERN] lock used to prevent simultaneous device state * changes * @available_scan_masks: [DRIVER] optional array of allowed bitmasks * @masklength: [INTERN] the length of the mask established from @@ -593,17 +593,13 @@ void iio_device_unregister(struct iio_dev *indio_dev); * calls iio_device_register() internally. Refer to that function for more * information. * - * If an iio_dev registered with this function needs to be unregistered - * separately, devm_iio_device_unregister() must be used. - * * RETURNS: * 0 on success, negative error number on failure. 
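With the devm_iio_*_free/unregister variants removed throughout this series, the managed path becomes strictly register-and-forget. A sketch of the resulting probe shape; the my_sensor names are illustrative:

static int my_sensor_probe(struct device *dev)
{
        struct iio_dev *indio_dev;

        indio_dev = devm_iio_device_alloc(dev, sizeof(struct my_sensor_state));
        if (!indio_dev)
                return -ENOMEM;

        /* fill indio_dev->name, ->info, ->channels, ->num_channels ... */

        /* teardown is tied to @dev; no explicit cleanup in remove() */
        return devm_iio_device_register(dev, indio_dev);
}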
*/ #define devm_iio_device_register(dev, indio_dev) \ - __devm_iio_device_register((dev), (indio_dev), THIS_MODULE); + __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, struct module *this_mod); -void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); int iio_device_claim_direct_mode(struct iio_dev *indio_dev); void iio_device_release_direct_mode(struct iio_dev *indio_dev); @@ -694,13 +690,9 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv) } void iio_device_free(struct iio_dev *indio_dev); -int devm_iio_device_match(struct device *dev, void *res, void *data); struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); -void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev); struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...); -void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig); - /** * iio_buffer_enabled() - helper function to test if the buffer is enabled * @indio_dev: IIO device structure for device diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index dd8219138c2e..2df67448f0d1 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -83,10 +83,13 @@ struct adis_data { * @trig: IIO trigger object data * @data: ADIS chip variant specific data * @burst: ADIS burst transfer information + * @burst_extra_len: Burst extra length. Should only be used by devices that can + * dynamically change their burst mode length. * @state_lock: Lock used by the device to protect state * @msg: SPI message object * @xfer: SPI transfer objects to be used for a @msg * @current_page: Some ADIS devices have registers, this selects current page + * @irq_flag: IRQ handling flags as passed to request_irq() * @buffer: Data buffer for information read from the device * @tx: DMA safe TX buffer for SPI transfers * @rx: DMA safe RX buffer for SPI transfers @@ -97,7 +100,7 @@ struct adis { const struct adis_data *data; struct adis_burst *burst; - + unsigned int burst_extra_len; /** * The state_lock is meant to be used during operations that require * a sequence of SPI R/W in order to protect the SPI transfer @@ -113,6 +116,7 @@ struct adis { struct spi_message msg; struct spi_transfer *xfer; unsigned int current_page; + unsigned long irq_flag; void *buffer; uint8_t tx[10] ____cacheline_aligned; @@ -331,6 +335,65 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, return ret; } +int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, + const u32 val, u8 size); +/** + * adis_update_bits_base() - ADIS Update bits function - Locked version + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @mask: Bitmask to change + * @val: Value to be written + * @size: Size of the register to update + * + * Updates the desired bits of @reg in accordance with @mask and @val. 
+ */ +static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, + const u32 mask, const u32 val, u8 size) +{ + int ret; + + mutex_lock(&adis->state_lock); + ret = __adis_update_bits_base(adis, reg, mask, val, size); + mutex_unlock(&adis->state_lock); + return ret; +} + +/** + * adis_update_bits() - Wrapper macro for adis_update_bits_base - Locked version + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @mask: Bitmask to change + * @val: Value to be written + * + * This macro evaluates the size of @val at compile time and calls + * adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for + * @val can lead to undesired behavior if the register to update is 16bit. + */ +#define adis_update_bits(adis, reg, mask, val) ({ \ + BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ + __builtin_choose_expr(sizeof(val) == 4, \ + adis_update_bits_base(adis, reg, mask, val, 4), \ + adis_update_bits_base(adis, reg, mask, val, 2)); \ +}) + +/** + * __adis_update_bits() - Wrapper macro for __adis_update_bits_base + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @mask: Bitmask to change + * @val: Value to be written + * + * This macro evaluates the size of @val at compile time and calls + * __adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for + * @val can lead to undesired behavior if the register to update is 16bit. + */ +#define __adis_update_bits(adis, reg, mask, val) ({ \ + BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ + __builtin_choose_expr(sizeof(val) == 4, \ + __adis_update_bits_base(adis, reg, mask, val, 4), \ + __adis_update_bits_base(adis, reg, mask, val, 2)); \ +}) + int adis_enable_irq(struct adis *adis, bool enable); int __adis_check_status(struct adis *adis); int __adis_initial_startup(struct adis *adis); @@ -441,18 +504,25 @@ int adis_single_conversion(struct iio_dev *indio_dev, * @en burst mode enabled * @reg_cmd register command that triggers burst * @extra_len extra length to account in the SPI RX buffer + * @burst_max_len holds the maximum burst size when the device supports + * more than one burst mode with different sizes */ struct adis_burst { bool en; unsigned int reg_cmd; - unsigned int extra_len; + const u32 extra_len; + const u32 burst_max_len; }; +int +devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, + irq_handler_t trigger_handler); int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)); void adis_cleanup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev); +int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); void adis_remove_trigger(struct adis *adis); @@ -461,6 +531,13 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, #else /* CONFIG_IIO_BUFFER */ +static inline int +devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, + irq_handler_t trigger_handler) +{ + return 0; +} + static inline int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)) { @@ -472,6 +549,12 @@ static inline void adis_cleanup_buffer_and_trigger(struct adis *adis, { } +static inline int devm_adis_probe_trigger(struct adis *adis, + struct iio_dev *indio_dev) +{ + return 0; +} + static inline int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) { 
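The BUILD_BUG_ON()/__builtin_choose_expr() pair above makes the register width a compile-time property of the value's type. A hedged usage sketch; the MY_* register and bit names are illustrative, not from any real ADIS part:

static int my_set_self_test(struct adis *adis, bool enable)
{
        /* A u16 val selects the 2-byte path at compile time; a bare integer
         * literal would be 4 bytes wide and pick the 32-bit path instead. */
        u16 val = enable ? MY_MSC_CTRL_SELF_TEST : 0;

        return adis_update_bits(adis, MY_REG_MSC_CTRL,
                                MY_MSC_CTRL_SELF_TEST, val);
}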
diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index 764659e01b68..1fc1efa7799d 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -9,6 +9,5 @@ struct iio_buffer *iio_kfifo_allocate(void); void iio_kfifo_free(struct iio_buffer *r); struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev); -void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r); #endif diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 84995e2967ac..cad8325903f9 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -141,9 +141,6 @@ int __devm_iio_trigger_register(struct device *dev, **/ void iio_trigger_unregister(struct iio_trigger *trig_info); -void devm_iio_trigger_unregister(struct device *dev, - struct iio_trigger *trig_info); - /** * iio_trigger_set_immutable() - set an immutable trigger on destination * diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h index 238ad30ce166..e99c91799359 100644 --- a/include/linux/iio/triggered_buffer.h +++ b/include/linux/iio/triggered_buffer.h @@ -18,7 +18,5 @@ int devm_iio_triggered_buffer_setup(struct device *dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), const struct iio_buffer_setup_ops *ops); -void devm_iio_triggered_buffer_cleanup(struct device *dev, - struct iio_dev *indio_dev); #endif diff --git a/include/linux/ima.h b/include/linux/ima.h index aefe758f4466..9164e1534ec9 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,6 +18,7 @@ extern int ima_file_check(struct file *file, int mask); extern void ima_post_create_tmpfile(struct inode *inode); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); +extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot); extern int ima_load_data(enum kernel_load_data_id id); extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, @@ -70,6 +71,12 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) return 0; } +static inline int ima_file_mprotect(struct vm_area_struct *vma, + unsigned long prot) +{ + return 0; +} + static inline int ima_load_data(enum kernel_load_data_id id) { return 0; diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index ce9ed1c0602f..0ef2d800fda7 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -71,7 +71,11 @@ static inline size_t inet_diag_msg_attrs_size(void) + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ #endif + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4); /* INET_DIAG_CLASS_ID */ + + nla_total_size(4) /* INET_DIAG_CLASS_ID */ +#ifdef CONFIG_SOCK_CGROUP_DATA + + nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */ +#endif + ; } int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, diff --git a/include/linux/input/gp2ap002a00f.h b/include/linux/input/gp2ap002a00f.h deleted file mode 100644 index 3614a13a8297..000000000000 --- a/include/linux/input/gp2ap002a00f.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _GP2AP002A00F_H_ -#define _GP2AP002A00F_H_ - -#include <linux/i2c.h> - -#define GP2A_I2C_NAME "gp2ap002a00f" - -/** - * struct gp2a_platform_data - Sharp gp2ap002a00f proximity platform data - * @vout_gpio: The gpio connected to the object detected pin (VOUT) - * @wakeup: Set 
to true if the proximity can wake the device from suspend
- * @hw_setup: Callback for setting up hardware such as gpios and vregs
- * @hw_shutdown: Callback for properly shutting down hardware
- */
-struct gp2a_platform_data {
-	int vout_gpio;
-	bool wakeup;
-	int (*hw_setup)(struct i2c_client *client);
-	int (*hw_shutdown)(struct i2c_client *client);
-};
-
-#endif
diff --git a/include/linux/input/lm8333.h b/include/linux/input/lm8333.h
index 79f918c6e8c5..906da5fc06e0 100644
--- a/include/linux/input/lm8333.h
+++ b/include/linux/input/lm8333.h
@@ -1,6 +1,6 @@
 /*
  * public include for LM8333 keypad driver - same license as driver
- * Copyright (C) 2012 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2012 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
  */
 
 #ifndef _LM8333_H
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 9e409bb13642..3b8580bd33c1 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,6 +100,11 @@ static inline bool input_is_mt_axis(int axis)
 bool input_mt_report_slot_state(struct input_dev *dev,
				unsigned int tool_type, bool active);
 
+static inline void input_mt_report_slot_inactive(struct input_dev *dev)
+{
+	input_mt_report_slot_state(dev, 0, false);
+}
+
 void input_mt_report_finger_count(struct input_dev *dev, int count);
 void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
 void input_mt_drop_unused(struct input_dev *dev);
diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h
new file mode 100644
index 000000000000..43e6ea591975
--- /dev/null
+++ b/include/linux/instrumented.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This header provides generic wrappers for memory access instrumentation
+ * that the compiler cannot emit for KASAN and KCSAN.
+ */
+#ifndef _LINUX_INSTRUMENTED_H
+#define _LINUX_INSTRUMENTED_H
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kcsan-checks.h>
+#include <linux/types.h>
+
+/**
+ * instrument_read - instrument regular read access
+ *
+ * Instrument a regular read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_read(const volatile void *v, size_t size)
+{
+	kasan_check_read(v, size);
+	kcsan_check_read(v, size);
+}
+
+/**
+ * instrument_write - instrument regular write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_write(const volatile void *v, size_t size)
+{
+	kasan_check_write(v, size);
+	kcsan_check_write(v, size);
+}
+
+/**
+ * instrument_atomic_read - instrument atomic read access
+ *
+ * Instrument an atomic read access. The instrumentation should be inserted
+ * before the actual read happens.
+ *
+ * @v: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_atomic_read(const volatile void *v, size_t size)
+{
+	kasan_check_read(v, size);
+	kcsan_check_atomic_read(v, size);
+}
+
+/**
+ * instrument_atomic_write - instrument atomic write access
+ *
+ * Instrument an atomic write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @v: address of access
+ * @size: size of access
+ */
+static __always_inline void instrument_atomic_write(const volatile void *v, size_t size)
+{
+	kasan_check_write(v, size);
+	kcsan_check_atomic_write(v, size);
+}
+
+/**
+ * instrument_copy_to_user - instrument reads of copy_to_user
+ *
+ * Instrument reads from kernel memory that are due to copy_to_user (and
+ * variants). The instrumentation must be inserted before the accesses.
+ *
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	kasan_check_read(from, n);
+	kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_copy_from_user - instrument writes of copy_from_user
+ *
+ * Instrument writes to kernel memory that are due to copy_from_user (and
+ * variants). The instrumentation should be inserted before the accesses.
+ *
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ */
+static __always_inline void
+instrument_copy_from_user(const void *to, const void __user *from, unsigned long n)
+{
+	kasan_check_write(to, n);
+	kcsan_check_write(to, n);
+}
+
+#endif /* _LINUX_INSTRUMENTED_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 980234ae0312..4100bd224f5c 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -19,6 +19,7 @@
 #include <linux/iommu.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/dmar.h>
+#include <linux/ioasid.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
@@ -42,6 +43,9 @@
 #define DMA_FL_PTE_PRESENT	BIT_ULL(0)
 #define DMA_FL_PTE_XD		BIT_ULL(63)
 
+#define ADDR_WIDTH_5LEVEL	(57)
+#define ADDR_WIDTH_4LEVEL	(48)
+
 #define CONTEXT_TT_MULTI_LEVEL	0
 #define CONTEXT_TT_DEV_IOTLB	1
 #define CONTEXT_TT_PASS_THROUGH 2
@@ -166,6 +170,7 @@
 #define ecap_smpwc(e)		(((e) >> 48) & 0x1)
 #define ecap_flts(e)		(((e) >> 47) & 0x1)
 #define ecap_slts(e)		(((e) >> 46) & 0x1)
+#define ecap_vcs(e)		(((e) >> 44) & 0x1)
 #define ecap_smts(e)		(((e) >> 43) & 0x1)
 #define ecap_dit(e)		((e >> 41) & 0x1)
 #define ecap_pasid(e)		((e >> 40) & 0x1)
@@ -191,6 +196,9 @@
 #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
 #define ecap_sc_support(e)	((e >> 7) & 0x1) /* Snooping Control */
 
+/* Virtual command interface capability */
+#define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */
+
 /* IOTLB_REG */
 #define DMA_TLB_FLUSH_GRANU_OFFSET  60
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
@@ -284,6 +292,9 @@
 
 /* PRS_REG */
 #define DMA_PRS_PPR	((u32)1)
+#define DMA_PRS_PRO	((u32)2)
+
+#define DMA_VCS_PAS	((u64)1)
 
 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
 do {									\
@@ -324,6 +335,8 @@ enum {
 
 #define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
+#define QI_IWD_FENCE		(((u64)1) << 6)
+#define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)
 
 #define QI_IOTLB_DID(did) 	(((u64)did) << 16)
 #define QI_IOTLB_DR(dr) 	(((u64)dr) << 7)
@@ -331,7 +344,7 @@ enum {
 #define QI_IOTLB_GRAN(gran) 	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
 #define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
 #define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
-#define QI_IOTLB_AM(am)		(((u8)am))
+#define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)
 
 #define QI_CC_FM(fm)		(((u64)fm) << 48)
 #define QI_CC_SID(sid)		(((u64)sid) << 32)
@@ -350,16 +363,21 @@ enum {
 #define QI_PC_DID(did)		(((u64)did) << 16)
 #define QI_PC_GRAN(gran)	(((u64)gran) << 4)
 
-#define QI_PC_ALL_PASIDS	(QI_PC_TYPE | QI_PC_GRAN(0))
-#define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
+/* PASID cache invalidation granu */
+#define QI_PC_ALL_PASIDS	0
+#define QI_PC_PASID_SEL		1
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
-#define QI_EIOTLB_AM(am)	(((u64)am))
+#define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
 #define QI_EIOTLB_PASID(pasid) 	(((u64)pasid) << 32)
 #define QI_EIOTLB_DID(did)	(((u64)did) << 16)
 #define QI_EIOTLB_GRAN(gran) 	(((u64)gran) << 4)
 
+/* QI Dev-IOTLB inv granu */
+#define QI_DEV_IOTLB_GRAN_ALL		1
+#define QI_DEV_IOTLB_GRAN_PASID_SEL	0
+
 #define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
 #define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
 #define QI_DEV_EIOTLB_GLOB(g)	((u64)g)
@@ -480,6 +498,23 @@ struct context_entry {
 	u64 hi;
 };
 
+/* si_domain contains multiple devices */
+#define DOMAIN_FLAG_STATIC_IDENTITY		BIT(0)
+
+/*
+ * When VT-d works in the scalable mode, it allows DMA translation to
+ * happen through either first level or second level page table. This
+ * bit marks that the DMA translation for the domain goes through the
+ * first level page table, otherwise, it goes through the second level.
+ */
+#define DOMAIN_FLAG_USE_FIRST_LEVEL		BIT(1)
+
+/*
+ * Domain represents a virtual machine which demands iommu nested
+ * translation mode support.
+ */
+#define DOMAIN_FLAG_NESTING_MODE		BIT(2)
+
 struct dmar_domain {
 	int	nid;			/* node id */
 
@@ -529,6 +564,7 @@ struct intel_iommu {
 	u64		reg_size; /* size of hw register set */
 	u64		cap;
 	u64		ecap;
+	u64		vccap;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
 	raw_spinlock_t	register_lock; /* protect register handling */
 	int		seq_id;	/* sequence id of the iommu */
@@ -549,6 +585,8 @@ struct intel_iommu {
 #ifdef CONFIG_INTEL_IOMMU_SVM
 	struct page_req_dsc *prq;
 	unsigned char prq_name[16];	/* Name for PRQ interrupt */
+	struct completion prq_complete;
+	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
 	struct q_inval  *qi;		/* Queued invalidation info */
 	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
@@ -571,6 +609,7 @@ struct device_domain_info {
 	struct list_head auxiliary_domains; /* auxiliary domains
					     * attached to this device
					     */
+	u32 segment;		/* PCI segment number */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
 	u16 pfsid;		/* SRIOV physical function source ID */
@@ -595,6 +634,12 @@ static inline void __iommu_flush_cache(
		clflush_cache_range(addr, size);
 }
 
+/* Convert generic struct iommu_domain to private struct dmar_domain */
+static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct dmar_domain, domain);
+}
+
 /*
  * 0: readable
  * 1: writable
@@ -653,9 +698,23 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			  unsigned int size_order, u64 type);
 extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			u16 qdep, u64 addr, unsigned mask);
+
 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih);
-extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+
+void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+			      u32 pasid, u16 qdep, u64 addr,
+			      unsigned int size_order, u64 granu);
+void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
+			  int pasid);
+
+int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
+		   unsigned int count, unsigned long options);
+/*
+ * Options used in qi_submit_sync:
+ * QI_OPT_WAIT_DRAIN - Wait for PRQ drain
completion, spec 6.5.2.8. + */ +#define QI_OPT_WAIT_DRAIN BIT(0) extern int dmar_ir_support(void); @@ -667,12 +726,19 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info, void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); struct dmar_domain *find_domain(struct device *dev); +struct device_domain_info *get_domain_info(struct device *dev); #ifdef CONFIG_INTEL_IOMMU_SVM extern void intel_svm_check(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); - +int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, + struct iommu_gpasid_bind_data *data); +int intel_svm_unbind_gpasid(struct device *dev, int pasid); +struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, + void *drvdata); +void intel_svm_unbind(struct iommu_sva *handle); +int intel_svm_get_pasid(struct iommu_sva *handle); struct svm_dev_ops; struct intel_svm_dev { @@ -680,6 +746,8 @@ struct intel_svm_dev { struct rcu_head rcu; struct device *dev; struct svm_dev_ops *ops; + struct iommu_sva sva; + int pasid; int users; u16 did; u16 dev_iotlb:1; @@ -689,9 +757,11 @@ struct intel_svm_dev { struct intel_svm { struct mmu_notifier notifier; struct mm_struct *mm; + struct intel_iommu *iommu; int flags; int pasid; + int gpasid; /* In case that guest PASID is different from host PASID */ struct list_head devs; struct list_head list; }; diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index d7c403d0dd27..c9e7e601950d 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -21,7 +21,6 @@ struct svm_dev_ops { #define SVM_REQ_EXEC (1<<1) #define SVM_REQ_PRIV (1<<0) - /* * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" * PASID for the current process. Even if a PASID already exists, a new one @@ -44,90 +43,17 @@ struct svm_dev_ops { * do such IOTLB flushes automatically. */ #define SVM_FLAG_SUPERVISOR_MODE (1<<1) - -#ifdef CONFIG_INTEL_IOMMU_SVM - -/** - * intel_svm_bind_mm() - Bind the current process to a PASID - * @dev: Device to be granted access - * @pasid: Address for allocated PASID - * @flags: Flags. Later for requesting supervisor mode, etc. - * @ops: Callbacks to device driver - * - * This function attempts to enable PASID support for the given device. - * If the @pasid argument is non-%NULL, a PASID is allocated for access - * to the MM of the current process. - * - * By using a %NULL value for the @pasid argument, this function can - * be used to simply validate that PASID support is available for the - * given device — i.e. that it is behind an IOMMU which has the - * requisite support, and is enabled. - * - * Page faults are handled transparently by the IOMMU code, and there - * should be no need for the device driver to be involved. If a page - * fault cannot be handled (i.e. is an invalid address rather than - * just needs paging in), then the page request will be completed by - * the core IOMMU code with appropriate status, and the device itself - * can then report the resulting fault to its driver via whatever - * mechanism is appropriate. - * - * Multiple calls from the same process may result in the same PASID - * being re-used. A reference count is kept. 
- */
-extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
-			     struct svm_dev_ops *ops);
-
-/**
- * intel_svm_unbind_mm() - Unbind a specified PASID
- * @dev:	Device for which PASID was allocated
- * @pasid:	PASID value to be unbound
- *
- * This function allows a PASID to be retired when the device no
- * longer requires access to the address space of a given process.
- *
- * If the use count for the PASID in question reaches zero, the
- * PASID is revoked and may no longer be used by hardware.
- *
- * Device drivers are required to ensure that no access (including
- * page requests) is currently outstanding for the PASID in question,
- * before calling this function.
+/*
+ * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
+ * processes. Compared to the host bind, the primary differences are:
+ * 1. mm life cycle management
+ * 2. fault reporting
  */
-extern int intel_svm_unbind_mm(struct device *dev, int pasid);
-
-/**
- * intel_svm_is_pasid_valid() - check if pasid is valid
- * @dev:	Device for which PASID was allocated
- * @pasid:	PASID value to be checked
- *
- * This function checks if the specified pasid is still valid. A
- * valid pasid means the backing mm is still having a valid user.
- * For kernel callers init_mm is always valid. for other mm, if mm->mm_users
- * is non-zero, it is valid.
- *
- * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid
- * 1 if pasid is valid.
+#define SVM_FLAG_GUEST_MODE	(1<<2)
+/*
+ * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
+ * which requires guest and host PASID translation in both directions.
  */
-extern int intel_svm_is_pasid_valid(struct device *dev, int pasid);
-
-#else /* CONFIG_INTEL_IOMMU_SVM */
-
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
-				    int flags, struct svm_dev_ops *ops)
-{
-	return -ENOSYS;
-}
-
-static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
-{
-	BUG();
-}
-
-static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
-{
-	return -EINVAL;
-}
-#endif /* CONFIG_INTEL_IOMMU_SVM */
-
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
+#define SVM_FLAG_GUEST_PASID	(1<<3)
 
 #endif /* __INTEL_SVM_H__ */
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
index d70a914cba11..3a63d98613fc 100644
--- a/include/linux/interconnect.h
+++ b/include/linux/interconnect.h
@@ -28,9 +28,14 @@ struct device;
 struct icc_path *icc_get(struct device *dev, const int src_id,
			 const int dst_id);
 struct icc_path *of_icc_get(struct device *dev, const char *name);
+struct icc_path *devm_of_icc_get(struct device *dev, const char *name);
+struct icc_path *of_icc_get_by_index(struct device *dev, int idx);
 void icc_put(struct icc_path *path);
+int icc_enable(struct icc_path *path);
+int icc_disable(struct icc_path *path);
 int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
 void icc_set_tag(struct icc_path *path, u32 tag);
+const char *icc_get_name(struct icc_path *path);
 
 #else
 
@@ -46,10 +51,31 @@ static inline struct icc_path *of_icc_get(struct device *dev,
	return NULL;
 }
 
+static inline struct icc_path *devm_of_icc_get(struct device *dev,
+					       const char *name)
+{
+	return NULL;
+}
+
+static inline struct icc_path *of_icc_get_by_index(struct device *dev, int idx)
+{
+	return NULL;
+}
+
 static inline void icc_put(struct icc_path *path)
 {
 }
 
+static inline int icc_enable(struct icc_path *path)
+{
+	return 0;
+}
+
+static inline int icc_disable(struct icc_path *path)
+{
+
return 0; +} + static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) { return 0; @@ -59,6 +85,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag) { } +static inline const char *icc_get_name(struct icc_path *path) +{ + return NULL; +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 80f637c3a6f3..5db970b6615a 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -760,8 +760,10 @@ extern int arch_early_irq_init(void); /* * We want to know which function is an entrypoint of a hardirq or a softirq. */ -#define __irq_entry __attribute__((__section__(".irqentry.text"))) -#define __softirq_entry \ - __attribute__((__section__(".softirqentry.text"))) +#ifndef __irq_entry +# define __irq_entry __attribute__((__section__(".irqentry.text"))) +#endif + +#define __softirq_entry __attribute__((__section__(".softirqentry.text"))) #endif diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index b336622612f3..0beaa3eba155 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/bug.h> #include <linux/io.h> +#include <linux/pgtable.h> #include <asm/page.h> /* @@ -99,7 +100,6 @@ io_mapping_unmap(void __iomem *vaddr) #else #include <linux/uaccess.h> -#include <asm/pgtable.h> /* Create the io_mapping object*/ static inline struct io_mapping * diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 8b09463dae0d..4d1d3c3469e9 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -155,8 +155,7 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); -int iomap_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages, const struct iomap_ops *ops); +void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); int iomap_set_page_dirty(struct page *page); int iomap_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); @@ -178,7 +177,7 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - loff_t start, loff_t len, const struct iomap_ops *ops); + u64 start, u64 len, const struct iomap_ops *ops); loff_t iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops); loff_t iomap_seek_data(struct inode *inode, loff_t offset, @@ -252,6 +251,8 @@ int iomap_writepages(struct address_space *mapping, struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); + blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap, + struct bio *bio, loff_t file_offset); }; ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 7ef8b0bda695..5f0b7859d2eb 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -53,8 +53,6 @@ struct iommu_fault_event; typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); -typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *, - void *); typedef int (*iommu_dev_fault_handler_t)(struct 
iommu_fault *, void *); struct iommu_domain_geometry { @@ -171,25 +169,6 @@ enum iommu_dev_features { #define IOMMU_PASID_INVALID (-1U) -/** - * struct iommu_sva_ops - device driver callbacks for an SVA context - * - * @mm_exit: called when the mm is about to be torn down by exit_mmap. After - * @mm_exit returns, the device must not issue any more transaction - * with the PASID given as argument. - * - * The @mm_exit handler is allowed to sleep. Be careful about the - * locks taken in @mm_exit, because they might lead to deadlocks if - * they are also held when dropping references to the mm. Consider the - * following call chain: - * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A) - * Using mmput_async() prevents this scenario. - * - */ -struct iommu_sva_ops { - iommu_mm_exit_handler_t mm_exit; -}; - #ifdef CONFIG_IOMMU_API /** @@ -223,8 +202,10 @@ struct iommu_iotlb_gather { * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue * @iova_to_phys: translate iova to physical address - * @add_device: add device to iommu grouping - * @remove_device: remove device from iommu grouping + * @probe_device: Add device to iommu driver handling + * @release_device: Remove device from iommu driver handling + * @probe_finalize: Do final setup work after the device is added to an IOMMU + * group and attached to the groups domain * @device_group: find iommu group for a particular device * @domain_get_attr: Query domain attributes * @domain_set_attr: Change domain attributes @@ -248,6 +229,10 @@ struct iommu_iotlb_gather { * @cache_invalidate: invalidate translation caches * @sva_bind_gpasid: bind guest pasid and mm * @sva_unbind_gpasid: unbind guest pasid and mm + * @def_domain_type: device default domain type, return value: + * - IOMMU_DOMAIN_IDENTITY: must use an identity domain + * - IOMMU_DOMAIN_DMA: must use a dma domain + * - 0: use the default setting * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops */ @@ -269,8 +254,9 @@ struct iommu_ops { void (*iotlb_sync)(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); - int (*add_device)(struct device *dev); - void (*remove_device)(struct device *dev); + struct iommu_device *(*probe_device)(struct device *dev); + void (*release_device)(struct device *dev); + void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); int (*domain_get_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); @@ -318,6 +304,8 @@ struct iommu_ops { int (*sva_unbind_gpasid)(struct device *dev, int pasid); + int (*def_domain_type)(struct device *dev); + unsigned long pgsize_bitmap; struct module *owner; }; @@ -369,6 +357,7 @@ struct iommu_fault_param { * * @fault_param: IOMMU detected device fault reporting data * @fwspec: IOMMU fwspec data + * @iommu_dev: IOMMU device this device is linked to * @priv: IOMMU Driver private data * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 
@@ -378,6 +367,7 @@ struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; struct iommu_fwspec *fwspec; + struct iommu_device *iommu_dev; void *priv; }; @@ -430,6 +420,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); +extern int bus_iommu_probe(struct bus_type *bus); extern bool iommu_present(struct bus_type *bus); extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); @@ -466,12 +457,26 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); +/** + * iommu_map_sgtable - Map the given buffer to the IOMMU domain + * @domain: The IOMMU domain to perform the mapping + * @iova: The start address to map the buffer + * @sgt: The sg_table object describing the buffer + * @prot: IOMMU protection bits + * + * Creates a mapping at @iova for the buffer described by a scatterlist + * stored in the given sg_table object in the provided IOMMU domain. + */ +static inline size_t iommu_map_sgtable(struct iommu_domain *domain, + unsigned long iova, struct sg_table *sgt, int prot) +{ + return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); +} + extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list); -extern int iommu_request_dm_for_dev(struct device *dev); -extern int iommu_request_dma_domain_for_dev(struct device *dev); extern void iommu_set_default_passthrough(bool cmd_line); extern void iommu_set_default_translated(bool cmd_line); extern bool iommu_default_passthrough(void); @@ -515,7 +520,6 @@ extern int iommu_page_response(struct device *dev, struct iommu_page_response *msg); extern int iommu_group_id(struct iommu_group *group); -extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, @@ -605,7 +609,6 @@ struct iommu_fwspec { */ struct iommu_sva { struct device *dev; - const struct iommu_sva_ops *ops; }; int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, @@ -653,8 +656,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata); void iommu_sva_unbind_device(struct iommu_sva *handle); -int iommu_sva_set_ops(struct iommu_sva *handle, - const struct iommu_sva_ops *ops); int iommu_sva_get_pasid(struct iommu_sva *handle); #else /* CONFIG_IOMMU_API */ @@ -793,16 +794,6 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group, return -ENODEV; } -static inline int iommu_request_dm_for_dev(struct device *dev) -{ - return -ENODEV; -} - -static inline int iommu_request_dma_domain_for_dev(struct device *dev) -{ - return -ENODEV; -} - static inline void iommu_set_default_passthrough(bool cmd_line) { } @@ -1058,12 +1049,6 @@ static inline void iommu_sva_unbind_device(struct iommu_sva *handle) { } -static inline int iommu_sva_set_ops(struct iommu_sva *handle, - const struct iommu_sva_ops *ops) -{ - return -EINVAL; -} - static inline int 
iommu_sva_get_pasid(struct iommu_sva *handle)
 {
	return IOMMU_PASID_INVALID;
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index cb20c733b15a..bc89ac625f26 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -58,6 +58,48 @@
 })
 
 /**
+ * read_poll_timeout_atomic - Periodically poll an address until a condition is
+ *				met or a timeout occurs
+ * @op: accessor function (takes @args as its arguments)
+ * @val: Variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should
+ *            be less than ~10us since udelay is used (see
+ *            Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @delay_before_read: if true, delay @delay_us before the first read
+ * @args: arguments for @op poll
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value at @args is stored in @val.
+ *
+ * When available, you'll probably want to use one of the specialized
+ * macros defined below rather than this macro directly.
+ */
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+					delay_before_read, args...) \
+({ \
+	u64 __timeout_us = (timeout_us); \
+	unsigned long __delay_us = (delay_us); \
+	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+	if (delay_before_read && __delay_us) \
+		udelay(__delay_us); \
+	for (;;) { \
+		(val) = op(args); \
+		if (cond) \
+			break; \
+		if (__timeout_us && \
+		    ktime_compare(ktime_get(), __timeout) > 0) { \
+			(val) = op(args); \
+			break; \
+		} \
+		if (__delay_us) \
+			udelay(__delay_us); \
+	} \
+	(cond) ? 0 : -ETIMEDOUT; \
+})
+
+/**
  * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
  * @op: accessor function (takes @addr as its only argument)
  * @addr: Address to poll
@@ -96,25 +138,7 @@
  * macros defined below rather than this macro directly.
  */
 #define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
-({ \
-	u64 __timeout_us = (timeout_us); \
-	unsigned long __delay_us = (delay_us); \
-	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
-	for (;;) { \
-		(val) = op(addr); \
-		if (cond) \
-			break; \
-		if (__timeout_us && \
-		    ktime_compare(ktime_get(), __timeout) > 0) { \
-			(val) = op(addr); \
-			break; \
-		} \
-		if (__delay_us) \
-			udelay(__delay_us); \
-	} \
-	(cond) ?
0 : -ETIMEDOUT; \ -}) - + read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr) #define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index a9b9170b5dd2..6c2b06fe8beb 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -103,6 +103,7 @@ struct resource { #define IORESOURCE_MEM_32BIT (3<<3) #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ #define IORESOURCE_MEM_EXPANSIONROM (1<<6) +#define IORESOURCE_MEM_DRIVER_MANAGED (1<<7) /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) @@ -301,5 +302,11 @@ struct resource *devm_request_free_mem_region(struct device *dev, struct resource *request_free_mem_region(struct resource *base, unsigned long size, const char *name); +#ifdef CONFIG_IO_STRICT_DEVMEM +void revoke_devmem(struct resource *res); +#else +static inline void revoke_devmem(struct resource *res) { }; +#endif + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index c309f43bde45..a06a78c67f19 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -68,6 +68,8 @@ struct ipc_namespace { struct user_namespace *user_ns; struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; } __randomize_layout; diff --git a/include/linux/irq.h b/include/linux/irq.h index 9315fbb87db3..8d5bc2c237d7 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -573,8 +573,6 @@ enum { #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS struct irqaction; -extern int setup_irq(unsigned int irq, struct irqaction *new); -extern void remove_irq(unsigned int irq, struct irqaction *act); extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); @@ -1043,7 +1041,7 @@ struct irq_chip_generic { unsigned long unused; struct irq_domain *domain; struct list_head list; - struct irq_chip_type chip_types[0]; + struct irq_chip_type chip_types[]; }; /** @@ -1079,7 +1077,7 @@ struct irq_domain_chip_generic { unsigned int irq_flags_to_clear; unsigned int irq_flags_to_set; enum irq_gc_flags gc_flags; - struct irq_chip_generic *gc[0]; + struct irq_chip_generic *gc[]; }; /* Generic chip callback functions */ diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h index 4500d453a63e..ab831e5ae748 100644 --- a/include/linux/irq_sim.h +++ b/include/linux/irq_sim.h @@ -1,41 +1,26 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl> + * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com> */ #ifndef _LINUX_IRQ_SIM_H #define _LINUX_IRQ_SIM_H -#include <linux/irq_work.h> #include <linux/device.h> +#include <linux/fwnode.h> +#include <linux/irqdomain.h> /* * Provides a framework for allocating simulated interrupts which can be * requested like normal irqs and enqueued from process context. 
 */
-struct irq_sim_work_ctx {
-	struct irq_work		work;
-	unsigned long		*pending;
-};
-
-struct irq_sim_irq_ctx {
-	int			irqnum;
-	bool			enabled;
-};
-
-struct irq_sim {
-	struct irq_sim_work_ctx	work_ctx;
-	int			irq_base;
-	unsigned int		irq_count;
-	struct irq_sim_irq_ctx	*irqs;
-};
-
-int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs);
-int devm_irq_sim_init(struct device *dev, struct irq_sim *sim,
-		      unsigned int num_irqs);
-void irq_sim_fini(struct irq_sim *sim);
-void irq_sim_fire(struct irq_sim *sim, unsigned int offset);
-int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset);
+struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
+					 unsigned int num_irqs);
+struct irq_domain *devm_irq_domain_create_sim(struct device *dev,
+					      struct fwnode_handle *fwnode,
+					      unsigned int num_irqs);
+void irq_domain_remove_sim(struct irq_domain *domain);
 
 #endif /* _LINUX_IRQ_SIM_H */
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 3b752e80c017..2735da5f839e 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -13,6 +13,8 @@
  * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
  */
 
+/* flags share CSD_FLAG_ space */
+
 #define IRQ_WORK_PENDING	BIT(0)
 #define IRQ_WORK_BUSY		BIT(1)
 
@@ -23,9 +25,12 @@
 
 #define IRQ_WORK_CLAIMED	(IRQ_WORK_PENDING | IRQ_WORK_BUSY)
 
+/*
+ * structure shares layout with call_single_data_t.
+ */
 struct irq_work {
-	atomic_t flags;
	struct llist_node llnode;
+	atomic_t flags;
	void (*func)(struct irq_work *);
 };
 
@@ -53,9 +58,11 @@ void irq_work_sync(struct irq_work *work);
 void irq_work_run(void);
 bool irq_work_needs_cpu(void);
+void irq_work_single(void *arg);
 #else
 static inline bool irq_work_needs_cpu(void) { return false; }
 static inline void irq_work_run(void) { }
+static inline void irq_work_single(void *arg) { }
 #endif
 
 #endif /* _LINUX_IRQ_WORK_H */
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 765d9b769b69..6c36b6cc3edf 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -243,6 +243,7 @@
 
 #define GICR_TYPER_PLPIS		(1U << 0)
 #define GICR_TYPER_VLPIS		(1U << 1)
+#define GICR_TYPER_DIRTY		(1U << 2)
 #define GICR_TYPER_DirectLPIS		(1U << 3)
 #define GICR_TYPER_LAST			(1U << 4)
 #define GICR_TYPER_RVPEID		(1U << 7)
@@ -686,6 +687,7 @@ struct rdists {
	bool			has_vlpis;
	bool			has_rvpeid;
	bool			has_direct_lpi;
+	bool			has_vpend_valid_dirty;
 };
 
 struct irq_domain;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 8d062e86d954..b37350c4fe37 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -450,6 +450,7 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
				irq_hw_number_t hwirq, struct irq_chip *chip,
				void *chip_data, irq_flow_handler_t handler,
				void *handler_data, const char *handler_name);
+extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
			unsigned int flags, unsigned int size,
@@ -491,7 +492,6 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
					 irq_hw_number_t hwirq,
					 struct irq_chip *chip,
					 void *chip_data);
-extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
 extern void irq_domain_free_irqs_common(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 61a9ced3aa50..6384d2813ded 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h @@ -19,16 +19,20 @@ #ifdef CONFIG_PROVE_LOCKING extern void lockdep_softirqs_on(unsigned long ip); extern void lockdep_softirqs_off(unsigned long ip); + extern void lockdep_hardirqs_on_prepare(unsigned long ip); extern void lockdep_hardirqs_on(unsigned long ip); extern void lockdep_hardirqs_off(unsigned long ip); #else static inline void lockdep_softirqs_on(unsigned long ip) { } static inline void lockdep_softirqs_off(unsigned long ip) { } + static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { } static inline void lockdep_hardirqs_on(unsigned long ip) { } static inline void lockdep_hardirqs_off(unsigned long ip) { } #endif #ifdef CONFIG_TRACE_IRQFLAGS + extern void trace_hardirqs_on_prepare(void); + extern void trace_hardirqs_off_finish(void); extern void trace_hardirqs_on(void); extern void trace_hardirqs_off(void); # define lockdep_hardirq_context(p) ((p)->hardirq_context) @@ -96,6 +100,8 @@ do { \ } while (0) #else +# define trace_hardirqs_on_prepare() do { } while (0) +# define trace_hardirqs_off_finish() do { } while (0) # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) # define lockdep_hardirq_context(p) 0 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f613d8529863..d56128df2aff 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -766,6 +766,11 @@ struct journal_s int j_errno; /** + * @j_abort_mutex: Lock the whole aborting procedure. + */ + struct mutex j_abort_mutex; + + /** * @j_sb_buffer: The first part of the superblock buffer. */ struct buffer_head *j_sb_buffer; @@ -1247,7 +1252,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ -#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ /* * Function declarations for the journaling transaction and buffer diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 657a83b943f0..98338dc6b5d2 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -165,9 +165,9 @@ static inline int kallsyms_show_value(void) #endif /*CONFIG_KALLSYMS*/ -static inline void print_ip_sym(unsigned long ip) +static inline void print_ip_sym(const char *loglvl, unsigned long ip) { - printk("[<%px>] %pS\n", (void *) ip, (void *) ip); + printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 31314ca7c635..82522e996c76 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -11,8 +11,8 @@ struct task_struct; #ifdef CONFIG_KASAN +#include <linux/pgtable.h> #include <asm/kasan.h> -#include <asm/pgtable.h> extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index cc8fa109cfa3..9d12c970f18f 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -2,6 +2,8 @@ #ifndef __LINUX_KCONFIG_H #define __LINUX_KCONFIG_H +/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. 
See help in Kconfig) */ + #include <generated/autoconf.h> #ifdef CONFIG_CPU_BIG_ENDIAN diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h new file mode 100644 index 000000000000..7b0b9c44f5f3 --- /dev/null +++ b/include/linux/kcsan-checks.h @@ -0,0 +1,430 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_CHECKS_H +#define _LINUX_KCSAN_CHECKS_H + +/* Note: Only include what is already included by compiler.h. */ +#include <linux/compiler_attributes.h> +#include <linux/types.h> + +/* + * ACCESS TYPE MODIFIERS + * + * <none>: normal read access; + * WRITE : write access; + * ATOMIC: access is atomic; + * ASSERT: access is not a regular access, but an assertion; + * SCOPED: access is a scoped access; + */ +#define KCSAN_ACCESS_WRITE 0x1 +#define KCSAN_ACCESS_ATOMIC 0x2 +#define KCSAN_ACCESS_ASSERT 0x4 +#define KCSAN_ACCESS_SCOPED 0x8 + +/* + * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used + * even in compilation units that selectively disable KCSAN, but must use KCSAN + * to validate access to an address. Never use these in header files! + */ +#ifdef CONFIG_KCSAN +/** + * __kcsan_check_access - check generic access for races + * + * @ptr: address of access + * @size: size of access + * @type: access type modifier + */ +void __kcsan_check_access(const volatile void *ptr, size_t size, int type); + +/** + * kcsan_disable_current - disable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_disable_current(void); + +/** + * kcsan_enable_current - re-enable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_enable_current(void); +void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */ + +/** + * kcsan_nestable_atomic_begin - begin nestable atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_nestable_atomic_begin(void); + +/** + * kcsan_nestable_atomic_end - end nestable atomic region + */ +void kcsan_nestable_atomic_end(void); + +/** + * kcsan_flat_atomic_begin - begin flat atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_flat_atomic_begin(void); + +/** + * kcsan_flat_atomic_end - end flat atomic region + */ +void kcsan_flat_atomic_end(void); + +/** + * kcsan_atomic_next - consider following accesses as atomic + * + * Force treating the next n memory accesses for the current context as atomic + * operations. + * + * @n: number of following memory accesses to treat as atomic. + */ +void kcsan_atomic_next(int n); + +/** + * kcsan_set_access_mask - set access mask + * + * Set the access mask for all accesses for the current context if non-zero. + * Only value changes to bits set in the mask will be reported. + * + * @mask: bitmask + */ +void kcsan_set_access_mask(unsigned long mask); + +/* Scoped access information. */ +struct kcsan_scoped_access { + struct list_head list; + const volatile void *ptr; + size_t size; + int type; +}; +/* + * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes + * out of scope; relies on attribute "cleanup", which is supported by all + * compilers that support KCSAN. 
+ */ +#define __kcsan_cleanup_scoped \ + __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access))) + +/** + * kcsan_begin_scoped_access - begin scoped access + * + * Begin scoped access and initialize @sa, which will cause KCSAN to + * continuously check the memory range in the current thread until + * kcsan_end_scoped_access() is called for @sa. + * + * Scoped accesses are implemented by appending @sa to an internal list for the + * current execution context, and then checked on every call into the KCSAN + * runtime. + * + * @ptr: address of access + * @size: size of access + * @type: access type modifier + * @sa: struct kcsan_scoped_access to use for the scope of the access + */ +struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa); + +/** + * kcsan_end_scoped_access - end scoped access + * + * End a scoped access, which will stop KCSAN checking the memory range. + * Requires that kcsan_begin_scoped_access() was previously called once for @sa. + * + * @sa: a previously initialized struct kcsan_scoped_access + */ +void kcsan_end_scoped_access(struct kcsan_scoped_access *sa); + + +#else /* CONFIG_KCSAN */ + +static inline void __kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } + +static inline void kcsan_disable_current(void) { } +static inline void kcsan_enable_current(void) { } +static inline void kcsan_enable_current_nowarn(void) { } +static inline void kcsan_nestable_atomic_begin(void) { } +static inline void kcsan_nestable_atomic_end(void) { } +static inline void kcsan_flat_atomic_begin(void) { } +static inline void kcsan_flat_atomic_end(void) { } +static inline void kcsan_atomic_next(int n) { } +static inline void kcsan_set_access_mask(unsigned long mask) { } + +struct kcsan_scoped_access { }; +#define __kcsan_cleanup_scoped __maybe_unused +static inline struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa) { return sa; } +static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { } + +#endif /* CONFIG_KCSAN */ + +#ifdef __SANITIZE_THREAD__ +/* + * Only calls into the runtime when the particular compilation unit has KCSAN + * instrumentation enabled. May be used in header files. + */ +#define kcsan_check_access __kcsan_check_access + +/* + * Only use these to disable KCSAN for accesses in the current compilation unit; + * calls into libraries may still perform KCSAN checks. 
+ */
+#define __kcsan_disable_current kcsan_disable_current
+#define __kcsan_enable_current kcsan_enable_current_nowarn
+#else
+static inline void kcsan_check_access(const volatile void *ptr, size_t size,
+				       int type) { }
+static inline void __kcsan_enable_current(void) { }
+static inline void __kcsan_disable_current(void) { }
+#endif
+
+/**
+ * __kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0)
+
+/**
+ * __kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_write(ptr, size)                                         \
+	__kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/**
+ * kcsan_check_read - check regular read access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0)
+
+/**
+ * kcsan_check_write - check regular write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_write(ptr, size)                                           \
+	kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
+
+/*
+ * Check for atomic accesses: if atomic accesses are not ignored, this simply
+ * aliases to kcsan_check_access(), otherwise becomes a no-op.
+ */
+#ifdef CONFIG_KCSAN_IGNORE_ATOMICS
+#define kcsan_check_atomic_read(...)	do { } while (0)
+#define kcsan_check_atomic_write(...)	do { } while (0)
+#else
+#define kcsan_check_atomic_read(ptr, size)                                     \
+	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
+#define kcsan_check_atomic_write(ptr, size)                                    \
+	kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#endif
+
+/**
+ * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var
+ *
+ * Assert that there are no concurrent writes to @var; other readers are
+ * allowed. This assertion can be used to specify properties of concurrent
+ * code, where violation cannot be detected as a normal data race.
+ *
+ * For example, if we only have a single writer, but multiple concurrent
+ * readers, to avoid data races, all these accesses must be marked; even
+ * concurrent marked writes racing with the single writer are bugs.
+ * Unfortunately, due to being marked, they are no longer data races. For cases
+ * like these, we can use the macro as follows:
+ *
+ * .. code-block:: c
+ *
+ *	void writer(void) {
+ *		spin_lock(&update_foo_lock);
+ *		ASSERT_EXCLUSIVE_WRITER(shared_foo);
+ *		WRITE_ONCE(shared_foo, ...);
+ *		spin_unlock(&update_foo_lock);
+ *	}
+ *	void reader(void) {
+ *		// update_foo_lock does not need to be held!
+ *		... = READ_ONCE(shared_foo);
+ *	}
+ *
+ * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough
+ * checking if a clear scope where no concurrent writes are expected exists.
+ *
+ * @var: variable to assert on
+ */
+#define ASSERT_EXCLUSIVE_WRITER(var)                                           \
+	__kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT)
+
+/*
+ * Helper macros for implementation of ASSERT_EXCLUSIVE_*_SCOPED(). @id is
+ * expected to be unique for the scope in which instances of kcsan_scoped_access
+ * are declared.
+ */ +#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix +#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \ + struct kcsan_scoped_access __kcsan_scoped_name(id, _) \ + __kcsan_cleanup_scoped; \ + struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \ + __maybe_unused = kcsan_begin_scoped_access( \ + &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \ + &__kcsan_scoped_name(id, _)) + +/** + * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_WRITER(). + * + * Assert that there are no concurrent writes to @var for the duration of the + * scope in which it is introduced. This provides a better way to fully cover + * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and + * increases the likelihood for KCSAN to detect racing accesses. + * + * For example, it allows finding race-condition bugs that only occur due to + * state changes within the scope itself: + * + * .. code-block:: c + * + * void writer(void) { + * spin_lock(&update_foo_lock); + * { + * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo); + * WRITE_ONCE(shared_foo, 42); + * ... + * // shared_foo should still be 42 here! + * } + * spin_unlock(&update_foo_lock); + * } + * void buggy(void) { + * if (READ_ONCE(shared_foo) == 42) + * WRITE_ONCE(shared_foo, 1); // bug! + * } + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__) + +/** + * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var + * + * Assert that there are no concurrent accesses to @var (no readers nor + * writers). This assertion can be used to specify properties of concurrent + * code, where violation cannot be detected as a normal data race. + * + * For example, where exclusive access is expected after determining no other + * users of an object are left, but the object is not actually freed. We can + * check that this property actually holds as follows: + * + * .. code-block:: c + * + * if (refcount_dec_and_test(&obj->refcnt)) { + * ASSERT_EXCLUSIVE_ACCESS(*obj); + * do_some_cleanup(obj); + * release_for_reuse(obj); + * } + * + * Note: ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent accesses are expected exists. + * + * Note: For cases where the object is freed, `KASAN <kasan.html>`_ is a better + * fit to detect use-after-free bugs. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS(var) \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT) + +/** + * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_ACCESS(). + * + * Assert that there are no concurrent accesses to @var (no readers nor writers) + * for the entire duration of the scope in which it is introduced. This provides + * a better way to fully cover the enclosing scope, compared to multiple + * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect + * racing accesses. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__) + +/** + * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var + * + * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(). 
+ * + * Assert that there are no concurrent writes to a subset of bits in @var; + * concurrent readers are permitted. This assertion captures more detailed + * bit-level properties, compared to the other (word granularity) assertions. + * Only the bits set in @mask are checked for concurrent modifications, while + * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits + * are ignored. + * + * Use this for variables, where some bits must not be modified concurrently, + * yet other bits are expected to be modified concurrently. + * + * For example, variables where, after initialization, some bits are read-only, + * but other bits may still be modified concurrently. A reader may wish to + * assert that this is true as follows: + * + * .. code-block:: c + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed + * to access the masked bits only, and KCSAN optimistically assumes it is + * therefore safe, even in the presence of data races, and marking it with + * READ_ONCE() is optional from KCSAN's point-of-view. We caution, however, that + * it may still be advisable to do so, since we cannot reason about all compiler + * optimizations when it comes to bit manipulations (on the reader and writer + * side). If you are sure nothing can go wrong, we can write the above simply + * as: + * + * .. code-block:: c + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Another example, where this may be used, is when certain bits of @var may + * only be modified when holding the appropriate lock, but other bits may still + * be modified concurrently. Writers, where other bits may change concurrently, + * could use the assertion as follows: + * + * .. code-block:: c + * + * spin_lock(&foo_lock); + * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK); + * old_flags = flags; + * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT); + * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... } + * spin_unlock(&foo_lock); + * + * @var: variable to assert on + * @mask: only check for modifications to bits set in @mask + */ +#define ASSERT_EXCLUSIVE_BITS(var, mask) \ + do { \ + kcsan_set_access_mask(mask); \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\ + kcsan_set_access_mask(0); \ + kcsan_atomic_next(1); \ + } while (0) + +#endif /* _LINUX_KCSAN_CHECKS_H */ diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h new file mode 100644 index 000000000000..53340d8789f9 --- /dev/null +++ b/include/linux/kcsan.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_H +#define _LINUX_KCSAN_H + +#include <linux/kcsan-checks.h> +#include <linux/types.h> + +#ifdef CONFIG_KCSAN + +/* + * Context for each thread of execution: for tasks, this is stored in + * task_struct, and interrupts access internal per-CPU storage. + */ +struct kcsan_ctx { + int disable_count; /* disable counter */ + int atomic_next; /* number of following atomic ops */ + + /* + * We distinguish between: (a) nestable atomic regions that may contain + * other nestable regions; and (b) flat atomic regions that do not keep + * track of nesting. Both (a) and (b) are entirely independent of each + * other, and a flat region may be started in a nestable region or + * vice-versa. 
+ * + * This is required because, for example, in the annotations for + * seqlocks, we declare seqlock writer critical sections as (a) nestable + * atomic regions, but reader critical sections as (b) flat atomic + * regions, but have encountered cases where seqlock reader critical + * sections are contained within writer critical sections (the opposite + * may be possible, too). + * + * To support these cases, we independently track the depth of nesting + * for (a), and whether the leaf level is flat for (b). + */ + int atomic_nest_count; + bool in_flat_atomic; + + /* + * Access mask for all accesses if non-zero. + */ + unsigned long access_mask; + + /* List of scoped accesses. */ + struct list_head scoped_accesses; +}; + +/** + * kcsan_init - initialize KCSAN runtime + */ +void kcsan_init(void); + +#else /* CONFIG_KCSAN */ + +static inline void kcsan_init(void) { } + +#endif /* CONFIG_KCSAN */ + +#endif /* _LINUX_KCSAN_H */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 24cd447659e0..0125a677b67f 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -125,7 +125,7 @@ extern const char *kdb_diemsg; #define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do * not use keyboard */ -extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */ +extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */ extern void kdb_save_flags(void); extern void kdb_restore_flags(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 9b7a8d74a9d6..82d91547d122 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -520,6 +520,12 @@ static inline u32 int_sqrt64(u64 x) } #endif +#ifdef CONFIG_SMP +extern unsigned int sysctl_oops_all_cpu_backtrace; +#else +#define sysctl_oops_all_cpu_backtrace 0 +#endif /* CONFIG_SMP */ + extern void bust_spinlocks(int yes); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; @@ -528,6 +534,8 @@ extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int panic_on_warn; +extern unsigned long panic_on_taint; +extern bool panic_on_taint_nousertaint; extern int sysctl_panic_on_rcu_stall; extern int sysctl_panic_on_stackoverflow; @@ -596,6 +604,7 @@ extern enum system_states { #define TAINT_AUX 16 #define TAINT_RANDSTRUCT 17 #define TAINT_FLAGS_COUNT 18 +#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) struct taint_flag { char c_true; /* character printed when tainted */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 1776eb2e43a4..ea67910ae6b7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -208,7 +208,7 @@ struct crash_mem_range { struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; - struct crash_mem_range ranges[0]; + struct crash_mem_range ranges[]; }; extern int crash_exclude_mem_range(struct crash_mem *mem, diff --git a/include/linux/key.h b/include/linux/key.h index 6cf8e71cf8b7..0f2e24f13c2b 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -71,6 +71,23 @@ struct net; #define KEY_PERM_UNDEF 0xffffffff +/* + * The permissions required on a key that we're looking up. 
+ */ +enum key_need_perm { + KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */ + KEY_NEED_VIEW, /* Require permission to view attributes */ + KEY_NEED_READ, /* Require permission to read content */ + KEY_NEED_WRITE, /* Require permission to update / modify */ + KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */ + KEY_NEED_LINK, /* Require permission to link */ + KEY_NEED_SETATTR, /* Require permission to change attributes */ + KEY_NEED_UNLINK, /* Require permission to unlink key */ + KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */ + KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */ + KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */ +}; + struct seq_file; struct user_struct; struct signal_struct; @@ -176,6 +193,9 @@ struct key { struct list_head graveyard_link; struct rb_node serial_node; }; +#ifdef CONFIG_KEY_NOTIFICATIONS + struct watch_list *watchers; /* Entities watching this key for changes */ +#endif struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ @@ -417,20 +437,9 @@ static inline key_serial_t key_serial(const struct key *key) extern void key_set_timeout(struct key *, unsigned); extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, - key_perm_t perm); + enum key_need_perm need_perm); extern void key_free_user_ns(struct user_namespace *); -/* - * The permissions required on a key that we're looking up. - */ -#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */ -#define KEY_NEED_READ 0x02 /* Require permission to read content */ -#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */ -#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */ -#define KEY_NEED_LINK 0x10 /* Require permission to link */ -#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ -#define KEY_NEED_ALL 0x3f /* All the above permissions */ - static inline short key_read_state(const struct key *key) { /* Barrier versus mark_key_instantiated(). */ diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h new file mode 100644 index 000000000000..18f3f5346843 --- /dev/null +++ b/include/linux/keyslot-manager.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef __LINUX_KEYSLOT_MANAGER_H +#define __LINUX_KEYSLOT_MANAGER_H + +#include <linux/bio.h> +#include <linux/blk-crypto.h> + +struct blk_keyslot_manager; + +/** + * struct blk_ksm_ll_ops - functions to manage keyslots in hardware + * @keyslot_program: Program the specified key into the specified slot in the + * inline encryption hardware. + * @keyslot_evict: Evict key from the specified keyslot in the hardware. + * The key is provided so that e.g. dm layers can evict + * keys from the devices that they map over. + * Returns 0 on success, -errno otherwise. + * + * This structure should be provided by storage device drivers when they set up + * a keyslot manager - this structure holds the function ptrs that the keyslot + * manager will use to manipulate keyslots in the hardware. 
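+ *
+ * A minimal driver-side sketch (illustrative only; the foo_* names
+ * stand in for real driver code):
+ *
+ * .. code-block:: c
+ *
+ *	static int foo_keyslot_program(struct blk_keyslot_manager *ksm,
+ *				       const struct blk_crypto_key *key,
+ *				       unsigned int slot)
+ *	{
+ *		return foo_hw_program_key(slot, key->raw, key->size);
+ *	}
+ *
+ *	static const struct blk_ksm_ll_ops foo_ksm_ops = {
+ *		.keyslot_program	= foo_keyslot_program,
+ *		.keyslot_evict		= foo_keyslot_evict,
+ *	};
+ *
+ * with foo_keyslot_evict defined analogously.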
+ */ +struct blk_ksm_ll_ops { + int (*keyslot_program)(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot); + int (*keyslot_evict)(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot); +}; + +struct blk_keyslot_manager { + /* + * The struct blk_ksm_ll_ops that this keyslot manager will use + * to perform operations like programming and evicting keys on the + * device + */ + struct blk_ksm_ll_ops ksm_ll_ops; + + /* + * The maximum number of bytes supported for specifying the data unit + * number. + */ + unsigned int max_dun_bytes_supported; + + /* + * Array of size BLK_ENCRYPTION_MODE_MAX of bitmasks that represents + * whether a crypto mode and data unit size are supported. The i'th + * bit of crypto_modes_supported[crypto_mode] is set iff a data unit + * size of (1 << i) is supported. We only support data unit sizes + * that are powers of 2. + */ + unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + + /* Device for runtime power management (NULL if none) */ + struct device *dev; + + /* Here onwards are *private* fields for internal keyslot manager use */ + + unsigned int num_slots; + + /* Protects programming and evicting keys from the device */ + struct rw_semaphore lock; + + /* List of idle slots, with least recently used slot at front */ + wait_queue_head_t idle_slots_wait_queue; + struct list_head idle_slots; + spinlock_t idle_slots_lock; + + /* + * Hash table which maps struct blk_crypto_key to keyslots, so that we + * can find a key's keyslot in O(1) time rather than O(num_slots). + * Protected by 'lock'. + */ + struct hlist_head *slot_hashtable; + unsigned int log_slot_ht_size; + + /* Per-keyslot data */ + struct blk_ksm_keyslot *slots; +}; + +int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots); + +blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + struct blk_ksm_keyslot **slot_ptr); + +unsigned int blk_ksm_get_slot_idx(struct blk_ksm_keyslot *slot); + +void blk_ksm_put_slot(struct blk_ksm_keyslot *slot); + +bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm, + const struct blk_crypto_config *cfg); + +int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key); + +void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm); + +void blk_ksm_destroy(struct blk_keyslot_manager *ksm); + +#endif /* __LINUX_KEYSLOT_MANAGER_H */ diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index b072aeb1fd78..c62d76478adc 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -269,6 +269,9 @@ struct kgdb_arch { * @write_char: Pointer to a function that will write one char. * @flush: Pointer to a function that will flush any pending writes. * @init: Pointer to a function that will initialize the device. + * @deinit: Pointer to a function that will deinit the device. Implies that + * this I/O driver is temporary and expects to be replaced. Called when + * an I/O driver is replaced or explicitly unregistered. * @pre_exception: Pointer to a function that will do any prep work for * the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work @@ -282,6 +285,7 @@ struct kgdb_io { void (*write_char) (u8); void (*flush) (void); int (*init) (void); + void (*deinit) (void); void (*pre_exception) (void); void (*post_exception) (void); int is_console; @@ -298,7 +302,7 @@ extern bool kgdb_nmi_poll_knock(void); #else static inline int kgdb_register_nmi_console(void) { return 0; } static inline int kgdb_unregister_nmi_console(void) { return 0; } -static inline bool kgdb_nmi_poll_knock(void) { return 1; } +static inline bool kgdb_nmi_poll_knock(void) { return true; } #endif extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); @@ -323,7 +327,7 @@ extern void gdbstub_exit(int status); extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ - (raw_smp_processor_id() == atomic_read(&kgdb_active)) + (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) extern bool dbg_is_early; extern void __init dbg_late_init(void); extern void kgdb_panic(const char *msg); diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 2e7a1e032c71..3378bcbe585e 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -25,9 +25,8 @@ enum kmsg_dump_reason { KMSG_DUMP_PANIC, KMSG_DUMP_OOPS, KMSG_DUMP_EMERG, - KMSG_DUMP_RESTART, - KMSG_DUMP_HALT, - KMSG_DUMP_POWEROFF, + KMSG_DUMP_SHUTDOWN, + KMSG_DUMP_MAX }; /** @@ -71,6 +70,8 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper); int kmsg_dump_register(struct kmsg_dumper *dumper); int kmsg_dump_unregister(struct kmsg_dumper *dumper); + +const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason); #else static inline void kmsg_dump(enum kmsg_dump_reason reason) { @@ -112,6 +113,11 @@ static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) { return -EINVAL; } + +static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason) +{ + return "Disabled"; +} #endif #endif /* _LINUX_KMSG_DUMP_H */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index e2ca0a292e21..6cba088bee24 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -7,7 +7,7 @@ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2006-2008 Novell Inc. * - * Please read Documentation/kobject.txt before using the kobject + * Please read Documentation/core-api/kobject.rst before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. */ @@ -29,7 +29,7 @@ #include <linux/uidgid.h> #define UEVENT_HELPER_PATH_LEN 256 -#define UEVENT_NUM_ENVP 32 /* number of env pointers */ +#define UEVENT_NUM_ENVP 64 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #ifdef CONFIG_UEVENT_HELPER diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index 069aa2ebef90..2b5b64256cf4 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h @@ -8,7 +8,7 @@ * * Split from kobject.h by David Howells (dhowells@redhat.com) * - * Please read Documentation/kobject.txt before using the kobject + * Please read Documentation/core-api/kobject.rst before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. 
*/ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 04bdaf01112c..6adf90f248d7 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -161,7 +161,7 @@ struct kretprobe_instance { kprobe_opcode_t *ret_addr; struct task_struct *task; void *fp; - char data[0]; + char data[]; }; struct kretprobe_blackpoint { @@ -312,7 +312,7 @@ DEFINE_INSN_CACHE_OPS(optinsn); #ifdef CONFIG_SYSCTL extern int sysctl_kprobes_optimization; extern int proc_kprobes_optimization_handler(struct ctl_table *table, - int write, void __user *buffer, + int write, void *buffer, size_t *length, loff_t *ppos); #endif extern void wait_for_kprobe_optimizer(void); @@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } +extern struct kprobe kprobe_busy; +void kprobe_busy_begin(void); +void kprobe_busy_end(void); + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 8bbcaad7ef0f..65b81e0c494d 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -5,6 +5,8 @@ #include <linux/err.h> #include <linux/sched.h> +struct mm_struct; + __printf(4, 5) struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void *data, @@ -57,6 +59,7 @@ bool kthread_should_stop(void); bool kthread_should_park(void); bool __kthread_should_park(struct task_struct *k); bool kthread_freezable_should_stop(bool *was_frozen); +void *kthread_func(struct task_struct *k); void *kthread_data(struct task_struct *k); void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); @@ -198,6 +201,9 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); void kthread_destroy_worker(struct kthread_worker *worker); +void kthread_use_mm(struct mm_struct *mm); +void kthread_unuse_mm(struct mm_struct *mm); + struct cgroup_subsys_state; #ifdef CONFIG_BLK_CGROUP diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 6d58beb65454..d564855243d8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -23,7 +23,7 @@ #include <linux/irqflags.h> #include <linux/context_tracking.h> #include <linux/irqbypass.h> -#include <linux/swait.h> +#include <linux/rcuwait.h> #include <linux/refcount.h> #include <linux/nospec.h> #include <asm/signal.h> @@ -206,6 +206,7 @@ struct kvm_async_pf { unsigned long addr; struct kvm_arch_async_pf arch; bool wakeup_all; + bool notpresent_injected; }; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); @@ -277,7 +278,7 @@ struct kvm_vcpu { struct mutex mutex; struct kvm_run *run; - struct swait_queue_head wq; + struct rcuwait wait; struct pid __rcu *pid; int sigset_active; sigset_t sigset; @@ -318,7 +319,6 @@ struct kvm_vcpu { bool preempted; bool ready; struct kvm_vcpu_arch arch; - struct dentry *debugfs_dentry; }; static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) @@ -409,7 +409,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ - struct hlist_head map[0]; + struct hlist_head map[]; }; #endif @@ -503,6 +503,7 @@ struct kvm { struct srcu_struct srcu; struct srcu_struct irq_srcu; pid_t userspace_pid; + unsigned int max_halt_poll_ns; }; #define kvm_err(fmt, ...) 
\ @@ -733,6 +734,9 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); +int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned int offset, + unsigned long len); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, @@ -813,8 +817,11 @@ void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); +bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except); bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, unsigned long *vcpu_bitmap); @@ -866,7 +873,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); -int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); int kvm_arch_init(void *opaque); void kvm_arch_exit(void); @@ -881,7 +888,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS -void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); #endif int kvm_arch_hardware_enable(void); @@ -956,12 +963,12 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) } #endif -static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) +static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) { #ifdef __KVM_HAVE_ARCH_WQP - return vcpu->arch.wqp; + return vcpu->arch.waitp; #else - return &vcpu->wq; + return &vcpu->wait; #endif } @@ -1048,7 +1055,7 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } - if (gfn >= memslots[start].base_gfn && + if (start < slots->used_slots && gfn >= memslots[start].base_gfn && gfn < memslots[start].base_gfn + memslots[start].npages) { atomic_set(&slots->lru_slot, start); return &memslots[start]; @@ -1130,6 +1137,11 @@ struct kvm_stats_debugfs_item { #define KVM_DBGFS_GET_MODE(dbgfs_item) \ ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644) +#define VM_STAT(n, x, ...) \ + { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ } +#define VCPU_STAT(n, x, ...) 
\ + { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ } + extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; @@ -1352,6 +1364,12 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ +static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot) +{ + return (memslot && memslot->id < KVM_USER_MEM_SLOTS && + !(memslot->flags & KVM_MEMSLOT_INVALID)); +} + struct kvm_vcpu *kvm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); @@ -1403,8 +1421,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, } #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, bool blockable); +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h index 9022f0c2e2e4..abe3d95f795b 100644 --- a/include/linux/latencytop.h +++ b/include/linux/latencytop.h @@ -38,8 +38,8 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter) void clear_tsk_latency_tracing(struct task_struct *p); -extern int sysctl_latencytop(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); +int sysctl_latencytop(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); #else diff --git a/include/linux/libata.h b/include/linux/libata.h index cffa4714bfa8..e7e5256817dc 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -22,6 +22,7 @@ #include <linux/acpi.h> #include <linux/cdrom.h> #include <linux/sched.h> +#include <linux/async.h> /* * Define if arch has non-standard setup. 
This is a _PCI_ standard @@ -609,7 +610,7 @@ struct ata_host { struct task_struct *eh_owner; struct ata_port *simplex_claimed; /* channel owning the DMA */ - struct ata_port *ports[0]; + struct ata_port *ports[]; }; struct ata_queued_cmd { @@ -872,6 +873,8 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; void *private_data; @@ -1092,6 +1095,11 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, #define ATA_SCSI_COMPAT_IOCTL /* empty */ #endif extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); +#if IS_ENABLED(CONFIG_ATA) +bool ata_scsi_dma_need_drain(struct request *rq); +#else +#define ata_scsi_dma_need_drain NULL +#endif extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, unsigned int cmd, void __user *arg); extern bool ata_link_online(struct ata_link *link); @@ -1387,6 +1395,7 @@ extern struct device_attribute *ata_common_sdev_attrs[]; .ioctl = ata_scsi_ioctl, \ ATA_SCSI_COMPAT_IOCTL \ .queuecommand = ata_scsi_queuecmd, \ + .dma_need_drain = ata_scsi_dma_need_drain, \ .can_queue = ATA_DEF_QUEUE, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ .this_id = ATA_SHT_THIS_ID, \ diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h new file mode 100644 index 000000000000..17b5943727d5 --- /dev/null +++ b/include/linux/linear_range.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020 ROHM Semiconductors */ + +#ifndef LINEAR_RANGE_H +#define LINEAR_RANGE_H + +#include <linux/types.h> + +/** + * struct linear_range - table of selector - value pairs + * + * Define a lookup-table for a range of values. Intended to help when looking + * for a register value matching a certain physical measure (like voltage). + * Usable when an increment of one in the register always results in a constant + * increment of the physical measure (like voltage).
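+ *
+ * For example (illustrative values only): selectors 0..15 mapping to
+ * 800000 uV + selector * 50000 uV could be described as:
+ *
+ * .. code-block:: c
+ *
+ *	static const struct linear_range foo_range = {
+ *		.min = 800000,
+ *		.min_sel = 0,
+ *		.max_sel = 15,
+ *		.step = 50000,
+ *	};
+ *
+ * with which linear_range_get_value(&foo_range, 2, &val) returns 0 and
+ * stores 900000 in val.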
+ * + * @min: Lowest value in range + * @min_sel: Lowest selector for range + * @max_sel: Highest selector for range + * @step: Value step size + */ +struct linear_range { + unsigned int min; + unsigned int min_sel; + unsigned int max_sel; + unsigned int step; +}; + +unsigned int linear_range_values_in_range(const struct linear_range *r); +unsigned int linear_range_values_in_range_array(const struct linear_range *r, + int ranges); +unsigned int linear_range_get_max_value(const struct linear_range *r); + +int linear_range_get_value(const struct linear_range *r, unsigned int selector, + unsigned int *val); +int linear_range_get_value_array(const struct linear_range *r, int ranges, + unsigned int selector, unsigned int *val); +int linear_range_get_selector_low(const struct linear_range *r, + unsigned int val, unsigned int *selector, + bool *found); +int linear_range_get_selector_high(const struct linear_range *r, + unsigned int val, unsigned int *selector, + bool *found); +int linear_range_get_selector_low_array(const struct linear_range *r, + int ranges, unsigned int val, + unsigned int *selector, bool *found); + +#endif diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 9280209d1f62..d796ec20d114 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -105,7 +105,7 @@ /* === DEPRECATED annotations === */ -#ifndef CONFIG_X86 +#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS #ifndef GLOBAL /* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */ #define GLOBAL(name) \ @@ -118,10 +118,10 @@ #define ENTRY(name) \ SYM_FUNC_START(name) #endif -#endif /* CONFIG_X86 */ +#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */ #endif /* LINKER_SCRIPT */ -#ifndef CONFIG_X86 +#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS #ifndef WEAK /* deprecated, use SYM_FUNC_START_WEAK* */ #define WEAK(name) \ @@ -143,7 +143,7 @@ #define ENDPROC(name) \ SYM_FUNC_END(name) #endif -#endif /* CONFIG_X86 */ +#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */ /* === generic annotations === */ diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index d5ceb2839a2d..9dcaa3e582c9 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -34,7 +34,7 @@ struct list_lru_one { struct list_lru_memcg { struct rcu_head rcu; /* array of per cgroup lists, indexed by memcg_cache_id */ - struct list_lru_one *lru[0]; + struct list_lru_one *lru[]; }; struct list_lru_node { diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index e894e74905f3..2614247a9781 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -195,9 +195,6 @@ struct klp_patch { int klp_enable_patch(struct klp_patch *); -void arch_klp_init_object_loaded(struct klp_patch *patch, - struct klp_object *obj); - /* Called from the module loader during module coming/going states */ int klp_module_coming(struct module *mod); void klp_module_going(struct module *mod); @@ -234,6 +231,11 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor); struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id); struct klp_state *klp_get_prev_state(unsigned long id); +int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, + const char *shstrtab, const char *strtab, + unsigned int symindex, unsigned int secindex, + const char *objname); + #else /* !CONFIG_LIVEPATCH */ static inline int klp_module_coming(struct module *mod) { return 0; } @@ -242,6 +244,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; } static inline void klp_update_patch_state(struct 
task_struct *task) {} static inline void klp_copy_process(struct task_struct *child) {} +static inline +int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs, + const char *shstrtab, const char *strtab, + unsigned int symindex, unsigned int secindex, + const char *objname) +{ + return 0; +} + #endif /* CONFIG_LIVEPATCH */ #endif /* _LINUX_LIVEPATCH_H_ */ diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h new file mode 100644 index 000000000000..e55010fa7329 --- /dev/null +++ b/include/linux/local_lock.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LOCAL_LOCK_H +#define _LINUX_LOCAL_LOCK_H + +#include <linux/local_lock_internal.h> + +/** + * local_lock_init - Runtime initialize a lock instance + */ +#define local_lock_init(lock) __local_lock_init(lock) + +/** + * local_lock - Acquire a per CPU local lock + * @lock: The lock variable + */ +#define local_lock(lock) __local_lock(lock) + +/** + * local_lock_irq - Acquire a per CPU local lock and disable interrupts + * @lock: The lock variable + */ +#define local_lock_irq(lock) __local_lock_irq(lock) + +/** + * local_lock_irqsave - Acquire a per CPU local lock, save and disable + * interrupts + * @lock: The lock variable + * @flags: Storage for interrupt flags + */ +#define local_lock_irqsave(lock, flags) \ + __local_lock_irqsave(lock, flags) + +/** + * local_unlock - Release a per CPU local lock + * @lock: The lock variable + */ +#define local_unlock(lock) __local_unlock(lock) + +/** + * local_unlock_irq - Release a per CPU local lock and enable interrupts + * @lock: The lock variable + */ +#define local_unlock_irq(lock) __local_unlock_irq(lock) + +/** + * local_unlock_irqrestore - Release a per CPU local lock and restore + * interrupt flags + * @lock: The lock variable + * @flags: Interrupt flags to restore + */ +#define local_unlock_irqrestore(lock, flags) \ + __local_unlock_irqrestore(lock, flags) + +#endif diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h new file mode 100644 index 000000000000..4a8795b21d77 --- /dev/null +++ b/include/linux/local_lock_internal.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LOCAL_LOCK_H +# error "Do not include directly, include linux/local_lock.h" +#endif + +#include <linux/percpu-defs.h> +#include <linux/lockdep.h> + +typedef struct { +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; + struct task_struct *owner; +#endif +} local_lock_t; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LL_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } +#else +# define LL_DEP_MAP_INIT(lockname) +#endif + +#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) } + +#define __local_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ + lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\ +} while (0) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static inline void local_lock_acquire(local_lock_t *l) +{ + lock_map_acquire(&l->dep_map); + DEBUG_LOCKS_WARN_ON(l->owner); + l->owner = current; +} + +static inline void local_lock_release(local_lock_t *l) +{ + DEBUG_LOCKS_WARN_ON(l->owner != current); + l->owner = NULL; + lock_map_release(&l->dep_map); +} + +#else /* CONFIG_DEBUG_LOCK_ALLOC */ +static inline void local_lock_acquire(local_lock_t *l) { } +static inline void local_lock_release(local_lock_t *l) { } +#endif /* !CONFIG_DEBUG_LOCK_ALLOC */ 
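[A minimal usage sketch of the local_lock API defined above — illustrative only, not part of this diff; foo_stats is a hypothetical per-CPU structure:]

.. code-block:: c

	struct foo_stats {
		local_lock_t lock;
		unsigned long count;
	};

	static DEFINE_PER_CPU(struct foo_stats, foo_stats) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void foo_count_event(void)
	{
		/* Serializes against other users of this CPU's foo_stats */
		local_lock(&foo_stats.lock);
		this_cpu_ptr(&foo_stats)->count++;
		local_unlock(&foo_stats.lock);
	}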
+ +#define __local_lock(lock) \ + do { \ + preempt_disable(); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_lock_irq(lock) \ + do { \ + local_irq_disable(); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_lock_irqsave(lock, flags) \ + do { \ + local_irq_save(flags); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_unlock(lock) \ + do { \ + local_lock_release(this_cpu_ptr(lock)); \ + preempt_enable(); \ + } while (0) + +#define __local_unlock_irq(lock) \ + do { \ + local_lock_release(this_cpu_ptr(lock)); \ + local_irq_enable(); \ + } while (0) + +#define __local_unlock_irqrestore(lock, flags) \ + do { \ + local_lock_release(this_cpu_ptr(lock)); \ + local_irq_restore(flags); \ + } while (0) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 206774ac6946..8fce5c98a4b0 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -308,8 +308,27 @@ extern void lockdep_set_selftest_task(struct task_struct *task); extern void lockdep_init_task(struct task_struct *task); -extern void lockdep_off(void); -extern void lockdep_on(void); +/* + * Split the recursion counter in two to readily detect 'off' vs recursion. + */ +#define LOCKDEP_RECURSION_BITS 16 +#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS) +#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1) + +/* + * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due + * to header dependencies. + */ + +#define lockdep_off() \ +do { \ + current->lockdep_recursion += LOCKDEP_OFF; \ +} while (0) + +#define lockdep_on() \ +do { \ + current->lockdep_recursion -= LOCKDEP_OFF; \ +} while (0) extern void lockdep_register_key(struct lock_class_key *key); extern void lockdep_unregister_key(struct lock_class_key *key); diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 99d629fd9944..28f23b341c1c 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -75,6 +75,7 @@ struct common_audit_data { #define LSM_AUDIT_DATA_IBPKEY 13 #define LSM_AUDIT_DATA_IBENDPORT 14 #define LSM_AUDIT_DATA_LOCKDOWN 15 +#define LSM_AUDIT_DATA_NOTIFICATION 16 union { struct path path; struct dentry *dentry; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 9cd4455528e5..6791813cd439 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -49,13 +49,14 @@ LSM_HOOK(int, 0, syslog, int type) LSM_HOOK(int, 0, settime, const struct timespec64 *ts, const struct timezone *tz) LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) -LSM_HOOK(int, 0, bprm_set_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm) +LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file) LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) -LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc, +LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, struct fs_parameter *param) LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) @@ -190,6 +191,8 @@ LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf, loff_t size, enum
kernel_read_file_id id) LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, int flags) +LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old, + int flags) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) @@ -243,7 +246,7 @@ LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, char *name, char **value) LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) LSM_HOOK(int, 0, ismaclabel, const char *name) -LSM_HOOK(int, 0, secid_to_secctx, u32 secid, char **secdata, +LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata, u32 *seclen) LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) @@ -253,6 +256,15 @@ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, u32 *ctxlen) +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, + const struct cred *cred, struct watch_notification *n) +#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */ + +#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) +LSM_HOOK(int, 0, watch_key, struct key *key) +#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */ + #ifdef CONFIG_SECURITY_NETWORK LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, struct sock *newsk) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 988ca0df7824..95b7c1d32062 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -34,40 +34,48 @@ * * Security hooks for program execution operations. * - * @bprm_set_creds: - * Save security information in the bprm->security field, typically based - * on information about the bprm->file, for later use by the apply_creds - * hook. This hook may also optionally check permissions (e.g. for - * transitions between security domains). - * This hook may be called multiple times during a single execve, e.g. for - * interpreters. The hook can tell whether it has already been called by - * checking to see if @bprm->security is non-NULL. If so, then the hook - * may decide either to retain the security information saved earlier or - * to replace it. The hook must set @bprm->secureexec to 1 if a "secure - * exec" has happened as a result of this hook call. The flag is used to - * indicate the need for a sanitized execution environment, and is also - * passed in the ELF auxiliary table on the initial stack to indicate - * whether libc should enable secure mode. + * @bprm_creds_for_exec: + * If the setup in prepare_exec_creds did not setup @bprm->cred->security + * properly for executing @bprm->file, update the LSM's portion of + * @bprm->cred->security to be what commit_creds needs to install for the + * new program. This hook may also optionally check permissions + * (e.g. for transitions between security domains). + * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to + * request libc enable secure mode. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * @bprm_creds_from_file: + * If @file is setpcap, suid, sgid or otherwise marked to change + * privilege upon exec, update @bprm->cred to reflect that change. 
+ * This is called after finding the binary that will be executed + * without an interpreter. This ensures that the credentials will not + * be derived from a script that the binary will need to reopen, which, + * when reopened, may end up being a completely different file. This + * hook may also optionally check permissions (e.g. for transitions + * between security domains). + * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to + * request libc enable secure mode. + * The hook must add to @bprm->per_clear any personality flags that + * should be cleared from current->personality. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_check_security: * This hook mediates the point when a search for a binary handler will - * begin. It allows a check the @bprm->security value which is set in the - * preceding set_creds call. The primary difference from set_creds is - * that the argv list and envp list are reliably available in @bprm. This - * hook may be called multiple times during a single execve; and in each - * pass set_creds is called first. + * begin. It allows a check against the @bprm->cred->security value + * which was set in the preceding creds_for_exec call. The argv list and + * envp list are reliably available in @bprm. This hook may be called + * multiple times during a single execve. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_committing_creds: * Prepare to install the new security attributes of a process being * transformed by an execve operation, based on the old credentials * pointed to by @current->cred and the information set in @bprm->cred by - * the bprm_set_creds hook. @bprm points to the linux_binprm structure. - * This hook is a good place to perform state changes on the process such - * as closing open file descriptors to which access will no longer be - * granted when the attributes are changed. This is called immediately - * before commit_creds(). + * the bprm_creds_for_exec hook. @bprm points to the linux_binprm + * structure. This hook is a good place to perform state changes on the + * process such as closing open file descriptors to which access will no + * longer be granted when the attributes are changed. This is called + * immediately before commit_creds(). * @bprm_committed_creds: * Tidy up after the installation of the new security attributes of a * process being transformed by an execve operation. The new credentials @@ -77,7 +85,7 @@ * state. This is called immediately after commit_creds(). * * Security hooks for mount using fs_context. - * [See also Documentation/filesystems/mount_api.txt] + * [See also Documentation/filesystems/mount_api.rst] * * @fs_context_dup: * Allocate and attach a security structure to sc->security. This pointer @@ -651,6 +659,15 @@ * @old is the set of credentials that are being replaced * @flags contains one of the LSM_SETID_* values. * Return 0 on success. + * @task_fix_setgid: + * Update the module's state after setting one or more of the group + * identity attributes of the current process. The @flags parameter + * indicates which of the set*gid system calls invoked this hook. + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaced. + * @flags contains one of the LSM_SETID_* values. + * Return 0 on success.
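+ *
+ * A sketch of how a security module might implement this hook
+ * (illustrative; the foo_* names are stand-ins):
+ *
+ * .. code-block:: c
+ *
+ *	static int foo_task_fix_setgid(struct cred *new,
+ *				       const struct cred *old, int flags)
+ *	{
+ *		if (flags == LSM_SETID_FS)
+ *			return 0;
+ *		return foo_gid_change_allowed(old, new) ? 0 : -EPERM;
+ *	}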
* @task_setpgid: * Check permission before setting the process group identifier of the * process @p to @pgid. @@ -1437,6 +1454,20 @@ * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. * + * Security hooks for the general notification queue: + * + * @post_notification: + * Check to see if a watch notification can be posted to a particular + * queue. + * @w_cred: The credentials of whoever set the watch. + * @cred: The event-triggerer's credentials + * @n: The notification being posted + * + * @watch_key: + * Check to see if a process is allowed to watch for event notifications + * from a key or keyring. + * @key: The key to watch. + * * Security hooks for using the eBPF maps and programs functionalities through * eBPF syscalls. * diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 917e4bb2ed71..36d2e0673d03 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -9,6 +9,13 @@ #include <uapi/linux/mdio.h> #include <linux/mod_devicetable.h> +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. + */ +#define MII_ADDR_C45 (1<<30) +#define MII_DEVADDR_C45_SHIFT 16 +#define MII_REGADDR_C45_MASK GENMASK(15, 0) + struct gpio_desc; struct mii_bus; @@ -326,6 +333,30 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set); +static inline u32 mdiobus_c45_addr(int devad, u16 regnum) +{ + return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum; +} + +static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad, + u16 regnum) +{ + return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum)); +} + +static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad, + u16 regnum, u16 val) +{ + return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), + val); +} + +static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad, + u16 regnum) +{ + return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum)); +} + int mdiobus_register_device(struct mdio_device *mdiodev); int mdiobus_unregister_device(struct mdio_device *mdiodev); bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 6bc37a731d27..017fae833d4a 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -41,7 +41,7 @@ enum memblock_flags { /** * struct memblock_region - represents a memory region - * @base: physical address of the region + * @base: base address of the region * @size: size of the region * @flags: memory region attributes * @nid: NUMA node id @@ -50,7 +50,7 @@ struct memblock_region { phys_addr_t base; phys_addr_t size; enum memblock_flags flags; -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_NEED_MULTIPLE_NODES int nid; #endif }; @@ -75,7 +75,7 @@ struct memblock_type { * struct memblock - memblock allocator metadata * @bottom_up: is bottom up direction?
* @current_limit: physical address of the current allocation limit - * @memory: usabe memory regions + * @memory: usable memory regions * @reserved: reserved memory regions * @physmem: all physical memory */ @@ -215,7 +215,6 @@ static inline bool memblock_is_nomap(struct memblock_region *m) return m->flags & MEMBLOCK_NOMAP; } -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, @@ -234,7 +233,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, @@ -275,6 +273,9 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \ for (; i != U64_MAX; \ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) + +int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); + #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /** @@ -310,10 +311,10 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ nid, flags, p_start, p_end, p_nid) -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid); +#ifdef CONFIG_NEED_MULTIPLE_NODES static inline void memblock_set_region_node(struct memblock_region *r, int nid) { r->nid = nid; @@ -332,7 +333,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r) { return 0; } -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#endif /* CONFIG_NEED_MULTIPLE_NODES */ /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 1b4150ff64be..e77197a62809 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -29,10 +29,7 @@ struct kmem_cache; /* Cgroup-specific page state, on top of universal node page state */ enum memcg_stat_item { - MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS, - MEMCG_RSS, - MEMCG_RSS_HUGE, - MEMCG_SWAP, + MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, MEMCG_SOCK, /* XXX: why are these zone and not node counters? 
*/ MEMCG_KERNEL_STACK_KB, @@ -45,6 +42,7 @@ enum memcg_memory_event { MEMCG_MAX, MEMCG_OOM, MEMCG_OOM_KILL, + MEMCG_SWAP_HIGH, MEMCG_SWAP_MAX, MEMCG_SWAP_FAIL, MEMCG_NR_MEMORY_EVENTS, @@ -106,7 +104,7 @@ struct lruvec_stat { */ struct memcg_shrinker_map { struct rcu_head rcu; - unsigned long map[0]; + unsigned long map[]; }; /* @@ -148,7 +146,7 @@ struct mem_cgroup_threshold_ary { /* Size of entries[] */ unsigned int size; /* Array of thresholds */ - struct mem_cgroup_threshold entries[0]; + struct mem_cgroup_threshold entries[]; }; struct mem_cgroup_thresholds { @@ -215,9 +213,6 @@ struct mem_cgroup { struct page_counter kmem; struct page_counter tcpmem; - /* Upper bound of normal memory consumption range */ - unsigned long high; - /* Range enforcement for interrupt charges */ struct work_struct high_work; @@ -360,16 +355,8 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, struct mem_cgroup *memcg); -int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcgp, - bool compound); -int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcgp, - bool compound); -void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, - bool lrucare, bool compound); -void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, - bool compound); +int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); + void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); @@ -570,7 +557,7 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); #ifdef CONFIG_MEMCG_SWAP -extern int do_swap_account; +extern bool cgroup_memory_noswap; #endif struct mem_cgroup *lock_page_memcg(struct page *page); @@ -710,16 +697,17 @@ static inline void mod_lruvec_state(struct lruvec *lruvec, static inline void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) { + struct page *head = compound_head(page); /* rmap on tail pages */ pg_data_t *pgdat = page_pgdat(page); struct lruvec *lruvec; /* Untracked pages have no memcg, no lruvec. 
Update only the node */ - if (!page->mem_cgroup) { + if (!head->mem_cgroup) { __mod_node_page_state(pgdat, idx, val); return; } - lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat); + lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat); __mod_lruvec_state(lruvec, idx, val); } @@ -783,6 +771,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg, atomic_long_inc(&memcg->memory_events[event]); cgroup_file_notify(&memcg->events_file); + if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) + break; if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS) break; } while ((memcg = parent_mem_cgroup(memcg)) && @@ -847,37 +837,12 @@ static inline enum mem_cgroup_protection mem_cgroup_protected( return MEMCG_PROT_NONE; } -static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, - struct mem_cgroup **memcgp, - bool compound) +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask) { - *memcgp = NULL; return 0; } -static inline int mem_cgroup_try_charge_delay(struct page *page, - struct mm_struct *mm, - gfp_t gfp_mask, - struct mem_cgroup **memcgp, - bool compound) -{ - *memcgp = NULL; - return 0; -} - -static inline void mem_cgroup_commit_charge(struct page *page, - struct mem_cgroup *memcg, - bool lrucare, bool compound) -{ -} - -static inline void mem_cgroup_cancel_charge(struct page *page, - struct mem_cgroup *memcg, - bool compound) -{ -} - static inline void mem_cgroup_uncharge(struct page *page) { } @@ -1277,6 +1242,19 @@ static inline void dec_lruvec_page_state(struct page *page, mod_lruvec_page_state(page, idx, -1); } +static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) +{ + struct mem_cgroup *memcg; + + memcg = lruvec_memcg(lruvec); + if (!memcg) + return NULL; + memcg = parent_mem_cgroup(memcg); + if (!memcg) + return NULL; + return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); +} + #ifdef CONFIG_CGROUP_WRITEBACK struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 93d9ada74ddd..375515803cd8 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -314,19 +314,13 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} #ifdef CONFIG_MEMORY_HOTREMOVE -extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern int remove_memory(int nid, u64 start, u64 size); extern void __remove_memory(int nid, u64 start, u64 size); +extern int offline_and_remove_memory(int nid, u64 start, u64 size); #else -static inline bool is_mem_section_removable(unsigned long pfn, - unsigned long nr_pages) -{ - return false; -} - static inline void try_offline_node(int nid) {} static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) @@ -349,6 +343,8 @@ extern void __ref free_area_init_core_hotplug(int nid); extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource); +extern int add_memory_driver_managed(int nid, u64 start, u64 size, + const char *resource_name); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern void remove_pfn_range_from_zone(struct zone *zone, diff --git a/include/linux/mempolicy.h 
b/include/linux/mempolicy.h index 8165278c348a..ea9c15b60a96 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -31,7 +31,7 @@ struct mm_struct; * Locking policy for interleave: * In process context there is no locking because only the process accesses * its own state. All vma manipulation is somewhat protected by a down_read on - * mmap_sem. + * mmap_lock. * * Freeing policy: * Mempolicy objects are reference counted. A mempolicy will be freed when diff --git a/include/linux/memstick.h b/include/linux/memstick.h index 216a713bef7f..da4c65f9435f 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -288,7 +288,7 @@ struct memstick_host { int (*set_param)(struct memstick_host *host, enum memstick_param param, int value); - unsigned long private[0] ____cacheline_aligned; + unsigned long private[] ____cacheline_aligned; }; struct memstick_driver { diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index d01d1299e49d..ab76cdd06199 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -70,11 +70,11 @@ struct mfd_cell { size_t pdata_size; /* device properties passed to the sub devices drivers */ - struct property_entry *properties; + const struct property_entry *properties; /* * Device Tree compatible string - * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details + * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details */ const char *of_compatible; diff --git a/include/linux/mfd/gsc.h b/include/linux/mfd/gsc.h new file mode 100644 index 000000000000..6bd639c285b4 --- /dev/null +++ b/include/linux/mfd/gsc.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Gateworks Corporation + */ +#ifndef __LINUX_MFD_GSC_H_ +#define __LINUX_MFD_GSC_H_ + +#include <linux/regmap.h> + +/* Device Addresses */ +#define GSC_MISC 0x20 +#define GSC_UPDATE 0x21 +#define GSC_GPIO 0x23 +#define GSC_HWMON 0x29 +#define GSC_EEPROM0 0x50 +#define GSC_EEPROM1 0x51 +#define GSC_EEPROM2 0x52 +#define GSC_EEPROM3 0x53 +#define GSC_RTC 0x68 + +/* Register offsets */ +enum { + GSC_CTRL_0 = 0x00, + GSC_CTRL_1 = 0x01, + GSC_TIME = 0x02, + GSC_TIME_ADD = 0x06, + GSC_IRQ_STATUS = 0x0A, + GSC_IRQ_ENABLE = 0x0B, + GSC_FW_CRC = 0x0C, + GSC_FW_VER = 0x0E, + GSC_WP = 0x0F, +}; + +/* Bit definitions */ +#define GSC_CTRL_0_PB_HARD_RESET 0 +#define GSC_CTRL_0_PB_CLEAR_SECURE_KEY 1 +#define GSC_CTRL_0_PB_SOFT_POWER_DOWN 2 +#define GSC_CTRL_0_PB_BOOT_ALTERNATE 3 +#define GSC_CTRL_0_PERFORM_CRC 4 +#define GSC_CTRL_0_TAMPER_DETECT 5 +#define GSC_CTRL_0_SWITCH_HOLD 6 + +#define GSC_CTRL_1_SLEEP_ENABLE 0 +#define GSC_CTRL_1_SLEEP_ACTIVATE 1 +#define GSC_CTRL_1_SLEEP_ADD 2 +#define GSC_CTRL_1_SLEEP_NOWAKEPB 3 +#define GSC_CTRL_1_WDT_TIME 4 +#define GSC_CTRL_1_WDT_ENABLE 5 +#define GSC_CTRL_1_SWITCH_BOOT_ENABLE 6 +#define GSC_CTRL_1_SWITCH_BOOT_CLEAR 7 + +#define GSC_IRQ_PB 0 +#define GSC_IRQ_KEY_ERASED 1 +#define GSC_IRQ_EEPROM_WP 2 +#define GSC_IRQ_RESV 3 +#define GSC_IRQ_GPIO 4 +#define GSC_IRQ_TAMPER 5 +#define GSC_IRQ_WDT_TIMEOUT 6 +#define GSC_IRQ_SWITCH_HOLD 7 + +int gsc_read(void *context, unsigned int reg, unsigned int *val); +int gsc_write(void *context, unsigned int reg, unsigned int val); + +struct gsc_dev { + struct device *dev; + + struct i2c_client *i2c; /* 0x20: interrupt controller, WDT */ + struct i2c_client *i2c_hwmon; /* 0x29: hwmon, fan controller */ + + struct regmap *regmap; + + unsigned int fwver; + unsigned short fwcrc; +}; + +#endif /* __LINUX_MFD_GSC_H_ */ diff --git
a/include/linux/mfd/intel_pmc_bxt.h b/include/linux/mfd/intel_pmc_bxt.h new file mode 100644 index 000000000000..f51a43d25ffd --- /dev/null +++ b/include/linux/mfd/intel_pmc_bxt.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MFD_INTEL_PMC_BXT_H +#define MFD_INTEL_PMC_BXT_H + +/* GCR reg offsets from GCR base */ +#define PMC_GCR_PMC_CFG_REG 0x08 +#define PMC_GCR_TELEM_DEEP_S0IX_REG 0x78 +#define PMC_GCR_TELEM_SHLW_S0IX_REG 0x80 + +/* PMC_CFG_REG bit masks */ +#define PMC_CFG_NO_REBOOT_EN BIT(4) + +/** + * struct intel_pmc_dev - Intel PMC device structure + * @dev: Pointer to the parent PMC device + * @scu: Pointer to the SCU IPC device data structure + * @gcr_mem_base: Virtual base address of GCR (Global Configuration Registers) + * @gcr_lock: Lock used to serialize access to GCR registers + * @telem_base: Pointer to telemetry SSRAM base resource or %NULL if not + * available + */ +struct intel_pmc_dev { + struct device *dev; + struct intel_scu_ipc_dev *scu; + void __iomem *gcr_mem_base; + spinlock_t gcr_lock; + struct resource *telem_base; +}; + +#if IS_ENABLED(CONFIG_MFD_INTEL_PMC_BXT) +int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, u64 *data); +int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, u32 mask, u32 val); +int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data); +#else +static inline int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, + u64 *data) +{ + return -ENOTSUPP; +} + +static inline int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, + u32 mask, u32 val) +{ + return -ENOTSUPP; +} + +static inline int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data) +{ + return -ENOTSUPP; +} +#endif + +#endif /* MFD_INTEL_PMC_BXT_H */ diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index bfecd6bd4990..6a88e34cb955 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -13,6 +13,20 @@ #include <linux/regmap.h> +/** + * struct intel_soc_pmic - Intel SoC PMIC data + * @irq: Master interrupt number of the parent PMIC device + * @regmap: Pointer to the parent PMIC device regmap structure + * @irq_chip_data: IRQ chip data for the PMIC itself + * @irq_chip_data_pwrbtn: Chained IRQ chip data for the Power Button + * @irq_chip_data_tmu: Chained IRQ chip data for the Time Management Unit + * @irq_chip_data_bcu: Chained IRQ chip data for the Burst Control Unit + * @irq_chip_data_adc: Chained IRQ chip data for the General Purpose ADC + * @irq_chip_data_chgr: Chained IRQ chip data for the External Charger + * @irq_chip_data_crit: Chained IRQ chip data for the Critical Event Handler + * @dev: Pointer to the parent PMIC device + * @scu: Pointer to the SCU IPC device data structure + */ struct intel_soc_pmic { int irq; struct regmap *regmap; @@ -24,6 +38,7 @@ struct intel_soc_pmic { struct regmap_irq_chip_data *irq_chip_data_chgr; struct regmap_irq_chip_data *irq_chip_data_crit; struct device *dev; + struct intel_scu_ipc_dev *scu; }; int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h index 061af220dcd3..79c020bd0c70 100644 --- a/include/linux/mfd/max8998.h +++ b/include/linux/mfd/max8998.h @@ -39,6 +39,7 @@ enum { MAX8998_ENVICHG, MAX8998_ESAFEOUT1, MAX8998_ESAFEOUT2, + MAX8998_CHARGER, }; /** diff --git a/include/linux/mfd/mp2629.h b/include/linux/mfd/mp2629.h new file mode 100644 index 000000000000..89b706900b57 --- /dev/null 
+++ b/include/linux/mfd/mp2629.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2020 Monolithic Power Systems, Inc + */ + +#ifndef __MP2629_H__ +#define __MP2629_H__ + +#include <linux/device.h> +#include <linux/regmap.h> + +struct mp2629_data { + struct device *dev; + struct regmap *regmap; +}; + +enum mp2629_adc_chan { + MP2629_BATT_VOLT, + MP2629_SYSTEM_VOLT, + MP2629_INPUT_VOLT, + MP2629_BATT_CURRENT, + MP2629_INPUT_CURRENT, + MP2629_ADC_CHAN_END +}; + +#endif diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h new file mode 100644 index 000000000000..c5a11b7458d4 --- /dev/null +++ b/include/linux/mfd/mt6358/core.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. + */ + +#ifndef __MFD_MT6358_CORE_H__ +#define __MFD_MT6358_CORE_H__ + +#define MT6358_REG_WIDTH 16 + +struct irq_top_t { + int hwirq_base; + unsigned int num_int_regs; + unsigned int num_int_bits; + unsigned int en_reg; + unsigned int en_reg_shift; + unsigned int sta_reg; + unsigned int sta_reg_shift; + unsigned int top_offset; +}; + +struct pmic_irq_data { + unsigned int num_top; + unsigned int num_pmic_irqs; + unsigned short top_int_status_reg; + bool *enable_hwirq; + bool *cache_hwirq; +}; + +enum mt6358_irq_top_status_shift { + MT6358_BUCK_TOP = 0, + MT6358_LDO_TOP, + MT6358_PSC_TOP, + MT6358_SCK_TOP, + MT6358_BM_TOP, + MT6358_HK_TOP, + MT6358_AUD_TOP, + MT6358_MISC_TOP, +}; + +enum mt6358_irq_numbers { + MT6358_IRQ_VPROC11_OC = 0, + MT6358_IRQ_VPROC12_OC, + MT6358_IRQ_VCORE_OC, + MT6358_IRQ_VGPU_OC, + MT6358_IRQ_VMODEM_OC, + MT6358_IRQ_VDRAM1_OC, + MT6358_IRQ_VS1_OC, + MT6358_IRQ_VS2_OC, + MT6358_IRQ_VPA_OC, + MT6358_IRQ_VCORE_PREOC, + MT6358_IRQ_VFE28_OC = 16, + MT6358_IRQ_VXO22_OC, + MT6358_IRQ_VRF18_OC, + MT6358_IRQ_VRF12_OC, + MT6358_IRQ_VEFUSE_OC, + MT6358_IRQ_VCN33_OC, + MT6358_IRQ_VCN28_OC, + MT6358_IRQ_VCN18_OC, + MT6358_IRQ_VCAMA1_OC, + MT6358_IRQ_VCAMA2_OC, + MT6358_IRQ_VCAMD_OC, + MT6358_IRQ_VCAMIO_OC, + MT6358_IRQ_VLDO28_OC, + MT6358_IRQ_VA12_OC, + MT6358_IRQ_VAUX18_OC, + MT6358_IRQ_VAUD28_OC, + MT6358_IRQ_VIO28_OC, + MT6358_IRQ_VIO18_OC, + MT6358_IRQ_VSRAM_PROC11_OC, + MT6358_IRQ_VSRAM_PROC12_OC, + MT6358_IRQ_VSRAM_OTHERS_OC, + MT6358_IRQ_VSRAM_GPU_OC, + MT6358_IRQ_VDRAM2_OC, + MT6358_IRQ_VMC_OC, + MT6358_IRQ_VMCH_OC, + MT6358_IRQ_VEMC_OC, + MT6358_IRQ_VSIM1_OC, + MT6358_IRQ_VSIM2_OC, + MT6358_IRQ_VIBR_OC, + MT6358_IRQ_VUSB_OC, + MT6358_IRQ_VBIF28_OC, + MT6358_IRQ_PWRKEY = 48, + MT6358_IRQ_HOMEKEY, + MT6358_IRQ_PWRKEY_R, + MT6358_IRQ_HOMEKEY_R, + MT6358_IRQ_NI_LBAT_INT, + MT6358_IRQ_CHRDET, + MT6358_IRQ_CHRDET_EDGE, + MT6358_IRQ_VCDT_HV_DET, + MT6358_IRQ_RTC = 64, + MT6358_IRQ_FG_BAT0_H = 80, + MT6358_IRQ_FG_BAT0_L, + MT6358_IRQ_FG_CUR_H, + MT6358_IRQ_FG_CUR_L, + MT6358_IRQ_FG_ZCV, + MT6358_IRQ_FG_BAT1_H, + MT6358_IRQ_FG_BAT1_L, + MT6358_IRQ_FG_N_CHARGE_L, + MT6358_IRQ_FG_IAVG_H, + MT6358_IRQ_FG_IAVG_L, + MT6358_IRQ_FG_TIME_H, + MT6358_IRQ_FG_DISCHARGE, + MT6358_IRQ_FG_CHARGE, + MT6358_IRQ_BATON_LV = 96, + MT6358_IRQ_BATON_HT, + MT6358_IRQ_BATON_BAT_IN, + MT6358_IRQ_BATON_BAT_OUT, + MT6358_IRQ_BIF, + MT6358_IRQ_BAT_H = 112, + MT6358_IRQ_BAT_L, + MT6358_IRQ_BAT2_H, + MT6358_IRQ_BAT2_L, + MT6358_IRQ_BAT_TEMP_H, + MT6358_IRQ_BAT_TEMP_L, + MT6358_IRQ_AUXADC_IMP, + MT6358_IRQ_NAG_C_DLTV, + MT6358_IRQ_AUDIO = 128, + MT6358_IRQ_ACCDET = 133, + MT6358_IRQ_ACCDET_EINT0, + MT6358_IRQ_ACCDET_EINT1, + MT6358_IRQ_SPI_CMD_ALERT = 144, + MT6358_IRQ_NR, +}; + +#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC +#define 
MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC +#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY +#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC +#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H +#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H +#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO +#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT + +#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1) +#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1) +#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1) +#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1) +#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1) +#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE + 1) +#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1) +#define MT6358_IRQ_MISC_BITS \ + (MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1) + +#define MT6358_TOP_GEN(sp) \ +{ \ + .hwirq_base = MT6358_IRQ_##sp##_BASE, \ + .num_int_regs = \ + ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \ + .num_int_bits = MT6358_IRQ_##sp##_BITS, \ + .en_reg = MT6358_##sp##_TOP_INT_CON0, \ + .en_reg_shift = 0x6, \ + .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \ + .sta_reg_shift = 0x2, \ + .top_offset = MT6358_##sp##_TOP, \ +} + +#endif /* __MFD_MT6358_CORE_H__ */ diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h new file mode 100644 index 000000000000..2ad0b312aa28 --- /dev/null +++ b/include/linux/mfd/mt6358/registers.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. + */ + +#ifndef __MFD_MT6358_REGISTERS_H__ +#define __MFD_MT6358_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6358_SWCID 0xa +#define MT6358_MISC_TOP_INT_CON0 0x188 +#define MT6358_MISC_TOP_INT_STATUS0 0x194 +#define MT6358_TOP_INT_STATUS0 0x19e +#define MT6358_SCK_TOP_INT_CON0 0x52e +#define MT6358_SCK_TOP_INT_STATUS0 0x53a +#define MT6358_EOSC_CALI_CON0 0x540 +#define MT6358_EOSC_CALI_CON1 0x542 +#define MT6358_RTC_MIX_CON0 0x544 +#define MT6358_RTC_MIX_CON1 0x546 +#define MT6358_RTC_MIX_CON2 0x548 +#define MT6358_RTC_DSN_ID 0x580 +#define MT6358_RTC_DSN_REV0 0x582 +#define MT6358_RTC_DBI 0x584 +#define MT6358_RTC_DXI 0x586 +#define MT6358_RTC_BBPU 0x588 +#define MT6358_RTC_IRQ_STA 0x58a +#define MT6358_RTC_IRQ_EN 0x58c +#define MT6358_RTC_CII_EN 0x58e +#define MT6358_RTC_AL_MASK 0x590 +#define MT6358_RTC_TC_SEC 0x592 +#define MT6358_RTC_TC_MIN 0x594 +#define MT6358_RTC_TC_HOU 0x596 +#define MT6358_RTC_TC_DOM 0x598 +#define MT6358_RTC_TC_DOW 0x59a +#define MT6358_RTC_TC_MTH 0x59c +#define MT6358_RTC_TC_YEA 0x59e +#define MT6358_RTC_AL_SEC 0x5a0 +#define MT6358_RTC_AL_MIN 0x5a2 +#define MT6358_RTC_AL_HOU 0x5a4 +#define MT6358_RTC_AL_DOM 0x5a6 +#define MT6358_RTC_AL_DOW 0x5a8 +#define MT6358_RTC_AL_MTH 0x5aa +#define MT6358_RTC_AL_YEA 0x5ac +#define MT6358_RTC_OSC32CON 0x5ae +#define MT6358_RTC_POWERKEY1 0x5b0 +#define MT6358_RTC_POWERKEY2 0x5b2 +#define MT6358_RTC_PDN1 0x5b4 +#define MT6358_RTC_PDN2 0x5b6 +#define MT6358_RTC_SPAR0 0x5b8 +#define MT6358_RTC_SPAR1 0x5ba +#define MT6358_RTC_PROT 0x5bc +#define MT6358_RTC_DIFF 0x5be +#define MT6358_RTC_CALI 0x5c0 +#define MT6358_RTC_WRTGR 0x5c2 +#define MT6358_RTC_CON 0x5c4 +#define MT6358_RTC_SEC_CTRL 0x5c6 +#define MT6358_RTC_INT_CNT 0x5c8 +#define MT6358_RTC_SEC_DAT0 0x5ca +#define MT6358_RTC_SEC_DAT1 0x5cc +#define MT6358_RTC_SEC_DAT2 0x5ce +#define MT6358_RTC_SEC_DSN_ID 0x600 +#define 
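MT6358_TOP_GEN() above stamps out one irq_top_t entry per interrupt domain from the *_BASE/*_BITS pairs and the MT6358_*_TOP_INT_CON0/STATUS0 registers defined in registers.h below. A plausible consumer, mirroring how such generator macros are typically used (the array name is assumed for illustration):

static const struct irq_top_t mt6358_ints[] = {
	MT6358_TOP_GEN(BUCK),
	MT6358_TOP_GEN(LDO),
	MT6358_TOP_GEN(PSC),
	MT6358_TOP_GEN(SCK),
	MT6358_TOP_GEN(BM),
	MT6358_TOP_GEN(HK),
	MT6358_TOP_GEN(AUD),
	MT6358_TOP_GEN(MISC),
};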
MT6358_RTC_SEC_DSN_REV0 0x602 +#define MT6358_RTC_SEC_DBI 0x604 +#define MT6358_RTC_SEC_DXI 0x606 +#define MT6358_RTC_TC_SEC_SEC 0x608 +#define MT6358_RTC_TC_MIN_SEC 0x60a +#define MT6358_RTC_TC_HOU_SEC 0x60c +#define MT6358_RTC_TC_DOM_SEC 0x60e +#define MT6358_RTC_TC_DOW_SEC 0x610 +#define MT6358_RTC_TC_MTH_SEC 0x612 +#define MT6358_RTC_TC_YEA_SEC 0x614 +#define MT6358_RTC_SEC_CK_PDN 0x616 +#define MT6358_RTC_SEC_WRTGR 0x618 +#define MT6358_PSC_TOP_INT_CON0 0x910 +#define MT6358_PSC_TOP_INT_STATUS0 0x91c +#define MT6358_BM_TOP_INT_CON0 0xc32 +#define MT6358_BM_TOP_INT_CON1 0xc38 +#define MT6358_BM_TOP_INT_STATUS0 0xc4a +#define MT6358_BM_TOP_INT_STATUS1 0xc4c +#define MT6358_HK_TOP_INT_CON0 0xf92 +#define MT6358_HK_TOP_INT_STATUS0 0xf9e +#define MT6358_BUCK_TOP_INT_CON0 0x1318 +#define MT6358_BUCK_TOP_INT_STATUS0 0x1324 +#define MT6358_BUCK_VPROC11_CON0 0x1388 +#define MT6358_BUCK_VPROC11_DBG0 0x139e +#define MT6358_BUCK_VPROC11_DBG1 0x13a0 +#define MT6358_BUCK_VPROC11_ELR0 0x13a6 +#define MT6358_BUCK_VPROC12_CON0 0x1408 +#define MT6358_BUCK_VPROC12_DBG0 0x141e +#define MT6358_BUCK_VPROC12_DBG1 0x1420 +#define MT6358_BUCK_VPROC12_ELR0 0x1426 +#define MT6358_BUCK_VCORE_CON0 0x1488 +#define MT6358_BUCK_VCORE_DBG0 0x149e +#define MT6358_BUCK_VCORE_DBG1 0x14a0 +#define MT6358_BUCK_VCORE_ELR0 0x14aa +#define MT6358_BUCK_VGPU_CON0 0x1508 +#define MT6358_BUCK_VGPU_DBG0 0x151e +#define MT6358_BUCK_VGPU_DBG1 0x1520 +#define MT6358_BUCK_VGPU_ELR0 0x1526 +#define MT6358_BUCK_VMODEM_CON0 0x1588 +#define MT6358_BUCK_VMODEM_DBG0 0x159e +#define MT6358_BUCK_VMODEM_DBG1 0x15a0 +#define MT6358_BUCK_VMODEM_ELR0 0x15a6 +#define MT6358_BUCK_VDRAM1_CON0 0x1608 +#define MT6358_BUCK_VDRAM1_DBG0 0x161e +#define MT6358_BUCK_VDRAM1_DBG1 0x1620 +#define MT6358_BUCK_VDRAM1_ELR0 0x1626 +#define MT6358_BUCK_VS1_CON0 0x1688 +#define MT6358_BUCK_VS1_DBG0 0x169e +#define MT6358_BUCK_VS1_DBG1 0x16a0 +#define MT6358_BUCK_VS1_ELR0 0x16ae +#define MT6358_BUCK_VS2_CON0 0x1708 +#define MT6358_BUCK_VS2_DBG0 0x171e +#define MT6358_BUCK_VS2_DBG1 0x1720 +#define MT6358_BUCK_VS2_ELR0 0x172e +#define MT6358_BUCK_VPA_CON0 0x1788 +#define MT6358_BUCK_VPA_CON1 0x178a +#define MT6358_BUCK_VPA_ELR0 MT6358_BUCK_VPA_CON1 +#define MT6358_BUCK_VPA_DBG0 0x1792 +#define MT6358_BUCK_VPA_DBG1 0x1794 +#define MT6358_VPROC_ANA_CON0 0x180c +#define MT6358_VCORE_VGPU_ANA_CON0 0x1828 +#define MT6358_VMODEM_ANA_CON0 0x1888 +#define MT6358_VDRAM1_ANA_CON0 0x1896 +#define MT6358_VS1_ANA_CON0 0x18a2 +#define MT6358_VS2_ANA_CON0 0x18ae +#define MT6358_VPA_ANA_CON0 0x18ba +#define MT6358_LDO_TOP_INT_CON0 0x1a50 +#define MT6358_LDO_TOP_INT_CON1 0x1a56 +#define MT6358_LDO_TOP_INT_STATUS0 0x1a68 +#define MT6358_LDO_TOP_INT_STATUS1 0x1a6a +#define MT6358_LDO_VXO22_CON0 0x1a88 +#define MT6358_LDO_VXO22_CON1 0x1a96 +#define MT6358_LDO_VA12_CON0 0x1a9c +#define MT6358_LDO_VA12_CON1 0x1aaa +#define MT6358_LDO_VAUX18_CON0 0x1ab0 +#define MT6358_LDO_VAUX18_CON1 0x1abe +#define MT6358_LDO_VAUD28_CON0 0x1ac4 +#define MT6358_LDO_VAUD28_CON1 0x1ad2 +#define MT6358_LDO_VIO28_CON0 0x1ad8 +#define MT6358_LDO_VIO28_CON1 0x1ae6 +#define MT6358_LDO_VIO18_CON0 0x1aec +#define MT6358_LDO_VIO18_CON1 0x1afa +#define MT6358_LDO_VDRAM2_CON0 0x1b08 +#define MT6358_LDO_VDRAM2_CON1 0x1b16 +#define MT6358_LDO_VEMC_CON0 0x1b1c +#define MT6358_LDO_VEMC_CON1 0x1b2a +#define MT6358_LDO_VUSB_CON0_0 0x1b30 +#define MT6358_LDO_VUSB_CON1 0x1b40 +#define MT6358_LDO_VSRAM_PROC11_CON0 0x1b46 +#define MT6358_LDO_VSRAM_PROC11_DBG0 0x1b60 +#define MT6358_LDO_VSRAM_PROC11_DBG1 0x1b62 +#define 
MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64 +#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66 +#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68 +#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70 +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72 +#define MT6358_LDO_VSRAM_WAKEUP_CON0 0x1b74 +#define MT6358_LDO_GON1_ELR_NUM 0x1b76 +#define MT6358_LDO_VDRAM2_ELR0 0x1b78 +#define MT6358_LDO_VSRAM_PROC12_CON0 0x1b88 +#define MT6358_LDO_VSRAM_PROC12_DBG0 0x1ba2 +#define MT6358_LDO_VSRAM_PROC12_DBG1 0x1ba4 +#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6 +#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0 +#define MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2 +#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8 +#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2 +#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4 +#define MT6358_LDO_VSRAM_CON0 0x1bee +#define MT6358_LDO_VSRAM_CON1 0x1bf0 +#define MT6358_LDO_VSRAM_CON2 0x1bf2 +#define MT6358_LDO_VSRAM_CON3 0x1bf4 +#define MT6358_LDO_VFE28_CON0 0x1c08 +#define MT6358_LDO_VFE28_CON1 0x1c16 +#define MT6358_LDO_VFE28_CON2 0x1c18 +#define MT6358_LDO_VFE28_CON3 0x1c1a +#define MT6358_LDO_VRF18_CON0 0x1c1c +#define MT6358_LDO_VRF18_CON1 0x1c2a +#define MT6358_LDO_VRF18_CON2 0x1c2c +#define MT6358_LDO_VRF18_CON3 0x1c2e +#define MT6358_LDO_VRF12_CON0 0x1c30 +#define MT6358_LDO_VRF12_CON1 0x1c3e +#define MT6358_LDO_VRF12_CON2 0x1c40 +#define MT6358_LDO_VRF12_CON3 0x1c42 +#define MT6358_LDO_VEFUSE_CON0 0x1c44 +#define MT6358_LDO_VEFUSE_CON1 0x1c52 +#define MT6358_LDO_VEFUSE_CON2 0x1c54 +#define MT6358_LDO_VEFUSE_CON3 0x1c56 +#define MT6358_LDO_VCN18_CON0 0x1c58 +#define MT6358_LDO_VCN18_CON1 0x1c66 +#define MT6358_LDO_VCN18_CON2 0x1c68 +#define MT6358_LDO_VCN18_CON3 0x1c6a +#define MT6358_LDO_VCAMA1_CON0 0x1c6c +#define MT6358_LDO_VCAMA1_CON1 0x1c7a +#define MT6358_LDO_VCAMA1_CON2 0x1c7c +#define MT6358_LDO_VCAMA1_CON3 0x1c7e +#define MT6358_LDO_VCAMA2_CON0 0x1c88 +#define MT6358_LDO_VCAMA2_CON1 0x1c96 +#define MT6358_LDO_VCAMA2_CON2 0x1c98 +#define MT6358_LDO_VCAMA2_CON3 0x1c9a +#define MT6358_LDO_VCAMD_CON0 0x1c9c +#define MT6358_LDO_VCAMD_CON1 0x1caa +#define MT6358_LDO_VCAMD_CON2 0x1cac +#define MT6358_LDO_VCAMD_CON3 0x1cae +#define MT6358_LDO_VCAMIO_CON0 0x1cb0 +#define MT6358_LDO_VCAMIO_CON1 0x1cbe +#define MT6358_LDO_VCAMIO_CON2 0x1cc0 +#define MT6358_LDO_VCAMIO_CON3 0x1cc2 +#define MT6358_LDO_VMC_CON0 0x1cc4 +#define MT6358_LDO_VMC_CON1 0x1cd2 +#define MT6358_LDO_VMC_CON2 0x1cd4 +#define MT6358_LDO_VMC_CON3 0x1cd6 +#define MT6358_LDO_VMCH_CON0 0x1cd8 +#define MT6358_LDO_VMCH_CON1 0x1ce6 +#define MT6358_LDO_VMCH_CON2 0x1ce8 +#define MT6358_LDO_VMCH_CON3 0x1cea +#define MT6358_LDO_VIBR_CON0 0x1d08 +#define MT6358_LDO_VIBR_CON1 0x1d16 +#define MT6358_LDO_VIBR_CON2 0x1d18 +#define MT6358_LDO_VIBR_CON3 0x1d1a +#define MT6358_LDO_VCN33_CON0_0 0x1d1c +#define MT6358_LDO_VCN33_CON0_1 0x1d2a +#define MT6358_LDO_VCN33_CON1 0x1d2c +#define MT6358_LDO_VCN33_BT_CON1 MT6358_LDO_VCN33_CON1 +#define MT6358_LDO_VCN33_WIFI_CON1 MT6358_LDO_VCN33_CON1 +#define MT6358_LDO_VCN33_CON2 0x1d2e +#define MT6358_LDO_VCN33_CON3 0x1d30 +#define MT6358_LDO_VLDO28_CON0_0 0x1d32 +#define MT6358_LDO_VLDO28_CON0_1 0x1d40 +#define MT6358_LDO_VLDO28_CON1 0x1d42 +#define MT6358_LDO_VLDO28_CON2 0x1d44 +#define MT6358_LDO_VLDO28_CON3 0x1d46 +#define MT6358_LDO_VSIM1_CON0 0x1d48 +#define MT6358_LDO_VSIM1_CON1 0x1d56 +#define MT6358_LDO_VSIM1_CON2 0x1d58 +#define 
MT6358_LDO_VSIM1_CON3 0x1d5a +#define MT6358_LDO_VSIM2_CON0 0x1d5c +#define MT6358_LDO_VSIM2_CON1 0x1d6a +#define MT6358_LDO_VSIM2_CON2 0x1d6c +#define MT6358_LDO_VSIM2_CON3 0x1d6e +#define MT6358_LDO_VCN28_CON0 0x1d88 +#define MT6358_LDO_VCN28_CON1 0x1d96 +#define MT6358_LDO_VCN28_CON2 0x1d98 +#define MT6358_LDO_VCN28_CON3 0x1d9a +#define MT6358_VRTC28_CON0 0x1d9c +#define MT6358_LDO_VBIF28_CON0 0x1d9e +#define MT6358_LDO_VBIF28_CON1 0x1dac +#define MT6358_LDO_VBIF28_CON2 0x1dae +#define MT6358_LDO_VBIF28_CON3 0x1db0 +#define MT6358_VCAMA1_ANA_CON0 0x1e08 +#define MT6358_VCAMA2_ANA_CON0 0x1e0c +#define MT6358_VCN33_ANA_CON0 0x1e28 +#define MT6358_VSIM1_ANA_CON0 0x1e2c +#define MT6358_VSIM2_ANA_CON0 0x1e30 +#define MT6358_VUSB_ANA_CON0 0x1e34 +#define MT6358_VEMC_ANA_CON0 0x1e38 +#define MT6358_VLDO28_ANA_CON0 0x1e3c +#define MT6358_VIO28_ANA_CON0 0x1e40 +#define MT6358_VIBR_ANA_CON0 0x1e44 +#define MT6358_VMCH_ANA_CON0 0x1e48 +#define MT6358_VMC_ANA_CON0 0x1e4c +#define MT6358_VRF18_ANA_CON0 0x1e88 +#define MT6358_VCN18_ANA_CON0 0x1e8c +#define MT6358_VCAMIO_ANA_CON0 0x1e90 +#define MT6358_VIO18_ANA_CON0 0x1e94 +#define MT6358_VEFUSE_ANA_CON0 0x1e98 +#define MT6358_VRF12_ANA_CON0 0x1e9c +#define MT6358_VSRAM_PROC11_ANA_CON0 0x1ea0 +#define MT6358_VSRAM_PROC12_ANA_CON0 0x1ea4 +#define MT6358_VSRAM_OTHERS_ANA_CON0 0x1ea6 +#define MT6358_VSRAM_GPU_ANA_CON0 0x1ea8 +#define MT6358_VDRAM2_ANA_CON0 0x1eaa +#define MT6358_VCAMD_ANA_CON0 0x1eae +#define MT6358_VA12_ANA_CON0 0x1eb2 +#define MT6358_AUD_TOP_INT_CON0 0x2228 +#define MT6358_AUD_TOP_INT_STATUS0 0x2234 + +#endif /* __MFD_MT6358_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h new file mode 100644 index 000000000000..ea1304035d4d --- /dev/null +++ b/include/linux/mfd/mt6360.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. 
+ */
+
+#ifndef __MT6360_H__
+#define __MT6360_H__
+
+#include <linux/regmap.h>
+
+enum {
+ MT6360_SLAVE_PMU = 0,
+ MT6360_SLAVE_PMIC,
+ MT6360_SLAVE_LDO,
+ MT6360_SLAVE_TCPC,
+ MT6360_SLAVE_MAX,
+};
+
+#define MT6360_PMU_SLAVEID (0x34)
+#define MT6360_PMIC_SLAVEID (0x1A)
+#define MT6360_LDO_SLAVEID (0x64)
+#define MT6360_TCPC_SLAVEID (0x4E)
+
+struct mt6360_pmu_data {
+ struct i2c_client *i2c[MT6360_SLAVE_MAX];
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_irq_chip_data *irq_data;
+ unsigned int chip_rev;
+};
+
+/* PMU register definition */
+#define MT6360_PMU_DEV_INFO (0x00)
+#define MT6360_PMU_CORE_CTRL1 (0x01)
+#define MT6360_PMU_RST1 (0x02)
+#define MT6360_PMU_CRCEN (0x03)
+#define MT6360_PMU_RST_PAS_CODE1 (0x04)
+#define MT6360_PMU_RST_PAS_CODE2 (0x05)
+#define MT6360_PMU_CORE_CTRL2 (0x06)
+#define MT6360_PMU_TM_PAS_CODE1 (0x07)
+#define MT6360_PMU_TM_PAS_CODE2 (0x08)
+#define MT6360_PMU_TM_PAS_CODE3 (0x09)
+#define MT6360_PMU_TM_PAS_CODE4 (0x0A)
+#define MT6360_PMU_IRQ_IND (0x0B)
+#define MT6360_PMU_IRQ_MASK (0x0C)
+#define MT6360_PMU_IRQ_SET (0x0D)
+#define MT6360_PMU_SHDN_CTRL (0x0E)
+#define MT6360_PMU_TM_INF (0x0F)
+#define MT6360_PMU_I2C_CTRL (0x10)
+#define MT6360_PMU_CHG_CTRL1 (0x11)
+#define MT6360_PMU_CHG_CTRL2 (0x12)
+#define MT6360_PMU_CHG_CTRL3 (0x13)
+#define MT6360_PMU_CHG_CTRL4 (0x14)
+#define MT6360_PMU_CHG_CTRL5 (0x15)
+#define MT6360_PMU_CHG_CTRL6 (0x16)
+#define MT6360_PMU_CHG_CTRL7 (0x17)
+#define MT6360_PMU_CHG_CTRL8 (0x18)
+#define MT6360_PMU_CHG_CTRL9 (0x19)
+#define MT6360_PMU_CHG_CTRL10 (0x1A)
+#define MT6360_PMU_CHG_CTRL11 (0x1B)
+#define MT6360_PMU_CHG_CTRL12 (0x1C)
+#define MT6360_PMU_CHG_CTRL13 (0x1D)
+#define MT6360_PMU_CHG_CTRL14 (0x1E)
+#define MT6360_PMU_CHG_CTRL15 (0x1F)
+#define MT6360_PMU_CHG_CTRL16 (0x20)
+#define MT6360_PMU_CHG_AICC_RESULT (0x21)
+#define MT6360_PMU_DEVICE_TYPE (0x22)
+#define MT6360_PMU_QC_CONTROL1 (0x23)
+#define MT6360_PMU_QC_CONTROL2 (0x24)
+#define MT6360_PMU_QC30_CONTROL1 (0x25)
+#define MT6360_PMU_QC30_CONTROL2 (0x26)
+#define MT6360_PMU_USB_STATUS1 (0x27)
+#define MT6360_PMU_QC_STATUS1 (0x28)
+#define MT6360_PMU_QC_STATUS2 (0x29)
+#define MT6360_PMU_CHG_PUMP (0x2A)
+#define MT6360_PMU_CHG_CTRL17 (0x2B)
+#define MT6360_PMU_CHG_CTRL18 (0x2C)
+#define MT6360_PMU_CHRDET_CTRL1 (0x2D)
+#define MT6360_PMU_CHRDET_CTRL2 (0x2E)
+#define MT6360_PMU_DPDN_CTRL (0x2F)
+#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30)
+#define MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31)
+#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32)
+#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33)
+#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34)
+#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35)
+#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36)
+#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37)
+#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38)
+#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39)
+#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A)
+#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B)
+#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C)
+#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D)
+#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E)
+#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F)
+#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40)
+#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41)
+#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42)
+#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43)
+#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44)
+#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45)
+#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46)
+#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47)
+#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48)
+#define MT6360_PMU_BC12_CTRL (0x49)
+#define
MT6360_PMU_CHG_STAT (0x4A) +#define MT6360_PMU_RESV1 (0x4B) +#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E) +#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F) +#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50) +#define MT6360_PMU_TYPEC_OTP_CTRL (0x51) +#define MT6360_PMU_ADC_BAT_DATA_H (0x52) +#define MT6360_PMU_ADC_BAT_DATA_L (0x53) +#define MT6360_PMU_IMID_BACKBST_ON (0x54) +#define MT6360_PMU_IMID_BACKBST_OFF (0x55) +#define MT6360_PMU_ADC_CONFIG (0x56) +#define MT6360_PMU_ADC_EN2 (0x57) +#define MT6360_PMU_ADC_IDLE_T (0x58) +#define MT6360_PMU_ADC_RPT_1 (0x5A) +#define MT6360_PMU_ADC_RPT_2 (0x5B) +#define MT6360_PMU_ADC_RPT_3 (0x5C) +#define MT6360_PMU_ADC_RPT_ORG1 (0x5D) +#define MT6360_PMU_ADC_RPT_ORG2 (0x5E) +#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F) +#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60) +#define MT6360_PMU_CHG_CTRL19 (0x61) +#define MT6360_PMU_VDDASUPPLY (0x62) +#define MT6360_PMU_BC12_MANUAL (0x63) +#define MT6360_PMU_CHGDET_FUNC (0x64) +#define MT6360_PMU_FOD_CTRL (0x65) +#define MT6360_PMU_CHG_CTRL20 (0x66) +#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67) +#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68) +#define MT6360_PMU_RESV2 (0x69) +#define MT6360_PMU_USBID_CTRL1 (0x6D) +#define MT6360_PMU_USBID_CTRL2 (0x6E) +#define MT6360_PMU_USBID_CTRL3 (0x6F) +#define MT6360_PMU_FLED_CFG (0x70) +#define MT6360_PMU_RESV3 (0x71) +#define MT6360_PMU_FLED1_CTRL (0x72) +#define MT6360_PMU_FLED_STRB_CTRL (0x73) +#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74) +#define MT6360_PMU_FLED1_TOR_CTRL (0x75) +#define MT6360_PMU_FLED2_CTRL (0x76) +#define MT6360_PMU_RESV4 (0x77) +#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78) +#define MT6360_PMU_FLED2_TOR_CTRL (0x79) +#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A) +#define MT6360_PMU_FLED_VMID_RTM (0x7B) +#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C) +#define MT6360_PMU_FLED_PWSEL (0x7D) +#define MT6360_PMU_FLED_EN (0x7E) +#define MT6360_PMU_FLED_Hidden1 (0x7F) +#define MT6360_PMU_RGB_EN (0x80) +#define MT6360_PMU_RGB1_ISNK (0x81) +#define MT6360_PMU_RGB2_ISNK (0x82) +#define MT6360_PMU_RGB3_ISNK (0x83) +#define MT6360_PMU_RGB_ML_ISNK (0x84) +#define MT6360_PMU_RGB1_DIM (0x85) +#define MT6360_PMU_RGB2_DIM (0x86) +#define MT6360_PMU_RGB3_DIM (0x87) +#define MT6360_PMU_RESV5 (0x88) +#define MT6360_PMU_RGB12_Freq (0x89) +#define MT6360_PMU_RGB34_Freq (0x8A) +#define MT6360_PMU_RGB1_Tr (0x8B) +#define MT6360_PMU_RGB1_Tf (0x8C) +#define MT6360_PMU_RGB1_TON_TOFF (0x8D) +#define MT6360_PMU_RGB2_Tr (0x8E) +#define MT6360_PMU_RGB2_Tf (0x8F) +#define MT6360_PMU_RGB2_TON_TOFF (0x90) +#define MT6360_PMU_RGB3_Tr (0x91) +#define MT6360_PMU_RGB3_Tf (0x92) +#define MT6360_PMU_RGB3_TON_TOFF (0x93) +#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94) +#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95) +#define MT6360_PMU_RESV6 (0x97) +#define MT6360_PMU_SPARE1 (0x9A) +#define MT6360_PMU_SPARE2 (0xA0) +#define MT6360_PMU_SPARE3 (0xB0) +#define MT6360_PMU_SPARE4 (0xC0) +#define MT6360_PMU_CHG_IRQ1 (0xD0) +#define MT6360_PMU_CHG_IRQ2 (0xD1) +#define MT6360_PMU_CHG_IRQ3 (0xD2) +#define MT6360_PMU_CHG_IRQ4 (0xD3) +#define MT6360_PMU_CHG_IRQ5 (0xD4) +#define MT6360_PMU_CHG_IRQ6 (0xD5) +#define MT6360_PMU_QC_IRQ (0xD6) +#define MT6360_PMU_FOD_IRQ (0xD7) +#define MT6360_PMU_BASE_IRQ (0xD8) +#define MT6360_PMU_FLED_IRQ1 (0xD9) +#define MT6360_PMU_FLED_IRQ2 (0xDA) +#define MT6360_PMU_RGB_IRQ (0xDB) +#define MT6360_PMU_BUCK1_IRQ (0xDC) +#define MT6360_PMU_BUCK2_IRQ (0xDD) +#define MT6360_PMU_LDO_IRQ1 (0xDE) +#define MT6360_PMU_LDO_IRQ2 (0xDF) +#define MT6360_PMU_CHG_STAT1 (0xE0) +#define 
MT6360_PMU_CHG_STAT2 (0xE1) +#define MT6360_PMU_CHG_STAT3 (0xE2) +#define MT6360_PMU_CHG_STAT4 (0xE3) +#define MT6360_PMU_CHG_STAT5 (0xE4) +#define MT6360_PMU_CHG_STAT6 (0xE5) +#define MT6360_PMU_QC_STAT (0xE6) +#define MT6360_PMU_FOD_STAT (0xE7) +#define MT6360_PMU_BASE_STAT (0xE8) +#define MT6360_PMU_FLED_STAT1 (0xE9) +#define MT6360_PMU_FLED_STAT2 (0xEA) +#define MT6360_PMU_RGB_STAT (0xEB) +#define MT6360_PMU_BUCK1_STAT (0xEC) +#define MT6360_PMU_BUCK2_STAT (0xED) +#define MT6360_PMU_LDO_STAT1 (0xEE) +#define MT6360_PMU_LDO_STAT2 (0xEF) +#define MT6360_PMU_CHG_MASK1 (0xF0) +#define MT6360_PMU_CHG_MASK2 (0xF1) +#define MT6360_PMU_CHG_MASK3 (0xF2) +#define MT6360_PMU_CHG_MASK4 (0xF3) +#define MT6360_PMU_CHG_MASK5 (0xF4) +#define MT6360_PMU_CHG_MASK6 (0xF5) +#define MT6360_PMU_QC_MASK (0xF6) +#define MT6360_PMU_FOD_MASK (0xF7) +#define MT6360_PMU_BASE_MASK (0xF8) +#define MT6360_PMU_FLED_MASK1 (0xF9) +#define MT6360_PMU_FLED_MASK2 (0xFA) +#define MT6360_PMU_FAULTB_MASK (0xFB) +#define MT6360_PMU_BUCK1_MASK (0xFC) +#define MT6360_PMU_BUCK2_MASK (0xFD) +#define MT6360_PMU_LDO_MASK1 (0xFE) +#define MT6360_PMU_LDO_MASK2 (0xFF) +#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2) + +/* MT6360_PMU_IRQ_SET */ +#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1) +#define MT6360_IRQ_RETRIG BIT(2) + +#define CHIP_VEN_MASK (0xF0) +#define CHIP_VEN_MT6360 (0x50) +#define CHIP_REV_MASK (0x0F) + +#endif /* __MT6360_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index fc88d315bdde..949268581b36 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -8,9 +8,11 @@ #define __MFD_MT6397_CORE_H__ #include <linux/mutex.h> +#include <linux/notifier.h> enum chip_id { MT6323_CHIP_ID = 0x23, + MT6358_CHIP_ID = 0x58, MT6391_CHIP_ID = 0x91, MT6397_CHIP_ID = 0x97, }; @@ -54,6 +56,7 @@ enum mt6397_irq_numbers { struct mt6397_chip { struct device *dev; struct regmap *regmap; + struct notifier_block pm_nb; int irq; struct irq_domain *irq_domain; struct mutex irqlock; @@ -63,8 +66,10 @@ struct mt6397_chip { u16 int_con[2]; u16 int_status[2]; u16 chip_id; + void *irq_data; }; +int mt6358_irq_init(struct mt6397_chip *chip); int mt6397_irq_init(struct mt6397_chip *chip); #endif /* __MFD_MT6397_CORE_H__ */ diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h index 7dfb63b81373..66989a16221a 100644 --- a/include/linux/mfd/mt6397/rtc.h +++ b/include/linux/mfd/mt6397/rtc.h @@ -18,7 +18,9 @@ #define RTC_BBPU_CBUSY BIT(6) #define RTC_BBPU_KEY (0x43 << 8) -#define RTC_WRTGR 0x003c +#define RTC_WRTGR_MT6358 0x003a +#define RTC_WRTGR_MT6397 0x003c +#define RTC_WRTGR_MT6323 RTC_WRTGR_MT6397 #define RTC_IRQ_STA 0x0002 #define RTC_IRQ_STA_AL BIT(0) @@ -65,6 +67,10 @@ #define MTK_RTC_POLL_DELAY_US 10 #define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ)) +struct mtk_rtc_data { + u32 wrtgr; +}; + struct mt6397_rtc { struct device *dev; struct rtc_device *rtc_dev; @@ -74,6 +80,7 @@ struct mt6397_rtc { struct regmap *regmap; int irq; u32 addr_base; + const struct mtk_rtc_data *data; }; #endif /* _LINUX_MFD_MT6397_RTC_H_ */ diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h index 3c67983678ec..744dce63946e 100644 --- a/include/linux/mfd/stmfx.h +++ b/include/linux/mfd/stmfx.h @@ -109,6 +109,7 @@ struct stmfx { struct device *dev; struct regmap *map; struct regulator *vdd; + int irq; struct irq_domain *irq_domain; struct mutex lock; /* IRQ bus lock */ u8 irq_src; diff --git a/include/linux/mhi.h 
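The RTC_WRTGR split above reflects that the write-trigger register moved between PMIC generations (0x3a on MT6358 versus 0x3c on MT6397/MT6323), and struct mtk_rtc_data carries the per-chip offset so a driver stops hard-coding it. A sketch of the commit sequence such a driver might use (the function name is hypothetical; the poll constants come from the header above):

static const struct mtk_rtc_data mt6358_rtc_data = {
	.wrtgr = RTC_WRTGR_MT6358,
};

/* Commit pending RTC writes via the chip-specific trigger register. */
static int mtk_rtc_write_trigger(struct mt6397_rtc *rtc)
{
	u32 val;
	int ret;

	ret = regmap_write(rtc->regmap, rtc->addr_base + rtc->data->wrtgr, 1);
	if (ret < 0)
		return ret;

	/* Wait for the busy flag to clear before touching more registers. */
	return regmap_read_poll_timeout(rtc->regmap,
					rtc->addr_base + RTC_BBPU, val,
					!(val & RTC_BBPU_CBUSY),
					MTK_RTC_POLL_DELAY_US,
					MTK_RTC_POLL_TIMEOUT);
}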
b/include/linux/mhi.h index ad1996001965..c4a940d98912 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -53,9 +53,9 @@ enum mhi_callback { * @MHI_CHAIN: Linked transfer */ enum mhi_flags { - MHI_EOB, - MHI_EOT, - MHI_CHAIN, + MHI_EOB = BIT(0), + MHI_EOT = BIT(1), + MHI_CHAIN = BIT(2), }; /** @@ -331,18 +331,17 @@ struct mhi_controller_config { * @wlock: Lock for protecting device wakeup * @mhi_link_info: Device bandwidth info * @st_worker: State transition worker - * @fw_worker: Firmware download worker - * @syserr_worker: System error worker * @state_event: State change event * @status_cb: CB function to notify power states of the device (required) - * @link_status: CB function to query link status of the device (required) * @wake_get: CB function to assert device wake (optional) * @wake_put: CB function to de-assert device wake (optional) * @wake_toggle: CB function to assert and de-assert device wake (optional) * @runtime_get: CB function to controller runtime resume (required) - * @runtimet_put: CB function to decrement pm usage (required) + * @runtime_put: CB function to decrement pm usage (required) * @map_single: CB function to create TRE buffer * @unmap_single: CB function to destroy TRE buffer + * @read_reg: Read a MHI register via the physical link (required) + * @write_reg: Write a MHI register via the physical link (required) * @buffer_len: Bounce buffer length * @bounce_buf: Use of bounce buffer * @fbc_download: MHI host needs to do complete image transfer (optional) @@ -411,13 +410,10 @@ struct mhi_controller { spinlock_t wlock; struct mhi_link_info mhi_link_info; struct work_struct st_worker; - struct work_struct fw_worker; - struct work_struct syserr_worker; wait_queue_head_t state_event; void (*status_cb)(struct mhi_controller *mhi_cntrl, enum mhi_callback cb); - int (*link_status)(struct mhi_controller *mhi_cntrl); void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override); void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override); void (*wake_toggle)(struct mhi_controller *mhi_cntrl); @@ -427,6 +423,10 @@ struct mhi_controller { struct mhi_buf_info *buf); void (*unmap_single)(struct mhi_controller *mhi_cntrl, struct mhi_buf_info *buf); + int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, + u32 *out); + void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, + u32 val); size_t buffer_len; bool bounce_buf; @@ -569,6 +569,13 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state); /** + * mhi_notify - Notify the MHI client driver about client device status + * @mhi_dev: MHI device instance + * @cb_reason: MHI callback reason + */ +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason); + +/** * mhi_prepare_for_power_up - Do pre-initialization before power up. * This is optional, call this before power up if * the controller does not want bus framework to @@ -605,6 +612,18 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); /** + * mhi_pm_suspend - Move MHI into a suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_resume - Resume MHI from suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_resume(struct mhi_controller *mhi_cntrl); + +/** * mhi_download_rddm_img - Download ramdump image from device for * debugging purpose. 
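The new read_reg/write_reg callbacks make register access a controller responsibility, so the MHI core no longer assumes registers are directly memory-mapped. For a controller whose register space is reachable over a PCIe BAR, a minimal sketch (the my_-prefixed names are placeholders, not an in-tree controller):

#include <linux/io.h>
#include <linux/mhi.h>

static int my_mhi_read_reg(struct mhi_controller *mhi_cntrl,
			   void __iomem *addr, u32 *out)
{
	*out = readl(addr);	/* direct MMIO read; cannot fail here */
	return 0;
}

static void my_mhi_write_reg(struct mhi_controller *mhi_cntrl,
			     void __iomem *addr, u32 val)
{
	writel(val, addr);
}

/* In the controller's probe path, before mhi_register_controller():
 *	mhi_cntrl->read_reg = my_mhi_read_reg;
 *	mhi_cntrl->write_reg = my_mhi_write_reg;
 */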
* @mhi_cntrl: MHI controller diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 20372de0b587..06e066e04a4b 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -573,7 +573,6 @@ struct mlx4_caps { int reserved_eqs; int num_comp_vectors; int num_mpts; - int max_fmr_maps; int num_mtts; int fmr_reserved_mtts; int reserved_mtts; @@ -707,17 +706,6 @@ struct mlx4_mw { int enabled; }; -struct mlx4_fmr { - struct mlx4_mr mr; - struct mlx4_mpt_entry *mpt; - __be64 *mtts; - dma_addr_t dma_handle; - int max_pages; - int max_maps; - int maps; - u8 page_shift; -}; - struct mlx4_uar { unsigned long pfn; int index; @@ -1412,14 +1400,6 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); -int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, - int npages, u64 iova, u32 *lkey, u32 *rkey); -int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, - int max_maps, u8 page_shift, struct mlx4_fmr *fmr); -int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr); -void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, - u32 *lkey, u32 *rkey); -int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); int mlx4_test_interrupt(struct mlx4_dev *dev, int vector); int mlx4_test_async(struct mlx4_dev *dev); @@ -1522,6 +1502,8 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, int enable); + +struct mlx4_mpt_entry; int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, struct mlx4_mpt_entry ***mpt_entry); int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 8e2828d48d7f..9db93e487496 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -362,7 +362,7 @@ struct mlx4_wqe_datagram_seg { struct mlx4_wqe_lso_seg { __be32 mss_hdr_size; - __be32 header[0]; + __be32 header[]; }; enum mlx4_wqe_bind_seg_flags2 { diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 5613e677a5f9..96ebaa94a92e 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -76,7 +76,7 @@ struct aes_gcm_keymat { struct mlx5_accel_esp_xfrm_attrs { enum mlx5_accel_esp_action action; u32 esn; - u32 spi; + __be32 spi; u32 seq; u32 tfc_pad; u32 flags; @@ -92,6 +92,18 @@ struct mlx5_accel_esp_xfrm_attrs { union { struct aes_gcm_keymat aes_gcm; } keymat; + + union { + __be32 a4; + __be32 a6[4]; + } saddr; + + union { + __be32 a4; + __be32 a6[4]; + } daddr; + + u8 is_ipv6; }; struct mlx5_accel_esp_xfrm { diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h deleted file mode 100644 index 68cd08f02c2f..000000000000 --- a/include/linux/mlx5/cmd.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
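The recurring [0] -> [] changes in this merge (mlx4_wqe_lso_seg above, and many mlx5_ifc structures below) replace GCC's zero-length-array extension with a C99 flexible array member, which sizeof treats correctly and fortified accessors can bounds-check. The allocation idiom is unchanged; an illustrative sketch (the helper name and header length are arbitrary):

#include <linux/overflow.h>
#include <linux/slab.h>

static struct mlx4_wqe_lso_seg *alloc_lso_seg(size_t hdr_dwords)
{
	struct mlx4_wqe_lso_seg *lso;

	/* struct_size() adds room for hdr_dwords __be32 header[] entries. */
	lso = kzalloc(struct_size(lso, header, hdr_dwords), GFP_KERNEL);
	return lso;
}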
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef MLX5_CMD_H -#define MLX5_CMD_H - -#include <linux/types.h> - -struct manage_pages_layout { - u64 ptr; - u32 reserved; - u16 num_entries; - u16 func_id; -}; - - -struct mlx5_cmd_alloc_uar_imm_out { - u32 rsvd[3]; - u32 uarn; -}; - -#endif /* MLX5_CMD_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 40748fc1b11b..b5a9399e07ee 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -188,7 +188,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *out, int outlen); + u32 *out); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen); int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 2b90097a6cf9..1bc27aca648b 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -364,6 +364,7 @@ enum { enum { MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5, + MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8, }; enum { @@ -449,10 +450,12 @@ enum { enum { MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, + MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2, }; enum { MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, + MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2, }; enum { @@ -689,6 +692,19 @@ struct mlx5_eqe_temp_warning { __be64 sensor_warning_lsb; } __packed; +#define SYNC_RST_STATE_MASK 0xf + +enum sync_rst_state_type { + MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0, + MLX5_SYNC_RST_STATE_RESET_NOW = 0x1, + MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2, +}; + +struct mlx5_eqe_sync_fw_update { + u8 reserved_at_0[3]; + u8 sync_rst_state; +}; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -707,6 +723,7 @@ union ev_data { struct mlx5_eqe_dct dct; struct mlx5_eqe_temp_warning temp_warning; struct mlx5_eqe_xrq_err xrq_err; + struct mlx5_eqe_sync_fw_update sync_fw_update; } __packed; struct mlx5_eqe { @@ -749,7 +766,7 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 outer_l3_tunneled; + u8 tls_outer_l3_tunneled; u8 rsvd0; __be16 
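The PCI sync-for-FW-update event defined below carries its state in the low nibble of sync_rst_state, hence SYNC_RST_STATE_MASK. An event handler would mask before dispatching; a sketch (the handler name and per-state policy are placeholders):

static void handle_pci_sync_fw_update_event(struct mlx5_eqe *eqe)
{
	u8 state = eqe->data.sync_fw_update.sync_rst_state &
		   SYNC_RST_STATE_MASK;

	switch (state) {
	case MLX5_SYNC_RST_STATE_RESET_REQUEST:
		/* firmware asks permission: ack or abort the reset */
		break;
	case MLX5_SYNC_RST_STATE_RESET_NOW:
		/* reset is imminent; quiesce the device */
		break;
	case MLX5_SYNC_RST_STATE_RESET_ABORT:
		/* a previously requested reset was cancelled */
		break;
	}
}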
wqe_id; u8 lro_tcppsh_abort_dupack; @@ -767,7 +784,12 @@ struct mlx5_cqe64 { u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ - __be32 imm_inval_pkey; + union { + __be32 immediate; + __be32 inval_rkey; + __be32 pkey; + __be32 ft_metadata; + }; u8 rsvd40[4]; __be32 byte_cnt; __be32 timestamp_h; @@ -834,7 +856,12 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) { - return cqe->outer_l3_tunneled & 0x1; + return cqe->tls_outer_l3_tunneled & 0x1; +} + +static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe) +{ + return (cqe->tls_outer_l3_tunneled >> 3) & 0x3; } static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) @@ -922,6 +949,13 @@ enum { CQE_L4_OK = 1 << 2, }; +enum { + CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0, + CQE_TLS_OFFLOAD_DECRYPTED = 0x1, + CQE_TLS_OFFLOAD_RESYNC = 0x2, + CQE_TLS_OFFLOAD_ERROR = 0x3, +}; + struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; @@ -1107,6 +1141,7 @@ enum mlx5_cap_type { MLX5_CAP_TLS, MLX5_CAP_VDPA_EMULATION = 0x13, MLX5_CAP_DEV_EVENT = 0x14, + MLX5_CAP_IPSEC, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1324,6 +1359,9 @@ enum mlx5_qcam_feature_groups { MLX5_GET64(device_virtio_emulation_cap, \ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) +#define MLX5_CAP_IPSEC(mdev, cap)\ + MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 6f8f79ef829b..13c0e4556eda 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -130,6 +130,7 @@ enum { MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, + MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, @@ -200,7 +201,7 @@ struct mlx5_rsc_debug { void *object; enum dbg_rsc_type type; struct dentry *root; - struct mlx5_field_desc fields[0]; + struct mlx5_field_desc fields[]; }; enum mlx5_dev_event { @@ -213,6 +214,12 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; +enum mlx5_cmdif_state { + MLX5_CMDIF_STATE_UNINITIALIZED, + MLX5_CMDIF_STATE_UP, + MLX5_CMDIF_STATE_DOWN, +}; + struct mlx5_cmd_first { __be32 data[4]; }; @@ -258,6 +265,7 @@ struct mlx5_cmd_stats { struct mlx5_cmd { struct mlx5_nb nb; + enum mlx5_cmdif_state state; void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; @@ -284,12 +292,13 @@ struct mlx5_cmd { struct semaphore sem; struct semaphore pages_sem; int mode; + u16 allowed_opcode; struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; struct dma_pool *pool; struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; - struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; + struct mlx5_cmd_stats *stats; }; struct mlx5_port_caps { @@ -541,7 +550,6 @@ struct mlx5_priv { struct mlx5_core_health health; /* start: qp staff */ - struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; struct dentry *eq_debugfs; struct dentry *cq_debugfs; @@ -687,7 +695,6 @@ struct mlx5_core_dev { unsigned long intf_state; struct mlx5_priv priv; struct mlx5_profile *profile; - atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_dm *dm; @@ -743,6 +750,7 @@ struct mlx5_cmd_work_ent { struct delayed_work cb_timeout_work; void *context; int idx; + struct completion handling; struct completion done; struct mlx5_cmd *cmd; struct work_struct work; @@ -824,11 +832,6 @@ static inline 
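get_cqe_tls_offload() above extracts bits [4:3] of the repurposed tls_outer_l3_tunneled byte, yielding one of the four CQE_TLS_OFFLOAD_* states. A receive completion path could branch on it roughly like this (a sketch; the comments describe intent, not a specific driver):

static void handle_rx_tls_state(struct mlx5_cqe64 *cqe)
{
	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		/* payload already decrypted by HW; skip SW decryption */
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		/* HW lost record sync; trigger resynchronization */
		break;
	case CQE_TLS_OFFLOAD_ERROR:
		/* fall back to software TLS for this packet */
		break;
	case CQE_TLS_OFFLOAD_NOT_DECRYPTED:
	default:
		break;
	}
}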
u16 fw_rev_sub(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } -static inline u16 cmdif_rev(struct mlx5_core_dev *dev) -{ - return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; @@ -874,10 +877,17 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); } +enum { + CMD_ALLOWED_OPCODE_ALL, +}; + int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); +void mlx5_cmd_set_state(struct mlx5_core_dev *dev, + enum mlx5_cmdif_state cmdif_state); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); +void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); struct mlx5_async_ctx { struct mlx5_core_dev *dev; @@ -903,6 +913,19 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); + +#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \ + ({ \ + mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \ + MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \ + }) + +#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \ + ({ \ + u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \ + mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \ + }) + int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); @@ -1000,11 +1023,6 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); -static inline int fw_initializing(struct mlx5_core_dev *dev) -{ - return ioread32be(&dev->iseg->initializing) >> 31; -} - static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; @@ -1062,6 +1080,8 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, + struct net_device *slave); int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, @@ -1069,7 +1089,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, - u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id); + u64 length, u32 log_alignment, u16 uid, + phys_addr_t *addr, u32 *obj_id); int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t addr, u32 obj_id); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index e2d13e074067..6c5aa0a21425 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -42,6 +42,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18, + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19, }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 69b27c7dfc3e..116bd9bb347f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ 
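The new mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() wrappers above derive the mailbox sizes from the mlx5_ifc layout name, eliminating the hand-written in_size/out_size arguments of mlx5_cmd_exec(). A typical converted call site (the command is chosen purely for illustration):

static int dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);

	/* Expands to mlx5_cmd_exec() with sizes taken from the ifc layouts. */
	return mlx5_cmd_exec_in(dev, dealloc_pd, in);
}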
b/include/linux/mlx5/mlx5_ifc.h @@ -74,6 +74,7 @@ enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, + MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4, }; enum { @@ -583,9 +584,7 @@ struct mlx5_ifc_fte_match_set_misc2_bits { u8 metadata_reg_a[0x20]; - u8 metadata_reg_b[0x20]; - - u8 reserved_at_1c0[0x40]; + u8 reserved_at_1a0[0x60]; }; struct mlx5_ifc_fte_match_set_misc3_bits { @@ -885,7 +884,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 tunnel_stateless_vxlan_gpe[0x1]; u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; - u8 reserved_at_2a[0x6]; + u8 insert_trailer[0x1]; + u8 reserved_at_2b[0x5]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -903,7 +903,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; - u8 reserved_at_1[0x1f]; + u8 reserved_at_1[0x3]; + u8 sw_r_roce_src_udp_port[0x1]; + u8 reserved_at_5[0x1b]; u8 reserved_at_20[0x60]; @@ -1097,6 +1099,23 @@ struct mlx5_ifc_tls_cap_bits { u8 reserved_at_20[0x7e0]; }; +struct mlx5_ifc_ipsec_cap_bits { + u8 ipsec_full_offload[0x1]; + u8 ipsec_crypto_offload[0x1]; + u8 ipsec_esn[0x1]; + u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1]; + u8 reserved_at_7[0x4]; + u8 log_max_ipsec_offload[0x5]; + u8 reserved_at_10[0x10]; + + u8 min_log_ipsec_full_replay_window[0x8]; + u8 max_log_ipsec_full_replay_window[0x8]; + u8 reserved_at_30[0x7d0]; +}; + enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, @@ -1189,7 +1208,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_99[0x2]; u8 log_max_qp[0x5]; - u8 reserved_at_a0[0xb]; + u8 reserved_at_a0[0x3]; + u8 ece_support[0x1]; + u8 reserved_at_a4[0x7]; u8 log_max_srq[0x5]; u8 reserved_at_b0[0x10]; @@ -1223,7 +1244,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_130[0xa]; u8 log_max_ra_res_dc[0x6]; - u8 reserved_at_140[0x9]; + u8 reserved_at_140[0x6]; + u8 release_all_pages[0x1]; + u8 reserved_at_147[0x2]; u8 roce_accl[0x1]; u8 log_max_ra_req_qp[0x6]; u8 reserved_at_150[0xa]; @@ -1296,7 +1319,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_at_1f0[0xc]; + u8 reserved_at_1f0[0x1]; + u8 pci_sync_for_fw_update_event[0x1]; + u8 reserved_at_1f2[0x6]; + u8 init2_lag_tx_port_affinity[0x1]; + u8 reserved_at_1fa[0x3]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; @@ -1461,13 +1488,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_460[0x3]; u8 log_max_uctx[0x5]; - u8 reserved_at_468[0x3]; + u8 reserved_at_468[0x2]; + u8 ipsec_offload[0x1]; u8 log_max_umem[0x5]; u8 max_num_eqs[0x10]; u8 reserved_at_480[0x1]; u8 tls_tx[0x1]; - u8 reserved_at_482[0x1]; + u8 tls_rx[0x1]; u8 log_max_l2_table[0x5]; u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; @@ -1677,7 +1705,7 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_140[0x4c0]; - struct mlx5_ifc_cmd_pas_bits pas[0]; + struct mlx5_ifc_cmd_pas_bits pas[]; }; struct mlx5_ifc_rq_num_bits { @@ -1895,7 +1923,7 @@ struct mlx5_ifc_resource_dump_menu_segment_bits { u8 reserved_at_20[0x10]; u8 num_of_records[0x10]; - struct mlx5_ifc_resource_dump_menu_record_bits record[0]; + struct mlx5_ifc_resource_dump_menu_record_bits record[]; }; struct mlx5_ifc_resource_dump_resource_segment_bits { @@ -1907,7 +1935,7 @@ struct 
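With the new ipsec_cap layout and the MLX5_CAP_IPSEC() accessor, plus the ipsec_offload bit added to the general HCA capabilities, a driver can gate crypto-offload setup on what the device reports. An illustrative predicate (the function name is hypothetical):

static bool ipsec_crypto_supported(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, ipsec_offload) &&
	       MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	       MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
	       MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt);
}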
mlx5_ifc_resource_dump_resource_segment_bits { u8 index2[0x20]; - u8 payload[0][0x20]; + u8 payload[][0x20]; }; struct mlx5_ifc_resource_dump_terminate_segment_bits { @@ -2984,7 +3012,7 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_1200[0x600]; - union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[]; }; enum { @@ -3112,7 +3140,8 @@ struct mlx5_ifc_tirc_bits { u8 reserved_at_0[0x20]; u8 disp_type[0x4]; - u8 reserved_at_24[0x1c]; + u8 tls_en[0x1]; + u8 reserved_at_25[0x1b]; u8 reserved_at_40[0x40]; @@ -3276,7 +3305,7 @@ struct mlx5_ifc_rqtc_bits { u8 reserved_at_e0[0x6a0]; - struct mlx5_ifc_rq_num_bits rq_num[0]; + struct mlx5_ifc_rq_num_bits rq_num[]; }; enum { @@ -3388,7 +3417,7 @@ struct mlx5_ifc_nic_vport_context_bits { u8 reserved_at_7e0[0x20]; - u8 current_uc_mac_address[0][0x40]; + u8 current_uc_mac_address[][0x40]; }; enum { @@ -3661,7 +3690,8 @@ struct mlx5_ifc_dctc_bits { u8 ecn[0x2]; u8 dscp[0x6]; - u8 reserved_at_1c0[0x40]; + u8 reserved_at_1c0[0x20]; + u8 ece[0x20]; }; enum { @@ -4140,7 +4170,8 @@ enum { MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, - MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_IPSEC_OBJ_ID = 0x4 }; struct mlx5_ifc_set_fte_out_bits { @@ -4190,7 +4221,8 @@ struct mlx5_ifc_rts2rts_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_rts2rts_qp_in_bits { @@ -4207,7 +4239,7 @@ struct mlx5_ifc_rts2rts_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4220,7 +4252,8 @@ struct mlx5_ifc_rtr2rts_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_rtr2rts_qp_in_bits { @@ -4237,7 +4270,7 @@ struct mlx5_ifc_rtr2rts_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4310,7 +4343,7 @@ struct mlx5_ifc_query_xrc_srq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_xrc_srq_in_bits { @@ -4588,7 +4621,7 @@ struct mlx5_ifc_query_srq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_srq_in_bits { @@ -4789,7 +4822,8 @@ struct mlx5_ifc_query_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; u8 opt_param_mask[0x20]; @@ -4799,7 +4833,7 @@ struct mlx5_ifc_query_qp_out_bits { u8 reserved_at_800[0x80]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_qp_in_bits { @@ -5132,7 +5166,7 @@ struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 reserved_at_40[0x40]; - struct mlx5_ifc_pkey_bits pkey[0]; + struct mlx5_ifc_pkey_bits pkey[]; }; struct mlx5_ifc_query_hca_vport_pkey_in_bits { @@ -5168,7 +5202,7 @@ struct mlx5_ifc_query_hca_vport_gid_out_bits { u8 gids_num[0x10]; u8 reserved_at_70[0x10]; - struct mlx5_ifc_array128_auto_bits gid[0]; + struct mlx5_ifc_array128_auto_bits gid[]; }; struct mlx5_ifc_query_hca_vport_gid_in_bits { @@ -5436,7 +5470,7 @@ struct mlx5_ifc_query_flow_counter_out_bits { u8 reserved_at_40[0x40]; - struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; + struct mlx5_ifc_traffic_counter_bits flow_statistics[]; }; struct 
mlx5_ifc_query_flow_counter_in_bits { @@ -5530,7 +5564,7 @@ struct mlx5_ifc_query_eq_out_bits { u8 reserved_at_300[0x580]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_eq_in_bits { @@ -5555,7 +5589,7 @@ struct mlx5_ifc_packet_reformat_context_in_bits { u8 reserved_at_20[0x10]; u8 reformat_data[2][0x8]; - u8 more_reformat_data[0][0x8]; + u8 more_reformat_data[][0x8]; }; struct mlx5_ifc_query_packet_reformat_context_out_bits { @@ -5566,7 +5600,7 @@ struct mlx5_ifc_query_packet_reformat_context_out_bits { u8 reserved_at_40[0xa0]; - struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0]; + struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[]; }; struct mlx5_ifc_query_packet_reformat_context_in_bits { @@ -5667,9 +5701,9 @@ struct mlx5_ifc_copy_action_in_bits { u8 reserved_at_38[0x8]; }; -union mlx5_ifc_set_action_in_add_action_in_auto_bits { - struct mlx5_ifc_set_action_in_bits set_action_in; - struct mlx5_ifc_add_action_in_bits add_action_in; +union mlx5_ifc_set_add_copy_action_in_auto_bits { + struct mlx5_ifc_set_action_in_bits set_action_in; + struct mlx5_ifc_add_action_in_bits add_action_in; struct mlx5_ifc_copy_action_in_bits copy_action_in; u8 reserved_at_0[0x40]; }; @@ -5743,7 +5777,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits { u8 reserved_at_68[0x10]; u8 num_of_actions[0x8]; - union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0]; + union mlx5_ifc_set_add_copy_action_in_auto_bits actions[0]; }; struct mlx5_ifc_dealloc_modify_header_context_out_bits { @@ -5805,7 +5839,7 @@ struct mlx5_ifc_query_cq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_cq_in_bits { @@ -6412,7 +6446,7 @@ struct mlx5_ifc_modify_cq_in_bits { u8 reserved_at_300[0x580]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_modify_cong_status_out_bits { @@ -6476,7 +6510,7 @@ struct mlx5_ifc_manage_pages_out_bits { u8 reserved_at_60[0x20]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; enum { @@ -6498,7 +6532,7 @@ struct mlx5_ifc_manage_pages_in_bits { u8 input_num_entries[0x20]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_mad_ifc_out_bits { @@ -6554,7 +6588,8 @@ struct mlx5_ifc_init2rtr_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_init2rtr_qp_in_bits { @@ -6571,7 +6606,7 @@ struct mlx5_ifc_init2rtr_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -7453,7 +7488,7 @@ struct mlx5_ifc_create_xrc_srq_in_bits { u8 reserved_at_300[0x580]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_tis_out_bits { @@ -7529,7 +7564,7 @@ struct mlx5_ifc_create_srq_in_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_sq_out_bits { @@ -7667,7 +7702,7 @@ struct mlx5_ifc_create_qp_out_bits { u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_at_60[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_create_qp_in_bits { @@ -7681,7 +7716,7 @@ struct mlx5_ifc_create_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -7690,7 +7725,7 @@ struct mlx5_ifc_create_qp_in_bits { u8 wq_umem_valid[0x1]; u8 reserved_at_861[0x1f]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_psv_out_bits { @@ -7761,7 +7796,7 @@ struct mlx5_ifc_create_mkey_in_bits { u8 reserved_at_320[0x560]; - u8 klm_pas_mtt[0][0x20]; + u8 klm_pas_mtt[][0x20]; }; enum { @@ -7894,7 
+7929,7 @@ struct mlx5_ifc_create_eq_in_bits { u8 reserved_at_3c0[0x4c0]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_dct_out_bits { @@ -7906,7 +7941,7 @@ struct mlx5_ifc_create_dct_out_bits { u8 reserved_at_40[0x8]; u8 dctn[0x18]; - u8 reserved_at_60[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_create_dct_in_bits { @@ -7951,7 +7986,7 @@ struct mlx5_ifc_create_cq_in_bits { u8 cq_umem_valid[0x1]; u8 reserved_at_2e1[0x59f]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_config_int_moderation_out_bits { @@ -8307,7 +8342,7 @@ struct mlx5_ifc_access_register_out_bits { u8 reserved_at_40[0x40]; - u8 register_data[0][0x20]; + u8 register_data[][0x20]; }; enum { @@ -8327,7 +8362,7 @@ struct mlx5_ifc_access_register_in_bits { u8 argument[0x20]; - u8 register_data[0][0x20]; + u8 register_data[][0x20]; }; struct mlx5_ifc_sltp_reg_bits { @@ -9344,7 +9379,7 @@ struct mlx5_ifc_cmd_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 command[0][0x20]; + u8 command[][0x20]; }; struct mlx5_ifc_cmd_if_box_bits { @@ -9638,7 +9673,7 @@ struct mlx5_ifc_mcqi_reg_bits { u8 reserved_at_a0[0x10]; u8 data_size[0x10]; - union mlx5_ifc_mcqi_reg_data_bits data[0]; + union mlx5_ifc_mcqi_reg_data_bits data[]; }; struct mlx5_ifc_mcc_reg_bits { @@ -9680,6 +9715,29 @@ struct mlx5_ifc_mcda_reg_bits { u8 data[0][0x20]; }; +enum { + MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0), + MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1), +}; + +enum { + MLX5_MFRL_REG_RESET_LEVEL0 = BIT(0), + MLX5_MFRL_REG_RESET_LEVEL3 = BIT(3), + MLX5_MFRL_REG_RESET_LEVEL6 = BIT(6), +}; + +struct mlx5_ifc_mfrl_reg_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x2]; + u8 pci_sync_for_fw_update_start[0x1]; + u8 pci_sync_for_fw_update_resp[0x2]; + u8 rst_type_sel[0x3]; + u8 reserved_at_28[0x8]; + u8 reset_type[0x8]; + u8 reset_level[0x8]; +}; + struct mlx5_ifc_mirc_reg_bits { u8 reserved_at_0[0x18]; u8 status_code[0x8]; @@ -9743,6 +9801,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mcc_reg_bits mcc_reg; struct mlx5_ifc_mcda_reg_bits mcda_reg; struct mlx5_ifc_mirc_reg_bits mirc_reg; + struct mlx5_ifc_mfrl_reg_bits mfrl_reg; u8 reserved_at_0[0x60e0]; }; @@ -10200,7 +10259,7 @@ struct mlx5_ifc_umem_bits { u8 num_of_mtt[0x40]; - struct mlx5_ifc_mtt_bits mtt[0]; + struct mlx5_ifc_mtt_bits mtt[]; }; struct mlx5_ifc_uctx_bits { @@ -10325,7 +10384,7 @@ struct mlx5_ifc_mtrc_stdb_bits { u8 reserved_at_4[0x4]; u8 read_size[0x18]; u8 start_offset[0x20]; - u8 string_db_data[0]; + u8 string_db_data[]; }; struct mlx5_ifc_mtrc_ctrl_bits { @@ -10379,7 +10438,7 @@ struct mlx5_ifc_query_esw_functions_out_bits { struct mlx5_ifc_host_params_context_bits host_params_context; u8 reserved_at_280[0x180]; - u8 host_sf_enable[0][0x40]; + u8 host_sf_enable[][0x40]; }; struct mlx5_ifc_sf_partition_bits { @@ -10399,7 +10458,7 @@ struct mlx5_ifc_query_sf_partitions_out_bits { u8 reserved_at_60[0x20]; - struct mlx5_ifc_sf_partition_bits sf_partition[0]; + struct mlx5_ifc_sf_partition_bits sf_partition[]; }; struct mlx5_ifc_query_sf_partitions_in_bits { @@ -10465,10 +10524,62 @@ struct mlx5_ifc_affiliated_event_header_bits { enum { MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13), }; enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, + MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, +}; + +enum { + MLX5_IPSEC_OBJECT_ICV_LEN_16B, + MLX5_IPSEC_OBJECT_ICV_LEN_12B, + MLX5_IPSEC_OBJECT_ICV_LEN_8B, +}; + +struct mlx5_ifc_ipsec_obj_bits { + u8 
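The MFRL register defined above is reachable through the generic access-register command; requesting, say, a level-3 reset of the full-chip type might look like the following sketch (whether firmware accepts this exact sequence is device-defined; treat it as a shape, not a recipe):

static int mfrl_set_reset_level(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
	u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};

	MLX5_SET(mfrl_reg, in, reset_level, MLX5_MFRL_REG_RESET_LEVEL3);
	MLX5_SET(mfrl_reg, in, rst_type_sel,
		 MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP);

	/* Final argument 1 selects a write access to MLX5_REG_MFRL. */
	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
				    MLX5_REG_MFRL, 0, 1);
}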
modify_field_select[0x40]; + u8 full_offload[0x1]; + u8 reserved_at_41[0x1]; + u8 esn_en[0x1]; + u8 esn_overlap[0x1]; + u8 reserved_at_44[0x2]; + u8 icv_length[0x2]; + u8 reserved_at_48[0x4]; + u8 aso_return_reg[0x4]; + u8 reserved_at_50[0x10]; + + u8 esn_msb[0x20]; + + u8 reserved_at_80[0x8]; + u8 dekn[0x18]; + + u8 salt[0x20]; + + u8 implicit_iv[0x40]; + + u8 reserved_at_100[0x700]; +}; + +struct mlx5_ifc_create_ipsec_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; +}; + +enum { + MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = BIT(0), + MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = BIT(1), +}; + +struct mlx5_ifc_query_ipsec_obj_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; +}; + +struct mlx5_ifc_modify_ipsec_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; }; struct mlx5_ifc_encryption_key_obj_bits { diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index ae63b1ae9004..b8992b861ae6 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -66,6 +66,7 @@ enum mlx5_qp_optpar { MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12, MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13, MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15, MLX5_QP_OPTPAR_PRI_PORT = 1 << 16, MLX5_QP_OPTPAR_SRQN = 1 << 18, MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, @@ -229,6 +230,11 @@ enum { enum { MLX5_ETH_WQE_SVLAN = 1 << 0, + MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26, + MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27, + MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26, + MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28, + MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30, MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, }; @@ -257,6 +263,7 @@ struct mlx5_wqe_eth_seg { __be16 type; __be16 vlan_tci; } insert; + __be32 trailer; }; }; @@ -315,6 +322,7 @@ struct mlx5_av { struct mlx5_ib_ah { struct ib_ah ibah; struct mlx5_av av; + u8 xmit_port; }; static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) @@ -402,7 +410,7 @@ struct mlx5_wqe_signature_seg { struct mlx5_wqe_inline_seg { __be32 byte_count; - __be32 data[0]; + __be32 data[]; }; enum mlx5_sig_type { @@ -487,123 +495,8 @@ struct mlx5_core_dct { struct completion drained; }; -struct mlx5_qp_path { - u8 fl_free_ar; - u8 rsvd3; - __be16 pkey_index; - u8 rsvd0; - u8 grh_mlid; - __be16 rlid; - u8 ackto_lt; - u8 mgid_index; - u8 static_rate; - u8 hop_limit; - __be32 tclass_flowlabel; - union { - u8 rgid[16]; - u8 rip[16]; - }; - u8 f_dscp_ecn_prio; - u8 ecn_dscp; - __be16 udp_sport; - u8 dci_cfi_prio_sl; - u8 port; - u8 rmac[6]; -}; - -/* FIXME: use mlx5_ifc.h qpc */ -struct mlx5_qp_context { - __be32 flags; - __be32 flags_pd; - u8 mtu_msgmax; - u8 rq_size_stride; - __be16 sq_crq_size; - __be32 qp_counter_set_usr_page; - __be32 wire_qpn; - __be32 log_pg_sz_remote_qpn; - struct mlx5_qp_path pri_path; - struct mlx5_qp_path alt_path; - __be32 params1; - u8 reserved2[4]; - __be32 next_send_psn; - __be32 cqn_send; - __be32 deth_sqpn; - u8 reserved3[4]; - __be32 last_acked_psn; - __be32 ssn; - __be32 params2; - __be32 rnr_nextrecvpsn; - __be32 xrcd; - __be32 cqn_recv; - __be64 db_rec_addr; - __be32 qkey; - __be32 rq_type_srqn; - __be32 rmsn; - __be16 hw_sq_wqe_counter; - __be16 sw_sq_wqe_counter; - __be16 hw_rcyclic_byte_counter; - __be16 hw_rq_counter; - __be16 sw_rcyclic_byte_counter; - __be16 sw_rq_counter; - u8 rsvd0[5]; - u8 cgs; - u8 
cs_req; - u8 cs_res; - __be64 dc_access_key; - u8 rsvd1[24]; -}; - -static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) -{ - return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); -} - -int mlx5_core_create_dct(struct mlx5_core_dev *dev, - struct mlx5_core_dct *qp, - u32 *in, int inlen, - u32 *out, int outlen); -int mlx5_core_create_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp, - u32 *in, - int inlen); -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, - u32 opt_param_mask, void *qpc, - struct mlx5_core_qp *qp); -int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp); -int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, - struct mlx5_core_dct *dct); -int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - u32 *out, int outlen); -int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, - u32 *out, int outlen); - -int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, - u32 timeout_usec); - -int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); -int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); -void mlx5_init_qp_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); -int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, - struct mlx5_core_qp *rq); -void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, - struct mlx5_core_qp *rq); -int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, - struct mlx5_core_qp *sq); -void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, - struct mlx5_core_qp *sq); -int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); -int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); -int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, - int reset, void *out, int out_size); - -struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, - int res_num, - enum mlx5_res_type res_type); -void mlx5_core_res_put(struct mlx5_core_rsc_common *res); static inline const char *mlx5_qp_type_str(int type) { diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index dc6b1e7cb8c4..028f442530cf 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -39,27 +39,20 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn); -int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn); -int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); -int mlx5_core_create_tir(struct mlx5_core_dev *dev, 
u32 *in, int inlen, - u32 *tirn); -int mlx5_core_create_tir_out(struct mlx5_core_dev *dev, - u32 *in, int inlen, - u32 *out, int outlen); -int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, - int inlen); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn); +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); -int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tisn); -int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, - int inlen); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn); +int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 16060fb9b5e5..8170da1e9f70 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -127,8 +127,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, u8 other_vport, u64 *rx_discard_vport_down, u64 *tx_discard_vport_down); int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, - int vf, u8 port_num, void *out, - size_t out_sz); + int vf, u8 port_num, void *out); int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf, diff --git a/include/linux/mm.h b/include/linux/mm.h index 5a323422d783..dc7b87310c10 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -15,6 +15,7 @@ #include <linux/atomic.h> #include <linux/debug_locks.h> #include <linux/mm_types.h> +#include <linux/mmap_lock.h> #include <linux/range.h> #include <linux/pfn.h> #include <linux/percpu-refcount.h> @@ -28,6 +29,7 @@ #include <linux/overflow.h> #include <linux/sizes.h> #include <linux/sched.h> +#include <linux/pgtable.h> struct mempolicy; struct anon_vma; @@ -92,7 +94,6 @@ extern int mmap_rnd_compat_bits __read_mostly; #endif #include <asm/page.h> -#include <asm/pgtable.h> #include <asm/processor.h> /* @@ -201,10 +202,10 @@ extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern unsigned long sysctl_overcommit_kbytes; -extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *, - size_t *, loff_t *); -extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, - size_t *, loff_t *); +int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) @@ -325,17 +326,13 @@ extern unsigned int kobjsize(const void *objp); #elif defined(CONFIG_SPARC64) # define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ # define VM_ARCH_CLEAR VM_SPARC_ADI +#elif defined(CONFIG_ARM64) +# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. GP bit */ +# define VM_ARCH_CLEAR VM_ARM64_BTI #elif !defined(CONFIG_MMU) # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ #endif -#if defined(CONFIG_X86_INTEL_MPX) -/* MPX specific bounds table or bounds directory */ -# define VM_MPX VM_HIGH_ARCH_4 -#else -# define VM_MPX VM_NONE -#endif - #ifndef VM_GROWSUP # define VM_GROWSUP VM_NONE #endif @@ -405,7 +402,7 @@ extern pgprot_t protection_map[16]; * @FAULT_FLAG_WRITE: Fault was a write fault. 
* @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. - * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying. + * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying. * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region. * @FAULT_FLAG_TRIED: The fault has been tried once. * @FAULT_FLAG_USER: The fault originated in userspace. @@ -455,10 +452,10 @@ extern pgprot_t protection_map[16]; * fault_flag_allow_retry_first - check ALLOW_RETRY the first time * * This is mostly used for places where we want to try to avoid taking - * the mmap_sem for too long a time when waiting for another condition + * the mmap_lock for too long a time when waiting for another condition * to change, in which case we can try to be polite to release the - * mmap_sem in the first round to avoid potential starvation of other - * processes that would also want the mmap_sem. + * mmap_lock in the first round to avoid potential starvation of other + * processes that would also want the mmap_lock. * * Return: true if the page fault allows retry and this is the first * attempt of the fault handling; false otherwise. @@ -505,7 +502,6 @@ struct vm_fault { pte_t orig_pte; /* Value of PTE at the time of fault */ struct page *cow_page; /* Page handler may use for COW fault */ - struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */ struct page *page; /* ->fault handlers should return a * page here, unless VM_FAULT_NOPAGE * is set (which is also implied by @@ -586,7 +582,7 @@ struct vm_operations_struct { * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure * in mm/mempolicy.c will do this automatically. * get_policy() must NOT add a ref if the policy at (vma,addr) is not - * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. + * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. * If no [shared/vma] mempolicy exists at the addr, get_policy() op * must return NULL--i.e., do not "fallback" to task or system default * policy. @@ -781,7 +777,13 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) } extern void kvfree(const void *addr); +extern void kvfree_sensitive(const void *addr, size_t len); +/* + * Mapcount of compound page as a whole, does not include mapped sub-pages. + * + * Must be called only for compound pages or any their tail sub-pages. + */ static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); @@ -801,10 +803,16 @@ static inline void page_mapcount_reset(struct page *page) int __page_mapcount(struct page *page); +/* + * Mapcount of 0-order page; when compound sub-page, includes + * compound_mapcount(). + * + * Result is undefined for pages which cannot be mapped into userspace. + * For example SLAB or special types of pages. See function page_has_type(). + * They use this place in struct page differently. 
+ */ static inline int page_mapcount(struct page *page) { - VM_BUG_ON_PAGE(PageSlab(page), page); - if (unlikely(PageCompound(page))) return __page_mapcount(page); return atomic_read(&page->_mapcount) + 1; @@ -860,7 +868,7 @@ enum compound_dtor_id { #endif NR_COMPOUND_DTORS, }; -extern compound_page_dtor * const compound_page_dtors[]; +extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; static inline void set_compound_page_dtor(struct page *page, enum compound_dtor_id compound_dtor) @@ -869,10 +877,10 @@ static inline void set_compound_page_dtor(struct page *page, page[1].compound_dtor = compound_dtor; } -static inline compound_page_dtor *get_compound_page_dtor(struct page *page) +static inline void destroy_compound_page(struct page *page) { VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); - return compound_page_dtors[page[1].compound_dtor]; + compound_page_dtors[page[1].compound_dtor](page); } static inline unsigned int compound_order(struct page *page) @@ -939,8 +947,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, - struct page *page); +vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page); vm_fault_t finish_fault(struct vm_fault *vmf); vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif @@ -1219,7 +1226,7 @@ void unpin_user_pages(struct page **pages, unsigned long npages); * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS * scheme). * - * For more information, please see Documentation/vm/pin_user_pages.rst. + * For more information, please see Documentation/core-api/pin_user_pages.rst. * * @page: pointer to page to be queried. * @Return: True, if it is likely that the page has been "dma-pinned". @@ -1367,7 +1374,7 @@ static inline int cpu_pid_to_cpupid(int nid, int pid) static inline bool cpupid_pid_unset(int cpupid) { - return 1; + return true; } static inline void page_cpupid_reset_last(struct page *page) @@ -1700,8 +1707,12 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); +long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, int *locked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); +long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, + struct page **pages, unsigned int gup_flags); int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); @@ -1718,7 +1729,7 @@ struct frame_vector { unsigned int nr_frames; /* Number of frames stored in ptrs array */ bool got_ref; /* Did we pin pages by getting page ref? */ bool is_pfns; /* Does array contain pages or pfns? */ - void *ptrs[0]; /* Array of pinned pfns / pages. Use + void *ptrs[]; /* Array of pinned pfns / pages. Use * pfns_vector_pages() or pfns_vector_pfns() * for access */ }; @@ -1816,8 +1827,16 @@ extern int mprotect_fixup(struct vm_area_struct *vma, /* * doesn't attempt to fault and will return short. 
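*
* Typical use (an editor's sketch, not part of this patch): pin a
* single page without sleeping and without taking the mmap_lock,
* e.g. from a context that must not block:
*
*	struct page *page;
*
*	if (!get_user_page_fast_only(addr, FOLL_WRITE, &page))
*		return -EFAULT;
*	...
*	put_page(page);
*
* get_user_page_fast_only() and FOLL_WRITE come from this header; the
* surrounding error handling is illustrative only.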
*/ -int __get_user_pages_fast(unsigned long start, int nr_pages, int write, - struct page **pages); +int get_user_pages_fast_only(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); +int pin_user_pages_fast_only(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); + +static inline bool get_user_page_fast_only(unsigned long addr, + unsigned int gup_flags, struct page **pagep) +{ + return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1; +} /* * per-process(per-mm_struct) statistics. */ @@ -2060,11 +2079,6 @@ int __pte_alloc_kernel(pmd_t *pmd); #if defined(CONFIG_MMU) -/* - * The following ifdef needed to get the 5level-fixup.h header to work. - * Remove it when 5level-fixup.h has been removed. - */ -#ifndef __ARCH_HAS_5LEVEL_HACK static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { @@ -2078,13 +2092,52 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? NULL : pud_offset(p4d, address); } -#endif /* !__ARCH_HAS_5LEVEL_HACK */ + +static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd, + unsigned long address, + pgtbl_mod_mask *mod_mask) + +{ + if (unlikely(pgd_none(*pgd))) { + if (__p4d_alloc(mm, pgd, address)) + return NULL; + *mod_mask |= PGTBL_PGD_MODIFIED; + } + + return p4d_offset(pgd, address); +} + +static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d, + unsigned long address, + pgtbl_mod_mask *mod_mask) +{ + if (unlikely(p4d_none(*p4d))) { + if (__pud_alloc(mm, p4d, address)) + return NULL; + *mod_mask |= PGTBL_P4D_MODIFIED; + } + + return pud_offset(p4d, address); +} static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))? NULL: pmd_offset(pud, address); } + +static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud, + unsigned long address, + pgtbl_mod_mask *mod_mask) +{ + if (unlikely(pud_none(*pud))) { + if (__pmd_alloc(mm, pud, address)) + return NULL; + *mod_mask |= PGTBL_PUD_MODIFIED; + } + + return pmd_offset(pud, address); +} #endif /* CONFIG_MMU */ #if USE_SPLIT_PTE_PTLOCKS @@ -2200,6 +2253,11 @@ static inline void pgtable_pte_page_dtor(struct page *page) ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \ NULL: pte_offset_kernel(pmd, address)) +#define pte_alloc_kernel_track(pmd, address, mask) \ + ((unlikely(pmd_none(*(pmd))) && \ + (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\ + NULL: pte_offset_kernel(pmd, address)) + #if USE_SPLIT_PMD_PTLOCKS static struct page *pmd_to_page(pmd_t *pmd) @@ -2272,9 +2330,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) } extern void __init pagecache_init(void); -extern void free_area_init(unsigned long * zones_size); -extern void __init free_area_init_node(int nid, unsigned long * zones_size, - unsigned long zone_start_pfn, unsigned long *zholes_size); +extern void __init free_area_init_memoryless_node(int nid); extern void free_initmem(void); /* @@ -2344,34 +2400,26 @@ static inline unsigned long get_num_physpages(void) return phys_pages; } -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP /* - * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its - * zones, allocate the backing mem_map and account for memory holes in a more - * architecture independent manner. 
This is a substitute for creating the - * zone_sizes[] and zholes_size[] arrays and passing them to - * free_area_init_node() + * Using memblock node mappings, an architecture may initialise its + * zones, allocate the backing mem_map and account for memory holes in an + * architecture independent manner. * * An architecture is expected to register range of page frames backed by * physical memory with memblock_add[_node]() before calling - * free_area_init_nodes() passing in the PFN each zone ends at. At a basic + * free_area_init() passing in the PFN each zone ends at. At a basic * usage, an architecture is expected to do something like * * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, * max_highmem_pfn}; * for_each_valid_physical_page_range() * memblock_add_node(base, size, nid) - * free_area_init_nodes(max_zone_pfns); + * free_area_init(max_zone_pfns); * - * free_bootmem_with_active_regions() calls free_bootmem_node() for each - * registered physical page range. Similarly * sparse_memory_present_with_active_regions() calls memory_present() for * each range when SPARSEMEM is enabled. - * - * See mm/page_alloc.c for more information on each function exposed by - * CONFIG_HAVE_MEMBLOCK_NODE_MAP. */ -extern void free_area_init_nodes(unsigned long *max_zone_pfn); +void free_area_init(unsigned long *max_zone_pfn); unsigned long node_map_pfn_alignment(void); unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, unsigned long end_pfn); @@ -2380,16 +2428,10 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -extern void free_bootmem_with_active_regions(int nid, - unsigned long max_low_pfn); extern void sparse_memory_present_with_active_regions(int nid); -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ - -#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ - !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) -static inline int __early_pfn_to_nid(unsigned long pfn, - struct mminit_pfnnid_cache *state) +#ifndef CONFIG_NEED_MULTIPLE_NODES +static inline int early_pfn_to_nid(unsigned long pfn) { return 0; } @@ -2425,6 +2467,7 @@ extern void setup_per_cpu_pageset(void); extern int min_free_kbytes; extern int watermark_boost_factor; extern int watermark_scale_factor; +extern bool arch_has_descending_max_zone_pfns(void); /* nommu.c */ extern atomic_long_t mmap_pages_allocated; @@ -2601,25 +2644,6 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); int __must_check write_one_page(struct page *page); void task_dirty_inc(struct task_struct *tsk); -/* readahead.c */ -#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) - -int force_page_cache_readahead(struct address_space *mapping, struct file *filp, - pgoff_t offset, unsigned long nr_to_read); - -void page_cache_sync_readahead(struct address_space *mapping, - struct file_ra_state *ra, - struct file *filp, - pgoff_t offset, - unsigned long size); - -void page_cache_async_readahead(struct address_space *mapping, - struct file_ra_state *ra, - struct file *filp, - struct page *pg, - pgoff_t offset, - unsigned long size); - extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); @@ -2780,6 +2804,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_LONGTERM 0x10000 /* 
mapping lifetime is indefinite: see below */ #define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */ #define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */ +#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */ /* * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each @@ -2834,7 +2859,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, * releasing pages: get_user_pages*() pages must be released via put_page(), * while pin_user_pages*() pages must be released via unpin_user_page(). * - * Please see Documentation/vm/pin_user_pages.rst for more information. + * Please see Documentation/core-api/pin_user_pages.rst for more information. */ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) @@ -2957,8 +2982,8 @@ extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); #ifdef CONFIG_SYSCTL extern int sysctl_drop_caches; -int drop_caches_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); +int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); #endif void drop_slab(void); @@ -3012,6 +3037,7 @@ enum mf_flags { }; extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); +extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); extern int get_hwpoison_page(struct page *page); #define put_hwpoison_page(page) put_page(page) @@ -3140,5 +3166,7 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, pgoff_t first_index, pgoff_t nr); #endif +extern int sysctl_nr_trim_pages; + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4aba6c0c2ba8..64ede5f150dc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -240,7 +240,11 @@ static inline atomic_t *compound_pincount_ptr(struct page *page) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) #define page_private(page) ((page)->private) -#define set_page_private(page, v) ((page)->private = (v)) + +static inline void set_page_private(struct page *page, unsigned long private) +{ + page->private = private; +} struct page_frag_cache { void * va; @@ -340,7 +344,7 @@ struct vm_area_struct { * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack * or brk vma (with NULL file) can only be in an anon_vma list. */ - struct list_head anon_vma_chain; /* Serialized by mmap_sem & + struct list_head anon_vma_chain; /* Serialized by mmap_lock & * page_table_lock */ struct anon_vma *anon_vma; /* Serialized by page_table_lock */ @@ -436,7 +440,7 @@ struct mm_struct { spinlock_t page_table_lock; /* Protects page tables and some * counters */ - struct rw_semaphore mmap_sem; + struct rw_semaphore mmap_lock; struct list_head mmlist; /* List of maybe swapped mm's. 
These * are globally strung together off diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h new file mode 100644 index 000000000000..0707671851a8 --- /dev/null +++ b/include/linux/mmap_lock.h @@ -0,0 +1,90 @@ +#ifndef _LINUX_MMAP_LOCK_H +#define _LINUX_MMAP_LOCK_H + +#include <linux/mmdebug.h> + +#define MMAP_LOCK_INITIALIZER(name) \ + .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), + +static inline void mmap_init_lock(struct mm_struct *mm) +{ + init_rwsem(&mm->mmap_lock); +} + +static inline void mmap_write_lock(struct mm_struct *mm) +{ + down_write(&mm->mmap_lock); +} + +static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) +{ + down_write_nested(&mm->mmap_lock, subclass); +} + +static inline int mmap_write_lock_killable(struct mm_struct *mm) +{ + return down_write_killable(&mm->mmap_lock); +} + +static inline bool mmap_write_trylock(struct mm_struct *mm) +{ + return down_write_trylock(&mm->mmap_lock) != 0; +} + +static inline void mmap_write_unlock(struct mm_struct *mm) +{ + up_write(&mm->mmap_lock); +} + +static inline void mmap_write_downgrade(struct mm_struct *mm) +{ + downgrade_write(&mm->mmap_lock); +} + +static inline void mmap_read_lock(struct mm_struct *mm) +{ + down_read(&mm->mmap_lock); +} + +static inline int mmap_read_lock_killable(struct mm_struct *mm) +{ + return down_read_killable(&mm->mmap_lock); +} + +static inline bool mmap_read_trylock(struct mm_struct *mm) +{ + return down_read_trylock(&mm->mmap_lock) != 0; +} + +static inline void mmap_read_unlock(struct mm_struct *mm) +{ + up_read(&mm->mmap_lock); +} + +static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) +{ + if (down_read_trylock(&mm->mmap_lock)) { + rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); + return true; + } + return false; +} + +static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) +{ + up_read_non_owner(&mm->mmap_lock); +} + +static inline void mmap_assert_locked(struct mm_struct *mm) +{ + lockdep_assert_held(&mm->mmap_lock); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +} + +static inline void mmap_assert_write_locked(struct mm_struct *mm) +{ + lockdep_assert_held_write(&mm->mmap_lock); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +} + +#endif /* _LINUX_MMAP_LOCK_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index cf3780a6ccc4..7d46411ffaa2 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -48,6 +48,7 @@ struct mmc_ext_csd { u8 sec_feature_support; u8 rel_sectors; u8 rel_param; + bool enhanced_rpmb_supported; u8 part_config; u8 cache_ctrl; u8 rst_n_function; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c318fb5b6a94..7149bab555d7 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -92,6 +92,9 @@ struct mmc_host_ops { int err); void (*pre_req)(struct mmc_host *host, struct mmc_request *req); void (*request)(struct mmc_host *host, struct mmc_request *req); + /* Submit one request to host in atomic context. */ + int (*request_atomic)(struct mmc_host *host, + struct mmc_request *req); /* * Avoid calling the next three functions too often or in a "fast @@ -318,7 +321,6 @@ struct mmc_host { #define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */ #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. 
eMMC */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ -#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ #define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ #define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ #define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 4b85ef05a906..d9a65c6a8816 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -325,6 +325,7 @@ static inline bool mmc_ready_for_data(u32 status) */ #define EXT_CSD_WR_REL_PARAM_EN (1<<2) +#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR (1<<4) #define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40) #define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10) diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 2e9a6e4634eb..15ed8ce9d394 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -24,59 +24,101 @@ /* * Vendors and devices. Sort key: vendor first, device next. */ + +#define SDIO_VENDOR_ID_STE 0x0020 +#define SDIO_DEVICE_ID_STE_CW1200 0x2280 + +#define SDIO_VENDOR_ID_INTEL 0x0089 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 +#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 +#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 +#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 + +#define SDIO_VENDOR_ID_CGUYS 0x0092 +#define SDIO_DEVICE_ID_CGUYS_EW_CG1102GC 0x0004 + +#define SDIO_VENDOR_ID_TI 0x0097 +#define SDIO_DEVICE_ID_TI_WL1271 0x4076 + +#define SDIO_VENDOR_ID_ATHEROS 0x0271 +#define SDIO_DEVICE_ID_ATHEROS_AR6003_00 0x0300 +#define SDIO_DEVICE_ID_ATHEROS_AR6003_01 0x0301 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_00 0x0400 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_01 0x0401 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_02 0x0402 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_18 0x0418 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_19 0x0419 +#define SDIO_DEVICE_ID_ATHEROS_AR6005 0x050A +#define SDIO_DEVICE_ID_ATHEROS_QCA9377 0x0701 + #define SDIO_VENDOR_ID_BROADCOM 0x02d0 -#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 +#define SDIO_DEVICE_ID_BROADCOM_NINTENDO_WII 0x044b #define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 -#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c -#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 -#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 -#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 -#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 -#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359 0x4355 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_DEVICE_ID_BROADCOM_4359 0x4359 -#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373 -#define SDIO_DEVICE_ID_CYPRESS_43012 43012 -#define SDIO_DEVICE_ID_CYPRESS_89359 0x4355 - -#define SDIO_VENDOR_ID_INTEL 0x0089 -#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 -#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 -#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 -#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 -#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 -#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373 0x4373 +#define 
SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012 0xa804 +#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 +#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c +#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d +#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 +#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 +#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 -#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 -#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 +#define SDIO_DEVICE_ID_MARVELL_8688_WLAN 0x9104 +#define SDIO_DEVICE_ID_MARVELL_8688_BT 0x9105 +#define SDIO_DEVICE_ID_MARVELL_8786_WLAN 0x9116 +#define SDIO_DEVICE_ID_MARVELL_8787_WLAN 0x9119 +#define SDIO_DEVICE_ID_MARVELL_8787_BT 0x911a +#define SDIO_DEVICE_ID_MARVELL_8787_BT_AMP 0x911b #define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 -#define SDIO_DEVICE_ID_MARVELL_8887WLAN 0x9134 +#define SDIO_DEVICE_ID_MARVELL_8797_WLAN 0x9129 +#define SDIO_DEVICE_ID_MARVELL_8797_BT 0x912a +#define SDIO_DEVICE_ID_MARVELL_8897_WLAN 0x912d +#define SDIO_DEVICE_ID_MARVELL_8897_BT 0x912e +#define SDIO_DEVICE_ID_MARVELL_8887_F0 0x9134 +#define SDIO_DEVICE_ID_MARVELL_8887_WLAN 0x9135 +#define SDIO_DEVICE_ID_MARVELL_8887_BT 0x9136 +#define SDIO_DEVICE_ID_MARVELL_8801_WLAN 0x9139 +#define SDIO_DEVICE_ID_MARVELL_8997_F0 0x9140 +#define SDIO_DEVICE_ID_MARVELL_8997_WLAN 0x9141 +#define SDIO_DEVICE_ID_MARVELL_8997_BT 0x9142 +#define SDIO_DEVICE_ID_MARVELL_8977_WLAN 0x9145 +#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146 +#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149 +#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a #define SDIO_VENDOR_ID_MEDIATEK 0x037a +#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 +#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668 #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 #define SDIO_DEVICE_ID_SIANO_NICE 0x0202 #define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300 #define SDIO_DEVICE_ID_SIANO_VENICE 0x0301 +#define SDIO_DEVICE_ID_SIANO_MING 0x0302 +#define SDIO_DEVICE_ID_SIANO_PELE 0x0500 +#define SDIO_DEVICE_ID_SIANO_RIO 0x0600 +#define SDIO_DEVICE_ID_SIANO_DENVER_2160 0x0700 +#define SDIO_DEVICE_ID_SIANO_DENVER_1530 0x0800 #define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 #define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 -#define SDIO_VENDOR_ID_TI 0x0097 -#define SDIO_DEVICE_ID_TI_WL1271 0x4076 #define SDIO_VENDOR_ID_TI_WL1251 0x104c #define SDIO_DEVICE_ID_TI_WL1251 0x9066 -#define SDIO_VENDOR_ID_STE 0x0020 -#define SDIO_DEVICE_ID_STE_CW1200 0x2280 - #endif /* LINUX_MMC_SDIO_IDS_H */ diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index d9a543a9e1cc..c51a84132d7c 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -4,11 +4,6 @@ #include <asm/mmu_context.h> -struct mm_struct; - -void use_mm(struct mm_struct *mm); -void unuse_mm(struct mm_struct *mm); - /* Architectures that care about IRQ state in switch_mm can override this. 
*/ #ifndef switch_mm_irqs_off # define switch_mm_irqs_off switch_mm diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 736f6918335e..fc68f3570e19 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -5,6 +5,7 @@ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm_types.h> +#include <linux/mmap_lock.h> #include <linux/srcu.h> #include <linux/interval_tree.h> @@ -121,7 +122,7 @@ struct mmu_notifier_ops { /* * invalidate_range_start() and invalidate_range_end() must be - * paired and are called only when the mmap_sem and/or the + * paired and are called only when the mmap_lock and/or the * locks protecting the reverse maps are held. If the subsystem * can't guarantee that no additional references are taken to * the pages in the range, it has to implement the @@ -212,13 +213,13 @@ struct mmu_notifier_ops { }; /* - * The notifier chains are protected by mmap_sem and/or the reverse map + * The notifier chains are protected by mmap_lock and/or the reverse map * semaphores. Notifier chains are only changed when all reverse maps and - * the mmap_sem locks are taken. + * the mmap_lock locks are taken. * * Therefore notifier chains can only be traversed when either * - * 1. mmap_sem is held. + * 1. mmap_lock is held. * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). * 3. No other concurrent thread can access the list (release) */ @@ -277,9 +278,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm) { struct mmu_notifier *ret; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); ret = mmu_notifier_get_locked(ops, mm); - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } void mmu_notifier_put(struct mmu_notifier *subscription); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 1b9de7d220fb..c4c37fd12104 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -156,6 +156,9 @@ enum zone_stat_item { NR_MLOCK, /* mlock()ed pages found and moved off LRU */ NR_PAGETABLE, /* used for pagetables */ NR_KERNEL_STACK_KB, /* measured in KiB */ +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) + NR_KERNEL_SCS_KB, /* measured in KiB */ +#endif /* Second 128 byte cacheline */ NR_BOUNCE, #if IS_ENABLED(CONFIG_ZSMALLOC) @@ -193,7 +196,6 @@ enum node_stat_item { NR_FILE_THPS, NR_FILE_PMDMAPPED, NR_ANON_THPS, - NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_VMSCAN_WRITE, NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ @@ -240,19 +242,6 @@ static inline bool is_active_lru(enum lru_list lru) return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); } -struct zone_reclaim_stat { - /* - * The pageout code in vmscan.c keeps track of how many of the - * mem/swap backed and file backed pages are referenced. - * The higher the rotated/scanned ratio, the more valuable - * that cache is. - * - * The anon LRU stats live in [0], file LRU stats in [1] - */ - unsigned long recent_rotated[2]; - unsigned long recent_scanned[2]; -}; - enum lruvec_flags { LRUVEC_CONGESTED, /* lruvec has many dirty pages * backed by a congested BDI @@ -261,7 +250,13 @@ enum lruvec_flags { struct lruvec { struct list_head lists[NR_LRU_LISTS]; - struct zone_reclaim_stat reclaim_stat; + /* + * These track the cost of reclaiming one LRU - file or anon - + * over the other. As the observed cost of reclaiming one LRU + * increases, the reclaim scan balance tips toward the other. 
+ */ + unsigned long anon_cost; + unsigned long file_cost; /* Evictions & activations on the inactive file list */ atomic_long_t inactive_age; /* Refaults at the time of last reclaim cycle */ @@ -665,9 +660,21 @@ struct deferred_split { * per-zone basis. */ typedef struct pglist_data { + /* + * node_zones contains just the zones for THIS node. Not all of the + * zones may be populated, but it is the full list. It is referenced by + * this node's node_zonelists as well as other node's node_zonelists. + */ struct zone node_zones[MAX_NR_ZONES]; + + /* + * node_zonelists contains references to all zones in all nodes. + * Generally the first zones will be references to this node's + * node_zones. + */ struct zonelist node_zonelists[MAX_ZONELISTS]; - int nr_zones; + + int nr_zones; /* number of populated zones in this node */ #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; #ifdef CONFIG_PAGE_EXTENSION @@ -678,6 +685,8 @@ typedef struct pglist_data { /* * Must be held any time you expect node_start_pfn, * node_present_pages, node_spanned_pages or nr_zones to stay constant. + * Also synchronizes pgdat->first_deferred_pfn during deferred page + * init. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG @@ -697,13 +706,13 @@ typedef struct pglist_data { struct task_struct *kswapd; /* Protected by mem_hotplug_begin/end() */ int kswapd_order; - enum zone_type kswapd_classzone_idx; + enum zone_type kswapd_highest_zoneidx; int kswapd_failures; /* Number of 'reclaimed == 0' runs */ #ifdef CONFIG_COMPACTION int kcompactd_max_order; - enum zone_type kcompactd_classzone_idx; + enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; #endif @@ -781,15 +790,15 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat) void build_all_zonelists(pg_data_t *pgdat); void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, - enum zone_type classzone_idx); + enum zone_type highest_zoneidx); bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, - int classzone_idx, unsigned int alloc_flags, + int highest_zoneidx, unsigned int alloc_flags, long free_pages); bool zone_watermark_ok(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, + unsigned long mark, int highest_zoneidx, unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx); + unsigned long mark, int highest_zoneidx); enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG, @@ -874,7 +883,7 @@ extern int movable_zone; #ifdef CONFIG_HIGHMEM static inline int zone_movable_is_highmem(void) { -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_NEED_MULTIPLE_NODES return movable_zone == ZONE_HIGHMEM; #else return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; @@ -909,24 +918,23 @@ static inline int is_highmem(struct zone *zone) /* These two functions are used to setup the per zone pages min values */ struct ctl_table; -int min_free_kbytes_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -int watermark_boost_factor_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + +int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void 
*, + size_t *, loff_t *); extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); +int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, + size_t *, loff_t *); int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *, size_t *, loff_t *); int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *, size_t *, loff_t *); int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); - -extern int numa_zonelist_order_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *, size_t *, loff_t *); +int numa_zonelist_order_handler(struct ctl_table *, int, + void *, size_t *, loff_t *); +extern int percpu_pagelist_fraction; extern char numa_zonelist_order[]; #define NUMA_ZONELIST_ORDER_LEN 16 @@ -1078,15 +1086,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #include <asm/sparsemem.h> #endif -#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ - !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) -static inline unsigned long early_pfn_to_nid(unsigned long pfn) -{ - BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA)); - return 0; -} -#endif - #ifdef CONFIG_FLATMEM #define pfn_to_nid(pfn) (0) #endif diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 35942084cd40..8f882f5881e8 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -6,10 +6,12 @@ struct mnt_namespace; struct fs_struct; struct user_namespace; +struct ns_common; extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, struct user_namespace *, struct fs_struct *); extern void put_mnt_ns(struct mnt_namespace *ns); +extern struct ns_common *from_mnt_ns(struct mnt_namespace *); extern const struct file_operations proc_mounts_operations; extern const struct file_operations proc_mountinfo_operations; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 4c2ddd0941a7..8d764aab29de 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -434,7 +434,7 @@ struct virtio_device_id { * For Hyper-V devices we use the device guid as the id. 
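*
* With the guid_t conversion below, a device table entry is built with
* GUID_INIT() from <linux/uuid.h>; an editor's sketch with made-up byte
* values (only the guid field comes from this hunk):
*
*	static const struct hv_vmbus_device_id id_table[] = {
*		{ .guid = GUID_INIT(0x12345678, 0x9abc, 0xdef0, 0x12, 0x34,
*				    0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0) },
*		{ },
*	};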
*/ struct hv_vmbus_device_id { - uuid_le guid; + guid_t guid; kernel_ulong_t driver_data; /* Data private to the driver */ }; @@ -532,6 +532,8 @@ enum dmi_field { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_BIOS_DATE, + DMI_BIOS_RELEASE, + DMI_EC_FIRMWARE_RELEASE, DMI_SYS_VENDOR, DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, @@ -663,6 +665,7 @@ struct x86_cpu_id { __u16 vendor; __u16 family; __u16 model; + __u16 steppings; __u16 feature; /* bit index */ kernel_ulong_t driver_data; }; @@ -671,6 +674,7 @@ struct x86_cpu_id { #define X86_VENDOR_ANY 0xffff #define X86_FAMILY_ANY 0 #define X86_MODEL_ANY 0 +#define X86_STEPPING_ANY 0 #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ /* diff --git a/include/linux/module.h b/include/linux/module.h index 1ad393e62bef..2e6670860d27 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -458,6 +458,8 @@ struct module { void __percpu *percpu; unsigned int percpu_size; #endif + void *noinstr_text_start; + unsigned int noinstr_text_size; #ifdef CONFIG_TRACEPOINTS unsigned int num_tracepoints; @@ -489,6 +491,12 @@ struct module { unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; #endif +#ifdef CONFIG_KPROBES + void *kprobes_text_start; + unsigned int kprobes_text_size; + unsigned long *kprobe_blacklist; + unsigned int num_kprobe_blacklist; +#endif #ifdef CONFIG_LIVEPATCH bool klp; /* Is this a livepatch module? */ @@ -858,14 +866,6 @@ extern int module_sysfs_initialized; #define __MODULE_STRING(x) __stringify(x) -#ifdef CONFIG_STRICT_MODULE_RWX -extern void module_enable_ro(const struct module *mod, bool after_init); -extern void module_disable_ro(const struct module *mod); -#else -static inline void module_enable_ro(const struct module *mod, bool after_init) { } -static inline void module_disable_ro(const struct module *mod) { } -#endif - #ifdef CONFIG_GENERIC_BUG void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, struct module *); diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index ca92aea8a6bd..4fa67a8b2265 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -29,6 +29,11 @@ void *module_alloc(unsigned long size); /* Free memory returned from module_alloc. */ void module_memfree(void *module_region); +/* Determines if the section name is an init section (that is only used during + * module loading). 
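+ *
+ * A name-prefix test is the natural generic implementation; as an
+ * editor's sketch (the in-tree default may differ):
+ *
+ *	bool module_init_section(const char *name)
+ *	{
+ *		return strstarts(name, ".init");
+ *	}
+ *
+ * Architectures can override this to treat additional, arch-specific
+ * sections as init-only.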
+ */ +bool module_init_section(const char *name); + /* Determines if the section name is an exit section (that is only used during * module unloading) */ diff --git a/include/linux/mount.h b/include/linux/mount.h index bf8cc4108b8f..de657bd211fa 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -50,7 +50,8 @@ struct fs_context; #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ - MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \ + MNT_CURSOR) #define MNT_INTERNAL 0x4000 @@ -64,6 +65,7 @@ struct fs_context; #define MNT_SYNC_UMOUNT 0x2000000 #define MNT_MARKED 0x4000000 #define MNT_UMOUNT 0x8000000 +#define MNT_CURSOR 0x10000000 struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ @@ -109,4 +111,6 @@ extern unsigned int sysctl_mount_max; extern bool path_is_mountpoint(const struct path *path); +extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num); + #endif /* _LINUX_MOUNT_H */ diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 001f1fcf9836..f4f5e90a6844 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -13,9 +13,9 @@ #ifdef CONFIG_BLOCK struct writeback_control; +struct readahead_control; -int mpage_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages, get_block_t get_block); +void mpage_readahead(struct readahead_control *, get_block_t get_block); int mpage_readpage(struct page *page, get_block_t get_block); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 886e30441c90..d890805f5494 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -98,7 +98,7 @@ struct nand_bbt_descr { /* * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr - * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * was allocated dynamicaly and must be freed in nand_cleanup(). Has no meaning * in nand_chip.bbt_options. 
*/ #define NAND_BBT_DYNAMICSTRUCT 0x80000000 diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index c98a21108688..fd1ecb821106 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -138,7 +138,7 @@ struct cfi_ident { uint16_t InterfaceDesc; uint16_t MaxBufWriteSize; uint8_t NumEraseRegions; - uint32_t EraseRegionInfo[0]; /* Not host ordered */ + uint32_t EraseRegionInfo[]; /* Not host ordered */ } __packed; /* Extended Query Structure for both PRI and ALT */ @@ -165,7 +165,7 @@ struct cfi_pri_intelext { uint16_t ProtRegAddr; uint8_t FactProtRegSize; uint8_t UserProtRegSize; - uint8_t extra[0]; + uint8_t extra[]; } __packed; struct cfi_intelext_otpinfo { @@ -286,7 +286,7 @@ struct cfi_private { map_word sector_erase_cmd; unsigned long chipshift; /* Because they're of the same type */ const char *im_name; /* inter_module name for cmdset_setup */ - struct flchip chips[0]; /* per-chip data structure for each chip */ + struct flchip chips[]; /* per-chip data structure for each chip */ }; uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 2d1f4a61f4ac..157357ec1441 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -200,6 +200,8 @@ struct mtd_debug_info { * * @node: list node used to add an MTD partition to the parent partition list * @offset: offset of the partition relatively to the parent offset + * @size: partition size. Should be equal to mtd->size unless + * MTD_SLC_ON_MLC_EMULATION is set * @flags: original flags (before the mtdpart logic decided to tweak them based * on flash constraints, like eraseblock/pagesize alignment) * @@ -209,6 +211,7 @@ struct mtd_debug_info { struct mtd_part { struct list_head node; u64 offset; + u64 size; u32 flags; }; @@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) static inline int mtd_wunit_per_eb(struct mtd_info *mtd) { - return mtd->erasesize / mtd->writesize; + struct mtd_info *master = mtd_get_master(mtd); + + return master->erasesize / mtd->writesize; } static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs) diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index e545c050d3e8..b74a539ec581 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -37,6 +37,7 @@ * master MTD flag set for the corresponding MTD partition. * For example, to force a read-only partition, simply adding * MTD_WRITEABLE to the mask_flags will do the trick. + * add_flags: contains flags to add to the parent flags * * Note: writeable partitions require their size and offset be * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). 
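With mask_flags and the new add_flags together, a partition can both
drop capabilities inherited from the parent MTD and opt into extra
behaviour. A minimal sketch of a static partition table using the
fields touched by this patch (names and sizes are hypothetical; the
flag macros come from the MTD headers):

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>
	#include <linux/sizes.h>

	static const struct mtd_partition example_parts[] = {
		{
			.name		= "boot",
			.offset		= 0,
			.size		= SZ_1M,
			.mask_flags	= MTD_WRITEABLE,	/* force read-only */
		},
		{
			.name		= "slc-data",
			.offset		= MTDPART_OFS_NEXTBLK,	/* erasesize aligned */
			.size		= SZ_8M,
			.add_flags	= MTD_SLC_ON_MLC_EMULATION,
		},
	};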
@@ -48,6 +49,7 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + uint32_t add_flags; /* flags to add to the partition */ struct device_node *of_node; }; diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index df5b9fddea16..2e3f43788d48 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -24,7 +24,7 @@ struct lpddr_private { struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; - struct flchip chips[0]; + struct flchip chips[]; }; /* qinfo_query_info structure contains request information for diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 1e76196f9829..65b1c1c18b41 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -83,14 +83,14 @@ struct nand_chip; /* * Constants for ECC_MODES */ -typedef enum { +enum nand_ecc_mode { + NAND_ECC_INVALID, NAND_ECC_NONE, NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, - NAND_ECC_HW_OOB_FIRST, NAND_ECC_ON_DIE, -} nand_ecc_modes_t; +}; enum nand_ecc_algo { NAND_ECC_UNKNOWN, @@ -119,85 +119,73 @@ enum nand_ecc_algo { #define NAND_ECC_MAXIMIZE BIT(1) /* + * Option constants for bizarre disfunctionality and real + * features. + */ + +/* Buswidth is 16 bit */ +#define NAND_BUSWIDTH_16 BIT(1) + +/* * When using software implementation of Hamming, we can specify which byte * ordering should be used. */ #define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) -/* - * Option constants for bizarre disfunctionality and real - * features. - */ -/* Buswidth is 16 bit */ -#define NAND_BUSWIDTH_16 0x00000002 /* Chip has cache program function */ -#define NAND_CACHEPRG 0x00000008 +#define NAND_CACHEPRG BIT(3) +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + /* * Chip requires ready check on read (for auto-incremented sequential read). * True only for small page devices; large page devices do not support * autoincrement. */ -#define NAND_NEED_READRDY 0x00000100 +#define NAND_NEED_READRDY BIT(8) /* Chip does not allow subpage writes */ -#define NAND_NO_SUBPAGE_WRITE 0x00000200 +#define NAND_NO_SUBPAGE_WRITE BIT(9) /* Device is one of 'new' xD cards that expose fake nand command set */ -#define NAND_BROKEN_XD 0x00000400 +#define NAND_BROKEN_XD BIT(10) /* Device behaves just like nand, but is readonly */ -#define NAND_ROM 0x00000800 +#define NAND_ROM BIT(11) /* Device supports subpage reads */ -#define NAND_SUBPAGE_READ 0x00001000 +#define NAND_SUBPAGE_READ BIT(12) +/* Macros to identify the above */ +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) /* * Some MLC NANDs need data scrambling to limit bitflips caused by repeated * patterns. */ -#define NAND_NEED_SCRAMBLING 0x00002000 +#define NAND_NEED_SCRAMBLING BIT(13) /* Device needs 3rd row address cycle */ -#define NAND_ROW_ADDR_3 0x00004000 - -/* Options valid for Samsung large page devices */ -#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG - -/* Macros to identify the above */ -#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) - -/* - * There are different places where the manufacturer stores the factory bad - * block markers. - * - * Position within the block: Each of these pages needs to be checked for a - * bad block marking pattern. 
- */ -#define NAND_BBM_FIRSTPAGE 0x01000000 -#define NAND_BBM_SECONDPAGE 0x02000000 -#define NAND_BBM_LASTPAGE 0x04000000 - -/* Position within the OOB data of the page */ -#define NAND_BBM_POS_SMALL 5 -#define NAND_BBM_POS_LARGE 0 +#define NAND_ROW_ADDR_3 BIT(14) /* Non chip related options */ /* This option skips the bbt scan during initialization. */ -#define NAND_SKIP_BBTSCAN 0x00010000 +#define NAND_SKIP_BBTSCAN BIT(16) /* Chip may not exist, so silence any errors in scan */ -#define NAND_SCAN_SILENT_NODEV 0x00040000 +#define NAND_SCAN_SILENT_NODEV BIT(18) + /* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode * when calling nand_scan_ident, and update its configuration * before calling nand_scan_tail. */ -#define NAND_BUSWIDTH_AUTO 0x00080000 +#define NAND_BUSWIDTH_AUTO BIT(19) + /* * This option could be defined by controller drivers to protect against * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers */ -#define NAND_USE_BOUNCE_BUFFER 0x00100000 +#define NAND_USES_DMA BIT(20) /* * In case your controller is implementing ->legacy.cmd_ctrl() and is relying @@ -207,26 +195,49 @@ enum nand_ecc_algo { * If your controller already takes care of this delay, you don't need to set * this flag. */ -#define NAND_WAIT_TCCS 0x00200000 +#define NAND_WAIT_TCCS BIT(21) /* * Whether the NAND chip is a boot medium. Drivers might use this information * to select ECC algorithms supported by the boot ROM or similar restrictions. */ -#define NAND_IS_BOOT_MEDIUM 0x00400000 +#define NAND_IS_BOOT_MEDIUM BIT(22) /* * Do not try to tweak the timings at runtime. This is needed when the * controller initializes the timings on itself or when it relies on * configuration done by the bootloader. */ -#define NAND_KEEP_TIMINGS 0x00800000 +#define NAND_KEEP_TIMINGS BIT(23) + +/* + * There are different places where the manufacturer stores the factory bad + * block markers. + * + * Position within the block: Each of these pages needs to be checked for a + * bad block marking pattern. + */ +#define NAND_BBM_FIRSTPAGE BIT(24) +#define NAND_BBM_SECONDPAGE BIT(25) +#define NAND_BBM_LASTPAGE BIT(26) + +/* + * Some controllers with pipelined ECC engines override the BBM marker with + * data or ECC bytes, thus making bad block detection through bad block marker + * impossible. Let's flag those chips so the core knows it shouldn't check the + * BBM and consider all blocks good. + */ +#define NAND_NO_BBM_QUIRK BIT(27) /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 #define NAND_CI_CELLTYPE_MSK 0x0C #define NAND_CI_CELLTYPE_SHIFT 2 +/* Position within the OOB data of the page */ +#define NAND_BBM_POS_SMALL 5 +#define NAND_BBM_POS_LARGE 0 + /** * struct nand_parameters - NAND generic parameters from the parameter page * @model: Model name @@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = { \ * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { - nand_ecc_modes_t mode; + enum nand_ecc_mode mode; enum nand_ecc_algo algo; int steps; int size; @@ -491,13 +502,17 @@ enum nand_data_interface_type { /** * struct nand_data_interface - NAND interface timing * @type: type of the timing - * @timings: The timing, type according to @type + * @timings: The timing information + * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. 
*/ struct nand_data_interface { enum nand_data_interface_type type; - union { - struct nand_sdr_timings sdr; + struct nand_timings { + unsigned int mode; + union { + struct nand_sdr_timings sdr; + }; } timings; }; @@ -694,6 +709,7 @@ struct nand_op_instr { /** * struct nand_subop - a sub operation + * @cs: the CS line to select for this NAND sub-operation * @instrs: array of instructions * @ninstrs: length of the @instrs array * @first_instr_start_off: offset to start from for the first instruction @@ -709,6 +725,7 @@ struct nand_op_instr { * controller driver. */ struct nand_subop { + unsigned int cs; const struct nand_op_instr *instrs; unsigned int ninstrs; unsigned int first_instr_start_off; @@ -1321,13 +1338,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page); int nand_get_set_features_notsupp(struct nand_chip *chip, int addr, u8 *subfeature_param); -/* Default read_page_raw implementation */ +/* read_page_raw implementations */ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, int page); +int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf, + int oob_required, int page); -/* Default write_page_raw implementation */ +/* write_page_raw implementations */ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page); +int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page); /* Reset and initialize a NAND device */ int nand_reset(struct nand_chip *chip, int chipnr); @@ -1356,7 +1377,7 @@ int nand_change_write_column_op(struct nand_chip *chip, unsigned int offset_in_page, const void *buf, unsigned int len, bool force_8bit); int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, - bool force_8bit); + bool force_8bit, bool check_only); int nand_write_data_op(struct nand_chip *chip, const void *buf, unsigned int len, bool force_8bit); @@ -1377,8 +1398,6 @@ void nand_wait_ready(struct nand_chip *chip); * sucessful nand_scan(). */ void nand_cleanup(struct nand_chip *chip); -/* Unregister the MTD device and calls nand_cleanup() */ -void nand_release(struct nand_chip *chip); /* * External helper for controller drivers that have to implement the WAITRDY @@ -1393,6 +1412,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, void nand_select_target(struct nand_chip *chip, unsigned int cs); void nand_deselect_target(struct nand_chip *chip); +/* Bitops */ +void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, + unsigned int src_off, unsigned int nbits); + /** * nand_get_data_buf() - Get the internal page buffer * @chip: NAND chip object diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1e2af0ec1f03..60bac2c0ec45 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -20,6 +20,7 @@ */ /* Flash opcodes. */ +#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_WREN 0x06 /* Write enable */ #define SPINOR_OP_RDSR 0x05 /* Read status register */ #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ @@ -80,7 +81,6 @@ /* Used for SST flashes only. */ #define SPINOR_OP_BP 0x02 /* Byte program */ -#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ /* Used for S3AN flashes only */ @@ -302,7 +302,7 @@ struct spi_nor; * @read: read data from the SPI NOR. * @write: write data to the SPI NOR. 
* @erase: erase a sector of the SPI NOR at the offset @offs; if - * not provided by the driver, spi-nor will send the erase + * not provided by the driver, SPI NOR will send the erase * opcode via write_reg(). */ struct spi_nor_controller_ops { @@ -327,16 +327,16 @@ struct spi_nor_manufacturer; struct spi_nor_flash_parameter; /** - * struct spi_nor - Structure for defining a the SPI NOR layer - * @mtd: point to a mtd_info structure + * struct spi_nor - Structure for defining the SPI NOR layer + * @mtd: an mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations - * @dev: point to a spi device, or a spi nor controller device. - * @spimem: point to the spi mem device + * @dev: pointer to an SPI device or an SPI NOR controller device + * @spimem: pointer to the SPI memory device * @bouncebuf: bounce buffer used when the buffer passed by the MTD * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer - * @info: spi-nor part JDEC MFR id and other info - * @manufacturer: spi-nor manufacturer + * @info: SPI NOR part JEDEC MFR ID and other info + * @manufacturer: SPI NOR manufacturer * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -344,17 +344,17 @@ struct spi_nor_flash_parameter; * @read_dummy: the dummy needed by the read operation * @program_opcode: the program opcode * @sst_write_second: used by the SST write operation - * @flags: flag options for the current SPI-NOR (SNOR_F_*) + * @flags: flag options for the current SPI NOR (SNOR_F_*) * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations - * @reg_proto the SPI protocol for read_reg/write_reg/erase operations + * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations * @controller_ops: SPI NOR controller driver specific operations. - * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. + * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. * The structure includes legacy flash parameters and * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables. * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes. - * @priv: the private data + * @priv: pointer to the private data */ struct spi_nor { struct mtd_info mtd; diff --git a/include/linux/net.h b/include/linux/net.h index 6451425e828f..016a9c5faa34 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -264,7 +264,8 @@ do { \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #define net_info_ratelimited(fmt, ...) \ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define net_dbg_ratelimited(fmt, ...) 
\
do { \
 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
@@ -303,10 +304,6 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
 int flags);
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr);
 int kernel_getpeername(struct socket *sock, struct sockaddr *addr);
-int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval,
- int *optlen);
-int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval,
- unsigned int optlen);
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 size_t size, int flags);
 int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 9d53c5ad272c..2cc3cf80b49a 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -89,7 +89,7 @@ enum {
 * Add your fresh new feature above and remember to update
 * netdev_features_strings[] in net/core/ethtool.c and maybe
 * some feature mask #defines below. Please also describe it
- * in Documentation/networking/netdev-features.txt.
+ * in Documentation/networking/netdev-features.rst.
 */
 /**/NETDEV_FEATURE_COUNT
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 130a668049ab..6fc613ed8eae 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -53,6 +53,7 @@ struct netpoll_info;
 struct device;
 struct phy_device;
 struct dsa_port;
+struct ip_tunnel_parm;
 struct macsec_context;
 struct macsec_ops;
@@ -288,6 +289,7 @@ enum netdev_state_t {
 __LINK_STATE_NOCARRIER,
 __LINK_STATE_LINKWATCH_PENDING,
 __LINK_STATE_DORMANT,
+ __LINK_STATE_TESTING,
 };
@@ -328,6 +330,7 @@ struct napi_struct {
 unsigned long state;
 int weight;
+ int defer_hard_irqs_count;
 unsigned long gro_bitmask;
 int (*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
@@ -1146,6 +1149,12 @@ struct netdev_net_notifier {
 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
 * Called to release previously enslaved netdev.
 *
+ * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev,
+ * struct sk_buff *skb,
+ * bool all_slaves);
+ * Get the xmit slave of the master device. If all_slaves is true, the
+ * function assumes all the slaves can transmit.
+ *
 * Feature/offload setting functions.
 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
 * netdev_features_t features);
@@ -1266,6 +1275,9 @@ struct netdev_net_notifier {
 * Get devlink port instance associated with a given netdev.
 * Called with a reference on the netdevice and devlink locks only,
 * rtnl_lock is not held.
+ * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
+ * int cmd);
+ * Add, change, delete or get information on an IPv4 tunnel.
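The kernel_getsockopt()/kernel_setsockopt() removal above goes hand in hand with the v5.8 conversion of in-kernel callers to dedicated, type-safe setters. A rough sketch of the migration, assuming a caller that used to set SO_REUSEADDR and TCP_NODELAY; sock_set_reuseaddr() and tcp_sock_set_nodelay() come from the same release cycle, not from this hunk:

static void foo_tune_socket(struct socket *sock)
{
        /* was: kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, ...) */
        sock_set_reuseaddr(sock->sk);

        /* was: kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, ...) */
        tcp_sock_set_nodelay(sock->sk);
}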
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1389,6 +1401,9 @@ struct net_device_ops { struct netlink_ext_ack *extack); int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); + struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, + struct sk_buff *skb, + bool all_slaves); netdev_features_t (*ndo_fix_features)(struct net_device *dev, netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, @@ -1468,6 +1483,8 @@ struct net_device_ops { int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); + int (*ndo_tunnel_ctl)(struct net_device *dev, + struct ip_tunnel_parm *p, int cmd); }; /** @@ -1803,13 +1820,9 @@ enum netdev_priv_flags { * @phydev: Physical device may attach itself * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. - * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock - * spinlock - * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount - * @qdisc_xmit_lock_key: lockdep class annotating - * netdev_queue->_xmit_lock spinlock - * @addr_list_lock_key: lockdep class annotating - * net_device->addr_list_lock spinlock + * + * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the @@ -1994,6 +2007,7 @@ struct net_device { struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; + int napi_defer_hard_irqs; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; @@ -2109,10 +2123,8 @@ struct net_device { #endif struct phy_device *phydev; struct sfp_bus *sfp_bus; - struct lock_class_key qdisc_tx_busylock_key; - struct lock_class_key qdisc_running_key; - struct lock_class_key qdisc_xmit_lock_key; - struct lock_class_key addr_list_lock_key; + struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; bool proto_down; unsigned wol_enabled:1; @@ -2197,6 +2209,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, f(dev, &dev->_tx[i], arg); } +#define netdev_lockdep_set_classes(dev) \ +{ \ + static struct lock_class_key qdisc_tx_busylock_key; \ + static struct lock_class_key qdisc_running_key; \ + static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ + unsigned int i; \ + \ + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ + (dev)->qdisc_running_key = &qdisc_running_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ + for (i = 0; i < (dev)->num_tx_queues; i++) \ + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ + &qdisc_xmit_lock_key); \ +} + u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, @@ -2731,6 +2760,9 @@ void netdev_freemem(struct net_device *dev); void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); +struct net_device *netdev_get_xmit_slave(struct net_device *dev, + struct sk_buff *skb, + bool all_slaves); struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); @@ -3221,7 +3253,6 @@ static inline void netif_stop_queue(struct net_device 
*dev)
{
 void netif_tx_stop_all_queues(struct net_device *dev);
-void netdev_update_lockdep_key(struct net_device *dev);
 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
@@ -3908,6 +3939,46 @@ static inline bool netif_dormant(const struct net_device *dev)
 /**
+ * netif_testing_on - mark device as under test.
+ * @dev: network device
+ *
+ * Mark device as under test (as per RFC2863).
+ *
+ * The testing state indicates that some test(s) must be performed on
+ * the interface. After completion of the test, the interface state
+ * will change to up, dormant, or down, as appropriate.
+ */
+static inline void netif_testing_on(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_testing_off - set device as not under test.
+ * @dev: network device
+ *
+ * Device is not in testing state.
+ */
+static inline void netif_testing_off(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state))
+ linkwatch_fire_event(dev);
+}
+
+/**
+ * netif_testing - test if device is under test
+ * @dev: network device
+ *
+ * Check if device is under test.
+ */
+static inline bool netif_testing(const struct net_device *dev)
+{
+ return test_bit(__LINK_STATE_TESTING, &dev->state);
+}
+
+
+/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
@@ -4167,6 +4238,11 @@ static inline void netif_addr_lock(struct net_device *dev)
 spin_lock(&dev->addr_list_lock);
 }
+static inline void netif_addr_lock_nested(struct net_device *dev)
+{
+ spin_lock_nested(&dev->addr_list_lock, dev->lower_level);
+}
+
 static inline void netif_addr_lock_bh(struct net_device *dev)
 {
 spin_lock_bh(&dev->addr_list_lock);
@@ -4208,6 +4284,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 int register_netdev(struct net_device *dev);
 void unregister_netdev(struct net_device *dev);
+int devm_register_netdev(struct device *dev, struct net_device *ndev);
+
 /* General hardware address lists handling functions */
 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
 struct netdev_hw_addr_list *from_list, int addr_len);
@@ -4868,7 +4946,8 @@ do { \
 #define MODULE_ALIAS_NETDEV(device) \
 MODULE_ALIAS("netdev-" device)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
 #define netdev_dbg(__dev, format, args...) \
 do { \
 dynamic_netdev_dbg(__dev, format, ##args); \
@@ -4938,7 +5017,8 @@ do { \
 #define netif_info(priv, type, dev, fmt, args...) \
 netif_level(info, priv, type, dev, fmt, ##args)
-#if defined(CONFIG_DYNAMIC_DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG) || \
+ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
 #define netif_dbg(priv, type, netdev, format, args...)
\ do { \ if (netif_msg_##type(priv)) \ diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index fcc409de31a4..a28aa289afdc 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -10,7 +10,7 @@ #include <net/netfilter/nf_conntrack_expect.h> #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> -extern const char *const pptp_msg_name[]; +const char *pptp_msg_name(u_int16_t msg); /* state of the control session */ enum pptp_ctrlsess_state { diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 676f1ff161a9..f47af135bd56 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -63,15 +63,7 @@ int netpoll_setup(struct netpoll *np); void __netpoll_cleanup(struct netpoll *np); void __netpoll_free(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); -void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, - struct net_device *dev); -static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) -{ - unsigned long flags; - local_irq_save(flags); - netpoll_send_skb_on_dev(np, skb, np->dev); - local_irq_restore(flags); -} +netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); #ifdef CONFIG_NETPOLL static inline void *netpoll_poll_lock(struct napi_struct *napi) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 82d8fb422092..4dba3c948932 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -38,7 +38,7 @@ struct nfs4_ace { struct nfs4_acl { uint32_t naces; - struct nfs4_ace aces[0]; + struct nfs4_ace aces[]; }; #define NFS4_MAXLABELLEN 2048 @@ -295,7 +295,7 @@ static inline bool seqid_mutating_err(u32 err) case NFS4ERR_NOFILEHANDLE: case NFS4ERR_MOVED: return false; - }; + } return true; } diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 73eda45f1cfd..6ee9119acc5d 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -230,6 +230,7 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ +#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 440230488025..5fd0a9ef425f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1227,7 +1227,7 @@ struct nfs4_secinfo4 { struct nfs4_secinfo_flavors { unsigned int num_flavors; - struct nfs4_secinfo4 flavors[0]; + struct nfs4_secinfo4 flavors[]; }; struct nfs4_secinfo_arg { @@ -1317,11 +1317,13 @@ struct nfs41_impl_id { struct nfstime4 date; }; +#define MAX_BIND_CONN_TO_SESSION_RETRIES 3 struct nfs41_bind_conn_to_session_args { struct nfs_client *client; struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; + int retries; }; struct nfs41_bind_conn_to_session_res { diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 9003e29cde46..750c7f395ca9 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -202,16 +202,11 @@ static inline void watchdog_update_hrtimer_threshold(u64 period) { } #endif struct ctl_table; -extern int proc_watchdog(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -extern int proc_nmi_watchdog(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -extern int proc_soft_watchdog(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -extern int 
proc_watchdog_thresh(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -extern int proc_watchdog_cpumask(struct ctl_table *, int, - void __user *, size_t *, loff_t *); +int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); +int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); +int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *); +int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *); #ifdef CONFIG_HAVE_ACPI_APEI_NMI #include <asm/nmi.h> diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 074f395b9ad2..cdb171efc7cb 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -42,6 +42,30 @@ struct nsproxy { extern struct nsproxy init_nsproxy; /* + * A structure to encompass all bits needed to install + * a partial or complete new set of namespaces. + * + * If a new user namespace is requested cred will + * point to a modifiable set of credentials. If a pointer + * to a modifiable set is needed nsset_cred() must be + * used and tested. + */ +struct nsset { + unsigned flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +static inline struct cred *nsset_cred(struct nsset *set) +{ + if (set->flags & CLONE_NEWUSER) + return (struct cred *)set->cred; + + return NULL; +} + +/* * the namespaces access rules are: * * 1. only current task is allowed to change tsk->nsproxy pointer or diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 8c13538aeffe..191b524e5c0d 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -478,7 +478,7 @@ void ntb_unregister_client(struct ntb_client *client); int ntb_register_device(struct ntb_dev *ntb); /** - * ntb_register_device() - unregister a ntb device + * ntb_unregister_device() - unregister a ntb device * @ntb: NTB device context. * * The device will be removed from the list of ntb devices. If the ntb device @@ -1351,7 +1351,7 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val) * @sidx: Scratchpad index. * @spad_addr: OUT - The address of the peer scratchpad register. * - * Return the address of the peer doorbell register. This may be used, for + * Return the address of the peer scratchpad register. This may be used, for * example, by drivers that offload memory copy operations to a dma engine. * * Return: Zero on success, otherwise an error number. @@ -1373,7 +1373,7 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, * * Read the peer scratchpad register, and return the value. * - * Return: The value of the local scratchpad register. + * Return: The value of the peer scratchpad register. */ static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) { diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 10f81629b9ce..41e7795a3ee4 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -10,47 +10,26 @@ /* - * ********************** LLDD FC-NVME Host API ******************** + * ********************** FC-NVME LS API ******************** * - * For FC LLDD's that are the NVME Host role. + * Data structures used by both FC-NVME hosts and FC-NVME + * targets to perform FC-NVME LS requests or transmit + * responses. 
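Before moving on to the FC-NVME structures, the struct nsset introduced in the nsproxy.h hunk above deserves a quick illustration: namespace ->install() handlers now receive the whole set being assembled for setns() rather than a bare nsproxy. A sketch of a user-namespace-style handler under that assumption; foons_install() is a hypothetical name:

static int foons_install(struct nsset *nsset, struct ns_common *ns)
{
        struct cred *cred = nsset_cred(nsset);

        /* a modifiable cred exists only when CLONE_NEWUSER was requested */
        if (!cred)
                return -EINVAL;

        /* validate permissions, then commit ns into nsset->nsproxy/cred */
        return 0;
}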
* - * ****************************************************************** + * *********************************************************** */ - - /** - * struct nvme_fc_port_info - port-specific ids and FC connection-specific - * data element used during NVME Host role - * registrations - * - * Static fields describing the port being registered: - * @node_name: FC WWNN for the port - * @port_name: FC WWPN for the port - * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) - * @dev_loss_tmo: maximum delay for reconnects to an association on - * this device. Used only on a remoteport. + * struct nvmefc_ls_req - Request structure passed from the transport + * to the LLDD to perform a NVME-FC LS request and obtain + * a response. + * Used by nvme-fc transport (host) to send LS's such as + * Create Association, Create Connection and Disconnect + * Association. + * Used by the nvmet-fc transport (controller) to send + * LS's such as Disconnect Association. * - * Initialization values for dynamic port fields: - * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must - * be set to 0. - */ -struct nvme_fc_port_info { - u64 node_name; - u64 port_name; - u32 port_role; - u32 port_id; - u32 dev_loss_tmo; -}; - - -/** - * struct nvmefc_ls_req - Request structure passed from NVME-FC transport - * to LLDD in order to perform a NVME FC-4 LS - * request and obtain a response. - * - * Values set by the NVME-FC layer prior to calling the LLDD ls_req - * entrypoint. + * Values set by the requestor prior to calling the LLDD ls_req entrypoint: * @rqstaddr: pointer to request buffer * @rqstdma: PCI DMA address of request buffer * @rqstlen: Length, in bytes, of request buffer @@ -63,8 +42,8 @@ struct nvme_fc_port_info { * @private: pointer to memory allocated alongside the ls request structure * that is specifically for the LLDD to use while processing the * request. The length of the buffer corresponds to the - * lsrqst_priv_sz value specified in the nvme_fc_port_template - * supplied by the LLDD. + * lsrqst_priv_sz value specified in the xxx_template supplied + * by the LLDD. * @done: The callback routine the LLDD is to invoke upon completion of * the LS request. req argument is the pointer to the original LS * request structure. Status argument must be 0 upon success, a @@ -86,6 +65,101 @@ struct nvmefc_ls_req { } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ +/** + * struct nvmefc_ls_rsp - Structure passed from the transport to the LLDD + * to request the transmit the NVME-FC LS response to a + * NVME-FC LS request. The structure originates in the LLDD + * and is given to the transport via the xxx_rcv_ls_req() + * transport routine. As such, the structure represents the + * FC exchange context for the NVME-FC LS request that was + * received and which the response is to be sent for. + * Used by the LLDD to pass the nvmet-fc transport (controller) + * received LS's such as Create Association, Create Connection + * and Disconnect Association. + * Used by the LLDD to pass the nvme-fc transport (host) + * received LS's such as Disconnect Association or Disconnect + * Connection. + * + * The structure is allocated by the LLDD whenever a LS Request is received + * from the FC link. The address of the structure is passed to the nvmet-fc + * or nvme-fc layer via the xxx_rcv_ls_req() transport routines. + * + * The address of the structure is to be passed back to the LLDD + * when the response is to be transmit. 
The LLDD will use the address to
+ * map back to the LLDD exchange structure which maintains information such
+ * as the remote N_Port that sent the LS as well as any FC exchange context.
+ * Upon completion of the LS response transmit, the LLDD will pass the
+ * address of the structure back to the transport LS rsp done() routine,
+ * allowing the transport to release dma resources. Upon completion of
+ * the done() routine, no further access to the structure will be made by
+ * the transport and the LLDD can de-allocate the structure.
+ *
+ * Field initialization:
+ * At the time of the xxx_rcv_ls_req() call, there is no content that
+ * is valid in the structure.
+ *
+ * When the structure is used for the LLDD->xmt_ls_rsp() call, the
+ * transport layer will fully set the fields in order to specify the
+ * response payload buffer and its length as well as the done routine
+ * to be called upon completion of the transmit. The transport layer
+ * will also set a private pointer for its own use in the done routine.
+ *
+ * Values set by the transport layer prior to calling the LLDD xmt_ls_rsp
+ * entrypoint:
+ * @rspbuf: pointer to the LS response buffer
+ * @rspdma: PCI DMA address of the LS response buffer
+ * @rsplen: Length, in bytes, of the LS response buffer
+ * @done: The callback routine the LLDD is to invoke upon completion of
+ * transmitting the LS response. req argument is the pointer to
+ * the original ls request.
+ * @nvme_fc_private: pointer to an internal transport-specific structure
+ * used as part of the transport done() processing. The LLDD is
+ * not to access this pointer.
+ */
+struct nvmefc_ls_rsp {
+ void *rspbuf;
+ dma_addr_t rspdma;
+ u16 rsplen;
+
+ void (*done)(struct nvmefc_ls_rsp *rsp);
+ void *nvme_fc_private; /* LLDD is not to access !! */
+};
+
+
+
+/*
+ * ********************** LLDD FC-NVME Host API ********************
+ *
+ * For FC LLDD's that are the NVME Host role.
+ *
+ * ******************************************************************
+ */
+
+
+/**
+ * struct nvme_fc_port_info - port-specific ids and FC connection-specific
+ * data element used during NVME Host role
+ * registrations
+ *
+ * Static fields describing the port being registered:
+ * @node_name: FC WWNN for the port
+ * @port_name: FC WWPN for the port
+ * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx)
+ * @dev_loss_tmo: maximum delay for reconnects to an association on
+ * this device. Used only on a remoteport.
+ *
+ * Initialization values for dynamic port fields:
+ * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must
+ * be set to 0.
+ */
+struct nvme_fc_port_info {
+ u64 node_name;
+ u64 port_name;
+ u32 port_role;
+ u32 port_id;
+ u32 dev_loss_tmo;
+};
+
 enum nvmefc_fcp_datadir {
 NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */
 NVMEFC_FCP_WRITE,
@@ -337,6 +411,21 @@ struct nvme_fc_remote_port {
 * indicating an FC transport Aborted status.
 * Entrypoint is Mandatory.
 *
+ * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service.
+ * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange
+ * structure specified in the nvme_fc_rcv_ls_req() call made when
+ * the LS request was received. The structure will fully describe
+ * the buffers for the response payload and the dma address of the
+ * payload. The LLDD is to transmit the response (or return a
+ * non-zero errno status), and upon completion of the transmit, call
+ * the "done" routine specified in the nvmefc_ls_rsp structure
+ * (argument to done is the address of the nvmefc_ls_rsp structure
+ * itself). Upon the completion of the done routine, the LLDD shall
+ * consider the LS handling complete and the nvmefc_ls_rsp structure
+ * may be freed/released.
+ * Entrypoint is mandatory if the LLDD calls the nvme_fc_rcv_ls_req()
+ * entrypoint.
+ *
 * @max_hw_queues: indicates the maximum number of hw queues the LLDD
 * supports for cpu affinitization.
 * Value is Mandatory. Must be at least 1.
@@ -371,7 +460,7 @@ struct nvme_fc_remote_port {
 * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional
 * memory that it would like fc nvme layer to allocate on the LLDD's
 * behalf whenever a ls request structure is allocated. The additional
- * memory area solely for the of the LLDD and its location is
+ * memory area is solely for use by the LLDD and its location is
 * specified by the ls_request->private pointer.
 * Value is Mandatory. Allowed to be zero.
 *
@@ -405,6 +494,9 @@ struct nvme_fc_port_template {
 struct nvme_fc_remote_port *,
 void *hw_queue_handle,
 struct nvmefc_fcp_req *);
+ int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport,
+ struct nvme_fc_remote_port *rport,
+ struct nvmefc_ls_rsp *ls_rsp);
 u32 max_hw_queues;
 u16 max_sgl_segments;
@@ -441,6 +533,34 @@ void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport);
 int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport,
 u32 dev_loss_tmo);
+/*
+ * Routine called to pass a NVME-FC LS request, received by the lldd,
+ * to the nvme-fc transport.
+ *
+ * If the return value is zero: the LS was successfully accepted by the
+ * transport.
+ * If the return value is non-zero: the transport has not accepted the
+ * LS. The lldd should ABTS-LS the LS.
+ *
+ * Note: if the LLDD receives an ABTS for the LS prior to the transport
+ * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
+ * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the
+ * response shall not be transmitted and the struct nvmefc_ls_rsp done
+ * routine shall be called. The LLDD may transmit the ABTS response as
+ * soon as the LS was marked or can delay until the xmt_ls_rsp() call is
+ * made.
+ * Note: if an RCV LS was successfully posted to the transport and the
+ * remoteport is then unregistered before xmt_ls_rsp() was called for
+ * the lsrsp structure, the transport will still call xmt_ls_rsp()
+ * afterward to clean up the outstanding lsrsp structure. The LLDD should
+ * noop the transmission of the rsp and call the lsrsp->done() routine
+ * to allow the lsrsp structure to be released.
+ */
+int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport,
+ struct nvmefc_ls_rsp *lsrsp,
+ void *lsreqbuf, u32 lsreqbuf_len);
+
+
 /*
 * *************** LLDD FC-NVME Target/Subsystem API ***************
@@ -470,55 +590,6 @@ struct nvmet_fc_port_info {
 };
-/**
- * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC
- * layer to represent the exchange context for
- * a FC-NVME Link Service (LS).
- *
- * The structure is allocated by the LLDD whenever a LS Request is received
- * from the FC link. The address of the structure is passed to the nvmet-fc
- * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure
- * will be passed back to the LLDD when the response is to be transmit.
- * The LLDD is to use the address to map back to the LLDD exchange structure - * which maintains information such as the targetport the LS was received - * on, the remote FC NVME initiator that sent the LS, and any FC exchange - * context. Upon completion of the LS response transmit, the address of the - * structure will be passed back to the LS rsp done() routine, allowing the - * nvmet-fc layer to release dma resources. Upon completion of the done() - * routine, no further access will be made by the nvmet-fc layer and the - * LLDD can de-allocate the structure. - * - * Field initialization: - * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that - * is valid in the structure. - * - * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc - * layer will fully set the fields in order to specify the response - * payload buffer and its length as well as the done routine to be called - * upon compeletion of the transmit. The nvmet-fc layer will also set a - * private pointer for its own use in the done routine. - * - * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp - * entrypoint. - * @rspbuf: pointer to the LS response buffer - * @rspdma: PCI DMA address of the LS response buffer - * @rsplen: Length, in bytes, of the LS response buffer - * @done: The callback routine the LLDD is to invoke upon completion of - * transmitting the LS response. req argument is the pointer to - * the original ls request. - * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used - * as part of the NVMET-FC processing. The LLDD is not to access - * this pointer. - */ -struct nvmefc_tgt_ls_req { - void *rspbuf; - dma_addr_t rspdma; - u16 rsplen; - - void (*done)(struct nvmefc_tgt_ls_req *req); - void *nvmet_fc_private; /* LLDD is not to access !! */ -}; - /* Operations that NVME-FC layer may request the LLDD to perform for FCP */ enum { NVMET_FCOP_READDATA = 1, /* xmt data to initiator */ @@ -693,17 +764,19 @@ struct nvmet_fc_target_port { * Entrypoint is Mandatory. * * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service. - * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange + * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange * structure specified in the nvmet_fc_rcv_ls_req() call made when - * the LS request was received. The structure will fully describe + * the LS request was received. The structure will fully describe * the buffers for the response payload and the dma address of the - * payload. The LLDD is to transmit the response (or return a non-zero - * errno status), and upon completion of the transmit, call the - * "done" routine specified in the nvmefc_tgt_ls_req structure - * (argument to done is the ls reqwuest structure itself). - * After calling the done routine, the LLDD shall consider the - * LS handling complete and the nvmefc_tgt_ls_req structure may - * be freed/released. + * payload. The LLDD is to transmit the response (or return a + * non-zero errno status), and upon completion of the transmit, call + * the "done" routine specified in the nvmefc_ls_rsp structure + * (argument to done is the address of the nvmefc_ls_rsp structure + * itself). Upon the completion of the done() routine, the LLDD shall + * consider the LS handling complete and the nvmefc_ls_rsp structure + * may be freed/released. + * The transport will always call the xmt_ls_rsp() routine for any + * LS received. * Entrypoint is Mandatory. 
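Since both transports now share nvmefc_ls_rsp, an LLDD's xmt_ls_rsp() implementation looks essentially the same on the host and target sides. A sketch honoring the abort rule spelled out for nvme_fc_rcv_ls_req()/nvmet_fc_rcv_ls_req(); every foo_* name, the ls_rsp member, and the aborted flag are hypothetical:

static int foo_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                          struct nvmefc_ls_rsp *ls_rsp)
{
        struct foo_ls_ctx *ctx = container_of(ls_rsp, struct foo_ls_ctx,
                                              ls_rsp);

        if (ctx->aborted) {
                /* ABTS already seen: skip the transmit, just complete */
                ls_rsp->done(ls_rsp);
                return 0;
        }

        /* queue the response; the done() callback fires on completion */
        return foo_queue_ls_rsp_wqe(ctx);
}

The same context structure is what the LLDD would have handed to nvme_fc_rcv_ls_req() or nvmet_fc_rcv_ls_req() when the LS arrived on the wire.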
* * @fcp_op: Called to perform a data transfer or transmit a response. @@ -798,6 +871,39 @@ struct nvmet_fc_target_port { * should cause the initiator to rescan the discovery controller * on the targetport. * + * @ls_req: Called to issue a FC-NVME FC-4 LS service request. + * The nvme_fc_ls_req structure will fully describe the buffers for + * the request payload and where to place the response payload. + * The targetport that is to issue the LS request is identified by + * the targetport argument. The remote port that is to receive the + * LS request is identified by the hosthandle argument. The nvmet-fc + * transport is only allowed to issue FC-NVME LS's on behalf of an + * association that was created prior by a Create Association LS. + * The hosthandle will originate from the LLDD in the struct + * nvmefc_ls_rsp structure for the Create Association LS that + * was delivered to the transport. The transport will save the + * hosthandle as an attribute of the association. If the LLDD + * loses connectivity with the remote port, it must call the + * nvmet_fc_invalidate_host() routine to remove any references to + * the remote port in the transport. + * The LLDD is to allocate an exchange, issue the LS request, obtain + * the LS response, and call the "done" routine specified in the + * request structure (argument to done is the ls request structure + * itself). + * Entrypoint is Optional - but highly recommended. + * + * @ls_abort: called to request the LLDD to abort the indicated ls request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the ls request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory if the ls_req entry point is specified. + * + * @host_release: called to inform the LLDD that the request to invalidate + * the host port indicated by the hosthandle has been fully completed. + * No associations exist with the host port and there will be no + * further references to hosthandle. + * Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host(). + * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. @@ -826,11 +932,19 @@ struct nvmet_fc_target_port { * area solely for the of the LLDD and its location is specified by * the targetport->private pointer. * Value is Mandatory. Allowed to be zero. + * + * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like nvmet-fc layer to allocate on the LLDD's + * behalf whenever a ls request structure is allocated. The additional + * memory area is solely for use by the LLDD and its location is + * specified by the ls_request->private pointer. + * Value is Mandatory. Allowed to be zero. 
+ *
 */
 struct nvmet_fc_target_template {
 void (*targetport_delete)(struct nvmet_fc_target_port *tgtport);
 int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *tls_req);
+ struct nvmefc_ls_rsp *ls_rsp);
 int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
 struct nvmefc_tgt_fcp_req *fcpreq);
 void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
@@ -840,6 +954,11 @@ struct nvmet_fc_target_template {
 void (*defer_rcv)(struct nvmet_fc_target_port *tgtport,
 struct nvmefc_tgt_fcp_req *fcpreq);
 void (*discovery_event)(struct nvmet_fc_target_port *tgtport);
+ int (*ls_req)(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq);
+ void (*ls_abort)(struct nvmet_fc_target_port *targetport,
+ void *hosthandle, struct nvmefc_ls_req *lsreq);
+ void (*host_release)(void *hosthandle);
 u32 max_hw_queues;
 u16 max_sgl_segments;
@@ -848,7 +967,9 @@ struct nvmet_fc_target_template {
 u32 target_features;
+ /* sizes of additional private data for data structures */
 u32 target_priv_sz;
+ u32 lsrqst_priv_sz;
 };
@@ -859,10 +980,61 @@ int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo,
 int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport);
+/*
+ * Routine called to pass a NVME-FC LS request, received by the lldd,
+ * to the nvmet-fc transport.
+ *
+ * If the return value is zero: the LS was successfully accepted by the
+ * transport.
+ * If the return value is non-zero: the transport has not accepted the
+ * LS. The lldd should ABTS-LS the LS.
+ *
+ * Note: if the LLDD receives an ABTS for the LS prior to the transport
+ * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD
+ * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the
+ * response shall not be transmitted and the struct nvmefc_ls_rsp done
+ * routine shall be called. The LLDD may transmit the ABTS response as
+ * soon as the LS was marked or can delay until the xmt_ls_rsp() call is
+ * made.
+ * Note: if an RCV LS was successfully posted to the transport and the
+ * targetport is then unregistered before xmt_ls_rsp() was called for
+ * the lsrsp structure, the transport will still call xmt_ls_rsp()
+ * afterward to clean up the outstanding lsrsp structure. The LLDD should
+ * noop the transmission of the rsp and call the lsrsp->done() routine
+ * to allow the lsrsp structure to be released.
+ */
 int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_ls_req *lsreq,
+ void *hosthandle,
+ struct nvmefc_ls_rsp *rsp,
 void *lsreqbuf, u32 lsreqbuf_len);
+/*
+ * Routine called by the LLDD whenever it has a logout or loss of
+ * connectivity to a NVME-FC host port for which there had been active
+ * NVMe controllers. The host port is indicated by the
+ * hosthandle. The hosthandle is given to the nvmet-fc transport
+ * when a NVME LS was received, typically to create a new association.
+ * The nvmet-fc transport will cache the hostport value with the
+ * association for use in LS requests for the association.
+ * When the LLDD calls this routine, the nvmet-fc transport will
+ * immediately terminate all associations that were created with
+ * the hosthandle host port.
+ * The LLDD, after calling this routine and having control returned,
+ * must assume the transport may subsequently utilize hosthandle as
+ * part of sending LS's to terminate the association. The LLDD
+ * should reject the LS's if they are attempted.
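The connectivity-loss contract described here is easier to see end to end in code. A sketch of a target LLDD reacting to a host-port logout under the stated rules; the foo_* names are hypothetical:

static void foo_host_connectivity_lost(struct foo_tgt *tgt, void *hosthandle)
{
        /* tears down every association created via this host port */
        nvmet_fc_invalidate_host(tgt->targetport, hosthandle);

        /* hosthandle must stay valid until ->host_release() fires */
}

static void foo_host_release(void *hosthandle)
{
        foo_free_host_ctx(hosthandle);  /* transport holds no more references */
}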
+ * Once the last association has terminated for the hosthandle host + * port, the nvmet-fc transport will call the ops->host_release() + * callback. As of the callback, the nvmet-fc transport will no + * longer reference hosthandle. + */ +void nvmet_fc_invalidate_host(struct nvmet_fc_target_port *tgtport, + void *hosthandle); + +/* + * If nvmet_fc_rcv_fcp_req returns non-zero, the transport has not accepted + * the FCP cmd. The lldd should ABTS-LS the cmd. + */ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq, void *cmdiubuf, u32 cmdiubuf_len); diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h index e8c30b39bb27..51fe44e0328b 100644 --- a/include/linux/nvme-fc.h +++ b/include/linux/nvme-fc.h @@ -4,8 +4,8 @@ */ /* - * This file contains definitions relative to FC-NVME-2 r1.06 - * (T11-2019-00210-v001). + * This file contains definitions relative to FC-NVME-2 r1.08 + * (T11-2019-00210-v004). */ #ifndef _NVME_FC_H @@ -81,7 +81,8 @@ struct nvme_fc_ersp_iu { }; -#define FCNVME_NVME_SR_OPCODE 0x01 +#define FCNVME_NVME_SR_OPCODE 0x01 +#define FCNVME_NVME_SR_RSP_OPCODE 0x02 struct nvme_fc_nvme_sr_iu { __u8 fc_id; @@ -94,7 +95,7 @@ struct nvme_fc_nvme_sr_iu { enum { FCNVME_SRSTAT_ACC = 0x0, - FCNVME_SRSTAT_INV_FCID = 0x1, + /* reserved 0x1 */ /* reserved 0x2 */ FCNVME_SRSTAT_LOGICAL_ERR = 0x3, FCNVME_SRSTAT_INV_QUALIF = 0x4, @@ -397,7 +398,7 @@ struct fcnvme_ls_disconnect_conn_rqst { struct fcnvme_ls_rqst_w0 w0; __be32 desc_list_len; struct fcnvme_lsdesc_assoc_id associd; - struct fcnvme_lsdesc_disconn_cmd connectid; + struct fcnvme_lsdesc_conn_id connectid; }; struct fcnvme_ls_disconnect_conn_acc { diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 3d5189f46cb1..5ce51ab4c50e 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -38,6 +38,8 @@ enum { NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ + NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */ + NVMF_ADDR_FAMILY_MAX, }; /* Transport Type codes for Discovery Log Page entry TRTYPE field */ @@ -299,6 +301,8 @@ struct nvme_id_ctrl { }; enum { + NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1, + NVME_CTRL_CMIC_ANA = 1 << 3, NVME_CTRL_ONCS_COMPARE = 1 << 0, NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, NVME_CTRL_ONCS_DSM = 1 << 2, @@ -394,8 +398,12 @@ enum { enum { NVME_NS_FEAT_THIN = 1 << 0, + NVME_NS_FEAT_ATOMICS = 1 << 1, + NVME_NS_FEAT_IO_OPT = 1 << 4, + NVME_NS_ATTR_RO = 1 << 0, NVME_NS_FLBAS_LBA_MASK = 0xf, NVME_NS_FLBAS_META_EXT = 0x10, + NVME_NS_NMIC_SHARED = 1 << 0, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, @@ -412,6 +420,12 @@ enum { NVME_NS_DPS_PI_TYPE3 = 3, }; +/* Identify Namespace Metadata Capabilities (MC): */ +enum { + NVME_MC_EXTENDED_LBA = (1 << 0), + NVME_MC_METADATA_PTR = (1 << 1), +}; + struct nvme_ns_id_desc { __u8 nidt; __u8 nidl; @@ -1177,7 +1191,7 @@ struct nvmf_disc_rsp_page_hdr { __le64 numrec; __le16 recfmt; __u8 resv14[1006]; - struct nvmf_disc_rsp_page_entry entries[0]; + struct nvmf_disc_rsp_page_entry entries[]; }; enum { diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 491a2b7e77c1..0f61a4ac6bcf 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -30,7 +30,9 @@ extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); extern int of_phy_register_fixed_link(struct device_node *np); extern void of_phy_deregister_fixed_link(struct device_node *np); extern bool of_phy_is_fixed_link(struct 
device_node *np); - +extern int of_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct device_node *child, u32 addr); static inline int of_mdio_parse_addr(struct device *dev, const struct device_node *np) @@ -118,6 +120,13 @@ static inline bool of_phy_is_fixed_link(struct device_node *np) { return false; } + +static inline int of_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct device_node *child, u32 addr) +{ + return -ENOSYS; +} #endif diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 60f541912ccf..8216a4156263 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -3,6 +3,7 @@ #define __OF_RESERVED_MEM_H #include <linux/device.h> +#include <linux/of.h> struct of_phandle_args; struct reserved_mem_ops; @@ -33,6 +34,9 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx); +int of_reserved_mem_device_init_by_name(struct device *dev, + struct device_node *np, + const char *name); void of_reserved_mem_device_release(struct device *dev); void fdt_init_reserved_mem(void); @@ -45,6 +49,14 @@ static inline int of_reserved_mem_device_init_by_idx(struct device *dev, { return -ENOSYS; } + +static inline int of_reserved_mem_device_init_by_name(struct device *dev, + struct device_node *np, + const char *name) +{ + return -ENOSYS; +} + static inline void of_reserved_mem_device_release(struct device *pdev) { } static inline void fdt_init_reserved_mem(void) { } diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 659045046468..93fcef105061 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -304,16 +304,33 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * struct_size() - Calculate size of structure with trailing array. * @p: Pointer to the structure. * @member: Name of the array member. - * @n: Number of elements in the array. + * @count: Number of elements in the array. * * Calculates size of memory needed for structure @p followed by an - * array of @n @member elements. + * array of @count number of @member elements. * * Return: number of bytes needed or SIZE_MAX on overflow. */ -#define struct_size(p, member, n) \ - __ab_c_size(n, \ +#define struct_size(p, member, count) \ + __ab_c_size(count, \ sizeof(*(p)->member) + __must_be_array((p)->member),\ sizeof(*(p))) +/** + * flex_array_size() - Calculate size of a flexible array member + * within an enclosing structure. + * + * @p: Pointer to the structure. + * @member: Name of the flexible array member. + * @count: Number of elements in the array. + * + * Calculates size of a flexible array of @count number of @member + * elements, at the end of structure @p. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define flex_array_size(p, member, count) \ + array_size(count, \ + sizeof(*(p)->member) + __must_be_array((p)->member)) + #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/padata.h b/include/linux/padata.h index a0d8b41850b2..7302efff5e65 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -4,6 +4,9 @@ * * Copyright (C) 2008, 2009 secunet Security Networks AG * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com> + * + * Copyright (c) 2020 Oracle and/or its affiliates. 
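The struct_size()/flex_array_size() pair above is the intended companion to the [0]-to-[] flexible-array conversions seen elsewhere in this diff (nfs4_acl, nfs4_secinfo_flavors, nvmf_disc_rsp_page_hdr). A small sketch with a hypothetical structure:

struct foo {
        unsigned int count;
        u32 vals[];                     /* flexible array member */
};

static struct foo *foo_alloc(unsigned int n, const u32 *src)
{
        struct foo *p = kmalloc(struct_size(p, vals, n), GFP_KERNEL);

        if (!p)
                return NULL;
        p->count = n;
        memcpy(p->vals, src, flex_array_size(p, vals, n));
        return p;
}

Both macros saturate to SIZE_MAX on overflow, so a multiplication that would have wrapped turns into a failed allocation instead.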
+ * Author: Daniel Jordan <daniel.m.jordan@oracle.com> */ #ifndef PADATA_H @@ -24,7 +27,6 @@ * @list: List entry, to attach to the padata lists. * @pd: Pointer to the internal control structure. * @cb_cpu: Callback cpu for serializatioon. - * @cpu: Cpu for parallelization. * @seq_nr: Sequence number of the parallelized data object. * @info: Used to pass information from the parallel to the serial function. * @parallel: Parallel execution function. @@ -34,7 +36,6 @@ struct padata_priv { struct list_head list; struct parallel_data *pd; int cb_cpu; - int cpu; unsigned int seq_nr; int info; void (*parallel)(struct padata_priv *padata); @@ -68,15 +69,11 @@ struct padata_serial_queue { /** * struct padata_parallel_queue - The percpu padata parallel queue * - * @parallel: List to wait for parallelization. * @reorder: List to wait for reordering after parallel processing. - * @work: work struct for parallelization. * @num_obj: Number of objects that are processed by this cpu. */ struct padata_parallel_queue { - struct padata_list parallel; struct padata_list reorder; - struct work_struct work; atomic_t num_obj; }; @@ -111,7 +108,7 @@ struct parallel_data { struct padata_parallel_queue __percpu *pqueue; struct padata_serial_queue __percpu *squeue; atomic_t refcnt; - atomic_t seq_nr; + unsigned int seq_nr; unsigned int processed; int cpu; struct padata_cpumask cpumask; @@ -137,9 +134,35 @@ struct padata_shell { }; /** + * struct padata_mt_job - represents one multithreaded job + * + * @thread_fn: Called for each chunk of work that a padata thread does. + * @fn_arg: The thread function argument. + * @start: The start of the job (units are job-specific). + * @size: size of this node's work (units are job-specific). + * @align: Ranges passed to the thread function fall on this boundary, with the + * possible exceptions of the beginning and end of the job. + * @min_chunk: The minimum chunk size in job-specific units. This allows + * the client to communicate the minimum amount of work that's + * appropriate for one worker thread to do at once. + * @max_threads: Max threads to use for the job, actual number may be less + * depending on task size and minimum chunk size. + */ +struct padata_mt_job { + void (*thread_fn)(unsigned long start, unsigned long end, void *arg); + void *fn_arg; + unsigned long start; + unsigned long size; + unsigned long align; + unsigned long min_chunk; + int max_threads; +}; + +/** * struct padata_instance - The overall control structure. * - * @node: Used by CPU hotplug. + * @cpu_online_node: Linkage for CPU online callback. + * @cpu_dead_node: Linkage for CPU offline callback. * @parallel_wq: The workqueue used for parallel work. * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. @@ -150,7 +173,8 @@ struct padata_shell { * @flags: padata flags. 
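One possible shape for driving the padata_mt_job interface above; note that padata_do_multithreaded() is declared __init, so this only suits boot-time work such as deferred page initialization. The range and helper names are hypothetical:

static void __init foo_init_range(unsigned long start, unsigned long end,
                                  void *arg)
{
        /* initialize items in [start, end); runs concurrently per chunk */
}

static void __init foo_init_all(unsigned long start, unsigned long size)
{
        struct padata_mt_job job = {
                .thread_fn      = foo_init_range,
                .fn_arg         = NULL,
                .start          = start,
                .size           = size,
                .align          = 1,
                .min_chunk      = 1024,
                .max_threads    = 4,
        };

        padata_do_multithreaded(&job);
}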
*/ struct padata_instance { - struct hlist_node node; + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; struct workqueue_struct *parallel_wq; struct workqueue_struct *serial_wq; struct list_head pslist; @@ -164,6 +188,12 @@ struct padata_instance { #define PADATA_INVALID 4 }; +#ifdef CONFIG_PADATA +extern void __init padata_init(void); +#else +static inline void __init padata_init(void) {} +#endif + extern struct padata_instance *padata_alloc_possible(const char *name); extern void padata_free(struct padata_instance *pinst); extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); @@ -171,6 +201,7 @@ extern void padata_free_shell(struct padata_shell *ps); extern int padata_do_parallel(struct padata_shell *ps, struct padata_priv *padata, int *cb_cpu); extern void padata_do_serial(struct padata_priv *padata); +extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); extern int padata_start(struct padata_instance *pinst); diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 222f6f7b2bb3..6be1aa559b1e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy) * not onlined when onlining the section). * The content of these pages is effectively stale. Such pages should not * be touched (read/write/dump/save) except by their owner. + * + * If a driver wants to allow to offline unmovable PageOffline() pages without + * putting them back to the buddy, it can do so via the memory notifier by + * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the + * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline() + * pages (now with a reference count of zero) are treated like free pages, + * allowing the containing memory block to get offlined. A driver that + * relies on this feature is aware that re-onlining the memory block will + * require to re-set the pages PageOffline() and not giving them to the + * buddy via online_page_callback_t. 
*/ PAGE_TYPE_OPS(Offline, offline) diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index bab7e57f659b..85bd413e784e 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -10,6 +10,7 @@ struct page_counter { atomic_long_t usage; unsigned long min; unsigned long low; + unsigned long high; unsigned long max; struct page_counter *parent; @@ -55,6 +56,13 @@ bool page_counter_try_charge(struct page_counter *counter, void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); + +static inline void page_counter_set_high(struct page_counter *counter, + unsigned long nr_pages) +{ + WRITE_ONCE(counter->high, nr_pages); +} + int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a8f7bd8ea1c6..cf2468da68e9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -51,7 +51,10 @@ static inline void mapping_set_error(struct address_space *mapping, int error) return; /* Record in wb_err for checkers using errseq_t based tracking */ - filemap_set_wb_err(mapping, error); + __filemap_set_wb_err(mapping, error); + + /* Record it in superblock */ + errseq_set(&mapping->host->i_sb->s_wb_err, error); /* Record it in flags for now, for legacy callers */ if (error == -ENOSPC) @@ -205,6 +208,43 @@ static inline int page_cache_add_speculative(struct page *page, int count) return __page_cache_add_speculative(page, count); } +/** + * attach_page_private - Attach private data to a page. + * @page: Page to attach data to. + * @data: Data to attach to page. + * + * Attaching private data to a page increments the page's reference count. + * The data must be detached before the page will be freed. + */ +static inline void attach_page_private(struct page *page, void *data) +{ + get_page(page); + set_page_private(page, (unsigned long)data); + SetPagePrivate(page); +} + +/** + * detach_page_private - Detach private data from a page. + * @page: Page to detach data from. + * + * Removes the data that was previously attached to the page and decrements + * the refcount on the page. + * + * Return: Data that was attached to the page. + */ +static inline void *detach_page_private(struct page *page) +{ + void *data = (void *)page_private(page); + + if (!PagePrivate(page)) + return NULL; + ClearPagePrivate(page); + set_page_private(page, 0); + put_page(page); + + return data; +} + #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else @@ -498,7 +538,7 @@ static inline int lock_page_killable(struct page *page) * lock_page_or_retry - Lock the page, unless this would block and the * caller indicated that it can handle a retry. * - * Return value and mmap_sem implications depend on flags; see + * Return value and mmap_lock implications depend on flags; see * __lock_page_or_retry(). 
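The attach_page_private()/detach_page_private() helpers above fold a pattern filesystems used to open-code (get_page + set_page_private + SetPagePrivate, and its inverse) into one place. A minimal sketch of the pairing; struct foo_data is hypothetical:

static void foo_attach(struct page *page, struct foo_data *fd)
{
        attach_page_private(page, fd);          /* takes a page reference */
}

static void foo_release(struct page *page)
{
        struct foo_data *fd = detach_page_private(page);

        if (fd)                                 /* ref and PG_private dropped */
                kfree(fd);
}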
*/ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, @@ -615,6 +655,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); +#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) + +void page_cache_sync_readahead(struct address_space *, struct file_ra_state *, + struct file *, pgoff_t index, unsigned long req_count); +void page_cache_async_readahead(struct address_space *, struct file_ra_state *, + struct file *, struct page *, pgoff_t index, + unsigned long req_count); +void page_cache_readahead_unbounded(struct address_space *, struct file *, + pgoff_t index, unsigned long nr_to_read, + unsigned long lookahead_count); + /* * Like add_to_page_cache_locked, but used to add newly allocated pages: * the page is new, so we can just run __SetPageLocked() against it. @@ -631,6 +682,146 @@ static inline int add_to_page_cache(struct page *page, return error; } +/** + * struct readahead_control - Describes a readahead request. + * + * A readahead request is for consecutive pages. Filesystems which + * implement the ->readahead method should call readahead_page() or + * readahead_page_batch() in a loop and attempt to start I/O against + * each page in the request. + * + * Most of the fields in this struct are private and should be accessed + * by the functions below. + * + * @file: The file, used primarily by network filesystems for authentication. + * May be NULL if invoked internally by the filesystem. + * @mapping: Readahead this filesystem object. + */ +struct readahead_control { + struct file *file; + struct address_space *mapping; +/* private: use the readahead_* accessors instead */ + pgoff_t _index; + unsigned int _nr_pages; + unsigned int _batch_count; +}; + +/** + * readahead_page - Get the next page to read. + * @rac: The current readahead request. + * + * Context: The page is locked and has an elevated refcount. The caller + * should decreases the refcount once the page has been submitted for I/O + * and unlock the page once all I/O to that page has completed. + * Return: A pointer to the next page, or %NULL if we are done. + */ +static inline struct page *readahead_page(struct readahead_control *rac) +{ + struct page *page; + + BUG_ON(rac->_batch_count > rac->_nr_pages); + rac->_nr_pages -= rac->_batch_count; + rac->_index += rac->_batch_count; + + if (!rac->_nr_pages) { + rac->_batch_count = 0; + return NULL; + } + + page = xa_load(&rac->mapping->i_pages, rac->_index); + VM_BUG_ON_PAGE(!PageLocked(page), page); + rac->_batch_count = hpage_nr_pages(page); + + return page; +} + +static inline unsigned int __readahead_batch(struct readahead_control *rac, + struct page **array, unsigned int array_sz) +{ + unsigned int i = 0; + XA_STATE(xas, &rac->mapping->i_pages, 0); + struct page *page; + + BUG_ON(rac->_batch_count > rac->_nr_pages); + rac->_nr_pages -= rac->_batch_count; + rac->_index += rac->_batch_count; + rac->_batch_count = 0; + + xas_set(&xas, rac->_index); + rcu_read_lock(); + xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageTail(page), page); + array[i++] = page; + rac->_batch_count += hpage_nr_pages(page); + + /* + * The page cache isn't using multi-index entries yet, + * so the xas cursor needs to be manually moved to the + * next index. This can be removed once the page cache + * is converted. 
+ */ + if (PageHead(page)) + xas_set(&xas, rac->_index + rac->_batch_count); + + if (i == array_sz) + break; + } + rcu_read_unlock(); + + return i; +} + +/** + * readahead_page_batch - Get a batch of pages to read. + * @rac: The current readahead request. + * @array: An array of pointers to struct page. + * + * Context: The pages are locked and have an elevated refcount. The caller + * should decreases the refcount once the page has been submitted for I/O + * and unlock the page once all I/O to that page has completed. + * Return: The number of pages placed in the array. 0 indicates the request + * is complete. + */ +#define readahead_page_batch(rac, array) \ + __readahead_batch(rac, array, ARRAY_SIZE(array)) + +/** + * readahead_pos - The byte offset into the file of this readahead request. + * @rac: The readahead request. + */ +static inline loff_t readahead_pos(struct readahead_control *rac) +{ + return (loff_t)rac->_index * PAGE_SIZE; +} + +/** + * readahead_length - The number of bytes in this readahead request. + * @rac: The readahead request. + */ +static inline loff_t readahead_length(struct readahead_control *rac) +{ + return (loff_t)rac->_nr_pages * PAGE_SIZE; +} + +/** + * readahead_index - The index of the first page in this readahead request. + * @rac: The readahead request. + */ +static inline pgoff_t readahead_index(struct readahead_control *rac) +{ + return rac->_index; +} + +/** + * readahead_count - The number of pages in this readahead request. + * @rac: The readahead request. + */ +static inline unsigned int readahead_count(struct readahead_control *rac) +{ + return rac->_nr_pages; +} + static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> diff --git a/include/linux/parport.h b/include/linux/parport.h index 13932ce8b37b..1fb508c19e83 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -325,18 +325,10 @@ struct pardev_cb { unsigned int flags; }; -/* parport_register_device declares that a device is connected to a - port, and tells the kernel all it needs to know. - - pf is the preemption function (may be NULL for no callback) - - kf is the wake-up function (may be NULL for no callback) - - irq_func is the interrupt handler (may be NULL for no interrupts) - - handle is a user pointer that gets handed to callback functions. */ -struct pardevice *parport_register_device(struct parport *port, - const char *name, - int (*pf)(void *), void (*kf)(void *), - void (*irq_func)(void *), - int flags, void *handle); - +/* + * parport_register_dev_model declares that a device is connected to a + * port, and tells the kernel all it needs to know. + */ struct pardevice * parport_register_dev_model(struct parport *port, const char *name, const struct pardev_cb *par_dev_cb, int cnt); diff --git a/include/linux/parser.h b/include/linux/parser.h index 12fc3482f5fc..89e2b23fb888 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -7,7 +7,8 @@ * but could potentially be used anywhere else that simple option=arg * parsing is required. */ - +#ifndef _LINUX_PARSER_H +#define _LINUX_PARSER_H /* associates an integer enumerator with a pattern string. 
*/ struct match_token { @@ -34,3 +35,5 @@ int match_hex(substring_t *, int *result); bool match_wildcard(const char *pattern, const char *str); size_t match_strlcpy(char *, const substring_t *, size_t); char *match_strdup(const substring_t *); + +#endif /* _LINUX_PARSER_H */ diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index ece607607a86..24125778ef3e 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -4,21 +4,25 @@ #include <linux/genhd.h> +struct disk_stats { + u64 nsecs[NR_STAT_GROUPS]; + unsigned long sectors[NR_STAT_GROUPS]; + unsigned long ios[NR_STAT_GROUPS]; + unsigned long merges[NR_STAT_GROUPS]; + unsigned long io_ticks; + local_t in_flight[2]; +}; + /* * Macros to operate on percpu disk statistics: * - * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters - * and should be called between disk_stat_lock() and - * disk_stat_unlock(). + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should + * be called between disk_stat_lock() and disk_stat_unlock(). * * part_stat_read() can be called at any time. - * - * part_stat_{add|set_all}() and {init|free}_part_stats are for - * internal use only. */ -#ifdef CONFIG_SMP -#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) -#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) +#define part_stat_lock() preempt_disable() +#define part_stat_unlock() preempt_enable() #define part_stat_get_cpu(part, field, cpu) \ (per_cpu_ptr((part)->dkstats, (cpu))->field) @@ -44,50 +48,13 @@ static inline void part_stat_set_all(struct hd_struct *part, int value) sizeof(struct disk_stats)); } -static inline int init_part_stats(struct hd_struct *part) -{ - part->dkstats = alloc_percpu(struct disk_stats); - if (!part->dkstats) - return 0; - return 1; -} - -static inline void free_part_stats(struct hd_struct *part) -{ - free_percpu(part->dkstats); -} - -#else /* !CONFIG_SMP */ -#define part_stat_lock() ({ rcu_read_lock(); 0; }) -#define part_stat_unlock() rcu_read_unlock() - -#define part_stat_get(part, field) ((part)->dkstats.field) -#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field) -#define part_stat_read(part, field) part_stat_get(part, field) - -static inline void part_stat_set_all(struct hd_struct *part, int value) -{ - memset(&part->dkstats, value, sizeof(struct disk_stats)); -} - -static inline int init_part_stats(struct hd_struct *part) -{ - return 1; -} - -static inline void free_part_stats(struct hd_struct *part) -{ -} - -#endif /* CONFIG_SMP */ - #define part_stat_read_accum(part, field) \ (part_stat_read(part, field[STAT_READ]) + \ part_stat_read(part, field[STAT_WRITE]) + \ part_stat_read(part, field[STAT_DISCARD])) #define __part_stat_add(part, field, addnd) \ - (part_stat_get(part, field) += (addnd)) + __this_cpu_add((part)->dkstats->field, addnd) #define part_stat_add(part, field, addnd) do { \ __part_stat_add((part), field, addnd); \ diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 2d155bfb8fbf..5ba475ca9078 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -27,7 +27,7 @@ extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); struct pci_ecam_ops; extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, - struct pci_ecam_ops **ecam_ops); + const struct pci_ecam_ops **ecam_ops); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { @@ -107,10 +107,12 @@ static inline void acpiphp_check_host_bridge(struct acpi_device 
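With the part_stat rewrite above, part_stat_lock() only disables preemption; since the counters are now always per-CPU, that is sufficient for __this_cpu_add(). A hedged sketch of charging one read I/O to a partition (example_account_read() is illustrative, not kernel code):

static void example_account_read(struct hd_struct *part, unsigned int sectors)
{
	part_stat_lock();
	part_stat_inc(part, ios[STAT_READ]);
	part_stat_add(part, sectors[STAT_READ], sectors);
	part_stat_unlock();
}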
*adev) { } #endif extern const guid_t pci_acpi_dsm_guid; -#define IGNORE_PCI_BOOT_CONFIG_DSM 0x05 -#define DEVICE_LABEL_DSM 0x07 -#define RESET_DELAY_DSM 0x08 -#define FUNCTION_DELAY_DSM 0x09 + +/* _DSM Definitions for PCI */ +#define DSM_PCI_PRESERVE_BOOT_CONFIG 0x05 +#define DSM_PCI_DEVICE_NAME 0x07 +#define DSM_PCI_POWER_ON_RESET_DELAY 0x08 +#define DSM_PCI_DEVICE_READINESS_DURATIONS 0x09 #ifdef CONFIG_PCIE_EDR void pci_acpi_add_edr_notifier(struct pci_dev *pdev); @@ -125,10 +127,4 @@ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } #endif /* CONFIG_ACPI */ -#ifdef CONFIG_ACPI_APEI -extern bool aer_acpi_firmware_first(void); -#else -static inline bool aer_acpi_firmware_first(void) { return false; } -#endif - #endif /* _PCI_ACPI_H_ */ diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index d08f0869f121..f75c307f346d 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -6,11 +6,14 @@ #ifdef CONFIG_PCI_ATS /* Address Translation Service */ +bool pci_ats_supported(struct pci_dev *dev); int pci_enable_ats(struct pci_dev *dev, int ps); void pci_disable_ats(struct pci_dev *dev); int pci_ats_queue_depth(struct pci_dev *dev); int pci_ats_page_aligned(struct pci_dev *dev); #else /* CONFIG_PCI_ATS */ +static inline bool pci_ats_supported(struct pci_dev *d) +{ return false; } static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } static inline void pci_disable_ats(struct pci_dev *d) { } diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index a73164c85e78..1af5cb02ef7f 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -29,7 +29,7 @@ struct pci_config_window { struct resource res; struct resource busr; void *priv; - struct pci_ecam_ops *ops; + const struct pci_ecam_ops *ops; union { void __iomem *win; /* 64-bit single mapping */ void __iomem **winp; /* 32-bit per-bus mapping */ @@ -40,29 +40,28 @@ struct pci_config_window { /* create and free pci_config_window */ struct pci_config_window *pci_ecam_create(struct device *dev, struct resource *cfgres, struct resource *busr, - struct pci_ecam_ops *ops); + const struct pci_ecam_ops *ops); void pci_ecam_free(struct pci_config_window *cfg); /* map_bus when ->sysdata is an instance of pci_config_window */ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where); /* default ECAM ops */ -extern struct pci_ecam_ops pci_generic_ecam_ops; +extern const struct pci_ecam_ops pci_generic_ecam_ops; #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) -extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ -extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ -extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ -extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ -extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ -extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ -extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ +extern const struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ +extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ +extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ +extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ +extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ +extern const struct pci_ecam_ops 
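The new pci_ats_supported() gives callers a single gate that covers both the ATS capability check and the device-trust policy. A minimal sketch of how an IOMMU driver might use it (example_enable_ats() is a hypothetical wrapper):

static int example_enable_ats(struct pci_dev *pdev, int ps)
{
	if (!pci_ats_supported(pdev))
		return -ENODEV;	/* no ATS capability, or device untrusted */

	return pci_enable_ats(pdev, ps);
}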
xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ +extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ #endif -#ifdef CONFIG_PCI_HOST_COMMON +#if IS_ENABLED(CONFIG_PCI_HOST_COMMON) /* for DT-based PCI controllers that support ECAM */ -int pci_host_common_probe(struct platform_device *pdev, - struct pci_ecam_ops *ops); +int pci_host_common_probe(struct platform_device *pdev); int pci_host_common_remove(struct platform_device *pdev); #endif #endif diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index e0ed9d01f6e5..cc66bec8be90 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -66,19 +66,27 @@ struct pci_epc_ops { }; /** + * struct pci_epc_mem_window - address window of the endpoint controller + * @phys_base: physical base address of the PCI address window + * @size: the size of the PCI address window + * @page_size: size of each page + */ +struct pci_epc_mem_window { + phys_addr_t phys_base; + size_t size; + size_t page_size; +}; + +/** * struct pci_epc_mem - address space of the endpoint controller - * @phys_base: physical base address of the PCI address space - * @size: the size of the PCI address space + * @window: address window of the endpoint controller * @bitmap: bitmap to manage the PCI address space * @pages: number of bits representing the address region - * @page_size: size of each page * @lock: mutex to protect bitmap */ struct pci_epc_mem { - phys_addr_t phys_base; - size_t size; + struct pci_epc_mem_window window; unsigned long *bitmap; - size_t page_size; int pages; /* mutex to protect against concurrent access for memory allocation*/ struct mutex lock; @@ -89,7 +97,11 @@ struct pci_epc_mem { * @dev: PCI EPC device * @pci_epf: list of endpoint functions present in this EPC device * @ops: function pointers for performing endpoint operations - * @mem: address space of the endpoint controller + * @windows: array of address space of the endpoint controller + * @mem: first window of the endpoint controller, which corresponds to + * default address space of the endpoint controller supporting + * single window. 
+ * @num_windows: number of windows supported by device * @max_functions: max number of functions that can be configured in this EPC * @group: configfs group representing the PCI EPC device * @lock: mutex to protect pci_epc ops @@ -100,7 +112,9 @@ struct pci_epc { struct device dev; struct list_head pci_epf; const struct pci_epc_ops *ops; + struct pci_epc_mem **windows; struct pci_epc_mem *mem; + unsigned int num_windows; u8 max_functions; struct config_group *group; /* mutex to protect against concurrent access of EP controller */ @@ -137,9 +151,6 @@ struct pci_epc_features { #define devm_pci_epc_create(dev, ops) \ __devm_pci_epc_create((dev), (ops), THIS_MODULE) -#define pci_epc_mem_init(epc, phys_addr, size) \ - __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE) - static inline void epc_set_drvdata(struct pci_epc *epc, void *data) { dev_set_drvdata(&epc->dev, data); @@ -195,8 +206,11 @@ unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features struct pci_epc *pci_epc_get(const char *epc_name); void pci_epc_put(struct pci_epc *epc); -int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size, - size_t page_size); +int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base, + size_t size, size_t page_size); +int pci_epc_multi_mem_init(struct pci_epc *epc, + struct pci_epc_mem_window *window, + unsigned int num_windows); void pci_epc_mem_exit(struct pci_epc *epc); void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size); diff --git a/include/linux/pci.h b/include/linux/pci.h index 83ce1cdf5676..c79d83304e52 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -100,9 +100,21 @@ enum { PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, #endif - /* Resources assigned to buses behind the bridge */ +/* PCI-to-PCI (P2P) bridge windows */ +#define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0) +#define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1) +#define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2) + +/* CardBus bridge windows */ +#define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0) +#define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1) +#define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2) +#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3) + +/* Total number of bridge resources for P2P and CardBus */ #define PCI_BRIDGE_RESOURCE_NUM 4 + /* Resources assigned to buses behind the bridge */ PCI_BRIDGE_RESOURCES, PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + PCI_BRIDGE_RESOURCE_NUM - 1, @@ -279,7 +291,7 @@ struct pci_cap_saved_data { u16 cap_nr; bool cap_extended; unsigned int size; - u32 data[0]; + u32 data[]; }; struct pci_cap_saved_state { @@ -420,8 +432,6 @@ struct pci_dev { * mappings to make sure they cannot access arbitrary memory. 
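A sketch of an endpoint-controller driver describing two outbound windows with the new pci_epc_multi_mem_init() interface; the base addresses and sizes here are invented for illustration:

static struct pci_epc_mem_window example_windows[] = {
	{ .phys_base = 0x80000000, .size = SZ_64M, .page_size = SZ_4K },
	{ .phys_base = 0x90000000, .size = SZ_1M,  .page_size = SZ_4K },
};

static int example_epc_init_mem(struct pci_epc *epc)
{
	/* epc->mem will point at the first (default) window */
	return pci_epc_multi_mem_init(epc, example_windows,
				      ARRAY_SIZE(example_windows));
}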
*/ unsigned int untrusted:1; - unsigned int __aer_firmware_first_valid:1; - unsigned int __aer_firmware_first:1; unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; @@ -532,7 +542,7 @@ struct pci_host_bridge { resource_size_t start, resource_size_t size, resource_size_t align); - unsigned long private[0] ____cacheline_aligned; + unsigned long private[] ____cacheline_aligned; }; #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) @@ -1025,7 +1035,6 @@ void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); -struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); @@ -2048,6 +2057,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); void pci_disable_sriov(struct pci_dev *dev); + +int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id); int pci_iov_add_virtfn(struct pci_dev *dev, int id); void pci_iov_remove_virtfn(struct pci_dev *dev, int id); int pci_num_vf(struct pci_dev *dev); @@ -2073,6 +2084,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) } static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } + +static inline int pci_iov_sysfs_link(struct pci_dev *dev, + struct pci_dev *virtfn, int id) +{ + return -ENODEV; +} static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id) { return -ENOSYS; @@ -2143,17 +2160,23 @@ static inline int pci_pcie_type(const struct pci_dev *dev) return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } +/** + * pcie_find_root_port - Get the PCIe root port device + * @dev: PCI device + * + * Traverse up the parent chain and return the PCIe Root Port PCI Device + * for a given PCI/PCIe Device. 
+ */ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) { - while (1) { - if (!pci_is_pcie(dev)) - break; - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) - return dev; - if (!dev->bus->self) - break; - dev = dev->bus->self; + struct pci_dev *bridge = pci_upstream_bridge(dev); + + while (bridge) { + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) + return bridge; + bridge = pci_upstream_bridge(bridge); } + return NULL; } diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1dfc4e1dcb94..0ad57693f392 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -550,6 +550,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 @@ -1832,6 +1833,12 @@ #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 +#define PCI_VENDOR_ID_PERICOM 0x12D8 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 + #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9c3e7619c929..b4bb32082342 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -61,7 +61,7 @@ struct perf_guest_info_callbacks { struct perf_callchain_entry { __u64 nr; - __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ + __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ }; struct perf_callchain_entry_ctx { @@ -113,7 +113,7 @@ struct perf_raw_record { struct perf_branch_stack { __u64 nr; __u64 hw_idx; - struct perf_branch_entry entries[0]; + struct perf_branch_entry entries[]; }; struct task_struct; @@ -1280,15 +1280,12 @@ extern int sysctl_perf_cpu_time_max_percent; extern void perf_sample_event_took(u64 sample_len_ns); -extern int perf_proc_update_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - +int perf_proc_update_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); int perf_event_max_stack_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); /* Access to perf_event_open(2) syscall. 
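A usage sketch for the rewritten helper: starting from an endpoint, walk up to its Root Port, for example to inspect the Root Port before enabling a feature on the device (the example_* name is hypothetical):

static bool example_behind_root_port(struct pci_dev *pdev, u16 vendor)
{
	struct pci_dev *rp = pcie_find_root_port(pdev);

	return rp && rp->vendor == vendor;
}

Note that the rewritten walk starts at pci_upstream_bridge(dev), so conventional callers pass an endpoint or switch port rather than the Root Port itself.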
*/ #define PERF_SECURITY_OPEN 0 @@ -1305,7 +1302,7 @@ static inline int perf_is_paranoid(void) static inline int perf_allow_kernel(struct perf_event_attr *attr) { - if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN)) + if (sysctl_perf_event_paranoid > 1 && !perfmon_capable()) return -EACCES; return security_perf_event_open(attr, PERF_SECURITY_KERNEL); @@ -1313,7 +1310,7 @@ static inline int perf_allow_kernel(struct perf_event_attr *attr) static inline int perf_allow_cpu(struct perf_event_attr *attr) { - if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN)) + if (sysctl_perf_event_paranoid > 0 && !perfmon_capable()) return -EACCES; return security_perf_event_open(attr, PERF_SECURITY_CPU); @@ -1321,7 +1318,7 @@ static inline int perf_allow_cpu(struct perf_event_attr *attr) static inline int perf_allow_tracepoint(struct perf_event_attr *attr) { - if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN)) + if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) return -EPERM; return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT); diff --git a/include/asm-generic/pgtable.h b/include/linux/pgtable.h index 329b8c8ca703..56c1e8eb7bb0 100644 --- a/include/asm-generic/pgtable.h +++ b/include/linux/pgtable.h @@ -1,8 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_GENERIC_PGTABLE_H -#define _ASM_GENERIC_PGTABLE_H +#ifndef _LINUX_PGTABLE_H +#define _LINUX_PGTABLE_H #include <linux/pfn.h> +#include <asm/pgtable.h> #ifndef __ASSEMBLY__ #ifdef CONFIG_MMU @@ -27,6 +28,121 @@ #define USER_PGTABLES_CEILING 0UL #endif +/* + * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD] + * + * The pXx_index() functions return the index of the entry in the page + * table page which would control the given virtual address + * + * As these functions may be used by the same code for different levels of + * the page table folding, they are always available, regardless of + * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0 + * because in such cases PTRS_PER_PxD equals 1. + */ + +static inline unsigned long pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} + +#ifndef pmd_index +static inline unsigned long pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} +#define pmd_index pmd_index +#endif + +#ifndef pud_index +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} +#define pud_index pud_index +#endif + +#ifndef pgd_index +/* Must be a compile-time constant, so implement it as a macro */ +#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) +#endif + +#ifndef pte_offset_kernel +static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) +{ + return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); +} +#define pte_offset_kernel pte_offset_kernel +#endif + +#if defined(CONFIG_HIGHPTE) +#define pte_offset_map(dir, address) \ + ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ + pte_index((address))) +#define pte_unmap(pte) kunmap_atomic((pte)) +#else +#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) +#define pte_unmap(pte) ((void)(pte)) /* NOP */ +#endif + +/* Find an entry in the second-level page table.. 
*/ +#ifndef pmd_offset +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); +} +#define pmd_offset pmd_offset +#endif + +#ifndef pud_offset +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +{ + return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address); +} +#define pud_offset pud_offset +#endif + +static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) +{ + return (pgd + pgd_index(address)); +}; + +/* + * a shortcut to get a pgd_t in a given mm + */ +#ifndef pgd_offset +#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) +#endif + +/* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's + */ +#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) + +/* + * In many cases it is known that a virtual address is mapped at PMD or PTE + * level, so instead of traversing all the page table levels, we can get a + * pointer to the PMD entry in user or kernel page table or translate a virtual + * address to the pointer in the PTE in the kernel page tables with simple + * helpers. + */ +static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va) +{ + return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va); +} + +static inline pmd_t *pmd_off_k(unsigned long va) +{ + return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va); +} + +static inline pte_t *virt_to_kpte(unsigned long vaddr) +{ + pmd_t *pmd = pmd_off_k(vaddr); + + return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr); +} + #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, @@ -133,6 +249,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, } #endif +#ifndef __HAVE_ARCH_PTEP_GET +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, @@ -159,11 +282,11 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL -static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm, +static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, int full) { - return pmdp_huge_get_and_clear(mm, address, pmdp); + return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); } #endif @@ -188,6 +311,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, } #endif + +/* + * If two threads concurrently fault at the same page, the thread that + * won the race updates the PTE and its local TLB/Cache. The other thread + * gives up, simply does nothing, and continues; on architectures where + * software can update TLB, local TLB can be updated here to avoid next page + * fault. This function updates TLB only, do nothing with cache or others. + * It is the difference with function update_mmu_cache. 
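virt_to_kpte() above collapses the full pgd/p4d/pud/pmd walk for kernel addresses known to be mapped at PTE granularity. A minimal sketch combining it with the new ptep_get() accessor, assuming a kernel context (example_kaddr_is_present() is illustrative):

static bool example_kaddr_is_present(unsigned long vaddr)
{
	pte_t *ptep = virt_to_kpte(vaddr);

	/* NULL means the PMD entry was empty; huge mappings are not handled */
	return ptep && pte_present(ptep_get(ptep));
}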
+ */ +#ifndef __HAVE_ARCH_UPDATE_MMU_TLB +static inline void update_mmu_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ +} +#define __HAVE_ARCH_UPDATE_MMU_TLB +#endif + /* * Some architectures may be able to avoid expensive synchronization * primitives when modifications are made to PTE's which are already @@ -227,6 +367,22 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres } #endif +/* + * On some architectures hardware does not set page access bit when accessing + * memory page, it is responsibilty of software setting this bit. It brings + * out extra page fault penalty to track page access bit. For optimization page + * access bit can be set during all page fault flow on these arches. + * To be differentiate with macro pte_mkyoung, this macro is used on platforms + * where software maintains page access bit. + */ +#ifndef pte_sw_mkyoung +static inline pte_t pte_sw_mkyoung(pte_t pte) +{ + return pte; +} +#define pte_sw_mkyoung pte_sw_mkyoung +#endif + #ifndef pte_savedwrite #define pte_savedwrite pte_write #endif @@ -491,6 +647,10 @@ static inline int arch_unmap_one(struct mm_struct *mm, #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address) #endif +#ifndef pgprot_nx +#define pgprot_nx(prot) (prot) +#endif + #ifndef pgprot_noncached #define pgprot_noncached(prot) (prot) #endif @@ -981,11 +1141,11 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp) #endif /* * This function is meant to be used by sites walking pagetables with - * the mmap_sem hold in read mode to protect against MADV_DONTNEED and + * the mmap_lock held in read mode to protect against MADV_DONTNEED and * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd * into a null pmd and the transhuge page fault can convert a null pmd * into an hugepmd or into a regular pmd (if the hugepage allocation - * fails). While holding the mmap_sem in read mode the pmd becomes + * fails). While holding the mmap_lock in read mode the pmd becomes * stable and stops changing under us only if it's not null and not a * transhuge pmd. When those races occurs and this function makes a * difference vs the standard pmd_none_or_clear_bad, the result is @@ -995,7 +1155,7 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp) * * For 32bit kernels with a 64bit large pmd_t this automatically takes * care of reading the pmd atomically to avoid SMP race conditions - * against pmd_populate() when the mmap_sem is hold for reading by the + * against pmd_populate() when the mmap_lock is hold for reading by the * caller (a special atomic read not done by "gcc" as in the generic * version above, is also needed when THP is disabled because the page * fault can populate the pmd from under us). @@ -1209,6 +1369,29 @@ static inline bool arch_has_pfn_modify_check(void) # define PAGE_KERNEL_EXEC PAGE_KERNEL #endif +/* + * Page Table Modification bits for pgtbl_mod_mask. + * + * These are used by the p?d_alloc_track*() set of functions an in the generic + * vmalloc/ioremap code to track at which page-table levels entries have been + * modified. Based on that the code can better decide when vmalloc and ioremap + * mapping changes need to be synchronized to other page-tables in the system. 
+ */ +#define __PGTBL_PGD_MODIFIED 0 +#define __PGTBL_P4D_MODIFIED 1 +#define __PGTBL_PUD_MODIFIED 2 +#define __PGTBL_PMD_MODIFIED 3 +#define __PGTBL_PTE_MODIFIED 4 + +#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED) +#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED) +#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED) +#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED) +#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED) + +/* Page-Table Modification Mask */ +typedef unsigned int pgtbl_mod_mask; + #endif /* !__ASSEMBLY__ */ #ifndef io_remap_pfn_range @@ -1259,4 +1442,4 @@ static inline bool arch_has_pfn_modify_check(void) #define pmd_leaf(x) 0 #endif -#endif /* _ASM_GENERIC_PGTABLE_H */ +#endif /* _LINUX_PGTABLE_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 2432ca463ddc..8c05d0fb5c00 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -15,6 +15,7 @@ #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/linkmode.h> +#include <linux/netlink.h> #include <linux/mdio.h> #include <linux/mii.h> #include <linux/mii_timestamper.h> @@ -25,6 +26,7 @@ #include <linux/u64_stats_sync.h> #include <linux/irqreturn.h> #include <linux/iopoll.h> +#include <linux/refcount.h> #include <linux/atomic.h> @@ -77,6 +79,7 @@ extern const int phy_10gbit_features_array[1]; #define PHY_IS_INTERNAL 0x00000001 #define PHY_RST_AFTER_CLK_EN 0x00000002 +#define PHY_POLL_CABLE_TEST 0x00000004 #define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ @@ -206,12 +209,6 @@ static inline const char *phy_modes(phy_interface_t interface) #define MII_BUS_ID_SIZE 61 -/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit - IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ -#define MII_ADDR_C45 (1<<30) -#define MII_DEVADDR_C45_SHIFT 16 -#define MII_REGADDR_C45_MASK GENMASK(15, 0) - struct device; struct phylink; struct sfp_bus; @@ -227,6 +224,28 @@ struct mdio_bus_stats { struct u64_stats_sync syncp; }; +/* Represents a shared structure between different phydev's in the same + * package, for example a quad PHY. See phy_package_join() and + * phy_package_leave(). + */ +struct phy_package_shared { + int addr; + refcount_t refcnt; + unsigned long flags; + size_t priv_size; + + /* private data pointer */ + /* note that this pointer is shared between different phydevs and + * the user has to take care of appropriate locking. It is allocated + * and freed automatically by phy_package_join() and + * phy_package_leave(). + */ + void *priv; +}; + +/* used as bit number in atomic bitops */ +#define PHY_SHARED_F_INIT_DONE 0 + /* * The Bus class for PHYs. 
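The modification-mask bits above are meant to be accumulated while a range is being mapped and handed back so the caller can decide whether other page tables need synchronizing (e.g. via arch_sync_kernel_mappings()). A rough sketch of the pattern, assuming a kernel context; example_map_range() and its elided body are hypothetical:

static int example_map_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, pgtbl_mod_mask *mask)
{
	/* ... allocate and populate entries under *pmd here ... */
	*mask |= PGTBL_PMD_MODIFIED;	/* record the level that was touched */
	return 0;
}

A caller would OR together the masks from every level and act on the combined result once the whole range is mapped.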
Devices which provide access to * PHYs should register using this structure @@ -241,6 +260,9 @@ struct mii_bus { int (*reset)(struct mii_bus *bus); struct mdio_bus_stats stats[PHY_MAX_ADDR]; + unsigned int is_managed:1; /* is device-managed */ + unsigned int is_managed_registered:1; + /* * A lock to ensure that only one thing can read/write * the MDIO bus at a time @@ -275,6 +297,12 @@ struct mii_bus { int reset_delay_us; /* RESET GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; + + /* protect access to the shared element */ + struct mutex shared_lock; + + /* shared state across different PHYs */ + struct phy_package_shared *shared[PHY_MAX_ADDR]; }; #define to_mii_bus(d) container_of(d, struct mii_bus, dev) @@ -286,6 +314,20 @@ static inline struct mii_bus *mdiobus_alloc(void) int __mdiobus_register(struct mii_bus *bus, struct module *owner); #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE) +static inline int devm_mdiobus_register(struct mii_bus *bus) +{ + int ret; + + if (!bus->is_managed) + return -EPERM; + + ret = mdiobus_register(bus); + if (!ret) + bus->is_managed_registered = 1; + + return ret; +} + void mdiobus_unregister(struct mii_bus *bus); void mdiobus_free(struct mii_bus *bus); struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv); @@ -326,6 +368,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); * - irq or timer will set NOLINK if link goes down * - phy_stop moves to HALTED * + * CABLETEST: PHY is performing a cable test. Packet reception/sending + * is not expected to work, carrier will be indicated as down. PHY will be + * poll once per second, or on interrupt for it current state. + * Once complete, move to UP to restart the PHY. + * - phy_stop aborts the running test and moves to HALTED + * * HALTED: PHY is up, but no polling or interrupts are done. Or * PHY is in an error state. * - phy_start moves to UP @@ -337,6 +385,7 @@ enum phy_state { PHY_UP, PHY_RUNNING, PHY_NOLINK, + PHY_CABLETEST, }; /** @@ -431,6 +480,9 @@ struct phy_device { int duplex; int pause; int asym_pause; + u8 master_slave_get; + u8 master_slave_set; + u8 master_slave_state; /* Union of PHY and Attached devices' supported link modes */ /* See ethtool.h for more info */ @@ -461,6 +513,15 @@ struct phy_device { /* For use by PHYs to maintain extra state */ void *priv; + /* shared data pointer */ + /* For use by PHYs inside the same package that need a shared state. */ + struct phy_package_shared *shared; + + /* Reporting cable test results */ + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + /* Interrupt and Polling infrastructure */ struct delayed_work state_queue; @@ -476,7 +537,7 @@ struct phy_device { u8 mdix; u8 mdix_ctrl; - void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier); + void (*phy_link_change)(struct phy_device *phydev, bool up); void (*adjust_link)(struct net_device *dev); #if IS_ENABLED(CONFIG_MACSEC) @@ -487,6 +548,18 @@ struct phy_device { #define to_phy_device(d) container_of(to_mdio_device(d), \ struct phy_device, mdio) +/* A structure containing possible configuration parameters + * for a TDR cable test. The driver does not need to implement + * all the parameters, but should report what is actually used. 
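devm_mdiobus_register() above refuses buses that were not device-managed at allocation time, keeping the alloc and register lifetimes paired. A hedged probe sketch; the example_* accessors are placeholders for real register I/O:

static int example_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	return 0xffff;	/* placeholder: pretend nothing answered */
}

static int example_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	return 0;	/* placeholder */
}

static int example_mdio_probe(struct platform_device *pdev)
{
	struct mii_bus *bus = devm_mdiobus_alloc(&pdev->dev);

	if (!bus)
		return -ENOMEM;
	bus->name = "example-mdio";
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-%d", pdev->id);
	bus->read = example_mdio_read;
	bus->write = example_mdio_write;

	/* is_managed was set by devm_mdiobus_alloc(), so this is allowed */
	return devm_mdiobus_register(bus);
}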
+ */ +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; +#define PHY_PAIR_ALL -1 + /* struct phy_driver: Driver structure for a particular PHY type * * driver_data: static driver data @@ -636,6 +709,18 @@ struct phy_driver { int (*module_eeprom)(struct phy_device *dev, struct ethtool_eeprom *ee, u8 *data); + /* Start a cable test */ + int (*cable_test_start)(struct phy_device *dev); + + /* Start a raw TDR cable test */ + int (*cable_test_tdr_start)(struct phy_device *dev, + const struct phy_tdr_config *config); + + /* Once per second, or on interrupt, request the status of the + * test. + */ + int (*cable_test_get_status)(struct phy_device *dev, bool *finished); + /* Get statistics from the phy using ethtool */ int (*get_sset_count)(struct phy_device *dev); void (*get_strings)(struct phy_device *dev, u8 *data); @@ -649,6 +734,8 @@ struct phy_driver { struct ethtool_tunable *tuna, const void *data); int (*set_loopback)(struct phy_device *dev, bool enable); + int (*get_sqi)(struct phy_device *dev); + int (*get_sqi_max)(struct phy_device *dev); }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) @@ -993,6 +1080,10 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev) */ static inline bool phy_polling_mode(struct phy_device *phydev) { + if (phydev->state == PHY_CABLETEST) + if (phydev->drv->flags & PHY_POLL_CABLE_TEST) + return true; + return phydev->irq == PHY_POLL; } @@ -1174,6 +1265,34 @@ int phy_speed_up(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); int phy_reset_after_clk_enable(struct phy_device *phydev); +#if IS_ENABLED(CONFIG_PHYLIB) +int phy_start_cable_test(struct phy_device *phydev, + struct netlink_ext_ack *extack); +int phy_start_cable_test_tdr(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config); +#else +static inline +int phy_start_cable_test(struct phy_device *phydev, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); + return -EOPNOTSUPP; +} +static inline +int phy_start_cable_test_tdr(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config) +{ + NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); + return -EOPNOTSUPP; +} +#endif + +int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result); +int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair, + u16 cm); + static inline void phy_device_reset(struct phy_device *phydev, int value) { mdio_device_reset(&phydev->mdio, value); @@ -1234,10 +1353,6 @@ static inline int genphy_config_aneg(struct phy_device *phydev) return __genphy_config_aneg(phydev, false); } -static inline int genphy_no_soft_reset(struct phy_device *phydev) -{ - return 0; -} static inline int genphy_no_ack_interrupt(struct phy_device *phydev) { return 0; @@ -1341,6 +1456,10 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev, int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); int phy_ethtool_nway_reset(struct net_device *ndev); +int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size); +void phy_package_leave(struct phy_device *phydev); +int devm_phy_package_join(struct device *dev, struct phy_device *phydev, + int addr, size_t priv_size); #if IS_ENABLED(CONFIG_PHYLIB) int __init mdio_bus_init(void); @@ -1393,6 +1512,58 @@ static inline int phy_ethtool_get_stats(struct phy_device 
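A sketch of the driver side of the new cable-test hooks; the register programming is elided and the example_* names are hypothetical, but the reporting call is the one declared above:

static int example_cable_test_start(struct phy_device *phydev)
{
	/* put the PHY into its vendor-specific cable-diagnostic mode here */
	return 0;
}

static int example_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	*finished = true;
	/* one result per pair, using the ethtool netlink result codes */
	return phy_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
				     ETHTOOL_A_CABLE_RESULT_CODE_OK);
}

static struct phy_driver example_phy_driver = {
	.name			= "example-phy",
	.flags			= PHY_POLL_CABLE_TEST,	/* poll for completion */
	.cable_test_start	= example_cable_test_start,
	.cable_test_get_status	= example_cable_test_get_status,
};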
*phydev, return 0; } +static inline int phy_package_read(struct phy_device *phydev, u32 regnum) +{ + struct phy_package_shared *shared = phydev->shared; + + if (!shared) + return -EIO; + + return mdiobus_read(phydev->mdio.bus, shared->addr, regnum); +} + +static inline int __phy_package_read(struct phy_device *phydev, u32 regnum) +{ + struct phy_package_shared *shared = phydev->shared; + + if (!shared) + return -EIO; + + return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum); +} + +static inline int phy_package_write(struct phy_device *phydev, + u32 regnum, u16 val) +{ + struct phy_package_shared *shared = phydev->shared; + + if (!shared) + return -EIO; + + return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); +} + +static inline int __phy_package_write(struct phy_device *phydev, + u32 regnum, u16 val) +{ + struct phy_package_shared *shared = phydev->shared; + + if (!shared) + return -EIO; + + return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); +} + +static inline bool phy_package_init_once(struct phy_device *phydev) +{ + struct phy_package_shared *shared = phydev->shared; + + if (!shared) + return false; + + return !test_and_set_bit(PHY_SHARED_F_INIT_DONE, &shared->flags); +} + extern struct bus_type mdio_bus_type; struct mdio_board_info { diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h index 5973a6313529..e23b52df93ec 100644 --- a/include/linux/phy/omap_usb.h +++ b/include/linux/phy/omap_usb.h @@ -2,68 +2,14 @@ /* * omap_usb.h -- omap usb2 phy header file * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com * Author: Kishon Vijay Abraham I <kishon@ti.com> */ #ifndef __DRIVERS_OMAP_USB2_H #define __DRIVERS_OMAP_USB2_H -#include <linux/io.h> -#include <linux/usb/otg.h> - -struct usb_dpll_params { - u16 m; - u8 n; - u8 freq:3; - u8 sd; - u32 mf; -}; - -enum omap_usb_phy_type { - TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ - TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ - TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ -}; - -struct omap_usb { - struct usb_phy phy; - struct phy_companion *comparator; - void __iomem *pll_ctrl_base; - void __iomem *phy_base; - struct device *dev; - struct device *control_dev; - struct clk *wkupclk; - struct clk *optclk; - u8 flags; - enum omap_usb_phy_type type; - struct regmap *syscon_phy_power; /* ctrl. reg. acces */ - unsigned int power_reg; /* power reg. 
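The phy_package_* accessors above always address the package base PHY, so package-wide registers can be programmed from whichever device probes first. A probe sketch, assuming a quad PHY whose shared registers live at bus address 0; struct example_priv and the register/value written are invented:

static int example_phy_probe(struct phy_device *phydev)
{
	int ret = devm_phy_package_join(&phydev->mdio.dev, phydev, 0,
					sizeof(struct example_priv));
	if (ret)
		return ret;

	if (phy_package_init_once(phydev)) {
		/* first device in the package: do global setup exactly once */
		ret = phy_package_write(phydev, 0x1f, 0x0001);
	}
	return ret;
}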
index within syscon */ - u32 mask; - u32 power_on; - u32 power_off; -}; - -struct usb_phy_data { - const char *label; - u8 flags; - u32 mask; - u32 power_on; - u32 power_off; -}; - -/* Driver Flags */ -#define OMAP_USB2_HAS_START_SRP (1 << 0) -#define OMAP_USB2_HAS_SET_VBUS (1 << 1) -#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2) - -#define OMAP_DEV_PHY_PD BIT(0) -#define OMAP_USB2_PHY_PD BIT(28) - -#define AM437X_USB2_PHY_PD BIT(0) -#define AM437X_USB2_OTG_PD BIT(1) -#define AM437X_USB2_OTGVDET_EN BIT(19) -#define AM437X_USB2_OTGSESSEND_EN BIT(20) +#include <linux/usb/phy_companion.h> #define phy_to_omapusb(x) container_of((x), struct omap_usb, phy) @@ -76,15 +22,4 @@ static inline int omap_usb2_set_comparator(struct phy_companion *comparator) } #endif -static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset) -{ - return __raw_readl(addr + offset); -} - -static inline void omap_usb_writel(void __iomem *addr, unsigned offset, - u32 data) -{ - __raw_writel(data, addr + offset); -} - #endif /* __DRIVERS_OMAP_USB_H */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 3f8d37ec5503..cc5b452a184e 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -67,6 +67,9 @@ struct phylink_config { struct device *dev; enum phylink_op_type type; bool pcs_poll; + bool poll_fixed_state; + void (*get_fixed_state)(struct phylink_config *config, + struct phylink_link_state *state); }; /** @@ -366,9 +369,6 @@ void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); void phylink_disconnect_phy(struct phylink *); -int phylink_fixed_state_cb(struct phylink *, - void (*cb)(struct net_device *dev, - struct phylink_link_state *)); void phylink_mac_change(struct phylink *, bool up); diff --git a/include/linux/pid.h b/include/linux/pid.h index cc896f0fc4e3..176d6cf80e7c 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -102,12 +102,16 @@ extern void attach_pid(struct task_struct *task, enum pid_type); extern void detach_pid(struct task_struct *task, enum pid_type); extern void change_pid(struct task_struct *task, enum pid_type, struct pid *pid); +extern void exchange_tids(struct task_struct *task, struct task_struct *old); extern void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type); struct pid_namespace; extern struct pid_namespace init_pid_ns; +extern int pid_max; +extern int pid_max_min, pid_max_max; + /* * look up a PID in the hash table. Must be called with the tasklist_lock * or rcu_read_lock() held. 
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 4956e362e55e..5a5cb45ac57e 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -17,12 +17,6 @@ struct fs_pin; -enum { /* definitions for pid_namespace's hide_pid field */ - HIDEPID_OFF = 0, - HIDEPID_NO_ACCESS = 1, - HIDEPID_INVISIBLE = 2, -}; - struct pid_namespace { struct kref kref; struct idr idr; @@ -32,17 +26,11 @@ struct pid_namespace { struct kmem_cache *pid_cachep; unsigned int level; struct pid_namespace *parent; -#ifdef CONFIG_PROC_FS - struct dentry *proc_self; - struct dentry *proc_thread_self; -#endif #ifdef CONFIG_BSD_PROCESS_ACCT struct fs_pin *bacct; #endif struct user_namespace *user_ns; struct ucounts *ucounts; - kgid_t pid_gid; - int hide_pid; int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; } __randomize_layout; diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index ae58fad7f1e0..50afd0d0084c 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -8,6 +8,11 @@ #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ +#define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */ +#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */ +#ifdef CONFIG_WATCH_QUEUE +#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */ +#endif /** * struct pipe_buffer - a linux kernel pipe buffer @@ -33,8 +38,10 @@ struct pipe_buffer { * @wr_wait: writer wait point in case of full pipe * @head: The point of buffer production * @tail: The point of buffer consumption + * @note_loss: The next read() should insert a data-lost message * @max_usage: The maximum number of slots that may be used in the ring * @ring_size: total number of buffers (should be a power of 2) + * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs * @tmp_page: cached released page * @readers: number of current readers of this pipe * @writers: number of current writers of this pipe @@ -45,6 +52,7 @@ struct pipe_buffer { * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers * @user: the user who created this pipe + * @watch_queue: If this pipe is a watch_queue, this is the stuff for that **/ struct pipe_inode_info { struct mutex mutex; @@ -53,6 +61,10 @@ struct pipe_inode_info { unsigned int tail; unsigned int max_usage; unsigned int ring_size; +#ifdef CONFIG_WATCH_QUEUE + bool note_loss; +#endif + unsigned int nr_accounted; unsigned int readers; unsigned int writers; unsigned int files; @@ -63,17 +75,20 @@ struct pipe_inode_info { struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; +#ifdef CONFIG_WATCH_QUEUE + struct watch_queue *watch_queue; +#endif }; /* * Note on the nesting of these functions: * * ->confirm() - * ->steal() + * ->try_steal() * - * That is, ->steal() must be called on a confirmed buffer. - * See below for the meaning of each operation. Also see kerneldoc - * in fs/pipe.c for the pipe and generic variants of these hooks. + * That is, ->try_steal() must be called on a confirmed buffer. See below for + * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the + * pipe and generic variants of these hooks. */ struct pipe_buf_operations { /* @@ -81,7 +96,7 @@ struct pipe_buf_operations { * and that the contents are good. 
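With the ->steal() to ->try_steal() rename below, both pipe_buf_operations hooks become optional: a missing ->confirm() means the contents are always considered good, and a missing ->try_steal() means stealing simply fails. A consumer-side sketch (example_take_page() is illustrative):

static bool example_take_page(struct pipe_inode_info *pipe,
			      struct pipe_buffer *buf)
{
	if (pipe_buf_confirm(pipe, buf))
		return false;	/* contents not valid, e.g. I/O error */

	return pipe_buf_try_steal(pipe, buf);
}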
If the pages in the pipe belong * to a file system, we may need to wait for IO completion in this * hook. Returns 0 for good, or a negative error value in case of - * error. + * error. If not present all pages are considered good. */ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); @@ -93,13 +108,13 @@ struct pipe_buf_operations { /* * Attempt to take ownership of the pipe buffer and its contents. - * ->steal() returns 0 for success, in which case the contents - * of the pipe (the buf->page) is locked and now completely owned - * by the caller. The page may then be transferred to a different - * mapping, the most often used case is insertion into different - * file address space cache. + * ->try_steal() returns %true for success, in which case the contents + * of the pipe (the buf->page) is locked and now completely owned by the + * caller. The page may then be transferred to a different mapping, the + * most often used case is insertion into different file address space + * cache. */ - int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); /* * Get a reference to the pipe buffer. @@ -194,18 +209,22 @@ static inline void pipe_buf_release(struct pipe_inode_info *pipe, static inline int pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { + if (!buf->ops->confirm) + return 0; return buf->ops->confirm(pipe, buf); } /** - * pipe_buf_steal - attempt to take ownership of a pipe_buffer + * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal */ -static inline int pipe_buf_steal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) +static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) { - return buf->ops->steal(pipe, buf); + if (!buf->ops->try_steal) + return false; + return buf->ops->try_steal(pipe, buf); } /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual @@ -229,17 +248,25 @@ void free_pipe_info(struct pipe_inode_info *); /* Generic pipe buffer ops functions */ bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); -int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); -int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); -int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); +bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *); void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); -void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; +#ifdef CONFIG_WATCH_QUEUE +unsigned long account_pipe_buffers(struct user_struct *user, + unsigned long old, unsigned long new); +bool too_many_pipe_buffers_soft(unsigned long user_bufs); +bool too_many_pipe_buffers_hard(unsigned long user_bufs); +bool pipe_is_unprivileged_user(void); +#endif + /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ +#ifdef CONFIG_WATCH_QUEUE +int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); +#endif long pipe_fcntl(struct file *, unsigned int, unsigned long arg); -struct pipe_inode_info *get_pipe_info(struct file *file); +struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice); int create_pipe_files(struct file **, int); unsigned int round_pipe_size(unsigned long size); diff --git a/include/linux/platform_data/ad5761.h 
b/include/linux/platform_data/ad5761.h index 02bef5177ff5..69e261e2ca14 100644 --- a/include/linux/platform_data/ad5761.h +++ b/include/linux/platform_data/ad5761.h @@ -3,7 +3,7 @@ * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter * * Copyright 2016 Qtechnology A/S - * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com> + * 2016 Ricardo Ribalda <ribalda@kernel.org> */ #ifndef __LINUX_PLATFORM_DATA_AD5761_H__ #define __LINUX_PLATFORM_DATA_AD5761_H__ diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h deleted file mode 100644 index addd48cac625..000000000000 --- a/include/linux/platform_data/clk-integrator.h +++ /dev/null @@ -1,2 +0,0 @@ -void integrator_impd1_clk_init(void __iomem *base, unsigned int id); -void integrator_impd1_clk_exit(unsigned int id); diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h index c588be843f61..0ecce6aa69d5 100644 --- a/include/linux/platform_data/cros_ec_sensorhub.h +++ b/include/linux/platform_data/cros_ec_sensorhub.h @@ -185,6 +185,7 @@ int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub, void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub, u8 sensor_num); +int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub); int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub); void cros_ec_sensorhub_ring_remove(void *arg); int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h index 3c606c450d05..ff1be737bad6 100644 --- a/include/linux/platform_data/gpio-dwapb.h +++ b/include/linux/platform_data/gpio-dwapb.h @@ -12,7 +12,6 @@ struct dwapb_port_property { unsigned int ngpio; unsigned int gpio_base; int irq[32]; - bool has_irq; bool irq_shared; }; diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h new file mode 100644 index 000000000000..ec1611aff863 --- /dev/null +++ b/include/linux/platform_data/gsc_hwmon.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GSC_HWMON_H +#define _GSC_HWMON_H + +enum gsc_hwmon_mode { + mode_temperature, + mode_voltage, + mode_voltage_raw, + mode_max, +}; + +/** + * struct gsc_hwmon_channel - configuration parameters + * @reg: I2C register offset + * @mode: channel mode + * @name: channel name + * @mvoffset: voltage offset + * @vdiv: voltage divider array (2 resistor values in milli-ohms) + */ +struct gsc_hwmon_channel { + unsigned int reg; + unsigned int mode; + const char *name; + unsigned int mvoffset; + unsigned int vdiv[2]; +}; + +/** + * struct gsc_hwmon_platform_data - platform data for gsc_hwmon driver + * @channels: pointer to array of gsc_hwmon_channel structures + * describing channels + * @nchannels: number of elements in @channels array + * @vreference: voltage reference (mV) + * @resolution: ADC bit resolution + * @fan_base: register base for FAN controller + */ +struct gsc_hwmon_platform_data { + const struct gsc_hwmon_channel *channels; + int nchannels; + unsigned int resolution; + unsigned int vreference; + unsigned int fan_base; +}; +#endif diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h index 6a9b28399b39..24953981bd9f 100644 --- a/include/linux/platform_data/i2c-pxa.h +++ b/include/linux/platform_data/i2c-pxa.h @@ -7,54 +7,6 @@ #ifndef _I2C_PXA_H_ #define _I2C_PXA_H_ -#if 0 -#define 
DEF_TIMEOUT 3 -#else -/* need a longer timeout if we're dealing with the fact we may well be - * looking at a multi-master environment -*/ -#define DEF_TIMEOUT 32 -#endif - -#define BUS_ERROR (-EREMOTEIO) -#define XFER_NAKED (-ECONNREFUSED) -#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ - -/* ICR initialize bit values -* -* 15. FM 0 (100 Khz operation) -* 14. UR 0 (No unit reset) -* 13. SADIE 0 (Disables the unit from interrupting on slave addresses -* matching its slave address) -* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration -* in master mode) -* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) -* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) -* 9. IRFIE 1 (Enable interrupts from full buffer received) -* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) -* 7. GCD 1 (Disables i2c unit response to general call messages as a slave) -* 6. IUE 0 (Disable unit until we change settings) -* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL) -* 4. MA 0 (Only send stop with the ICR stop bit) -* 3. TB 0 (We are not transmitting a byte initially) -* 2. ACKNAK 0 (Send an ACK after the unit receives a byte) -* 1. STOP 0 (Do not send a STOP) -* 0. START 0 (Do not send a START) -* -*/ -#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) - -/* I2C status register init values - * - * 10. BED 1 (Clear bus error detected) - * 9. SAD 1 (Clear slave address detected) - * 7. IRF 1 (Clear IDBR Receive Full) - * 6. ITE 1 (Clear IDBR Transmit Empty) - * 5. ALD 1 (Clear Arbitration Loss Detected) - * 4. SSD 1 (Clear Slave Stop Detected) - */ -#define I2C_ISR_INIT 0x7FF /* status register init */ - struct i2c_pxa_platform_data { unsigned int class; unsigned int use_pio :1; diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h index 2ccdce6a4e27..45d860cac2b0 100644 --- a/include/linux/platform_data/itco_wdt.h +++ b/include/linux/platform_data/itco_wdt.h @@ -12,13 +12,16 @@ #define ICH_RES_MEM_OFF 2 #define ICH_RES_MEM_GCS_PMC 0 +/** + * struct itco_wdt_platform_data - iTCO_wdt platform data + * @name: Name of the platform + * @version: iTCO version + * @no_reboot_use_pmc: Use PMC BXT API to set and clear NO_REBOOT bit + */ struct itco_wdt_platform_data { char name[32]; unsigned int version; - /* private data to be passed to update_no_reboot_bit API */ - void *no_reboot_priv; - /* pointer for platform specific no reboot update function */ - int (*update_no_reboot_bit)(void *priv, bool set); + bool no_reboot_use_pmc; }; #endif /* _ITCO_WDT_H_ */ diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 0434f68eda86..cba1184b364c 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright 2010 Wolfram Sang <w.sang@pengutronix.de> + * Copyright 2010 Wolfram Sang <kernel@pengutronix.de> */ #ifndef __ASM_ARCH_IMX_ESDHC_H diff --git a/include/linux/platform_data/mmc-esdhc-mcf.h b/include/linux/platform_data/mmc-esdhc-mcf.h new file mode 100644 index 000000000000..85cb786a62fe --- /dev/null +++ b/include/linux/platform_data/mmc-esdhc-mcf.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ +#define __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ + +enum cd_types { + ESDHC_CD_NONE, /* no CD, neither controller 
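A hedged example of board data for the new gsc_hwmon header above; the register offsets, channel names and divider values are invented for illustration:

static const struct gsc_hwmon_channel example_channels[] = {
	{ .reg = 0x00, .mode = mode_temperature, .name = "cpu_temp" },
	{ .reg = 0x02, .mode = mode_voltage, .name = "vdd_3p3",
	  .vdiv = { 10000, 10000 } },	/* 2:1 resistor divider, milli-ohms */
};

static struct gsc_hwmon_platform_data example_pdata = {
	.channels   = example_channels,
	.nchannels  = ARRAY_SIZE(example_channels),
	.resolution = 12,	/* 12-bit ADC */
	.vreference = 2500,	/* 2.5 V reference, in mV */
};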
nor gpio */ + ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ + ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ +}; + +struct mcf_esdhc_platform_data { + int max_bus_width; + int cd_type; +}; + +#endif /* __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ */ diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h index 08e639e047e5..03e92c71b3fa 100644 --- a/include/linux/platform_data/mtd-davinci.h +++ b/include/linux/platform_data/mtd-davinci.h @@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */ * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; u8 ecc_bits; /* e.g. NAND_BUSWIDTH_16 */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index deb849bcf0ec..08675b16f9e1 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -49,7 +49,7 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; int nr_sets; struct s3c2410_nand_set *sets; diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h index 25f46a939637..3e268e636b5b 100644 --- a/include/linux/platform_data/wilco-ec.h +++ b/include/linux/platform_data/wilco-ec.h @@ -83,7 +83,7 @@ struct wilco_ec_response { u16 result; u16 data_size; u8 reserved[2]; - u8 data[0]; + u8 data[]; } __packed; /** diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index d39fc658c320..897b8332a39f 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -85,6 +85,9 @@ /* Maximum charging percentage */ #define ASUS_WMI_DEVID_RSOC 0x00120057 +/* Keyboard dock */ +#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index bdc35753ef7c..77a2aada106d 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -25,6 +25,7 @@ struct platform_device { bool id_auto; struct device dev; u64 platform_dma_mask; + struct device_dma_parameters dma_parms; u32 num_resources; struct resource *resource; diff --git a/include/linux/pm.h b/include/linux/pm.h index e057d1fa2469..121c104a4090 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -544,31 +544,17 @@ struct pm_subsys_data { * These flags can be set by device drivers at the probe time. They need not be * cleared by the drivers as the driver core will take care of that. * - * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device. - * SMART_PREPARE: Check the return value of the driver's ->prepare callback. - * SMART_SUSPEND: No need to resume the device from runtime suspend. - * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible. - * - * Setting SMART_PREPARE instructs bus types and PM domains which may want - * system suspend/resume callbacks to be skipped for the device to return 0 from - * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in - * other words, the system suspend/resume callbacks can only be skipped for the - * device if its driver doesn't object against that). This flag has no effect - * if NEVER_SKIP is set. 
- * - * Setting SMART_SUSPEND instructs bus types and PM domains which may want to - * runtime resume the device upfront during system suspend that doing so is not - * necessary from the driver's perspective. It also may cause them to skip - * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by - * the driver if they decide to leave the device in runtime suspend. - * - * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the - * driver prefers the device to be left in suspend after system resume. + * NO_DIRECT_COMPLETE: Do not apply direct-complete optimization to the device. + * SMART_PREPARE: Take the driver ->prepare callback return value into account. + * SMART_SUSPEND: Avoid resuming the device from runtime suspend. + * MAY_SKIP_RESUME: Allow driver "noirq" and "early" callbacks to be skipped. + * + * See Documentation/driver-api/pm/devices.rst for details. */ -#define DPM_FLAG_NEVER_SKIP BIT(0) +#define DPM_FLAG_NO_DIRECT_COMPLETE BIT(0) #define DPM_FLAG_SMART_PREPARE BIT(1) #define DPM_FLAG_SMART_SUSPEND BIT(2) -#define DPM_FLAG_LEAVE_SUSPENDED BIT(3) +#define DPM_FLAG_MAY_SKIP_RESUME BIT(3) struct dev_pm_info { pm_message_t power_state; @@ -758,8 +744,8 @@ extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); -extern bool dev_pm_may_skip_resume(struct device *dev); -extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); +extern bool dev_pm_skip_resume(struct device *dev); +extern bool dev_pm_skip_suspend(struct device *dev); #else /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 747861816f4f..d5c4a329321d 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -42,6 +42,18 @@ struct dev_pm_opp_supply { }; /** + * struct dev_pm_opp_icc_bw - Interconnect bandwidth values + * @avg: Average bandwidth corresponding to this OPP (in icc units) + * @peak: Peak bandwidth corresponding to this OPP (in icc units) + * + * This structure stores the bandwidth values for a single interconnect path. 
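Worth pausing on the DPM_FLAG_* renames in the pm.h hunk above: a driver opts in to these optimizations at probe time. A minimal sketch, assuming a hypothetical foo platform driver (dev_pm_set_driver_flags() is the usual helper for setting these bits):

#include <linux/pm.h>
#include <linux/platform_device.h>

/* Sketch: keep the device runtime-suspended across system suspend and
 * let the PM core skip "noirq"/"early" resume callbacks when possible. */
static int foo_probe(struct platform_device *pdev)
{
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_SUSPEND |
				DPM_FLAG_MAY_SKIP_RESUME);
	return 0;
}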
+ */ +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + +/** * struct dev_pm_opp_info - OPP freq/voltage/current values * @rate: Target clk rate in hz * @supplies: Array of voltage/current values for all power supplies @@ -360,6 +372,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); +int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); void dev_pm_opp_of_register_em(struct cpumask *cpus); #else static inline int dev_pm_opp_of_add_table(struct device *dev) @@ -408,6 +421,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np, { return -ENOTSUPP; } + +static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) +{ + return -ENOTSUPP; +} #endif #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 3bdcbce8141a..3dbc207bff53 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -102,9 +102,9 @@ static inline bool pm_runtime_enabled(struct device *dev) return !dev->power.disable_depth; } -static inline bool pm_runtime_callbacks_present(struct device *dev) +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { - return !dev->power.no_callbacks; + return dev->power.no_callbacks; } static inline void pm_runtime_mark_last_busy(struct device *dev) diff --git a/include/linux/pnp.h b/include/linux/pnp.h index b18dca67253d..c2a7cfbca713 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -220,10 +220,8 @@ struct pnp_card { #define global_to_pnp_card(n) list_entry(n, struct pnp_card, global_list) #define protocol_to_pnp_card(n) list_entry(n, struct pnp_card, protocol_list) #define to_pnp_card(n) container_of(n, struct pnp_card, dev) -#define pnp_for_each_card(card) \ - for((card) = global_to_pnp_card(pnp_cards.next); \ - (card) != global_to_pnp_card(&pnp_cards); \ - (card) = global_to_pnp_card((card)->global_list.next)) +#define pnp_for_each_card(card) \ + list_for_each_entry(card, &pnp_cards, global_list) struct pnp_card_link { struct pnp_card *card; @@ -276,14 +274,9 @@ struct pnp_dev { #define card_to_pnp_dev(n) list_entry(n, struct pnp_dev, card_list) #define protocol_to_pnp_dev(n) list_entry(n, struct pnp_dev, protocol_list) #define to_pnp_dev(n) container_of(n, struct pnp_dev, dev) -#define pnp_for_each_dev(dev) \ - for((dev) = global_to_pnp_dev(pnp_global.next); \ - (dev) != global_to_pnp_dev(&pnp_global); \ - (dev) = global_to_pnp_dev((dev)->global_list.next)) -#define card_for_each_dev(card,dev) \ - for((dev) = card_to_pnp_dev((card)->devices.next); \ - (dev) != card_to_pnp_dev(&(card)->devices); \ - (dev) = card_to_pnp_dev((dev)->card_list.next)) +#define pnp_for_each_dev(dev) list_for_each_entry(dev, &pnp_global, global_list) +#define card_for_each_dev(card, dev) \ + list_for_each_entry(dev, &(card)->devices, card_list) #define pnp_dev_name(dev) (dev)->name static inline void *pnp_get_drvdata(struct pnp_dev *pdev) @@ -437,14 +430,10 @@ struct pnp_protocol { }; #define to_pnp_protocol(n) list_entry(n, struct pnp_protocol, protocol_list) -#define protocol_for_each_card(protocol,card) \ - for((card) = protocol_to_pnp_card((protocol)->cards.next); \ - (card) != protocol_to_pnp_card(&(protocol)->cards); \ - (card) = 
protocol_to_pnp_card((card)->protocol_list.next)) -#define protocol_for_each_dev(protocol,dev) \ - for((dev) = protocol_to_pnp_dev((protocol)->devices.next); \ - (dev) != protocol_to_pnp_dev(&(protocol)->devices); \ - (dev) = protocol_to_pnp_dev((dev)->protocol_list.next)) +#define protocol_for_each_card(protocol, card) \ + list_for_each_entry(card, &(protocol)->cards, protocol_list) +#define protocol_for_each_dev(protocol, dev) \ + list_for_each_entry(dev, &(protocol)->devices, protocol_list) extern struct bus_type pnp_bus_type; diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 540595a321a7..90797f1b421d 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -28,7 +28,7 @@ struct posix_acl { refcount_t a_refcount; struct rcu_head a_rcu; unsigned int a_count; - struct posix_acl_entry a_entries[0]; + struct posix_acl_entry a_entries[]; }; #define FOREACH_ACL_ENTRY(pa, acl, pe) \ diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index dcd5a71e6c67..ac1345a48ad0 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -61,6 +61,7 @@ enum { POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_OVERCURRENT, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED, }; enum { @@ -139,6 +140,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, @@ -158,6 +160,9 @@ enum power_supply_property { POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_CALIBRATE, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH, + POWER_SUPPLY_PROP_MANUFACTURE_DAY, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, @@ -223,9 +228,9 @@ struct power_supply_config { struct power_supply_desc { const char *name; enum power_supply_type type; - enum power_supply_usb_type *usb_types; + const enum power_supply_usb_type *usb_types; size_t num_usb_types; - enum power_supply_property *properties; + const enum power_supply_property *properties; size_t num_properties; /* @@ -346,8 +351,12 @@ struct power_supply_battery_info { int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ int voltage_max_design_uv; /* microVolts */ + int tricklecharge_current_ua; /* microAmps */ int precharge_current_ua; /* microAmps */ + int precharge_voltage_max_uv; /* microVolts */ int charge_term_current_ua; /* microAmps */ + int charge_restart_voltage_uv; /* microVolts */ + int overvoltage_limit_uv; /* microVolts */ int constant_charge_current_max_ua; /* microAmps */ int constant_charge_voltage_max_uv; /* microVolts */ int factory_internal_resistance_uohm; /* microOhms */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index bc3f1aecaa19..7d9c1c0e149c 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -26,13 +26,13 @@ * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 - * NMI_MASK: 0x00100000 + * NMI_MASK: 0x00f00000 * PREEMPT_NEED_RESCHED: 0x80000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 #define HARDIRQ_BITS 4 -#define NMI_BITS 1 +#define NMI_BITS 4 #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + 
PREEMPT_BITS) diff --git a/include/linux/printk.h b/include/linux/printk.h index e061635e0409..fc8f03c54543 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -189,7 +189,7 @@ extern int printk_delay_msec; extern int dmesg_restrict; extern int -devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, +devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos); extern void wake_up_klogd(void); @@ -279,39 +279,116 @@ static inline void printk_safe_flush_on_panic(void) extern int kptr_restrict; +/** + * pr_fmt - used by the pr_*() macros to generate the printk format string + * @fmt: format string passed from a pr_*() macro + * + * This macro can be used to generate a unified format string for pr_*() + * macros. A common use is to prefix all pr_*() messages in a file with a common + * string. For example, defining this at the top of a source file: + * + * #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + * + * would prefix all pr_info, pr_emerg... messages in the file with the module + * name. + */ #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif -/* - * These can be used to print at the various log levels. - * All of these will print unconditionally, although note that pr_debug() - * and other debug macros are compiled out unless either DEBUG is defined - * or CONFIG_DYNAMIC_DEBUG is set. +/** + * pr_emerg - Print an emergency-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_EMERG loglevel. It uses pr_fmt() to + * generate the format string. */ #define pr_emerg(fmt, ...) \ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_alert - Print an alert-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_ALERT loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_alert(fmt, ...) \ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_crit - Print a critical-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_CRIT loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_crit(fmt, ...) \ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_err - Print an error-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_ERR loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_err(fmt, ...) \ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_warn - Print a warning-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_WARNING loglevel. It uses pr_fmt() + * to generate the format string. + */ #define pr_warn(fmt, ...) \ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_notice - Print a notice-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_NOTICE loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_notice(fmt, ...) \ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_info - Print an info-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_INFO loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_info(fmt, ...) 
\ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -/* - * Like KERN_CONT, pr_cont() should only be used when continuing - * a line with no newline ('\n') enclosed. Otherwise it defaults - * back to KERN_DEFAULT. + +/** + * pr_cont - Continues a previous log message in the same line. + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_CONT loglevel. It should only be + * used when continuing a log message with no newline ('\n') enclosed. Otherwise + * it defaults back to KERN_DEFAULT loglevel. */ #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) -/* pr_devel() should produce zero code unless DEBUG is defined */ +/** + * pr_devel - Print a debug-level message conditionally + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_DEBUG loglevel if DEBUG is + * defined. Otherwise it does nothing. + * + * It uses pr_fmt() to generate the format string. + */ #ifdef DEBUG #define pr_devel(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) @@ -322,11 +399,23 @@ extern int kptr_restrict; /* If you are writing a driver, please use dev_dbg instead */ -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #include <linux/dynamic_debug.h> -/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ -#define pr_debug(fmt, ...) \ +/** + * pr_debug - Print a debug-level message conditionally + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to dynamic_pr_debug() if CONFIG_DYNAMIC_DEBUG is + * set. Otherwise, if DEBUG is defined, it's equivalent to a printk with + * KERN_DEBUG loglevel. If DEBUG is not defined it does nothing. + * + * It uses pr_fmt() to generate the format string (dynamic_pr_debug() uses + * pr_fmt() internally). + */ +#define pr_debug(fmt, ...) \ dynamic_pr_debug(fmt, ##__VA_ARGS__) #elif defined(DEBUG) #define pr_debug(fmt, ...) \ @@ -384,8 +473,7 @@ extern int kptr_restrict; printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_once(fmt, ...) \ printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -#define pr_cont_once(fmt, ...) \ - printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) +/* no pr_cont_once, don't do that... */ #if defined(DEBUG) #define pr_devel_once(fmt, ...) \ @@ -448,7 +536,8 @@ extern int kptr_restrict; #endif /* If you are writing a driver, please use dev_dbg instead */ -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define pr_debug_ratelimited(fmt, ...) 
\ do { \ @@ -495,7 +584,8 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, #endif -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 45c05fd9c99d..d1eed1b43651 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -42,6 +42,34 @@ struct proc_ops { unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); } __randomize_layout; +/* definitions for hide_pid field */ +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, /* Limit pids to only ptraceable pids */ +}; + +/* definitions for proc mount option pidonly */ +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; /* For /proc/self */ + struct dentry *proc_thread_self; /* For /proc/thread-self */ + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; +}; + +static inline struct proc_fs_info *proc_sb_info(struct super_block *sb) +{ + return sb->s_fs_info; +} + #ifdef CONFIG_PROC_FS typedef int (*proc_write_t)(struct file *, char *, size_t); @@ -105,6 +133,9 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo void *data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); +extern int bpf_iter_init_seq_net(void *priv_data); +extern void bpf_iter_fini_seq_net(void *priv_data); + #ifdef CONFIG_PROC_PID_ARCH_STATUS /* * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must @@ -174,9 +205,11 @@ int open_related_ns(struct ns_common *ns, struct ns_common *(*get_ns)(struct ns_common *ns)); /* get the associated pid namespace for a file in procfs */ -static inline struct pid_namespace *proc_pid_ns(const struct inode *inode) +static inline struct pid_namespace *proc_pid_ns(struct super_block *sb) { - return inode->i_sb->s_fs_info; + return proc_sb_info(sb)->pid_ns; } +bool proc_ns_file(const struct file *file); + #endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 6abe85c34681..75807ecef880 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -8,7 +8,7 @@ #include <linux/ns_common.h> struct pid_namespace; -struct nsproxy; +struct nsset; struct path; struct task_struct; struct inode; @@ -19,7 +19,7 @@ struct proc_ns_operations { int type; struct ns_common *(*get)(struct task_struct *task); void (*put)(struct ns_common *ns); - int (*install)(struct nsproxy *nsproxy, struct ns_common *ns); + int (*install)(struct nsset *nsset, struct ns_common *ns); struct user_namespace *(*owner)(struct ns_common *ns); struct ns_common *(*get_parent)(struct ns_common *ns); } __randomize_layout; diff --git a/include/linux/property.h b/include/linux/property.h index d86de017c689..10d03572f52e 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -440,7 +440,11 @@ software_node_find_by_name(const struct software_node *parent, int software_node_register_nodes(const struct software_node *nodes); void software_node_unregister_nodes(const struct software_node *nodes); +int software_node_register_node_group(const struct software_node 
**node_group); +void software_node_unregister_node_group(const struct software_node **node_group); + int software_node_register(const struct software_node *node); +void software_node_unregister(const struct software_node *node); int software_node_notify(struct device *dev, unsigned long action); diff --git a/include/linux/psci.h b/include/linux/psci.h index a67712b73b6c..14ad9b9ebcd6 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -21,11 +21,6 @@ bool psci_power_state_is_valid(u32 state); int psci_set_osi_mode(void); bool psci_has_osi_support(void); -enum smccc_version { - SMCCC_VERSION_1_0, - SMCCC_VERSION_1_1, -}; - struct psci_operations { u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); @@ -35,8 +30,6 @@ struct psci_operations { int (*affinity_info)(unsigned long target_affinity, unsigned long lowest_affinity_level); int (*migrate_info_type)(void); - enum arm_smccc_conduit conduit; - enum smccc_version smccc_version; }; extern struct psci_operations psci_ops; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 5167bf2bfc75..49d155cd2dfe 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -100,6 +100,8 @@ struct sev_data_init { u32 tmr_len; /* In */ } __packed; +#define SEV_INIT_FLAGS_SEV_ES 0x01 + /** * struct sev_data_pek_csr - PEK_CSR command parameters * @@ -595,7 +597,7 @@ int sev_guest_df_flush(int *error); */ int sev_guest_decommission(struct sev_data_decommission *data, int *error); -void *psp_copy_user_blob(u64 __user uaddr, u32 len); +void *psp_copy_user_blob(u64 uaddr, u32 len); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index e779441e6d26..eb93a54cff31 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -96,6 +96,12 @@ struct pstore_record { * * @read_mutex: serializes @open, @read, @close, and @erase callbacks * @flags: bitfield of frontends the backend can accept writes for + * @max_reason: Used when PSTORE_FLAGS_DMESG is set. Contains the + * kmsg_dump_reason enum value. KMSG_DUMP_UNDEF means + * "use existing kmsg_dump() filtering, based on the + * printk.always_kmsg_dump boot param" (which is either + * KMSG_DUMP_OOPS when false, or KMSG_DUMP_MAX when + * true); see printk.always_kmsg_dump for more details. * @data: backend-private pointer passed back during callbacks * * Callbacks: @@ -170,7 +176,7 @@ struct pstore_record { */ struct pstore_info { struct module *owner; - char *name; + const char *name; struct semaphore buf_lock; char *buf; @@ -179,6 +185,7 @@ struct pstore_info { struct mutex read_mutex; int flags; + int max_reason; void *data; int (*open)(struct pstore_info *psi); diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h new file mode 100644 index 000000000000..61e914522b01 --- /dev/null +++ b/include/linux/pstore_blk.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PSTORE_BLK_H_ +#define __PSTORE_BLK_H_ + +#include <linux/types.h> +#include <linux/pstore.h> +#include <linux/pstore_zone.h> + +/** + * typedef pstore_blk_panic_write_op - panic write operation to block device + * + * @buf: the data to write + * @start_sect: start sector on the block device + * @sects: number of sectors in @buf + * + * Return: On success, zero should be returned. Any other value except + * -ENOMSG means error. -ENOMSG means to try the next zone. + * + * Panic writes to the block device must be aligned to SECTOR_SIZE. + */ +typedef int (*pstore_blk_panic_write_op)(const char *buf, sector_t start_sect, + sector_t sects);
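As an illustration of the hook just defined, a minimal sketch of a backing driver's implementation follows; mydev_write_sectors() is a hypothetical, panic-safe helper, not part of this patch:

/* Hedged sketch of a pstore_blk_panic_write_op implementation: return
 * -ENOMSG to make pstore/blk try the next zone, any other nonzero
 * value to report a hard error. */
static int mydev_panic_write(const char *buf, sector_t start_sect,
			     sector_t sects)
{
	if (mydev_write_sectors(buf, start_sect, sects))
		return -ENOMSG;
	return 0;
}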
+ +/** + * struct pstore_blk_info - pstore/blk registration details + * + * @major: Which major device number to support with pstore/blk + * @flags: The supported PSTORE_FLAGS_* from linux/pstore.h. + * @panic_write: The write operation used only for the panic case. + * This can be NULL, but is recommended to avoid losing + * crash data if the kernel's IO path or work queues are + * broken during a panic. + * @devt: The dev_t that pstore/blk has attached to. + * @nr_sects: Number of sectors on @devt. + * @start_sect: Starting sector on @devt. + */ +struct pstore_blk_info { + unsigned int major; + unsigned int flags; + pstore_blk_panic_write_op panic_write; + + /* Filled in by pstore/blk after registration. */ + dev_t devt; + sector_t nr_sects; + sector_t start_sect; +}; + +int register_pstore_blk(struct pstore_blk_info *info); +void unregister_pstore_blk(unsigned int major); + +/** + * struct pstore_device_info - back-end pstore/blk driver structure. + * + * @total_size: The total size in bytes pstore/blk can use. It must be greater + * than 4096 and be a multiple of 4096. + * @flags: Refer to the macros starting with PSTORE_FLAGS defined in + * linux/pstore.h. They state which frontends this device supports. + * Zero means all frontends, for compatibility. + * @read: The general read operation. Both of the function parameters + * @size and @offset are relative values to the block device (not the + * whole disk). + * On success, the number of bytes should be returned; other + * values mean error. + * @write: The same as @read, but with the following error numbers: + * -EBUSY means try to write again later. + * -ENOMSG means to try the next zone. + * @erase: The general erase operation, for devices that need a special + * erase step. Both of the function parameters @size and @offset are + * relative values to the storage. + * Return 0 on success, nonzero on failure. + * @panic_write: The write operation used only for the panic case. It's optional + * if you do not care about panic logs. The parameters are relative + * values to the storage. + * On success, the number of bytes should be returned; any other + * value except -ENOMSG means error. -ENOMSG means to try the next zone. + */ +struct pstore_device_info { + unsigned long total_size; + unsigned int flags; + pstore_zone_read_op read; + pstore_zone_write_op write; + pstore_zone_erase_op erase; + pstore_zone_write_op panic_write; +}; + +int register_pstore_device(struct pstore_device_info *dev); +void unregister_pstore_device(struct pstore_device_info *dev); + +/** + * struct pstore_blk_config - the pstore_blk backend configuration + * + * @device: Name of the desired block device + * @max_reason: Maximum kmsg dump reason to store to the block device + * @kmsg_size: Total size for kmsg dumps + * @pmsg_size: Total size of the pmsg storage area + * @console_size: Total size of the console storage area + * @ftrace_size: Total size for ftrace logging data (for all CPUs) + */ +struct pstore_blk_config { + char device[80]; + enum kmsg_dump_reason max_reason; + unsigned long kmsg_size; + unsigned long pmsg_size; + unsigned long console_size; + unsigned long ftrace_size; +}; + +/** + * pstore_blk_get_config - get a copy of the pstore_blk backend configuration + * + * @info: The struct pstore_blk_config to be filled in + * + * Failure returns a negative error code; success returns 0. + */ +int pstore_blk_get_config(struct pstore_blk_config *info); + +#endif
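Taken together, a block driver might register with pstore/blk roughly as follows; MYDEV_MAJOR is an assumed major number and mydev_panic_write() is the sketch shown earlier:

static struct pstore_blk_info mydev_pstore_info = {
	.major       = MYDEV_MAJOR,		/* hypothetical */
	.flags       = PSTORE_FLAGS_DMESG,	/* dmesg frontend only */
	.panic_write = mydev_panic_write,
};

static int __init mydev_pstore_init(void)
{
	int ret = register_pstore_blk(&mydev_pstore_info);

	if (!ret) {
		struct pstore_blk_config cfg;

		/* Read back the effective configuration. */
		if (!pstore_blk_get_config(&cfg))
			pr_info("pstore/blk on %s, kmsg %lu bytes\n",
				cfg.device, cfg.kmsg_size);
	}
	return ret;
}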
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9cb9b9067298..9f16afec7290 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -133,7 +133,7 @@ struct ramoops_platform_data { unsigned long console_size; unsigned long ftrace_size; unsigned long pmsg_size; - int dump_oops; + int max_reason; u32 flags; struct persistent_ram_ecc_info ecc_info; }; diff --git a/include/linux/pstore_zone.h b/include/linux/pstore_zone.h new file mode 100644 index 000000000000..1e35eaa33e5e --- /dev/null +++ b/include/linux/pstore_zone.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PSTORE_ZONE_H_ +#define __PSTORE_ZONE_H_ + +#include <linux/types.h> + +typedef ssize_t (*pstore_zone_read_op)(char *, size_t, loff_t); +typedef ssize_t (*pstore_zone_write_op)(const char *, size_t, loff_t); +typedef ssize_t (*pstore_zone_erase_op)(size_t, loff_t); +/** + * struct pstore_zone_info - pstore/zone back-end driver structure + * + * @owner: Module which is responsible for this back-end driver. + * @name: Name of the back-end driver. + * @total_size: The total size in bytes pstore/zone can use. It must be greater + * than 4096 and be a multiple of 4096. + * @kmsg_size: The size of the oops/panic zone. Zero means disabled; otherwise + * it must be a multiple of SECTOR_SIZE (512 bytes). + * @max_reason: Maximum kmsg dump reason to store. + * @pmsg_size: The size of the pmsg zone, with the same constraints as @kmsg_size. + * @console_size: The size of the console zone, with the same constraints as @kmsg_size. + * @ftrace_size: The size of the ftrace zone, with the same constraints as @kmsg_size. + * @read: The general read operation. Both of the function parameters + * @size and @offset are relative values to the storage. + * On success, the number of bytes should be returned; other + * values mean error. + * @write: The same as @read, but with the following error numbers: + * -EBUSY means try to write again later. + * -ENOMSG means to try the next zone. + * @erase: The general erase operation, for devices that need a special + * erase step. Both of the function parameters @size and @offset are + * relative values to the storage. + * Return 0 on success, nonzero on failure. + * @panic_write: The write operation used only for the panic case. It's optional + * if you do not care about panic logs. The parameters are relative + * values to the storage. + * On success, the number of bytes should be returned; any other + * value except -ENOMSG means error. -ENOMSG means to try the next zone.
+ */ +struct pstore_zone_info { + struct module *owner; + const char *name; + + unsigned long total_size; + unsigned long kmsg_size; + int max_reason; + unsigned long pmsg_size; + unsigned long console_size; + unsigned long ftrace_size; + pstore_zone_read_op read; + pstore_zone_write_op write; + pstore_zone_erase_op erase; + pstore_zone_write_op panic_write; +}; + +extern int register_pstore_zone(struct pstore_zone_info *info); +extern void unregister_pstore_zone(struct pstore_zone_info *info); + +#endif diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h index a67065c403c3..2a3a95586425 100644 --- a/include/linux/ptdump.h +++ b/include/linux/ptdump.h @@ -13,7 +13,8 @@ struct ptdump_range { struct ptdump_state { /* level is 0:PGD to 4:PTE, or -1 if unknown */ void (*note_page)(struct ptdump_state *st, unsigned long addr, - int level, unsigned long val); + int level, u64 val); + void (*effective_prot)(struct ptdump_state *st, int level, u64 val); const struct ptdump_range *range; }; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 121a7eda4593..d3e8ba5c7125 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -36,7 +36,7 @@ struct ptp_system_timestamp { }; /** - * struct ptp_clock_info - decribes a PTP hardware clock + * struct ptp_clock_info - describes a PTP hardware clock * * @owner: The clock driver should set to THIS_MODULE. * @name: A short "friendly name" to identify the clock and to @@ -65,6 +65,9 @@ struct ptp_system_timestamp { * parameter delta: Desired frequency offset from nominal frequency * in parts per billion * + * @adjphase: Adjusts the phase offset of the hardware clock. + * parameter delta: Desired change in nanoseconds. + * * @adjtime: Shifts the time of the hardware clock. * parameter delta: Desired change in nanoseconds. * @@ -105,10 +108,10 @@ struct ptp_system_timestamp { * parameter func: the desired function to use. * parameter chan: the function channel index to use. * - * @do_work: Request driver to perform auxiliary (periodic) operations - * Driver should return delay of the next auxiliary work scheduling - * time (>=0) or negative value in case further scheduling - * is not required. + * @do_aux_work: Request driver to perform auxiliary (periodic) operations + * Driver should return delay of the next auxiliary work + * scheduling time (>=0) or negative value in case further + * scheduling is not required. * * Drivers should embed their ptp_clock_info within a private * structure, obtaining a reference to it using container_of(). 
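The embedding pattern described in that last paragraph, combined with the new @adjphase hook, might look like this in a clock driver; the myptp_* names and the hardware helper are assumptions:

struct myptp_clock {
	struct ptp_clock_info caps;	/* embedded, as recommended */
	/* ... driver-private state ... */
};

/* Sketch: recover the private structure with container_of(), then
 * apply @phase nanoseconds of phase offset to the hardware. */
static int myptp_adjphase(struct ptp_clock_info *ptp, s32 phase)
{
	struct myptp_clock *clock =
		container_of(ptp, struct myptp_clock, caps);

	return myptp_hw_set_phase(clock, phase);	/* hypothetical helper */
}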
@@ -128,6 +131,7 @@ struct ptp_clock_info { struct ptp_pin_desc *pin_config; int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm); int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); + int (*adjphase)(struct ptp_clock_info *ptp, s32 phase); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts, diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8f29e0d8a7b3..8cb76405cbce 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -607,6 +607,16 @@ struct qed_sb_info { struct qed_dev *cdev; }; +enum qed_hw_err_type { + QED_HW_ERR_FAN_FAIL, + QED_HW_ERR_MFW_RESP_FAIL, + QED_HW_ERR_HW_ATTN, + QED_HW_ERR_DMAE_FAIL, + QED_HW_ERR_RAMROD_FAIL, + QED_HW_ERR_FW_ASSERT, + QED_HW_ERR_LAST, +}; + enum qed_dev_type { QED_DEV_TYPE_BB, QED_DEV_TYPE_AH, @@ -811,12 +821,14 @@ enum qed_nvm_flash_cmd { struct qed_common_cb_ops { void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); - void (*link_update)(void *dev, - struct qed_link_output *link); + void (*link_update)(void *dev, struct qed_link_output *link); void (*schedule_recovery_handler)(void *dev); - void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); + void (*schedule_hw_err_handler)(void *dev, + enum qed_hw_err_type err_type); + void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); void (*get_protocol_tlv_data)(void *dev, void *data); + void (*bw_update)(void *dev); }; struct qed_selftest_ops { @@ -1034,6 +1046,15 @@ struct qed_common_ops { */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); + +/** + * @brief attn_clr_enable - Prevent attentions from being reasserted + * + * @param cdev + * @param clr_enable + */ + void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable); + /** * @brief db_recovery_add - add doorbell information to the doorbell * recovery mechanism. 
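Before the RDMA changes, a sketch of how a protocol driver might consume the new schedule_hw_err_handler callback added above; the mydrv names are illustrative only:

/* The void *dev cookie is whatever the protocol driver registered
 * with qed; err_type is one of the qed_hw_err_type values above. */
static void mydrv_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
{
	struct mydrv_dev *edev = dev;

	if (err_type == QED_HW_ERR_FAN_FAIL)
		mydrv_schedule_recovery(edev);	/* hypothetical */
}

static struct qed_common_cb_ops mydrv_cb_ops = {
	.schedule_hw_err_handler = mydrv_hw_err_handler,
};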
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 74efca15fde7..2d3ddd2b85e0 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -53,6 +53,13 @@ enum qed_roce_qp_state { QED_ROCE_QP_STATE_SQE }; +enum qed_rdma_qp_type { + QED_RDMA_QP_TYPE_RC, + QED_RDMA_QP_TYPE_XRC_INI, + QED_RDMA_QP_TYPE_XRC_TGT, + QED_RDMA_QP_TYPE_INVAL = 0xffff, +}; + enum qed_rdma_tid_type { QED_RDMA_TID_REGISTERED_MR, QED_RDMA_TID_FMR, @@ -91,7 +98,6 @@ struct qed_rdma_device { u64 max_mr_size; u32 max_cqe; u32 max_mw; - u32 max_fmr; u32 max_mr_mw_fmr_pbl; u64 max_mr_mw_fmr_size; u32 max_pd; @@ -291,6 +297,12 @@ struct qed_rdma_create_srq_in_params { u16 num_pages; u16 pd_id; u16 page_size; + + /* XRC related only */ + bool reserved_key_en; + bool is_xrc; + u32 cq_cid; + u16 xrcd_id; }; struct qed_rdma_destroy_cq_in_params { @@ -319,7 +331,12 @@ struct qed_rdma_create_qp_in_params { u16 rq_num_pages; u64 rq_pbl_ptr; u16 srq_id; + u16 xrcd_id; u8 stats_queue; + enum qed_rdma_qp_type qp_type; + u8 flags; +#define QED_ROCE_EDPM_MODE_MASK 0x1 +#define QED_ROCE_EDPM_MODE_SHIFT 0 }; struct qed_rdma_create_qp_out_params { @@ -429,11 +446,13 @@ struct qed_rdma_create_srq_out_params { struct qed_rdma_destroy_srq_in_params { u16 srq_id; + bool is_xrc; }; struct qed_rdma_modify_srq_in_params { u32 wqe_limit; u16 srq_id; + bool is_xrc; }; struct qed_rdma_stats_out_params { @@ -611,6 +630,8 @@ struct qed_rdma_ops { int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd); void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd); + int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd); + void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd); int (*rdma_create_cq)(void *rdma_cxt, struct qed_rdma_create_cq_in_params *params, u16 *icid); diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 63e62372443a..c2a9f7c90727 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -16,11 +16,20 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/xarray.h> +#include <linux/local_lock.h> /* Keep unconverted code working */ #define radix_tree_root xarray #define radix_tree_node xa_node +struct radix_tree_preload { + local_lock_t lock; + unsigned nr; + /* nodes->parent points to next preallocated node */ + struct radix_tree_node *nodes; +}; +DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); + /* * The bottom two bits of the slot determine how the remaining bits in the * slot are interpreted: @@ -245,7 +254,7 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { - preempt_enable(); + local_unlock(&radix_tree_preloads.lock); } void __rcu **idr_get_free(struct radix_tree_root *root, diff --git a/include/linux/ras.h b/include/linux/ras.h index 7c3debb47c87..1f4048bf2674 100644 --- a/include/linux/ras.h +++ b/include/linux/ras.h @@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; } #endif #ifdef CONFIG_RAS_CEC -void __init cec_init(void); int __init parse_cec_param(char *str); -int cec_add_elem(u64 pfn); -#else -static inline void __init cec_init(void) { } -static inline int cec_add_elem(u64 pfn) { return -ENODEV; } #endif #ifdef CONFIG_RAS diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 1fd61a9af45c..d7db17996322 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -11,7 +11,7 @@ I know it's not the cleaner way, but in C (not in C++) to get 
performances and genericity... - See Documentation/rbtree.txt for documentation and samples. + See Documentation/core-api/rbtree.rst for documentation and samples. */ #ifndef _LINUX_RBTREE_H diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 724b0d036b57..d1c53e9d8c75 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -21,7 +21,7 @@ * rb_insert_augmented() and rb_erase_augmented() are intended to be public. * The rest are implementation details you are not expected to depend on. * - * See Documentation/rbtree.txt for documentation and samples. + * See Documentation/core-api/rbtree.rst for documentation and samples. */ struct rb_augment_callbacks { diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 8214cdc715f2..df587d181844 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -371,7 +371,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. - * @cond...: optional lockdep expression if called from non-RCU protection. + * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() @@ -506,6 +506,27 @@ static inline void hlist_replace_rcu(struct hlist_node *old, WRITE_ONCE(old->pprev, LIST_POISON2); } +/** + * hlists_swap_heads_rcu - swap the lists the hlist heads point to + * @left: The hlist head on the left + * @right: The hlist head on the right + * + * The lists start out as [@left ][node1 ... ] and + * [@right ][node2 ... ] + * The lists end up as [@left ][node2 ... ] + * [@right ][node1 ... ] + */ +static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right) +{ + struct hlist_node *node1 = left->first; + struct hlist_node *node2 = right->first; + + rcu_assign_pointer(left->first, node2); + rcu_assign_pointer(right->first, node1); + WRITE_ONCE(node2->pprev, &left->first); + WRITE_ONCE(node1->pprev, &right->first); +} + /* * return the first or the next element in an RCU protected hlist */ @@ -646,7 +667,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. - * @cond...: optional lockdep expression if called from non-RCU protection. + * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 2678a37c3169..659cbfa7581a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -37,6 +37,7 @@ /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); +void rcu_barrier_tasks_rude(void); void synchronize_rcu(void); #ifdef CONFIG_PREEMPT_RCU @@ -129,25 +130,57 @@ static inline void rcu_init_nohz(void) { } * Note a quasi-voluntary context switch for RCU-tasks's benefit. * This is a macro rather than an inline function to avoid #include hell.
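The optional @cond argument documented in the rculist.h hunk above takes a lockdep expression; a minimal usage sketch, with the list, lock, and entry type assumed:

struct my_entry {
	struct list_head node;
	int val;
};

static LIST_HEAD(my_head);
static DEFINE_SPINLOCK(my_lock);

/* May be called under rcu_read_lock() or with my_lock held; the
 * lockdep expression covers the lock-held case. */
static int my_sum(void)
{
	struct my_entry *pos;
	int sum = 0;

	list_for_each_entry_rcu(pos, &my_head, node,
				lockdep_is_held(&my_lock))
		sum += pos->val;
	return sum;
}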
*/ -#ifdef CONFIG_TASKS_RCU -#define rcu_tasks_qs(t) \ - do { \ - if (READ_ONCE((t)->rcu_tasks_holdout)) \ - WRITE_ONCE((t)->rcu_tasks_holdout, false); \ +#ifdef CONFIG_TASKS_RCU_GENERIC + +# ifdef CONFIG_TASKS_RCU +# define rcu_tasks_classic_qs(t, preempt) \ + do { \ + if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \ + WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) -#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); +# else +# define rcu_tasks_classic_qs(t, preempt) do { } while (0) +# define call_rcu_tasks call_rcu +# define synchronize_rcu_tasks synchronize_rcu +# endif + +# ifdef CONFIG_TASKS_RCU_TRACE +# define rcu_tasks_trace_qs(t) \ + do { \ + if (!likely(READ_ONCE((t)->trc_reader_checked)) && \ + !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \ + smp_store_release(&(t)->trc_reader_checked, true); \ + smp_mb(); /* Readers partitioned by store. */ \ + } \ + } while (0) +# else +# define rcu_tasks_trace_qs(t) do { } while (0) +# endif + +#define rcu_tasks_qs(t, preempt) \ +do { \ + rcu_tasks_classic_qs((t), (preempt)); \ + rcu_tasks_trace_qs((t)); \ +} while (0) + +# ifdef CONFIG_TASKS_RUDE_RCU +void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func); +void synchronize_rcu_tasks_rude(void); +# endif + +#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); -#else /* #ifdef CONFIG_TASKS_RCU */ -#define rcu_tasks_qs(t) do { } while (0) +#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ +#define rcu_tasks_qs(t, preempt) do { } while (0) #define rcu_note_voluntary_context_switch(t) do { } while (0) #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } -#endif /* #else #ifdef CONFIG_TASKS_RCU */ +#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU @@ -158,7 +191,7 @@ static inline void exit_tasks_rcu_finish(void) { } */ #define cond_resched_tasks_rcu_qs() \ do { \ - rcu_tasks_qs(current); \ + rcu_tasks_qs(current, false); \ cond_resched(); \ } while (0) diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h new file mode 100644 index 000000000000..4c25a41f8b27 --- /dev/null +++ b/include/linux/rcupdate_trace.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Read-Copy Update mechanism for mutual exclusion, adapted for tracing. + * + * Copyright (C) 2020 Paul E. McKenney. + */ + +#ifndef __LINUX_RCUPDATE_TRACE_H +#define __LINUX_RCUPDATE_TRACE_H + +#include <linux/sched.h> +#include <linux/rcupdate.h> + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +extern struct lockdep_map rcu_trace_lock_map; + +static inline int rcu_read_lock_trace_held(void) +{ + return lock_is_held(&rcu_trace_lock_map); +} + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +static inline int rcu_read_lock_trace_held(void) +{ + return 1; +} + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +#ifdef CONFIG_TASKS_TRACE_RCU + +void rcu_read_unlock_trace_special(struct task_struct *t, int nesting); + +/** + * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section + * + * When synchronize_rcu_trace() is invoked by one task, then that task + * is guaranteed to block until all other tasks exit their read-side + * critical sections. 
Similarly, if call_rcu_trace() is invoked on one + * task while other tasks are within RCU read-side critical sections, + * invocation of the corresponding RCU callback is deferred until after + * all the other tasks exit their critical sections. + * + * For more details, please see the documentation for rcu_read_lock(). + */ +static inline void rcu_read_lock_trace(void) +{ + struct task_struct *t = current; + + WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1); + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && + t->trc_reader_special.b.need_mb) + smp_mb(); // Pairs with update-side barriers + rcu_lock_acquire(&rcu_trace_lock_map); +} + +/** + * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section + * + * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is + * allowed. Invoking rcu_read_unlock_trace() when there is no matching + * rcu_read_lock_trace() is verboten, and will result in lockdep complaints. + * + * For more details, please see the documentation for rcu_read_unlock(). + */ +static inline void rcu_read_unlock_trace(void) +{ + int nesting; + struct task_struct *t = current; + + rcu_lock_release(&rcu_trace_lock_map); + nesting = READ_ONCE(t->trc_reader_nesting) - 1; + if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) { + WRITE_ONCE(t->trc_reader_nesting, nesting); + return; // We assume shallow reader nesting. + } + rcu_read_unlock_trace_special(t, nesting); +} + +void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); +void synchronize_rcu_tasks_trace(void); +void rcu_barrier_tasks_trace(void); + +#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ + +#endif /* __LINUX_RCUPDATE_TRACE_H */ diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index c0578ba23c1a..699b938358bf 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -31,4 +31,23 @@ do { \ #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for different grace periods to wait on + * + * This macro waits concurrently for multiple types of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait + * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, with this wrapper supplying the pointer to the + * corresponding srcu_struct. + * + * The first argument tells Tiny RCU's _wait_rcu_gp() not to + * bother waiting for RCU. The reason is that anywhere + * synchronize_rcu_mult() can be called is automatically already a full + * grace period. + */ +#define synchronize_rcu_mult(...) 
\ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 045c28b71f4f..8512caeb7682 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -49,7 +49,7 @@ static inline void rcu_softirq_qs(void) #define rcu_note_context_switch(preempt) \ do { \ rcu_qs(); \ - rcu_tasks_qs(current); \ + rcu_tasks_qs(current, (preempt)); \ } while (0) static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) @@ -71,6 +71,8 @@ static inline void rcu_irq_enter(void) { } static inline void rcu_irq_exit_irqson(void) { } static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } +static inline void rcu_irq_exit_preempt(void) { } +static inline void rcu_irq_exit_check_preempt(void) { } static inline void exit_rcu(void) { } static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) { @@ -85,8 +87,10 @@ static inline void rcu_scheduler_starting(void) { } static inline void rcu_end_inkernel_boot(void) { } static inline bool rcu_inkernel_boot_has_ended(void) { return true; } static inline bool rcu_is_watching(void) { return true; } +static inline bool __rcu_is_watching(void) { return true; } static inline void rcu_momentary_dyntick_idle(void) { } static inline void kfree_rcu_scheduler_running(void) { } +static inline bool rcu_gp_might_be_stalled(void) { return false; } /* Avoid RCU read-side critical sections leaking across. */ static inline void rcu_all_qs(void) { barrier(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 45f3f66bb04d..d5cc9d675987 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -39,6 +39,7 @@ void rcu_barrier(void); bool rcu_eqs_special_set(int cpu); void rcu_momentary_dyntick_idle(void); void kfree_rcu_scheduler_running(void); +bool rcu_gp_might_be_stalled(void); unsigned long get_state_synchronize_rcu(void); void cond_synchronize_rcu(unsigned long oldstate); @@ -46,9 +47,16 @@ void rcu_idle_enter(void); void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); +void rcu_irq_exit_preempt(void); void rcu_irq_enter_irqson(void); void rcu_irq_exit_irqson(void); +#ifdef CONFIG_PROVE_RCU +void rcu_irq_exit_check_preempt(void); +#else +static inline void rcu_irq_exit_check_preempt(void) { } +#endif + void exit_rcu(void); void rcu_scheduler_starting(void); @@ -56,6 +64,7 @@ extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); +bool __rcu_is_watching(void); #ifndef CONFIG_PREEMPTION void rcu_all_qs(void); #endif diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 2ffe1ee6d482..61c56cca95c4 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -25,16 +25,38 @@ static inline void rcuwait_init(struct rcuwait *w) w->task = NULL; } -extern void rcuwait_wake_up(struct rcuwait *w); +/* + * Note: this provides no serialization and, just as with waitqueues, + * requires care to estimate as to whether or not the wait is active. + */ +static inline int rcuwait_active(struct rcuwait *w) +{ + return !!rcu_access_pointer(w->task); +} + +extern int rcuwait_wake_up(struct rcuwait *w); /* * The caller is responsible for locking around rcuwait_wait_event(), - * such that writes to @task are properly serialized. + * and [prepare_to/finish]_rcuwait() such that writes to @task are + * properly serialized. 
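The rcuwait serialization rule above in practice: one side publishes the condition and wakes, the other waits. A minimal sketch with the my_* names assumed:

static struct rcuwait my_wait;
static bool my_done;

static void my_waiter(void)
{
	/* Sleeps until my_done is observed true. */
	rcuwait_wait_event(&my_wait, READ_ONCE(my_done),
			   TASK_UNINTERRUPTIBLE);
}

static void my_waker(void)
{
	WRITE_ONCE(my_done, true);
	rcuwait_wake_up(&my_wait);	/* returns whether a task was woken */
}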
*/ + +static inline void prepare_to_rcuwait(struct rcuwait *w) +{ + rcu_assign_pointer(w->task, current); +} + +static inline void finish_rcuwait(struct rcuwait *w) +{ + rcu_assign_pointer(w->task, NULL); + __set_current_state(TASK_RUNNING); +} + #define rcuwait_wait_event(w, condition, state) \ ({ \ int __ret = 0; \ - rcu_assign_pointer((w)->task, current); \ + prepare_to_rcuwait(w); \ for (;;) { \ /* \ * Implicit barrier (A) pairs with (B) in \ @@ -51,9 +73,7 @@ extern void rcuwait_wake_up(struct rcuwait *w); \ schedule(); \ } \ - \ - WRITE_ONCE((w)->task, NULL); \ - __set_current_state(TASK_RUNNING); \ + finish_rcuwait(w); \ __ret; \ }) diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 40b07168fd8e..cb666b9c6b6a 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -17,10 +17,12 @@ #include <linux/err.h> #include <linux/bug.h> #include <linux/lockdep.h> +#include <linux/iopoll.h> struct module; struct clk; struct device; +struct device_node; struct i2c_client; struct i3c_device; struct irq_domain; @@ -71,6 +73,13 @@ struct reg_sequence { unsigned int delay_us; }; +#define REG_SEQ(_reg, _def, _delay_us) { \ + .reg = _reg, \ + .def = _def, \ + .delay_us = _delay_us, \ + } +#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0) + #define regmap_update_bits(map, reg, mask, val) \ regmap_update_bits_base(map, reg, mask, val, NULL, false, false) #define regmap_update_bits_async(map, reg, mask, val)\ @@ -122,26 +131,10 @@ struct reg_sequence { */ #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ ({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __sleep_us = (sleep_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - int __ret; \ - might_sleep_if(__sleep_us); \ - for (;;) { \ - __ret = regmap_read((map), (addr), &(val)); \ - if (__ret) \ - break; \ - if (cond) \ - break; \ - if ((__timeout_us) && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - __ret = regmap_read((map), (addr), &(val)); \ - break; \ - } \ - if (__sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - } \ - __ret ?: ((cond) ? 0 : -ETIMEDOUT); \ + int __ret, __tmp; \ + __tmp = read_poll_timeout(regmap_read, __ret, __ret || (cond), \ + sleep_us, timeout_us, false, (map), (addr), &(val)); \ + __ret ?: __tmp; \ }) /** @@ -209,25 +202,10 @@ struct reg_sequence { */ #define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ ({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __sleep_us = (sleep_us); \ - ktime_t timeout = ktime_add_us(ktime_get(), __timeout_us); \ - int pollret; \ - might_sleep_if(__sleep_us); \ - for (;;) { \ - pollret = regmap_field_read((field), &(val)); \ - if (pollret) \ - break; \ - if (cond) \ - break; \ - if (__timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ - pollret = regmap_field_read((field), &(val)); \ - break; \ - } \ - if (__sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - } \ - pollret ?: ((cond) ? 
0 : -ETIMEDOUT); \ + int __ret, __tmp; \ + __tmp = read_poll_timeout(regmap_field_read, __ret, __ret || (cond), \ + sleep_us, timeout_us, false, (field), &(val)); \ + __ret ?: __tmp; \ }) #ifdef CONFIG_REGMAP @@ -1111,6 +1089,21 @@ bool regmap_reg_in_ranges(unsigned int reg, const struct regmap_range *ranges, unsigned int nranges); +static inline int regmap_set_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + return regmap_update_bits_base(map, reg, bits, bits, + NULL, false, false); +} + +static inline int regmap_clear_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false); +} + +int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits); + /** * struct reg_field - Description of a register field * @@ -1134,6 +1127,14 @@ struct reg_field { .msb = _msb, \ } +#define REG_FIELD_ID(_reg, _lsb, _msb, _size, _offset) { \ + .reg = _reg, \ + .lsb = _lsb, \ + .msb = _msb, \ + .id_size = _size, \ + .id_offset = _offset, \ + } + struct regmap_field *regmap_field_alloc(struct regmap *regmap, struct reg_field reg_field); void regmap_field_free(struct regmap_field *field); @@ -1310,12 +1311,21 @@ struct regmap_irq_chip_data; int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); +int regmap_add_irq_chip_np(struct device_node *np, struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); +int devm_regmap_add_irq_chip_np(struct device *dev, struct device_node *np, + struct regmap *map, int irq, int irq_flags, + int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); void devm_regmap_del_irq_chip(struct device *dev, int irq, struct regmap_irq_chip_data *data); @@ -1410,6 +1420,27 @@ static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg, return -EINVAL; } +static inline int regmap_set_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_clear_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_test_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_field_update_bits_base(struct regmap_field *field, unsigned int mask, unsigned int val, bool *change, bool async, bool force) diff --git a/include/linux/regset.h b/include/linux/regset.h index bf0243779738..46d6ae68c455 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -320,7 +320,7 @@ static inline int user_regset_copyout_zero(unsigned int *pos, if (*kbuf) { memset(*kbuf, 0, copy); *kbuf += copy; - } else if (__clear_user(*ubuf, copy)) + } else if (clear_user(*ubuf, copy)) return -EFAULT; else *ubuf += copy; diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h index 0212d6255e4e..5f86824bd117 100644 --- a/include/linux/regulator/coupler.h +++ b/include/linux/regulator/coupler.h @@ -62,6 +62,8 @@ int 
regulator_get_voltage_rdev(struct regulator_dev *rdev); int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV, int max_uV, suspend_state_t state); +int regulator_do_balance_voltage(struct regulator_dev *rdev, + suspend_state_t state, bool skip_coupled); #else static inline int regulator_coupler_register(struct regulator_coupler *coupler) { @@ -92,6 +94,12 @@ static inline int regulator_set_voltage_rdev(struct regulator_dev *rdev, { return -EINVAL; } +static inline int regulator_do_balance_voltage(struct regulator_dev *rdev, + suspend_state_t state, + bool skip_coupled) +{ + return -EINVAL; +} #endif #endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 29d920516e0b..7eb9fea8e482 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -13,6 +13,7 @@ #define __LINUX_REGULATOR_DRIVER_H_ #include <linux/device.h> +#include <linux/linear_range.h> #include <linux/notifier.h> #include <linux/regulator/consumer.h> #include <linux/ww_mutex.h> @@ -39,31 +40,13 @@ enum regulator_status { REGULATOR_STATUS_UNDEFINED, }; -/** - * struct regulator_linear_range - specify linear voltage ranges - * - * Specify a range of voltages for regulator_map_linear_range() and - * regulator_list_linear_range(). - * - * @min_uV: Lowest voltage in range - * @min_sel: Lowest selector for range - * @max_sel: Highest selector for range - * @uV_step: Step size - */ -struct regulator_linear_range { - unsigned int min_uV; - unsigned int min_sel; - unsigned int max_sel; - unsigned int uV_step; -}; - -/* Initialize struct regulator_linear_range */ +/* Initialize struct linear_range for regulators */ #define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \ { \ - .min_uV = _min_uV, \ + .min = _min_uV, \ .min_sel = _min_sel, \ .max_sel = _max_sel, \ - .uV_step = _step_uV, \ + .step = _step_uV, \ } /** @@ -348,7 +331,7 @@ struct regulator_desc { unsigned int ramp_delay; int min_dropout_uV; - const struct regulator_linear_range *linear_ranges; + const struct linear_range *linear_ranges; const unsigned int *linear_range_selectors; int n_linear_ranges; diff --git a/include/linux/relay.h b/include/linux/relay.h index c759f96e39c1..e13a333e7c37 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -141,7 +141,7 @@ struct rchan_callbacks * cause relay_open() to create a single global buffer rather * than the default set of per-cpu buffers. * - * See Documentation/filesystems/relay.txt for more info. + * See Documentation/filesystems/relay.rst for more info. 
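 *
 * A minimal sketch of this callback, assuming debugfs backs the buffer
 * files (illustrative only; the helper name is hypothetical):
 *
 *	static struct dentry *my_create_buf_file(const char *filename,
 *						 struct dentry *parent,
 *						 umode_t mode,
 *						 struct rchan_buf *buf,
 *						 int *is_global)
 *	{
 *		return debugfs_create_file(filename, mode, parent, buf,
 *					   &relay_file_operations);
 *	}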
*/ struct dentry *(*create_buf_file)(const char *filename, struct dentry *parent, diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 9c07d7958c53..e7b7bab8b235 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -73,7 +73,7 @@ struct resource_table { u32 ver; u32 num; u32 reserved[2]; - u32 offset[0]; + u32 offset[]; } __packed; /** @@ -87,7 +87,7 @@ struct resource_table { */ struct fw_rsc_hdr { u32 type; - u8 data[0]; + u8 data[]; } __packed; /** @@ -306,7 +306,7 @@ struct fw_rsc_vdev { u8 status; u8 num_of_vrings; u8 reserved[2]; - struct fw_rsc_vdev_vring vring[0]; + struct fw_rsc_vdev_vring vring[]; } __packed; struct rproc; @@ -355,6 +355,8 @@ enum rsc_handling_status { /** * struct rproc_ops - platform-specific device handlers + * @prepare: prepare device for code loading + * @unprepare: unprepare device after stop * @start: power on the device and boot it * @stop: power off the device * @kick: kick a virtqueue (virtqueue id given as a parameter) @@ -373,6 +375,8 @@ enum rsc_handling_status { * panic at least the returned number of milliseconds */ struct rproc_ops { + int (*prepare)(struct rproc *rproc); + int (*unprepare)(struct rproc *rproc); int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); @@ -489,7 +493,7 @@ struct rproc { struct list_head node; struct iommu_domain *domain; const char *name; - char *firmware; + const char *firmware; void *priv; struct rproc_ops *ops; struct device dev; @@ -518,6 +522,7 @@ struct rproc { struct list_head dump_segments; int nb_vdev; u8 elf_class; + u16 elf_machine; }; /** @@ -599,6 +604,11 @@ int rproc_add(struct rproc *rproc); int rproc_del(struct rproc *rproc); void rproc_free(struct rproc *rproc); +struct rproc *devm_rproc_alloc(struct device *dev, const char *name, + const struct rproc_ops *ops, + const char *firmware, int len); +int devm_rproc_add(struct device *dev, struct rproc *rproc); + void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem); struct rproc_mem_entry * @@ -622,6 +632,7 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, struct rproc_dump_segment *segment, void *dest), void *priv); +int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine); static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { diff --git a/include/linux/rio.h b/include/linux/rio.h index 317bace5ac64..2cd637268b4f 100644 --- a/include/linux/rio.h +++ b/include/linux/rio.h @@ -100,7 +100,7 @@ struct rio_switch { u32 port_ok; struct rio_switch_ops *ops; spinlock_t lock; - struct rio_dev *nextdev[0]; + struct rio_dev *nextdev[]; }; /** @@ -201,7 +201,7 @@ struct rio_dev { u8 hopcount; struct rio_dev *prev; atomic_t state; - struct rio_switch rswitch[0]; /* RIO switch info */ + struct rio_switch rswitch[]; /* RIO switch info */ }; #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list) diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 988d176472df..3a6adfa70fb0 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -77,7 +77,7 @@ struct anon_vma { struct anon_vma_chain { struct vm_area_struct *vma; struct anon_vma *anon_vma; - struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ + struct list_head same_vma; /* locked by mmap_lock & page_table_lock */ struct rb_node rb; /* locked by anon_vma->rwsem */ unsigned long rb_subtree_last; #ifdef CONFIG_DEBUG_VM_RB diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h 
index 96e26d94719f..daded9fddf36 100644 --- a/include/linux/rpmsg/qcom_glink.h +++ b/include/linux/rpmsg/qcom_glink.h @@ -12,6 +12,7 @@ struct qcom_glink; struct qcom_glink *qcom_glink_smem_register(struct device *parent, struct device_node *node); void qcom_glink_smem_unregister(struct qcom_glink *glink); +void qcom_glink_ssr_notify(const char *ssr_name); #else @@ -23,7 +24,7 @@ qcom_glink_smem_register(struct device *parent, } static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {} - +static inline void qcom_glink_ssr_notify(const char *ssr_name) {} #endif #endif diff --git a/include/linux/rslib.h b/include/linux/rslib.h index 5974cedd008c..238bb85243d3 100644 --- a/include/linux/rslib.h +++ b/include/linux/rslib.h @@ -54,7 +54,7 @@ struct rs_codec { */ struct rs_control { struct rs_codec *codec; - uint16_t buffers[0]; + uint16_t buffers[]; }; /* General purpose RS codec, 8-bit data width, symbol width 1-15 bit */ diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 65b8142a7fed..e8780d4e4636 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -1080,11 +1080,7 @@ struct pcr_ops { void (*stop_cmd)(struct rtsx_pcr *pcr); void (*set_aspm)(struct rtsx_pcr *pcr, bool enable); - int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency); - int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val); void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active); - void (*full_on)(struct rtsx_pcr *pcr); - void (*power_saving)(struct rtsx_pcr *pcr); void (*enable_ocp)(struct rtsx_pcr *pcr); void (*disable_ocp)(struct rtsx_pcr *pcr); void (*init_ocp)(struct rtsx_pcr *pcr); @@ -1108,13 +1104,6 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; #define L1_SNOOZE_TEST_EN BIT(5) #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6) -enum dev_aspm_mode { - DEV_ASPM_DYNAMIC, - DEV_ASPM_BACKDOOR, - DEV_ASPM_STATIC, - DEV_ASPM_DISABLE, -}; - /* * struct rtsx_cr_option - card reader option * @dev_flags: device flags @@ -1125,7 +1114,6 @@ enum dev_aspm_mode { * @ltr_active_latency: ltr mode active latency * @ltr_idle_latency: ltr mode idle latency * @ltr_l1off_latency: ltr mode l1off latency - * @dev_aspm_mode: device aspm mode * @l1_snooze_delay: l1 snooze delay * @ltr_l1off_sspwrgate: ltr l1off sspwrgate * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate @@ -1142,7 +1130,6 @@ struct rtsx_cr_option { u32 ltr_active_latency; u32 ltr_idle_latency; u32 ltr_l1off_latency; - enum dev_aspm_mode dev_aspm_mode; u32 l1_snooze_delay; u8 ltr_l1off_sspwrgate; u8 ltr_l1off_snooze_sspwrgate; @@ -1320,18 +1307,6 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr) return (u8 *)(pcr->host_cmds_ptr); } -static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr, - u8 mask, u8 append) -{ - int err; - u8 val; - - err = pci_read_config_byte(pcr->pci, addr, &val); - if (err < 0) - return err; - return pci_write_config_byte(pcr->pci, addr, (val & mask) | append); -} - static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val) { rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24); diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 6eec50fb36c8..4f922afb607a 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, #define for_each_sg(sglist, sg, nr, __i) \ for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) +/* + * Loop over each sg element in the given sg_table object. 
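+ *
+ * A minimal usage sketch (sgt points at a populated sg_table; sg and i
+ * are caller-provided iterator variables, names illustrative):
+ *
+ *	struct scatterlist *sg;
+ *	int i;
+ *
+ *	for_each_sgtable_sg(sgt, sg, i)
+ *		pr_debug("seg %d: length %u\n", i, sg->length);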
+ */ +#define for_each_sgtable_sg(sgt, sg, i) \ + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) + +/* + * Loop over each sg element in the given *DMA mapped* sg_table object. + * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses + * of the each element. + */ +#define for_each_sgtable_dma_sg(sgt, sg, i) \ + for_each_sg(sgt->sgl, sg, sgt->nents, i) + /** * sg_chain - Chain two sglists together * @prv: First scatterlist @@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) * @sglist: sglist to iterate over * @piter: page iterator to hold current page, sg, sg_pgoffset * @nents: maximum number of sg entries to iterate over - * @pgoffset: starting page offset + * @pgoffset: starting page offset (in pages) * * Callers may use sg_page_iter_page() to get each page pointer. + * In each loop it operates on PAGE_SIZE unit. */ #define for_each_sg_page(sglist, piter, nents, pgoffset) \ for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ @@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) /** * for_each_sg_dma_page - iterate over the pages of the given sg list * @sglist: sglist to iterate over - * @dma_iter: page iterator to hold current page + * @dma_iter: DMA page iterator to hold current page * @dma_nents: maximum number of sg entries to iterate over, this is the value * returned from dma_map_sg - * @pgoffset: starting page offset + * @pgoffset: starting page offset (in pages) * * Callers may use sg_page_iter_dma_address() to get each page's DMA address. + * In each loop it operates on PAGE_SIZE unit. */ #define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \ for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \ pgoffset); \ __sg_page_iter_dma_next(dma_iter);) +/** + * for_each_sgtable_page - iterate over all pages in the sg_table object + * @sgt: sg_table object to iterate over + * @piter: page iterator to hold current page + * @pgoffset: starting page offset (in pages) + * + * Iterates over the all memory pages in the buffer described by + * a scatterlist stored in the given sg_table object. + * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit. + */ +#define for_each_sgtable_page(sgt, piter, pgoffset) \ + for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset) + +/** + * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object + * @sgt: sg_table object to iterate over + * @dma_iter: DMA page iterator to hold current page + * @pgoffset: starting page offset (in pages) + * + * Iterates over the all DMA mapped pages in the buffer described by + * a scatterlist stored in the given sg_table object. + * See also for_each_sg_dma_page(). In each loop it operates on PAGE_SIZE + * unit. + */ +#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \ + for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset) + + /* * Mapping sg iterator * diff --git a/include/linux/sched.h b/include/linux/sched.h index 4418f5cb8324..b62e6aaf28f0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -31,6 +31,7 @@ #include <linux/task_io_accounting.h> #include <linux/posix-timers.h> #include <linux/rseq.h> +#include <linux/kcsan.h> /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -613,7 +614,7 @@ union rcu_special { u8 blocked; u8 need_qs; u8 exp_hint; /* Hint for performance. */ - u8 deferred_qs; + u8 need_mb; /* Readers need smp_mb(). */ } b; /* Bits. */ u32 s; /* Set of bits. 
*/ }; @@ -654,6 +655,7 @@ struct task_struct { #ifdef CONFIG_SMP struct llist_node wake_entry; + unsigned int wake_entry_type; int on_cpu; #ifdef CONFIG_THREAD_INFO_IN_TASK /* Current CPU: */ @@ -724,6 +726,14 @@ struct task_struct { struct list_head rcu_tasks_holdout_list; #endif /* #ifdef CONFIG_TASKS_RCU */ +#ifdef CONFIG_TASKS_TRACE_RCU + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + bool trc_reader_checked; + struct list_head trc_holdout_list; +#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ + struct sched_info sched_info; struct list_head tasks; @@ -983,6 +993,7 @@ struct task_struct { unsigned int hardirq_disable_event; int hardirqs_enabled; int hardirq_context; + u64 hardirq_chain_key; unsigned long softirq_disable_ip; unsigned long softirq_enable_ip; unsigned int softirq_disable_event; @@ -1187,6 +1198,9 @@ struct task_struct { #ifdef CONFIG_KASAN unsigned int kasan_depth; #endif +#ifdef CONFIG_KCSAN + struct kcsan_ctx kcsan_ctx; +#endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack: */ @@ -1237,6 +1251,9 @@ struct task_struct { /* KCOV sequence number: */ int kcov_sequence; + + /* Collect coverage from softirq context: */ + unsigned int kcov_softirq; #endif #ifdef CONFIG_MEMCG @@ -1289,6 +1306,14 @@ struct task_struct { unsigned long prev_lowest_stack; #endif +#ifdef CONFIG_X86_MCE + u64 mce_addr; + __u64 mce_ripv : 1, + mce_whole_page : 1, + __mce_reserved : 62; + struct callback_head mce_kill_me; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. @@ -1481,7 +1506,8 @@ extern struct pid *cad_pid; #define PF_KSWAPD 0x00020000 /* I am kswapd */ #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, + * I am cleaning dirty pages from some other bdi. */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ @@ -1715,7 +1741,15 @@ extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); }) #ifdef CONFIG_SMP -void scheduler_ipi(void); +static __always_inline void scheduler_ipi(void) +{ + /* + * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting + * TIF_NEED_RESCHED remotely (for the first time) will also send + * this IPI. + */ + preempt_fold_need_resched(); +} extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else static inline void scheduler_ipi(void) { } diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h index 95fb9e025247..00c45a0e6abe 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -30,7 +30,8 @@ extern void show_regs(struct pt_regs *); * task), SP is the stack pointer of the first frame that should be shown in the back * trace (or NULL if the entire call-chain of the task should be shown). 
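 *
 * The added @loglvl argument lets the caller pick the printk level for the
 * dump; e.g. show_stack(NULL, NULL, KERN_DEBUG) would print the current
 * task's stack at debug level (illustrative call).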
*/ -extern void show_stack(struct task_struct *task, unsigned long *sp); +extern void show_stack(struct task_struct *task, unsigned long *sp, + const char *loglvl); extern void sched_show_task(struct task_struct *p); diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index c49257a3b510..480a4d1b7dd8 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -49,9 +49,11 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } +void mmdrop(struct mm_struct *mm); + /* * This has to be called after a get_task_mm()/mmget_not_zero() - * followed by taking the mmap_sem for writing before modifying the + * followed by taking the mmap_lock for writing before modifying the * vmas or anything the coredump pretends not to change from under it. * * It also has to be called when mmgrab() is used in the context of @@ -59,14 +61,14 @@ static inline void mmdrop(struct mm_struct *mm) * the context of the process to run down_write() on that pinned mm. * * NOTE: find_extend_vma() called from GUP context is the only place - * that can modify the "mm" (notably the vm_start/end) under mmap_sem + * that can modify the "mm" (notably the vm_start/end) under mmap_lock * for reading and outside the context of the process, so it is also - * the only case that holds the mmap_sem for reading that must call - * this function. Generally if the mmap_sem is hold for reading + * the only case that holds the mmap_lock for reading that must call + * this function. Generally if the mmap_lock is hold for reading * there's no need of this check after get_task_mm()/mmget_not_zero(). * * This function can be obsoleted and the check can be removed, after - * the coredump code will hold the mmap_sem for writing before + * the coredump code will hold the mmap_lock for writing before * invoking the ->core_dump methods. */ static inline bool mmget_still_valid(struct mm_struct *mm) diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 3e5b090c16d4..0ee5e696c5d8 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -654,17 +654,6 @@ static inline bool thread_group_leader(struct task_struct *p) return p->exit_signal >= 0; } -/* Do to the insanities of de_thread it is possible for a process - * to have the pid of the thread group leader without actually being - * the thread group leader. For iteration through the pids in proc - * all we care about is that we have a task with the appropriate - * pid, we don't actually care if we have the right task. 
- */ -static inline bool has_group_leader_pid(struct task_struct *p) -{ - return task_pid(p) == task_tgid(p); -} - static inline bool same_thread_group(struct task_struct *p1, struct task_struct *p2) { diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index d4f6215ee03f..660ac49f2b53 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -7,14 +7,20 @@ struct ctl_table; #ifdef CONFIG_DETECT_HUNG_TASK + +#ifdef CONFIG_SMP +extern unsigned int sysctl_hung_task_all_cpu_backtrace; +#else +#define sysctl_hung_task_all_cpu_backtrace 0 +#endif /* CONFIG_SMP */ + extern int sysctl_hung_task_check_count; extern unsigned int sysctl_hung_task_panic; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_check_interval_secs; extern int sysctl_hung_task_warnings; -extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); +int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); #else /* Avoid need for ifdefs elsewhere in the code */ enum { sysctl_hung_task_timeout_secs = 0 }; @@ -43,8 +49,7 @@ extern __read_mostly unsigned int sysctl_sched_migration_cost; extern __read_mostly unsigned int sysctl_sched_nr_migrate; int sched_proc_update_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, - loff_t *ppos); + void *buffer, size_t *length, loff_t *ppos); #endif /* @@ -72,33 +77,21 @@ extern unsigned int sysctl_sched_autogroup_enabled; extern int sysctl_sched_rr_timeslice; extern int sched_rr_timeslice; -extern int sched_rr_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - -extern int sched_rt_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - -#ifdef CONFIG_UCLAMP_TASK -extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -#endif - -extern int sysctl_numa_balancing(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - -extern int sysctl_schedstats(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int sched_rr_handler(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int sched_rt_handler(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) extern unsigned int sysctl_sched_energy_aware; -extern int sched_energy_aware_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int sched_energy_aware_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); #endif #endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index af9319e4cfb9..fb11091129b3 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -11,21 +11,20 @@ */ #ifdef CONFIG_SMP -#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ -#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ -#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ -#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ -#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ -#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */ -#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */ -#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ -#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ -#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ -#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ -#define SD_NUMA 0x4000 /* cross-node balancing */ +#define SD_BALANCE_NEWIDLE 0x0001 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0002 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0004 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0008 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0010 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0020 /* Domain members have different CPU capacities */ +#define SD_SHARE_CPUCAPACITY 0x0040 /* Domain members share CPU capacity */ +#define SD_SHARE_POWERDOMAIN 0x0080 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0100 /* Domain members share CPU pkg resources */ +#define SD_SERIALIZE 0x0200 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0400 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x0800 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x1000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x2000 /* cross-node balancing */ #ifdef CONFIG_SCHED_SMT static inline int cpu_smt_flags(void) @@ -142,7 +141,7 @@ struct sched_domain { * by attaching extra space to the end of the structure, * depending on how many CPUs the kernel has booted up with) */ - unsigned long span[0]; + unsigned long span[]; }; static inline struct cpumask *sched_domain_span(struct sched_domain *sd) diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 5c873a59b387..ce2f5c28b2df 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -4,6 +4,10 @@ * * Copyright (C) 2018 ARM Ltd. */ + +#ifndef _LINUX_SCMI_PROTOCOL_H +#define _LINUX_SCMI_PROTOCOL_H + #include <linux/device.h> #include <linux/types.h> @@ -319,3 +323,5 @@ static inline void scmi_driver_unregister(struct scmi_driver *driver) {} typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); void scmi_protocol_unregister(int protocol_id); + +#endif /* _LINUX_SCMI_PROTOCOL_H */ diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index ecb004711acf..afbf8037d8db 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -4,6 +4,10 @@ * * Copyright (C) 2014 ARM Ltd. 
*/ + +#ifndef _LINUX_SCPI_PROTOCOL_H +#define _LINUX_SCPI_PROTOCOL_H + #include <linux/types.h> struct scpi_opp { @@ -71,3 +75,5 @@ struct scpi_ops *get_scpi_ops(void); #else static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } #endif + +#endif /* _LINUX_SCPI_PROTOCOL_H */ diff --git a/include/linux/scs.h b/include/linux/scs.h new file mode 100644 index 000000000000..6dec390cf154 --- /dev/null +++ b/include/linux/scs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shadow Call Stack support. + * + * Copyright (C) 2019 Google LLC + */ + +#ifndef _LINUX_SCS_H +#define _LINUX_SCS_H + +#include <linux/gfp.h> +#include <linux/poison.h> +#include <linux/sched.h> +#include <linux/sizes.h> + +#ifdef CONFIG_SHADOW_CALL_STACK + +/* + * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit + * architecture) provided ~40% safety margin on stack usage while keeping + * memory allocation overhead reasonable. + */ +#define SCS_SIZE SZ_1K +#define GFP_SCS (GFP_KERNEL | __GFP_ZERO) + +/* An illegal pointer value to mark the end of the shadow stack. */ +#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA) + +/* Allocate a static per-CPU shadow stack */ +#define DEFINE_SCS(name) \ + DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \ + +#define task_scs(tsk) (task_thread_info(tsk)->scs_base) +#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp) + +void scs_init(void); +int scs_prepare(struct task_struct *tsk, int node); +void scs_release(struct task_struct *tsk); + +static inline void scs_task_reset(struct task_struct *tsk) +{ + /* + * Reset the shadow stack to the base address in case the task + * is reused. + */ + task_scs_sp(tsk) = task_scs(tsk); +} + +static inline unsigned long *__scs_magic(void *s) +{ + return (unsigned long *)(s + SCS_SIZE) - 1; +} + +static inline bool task_scs_end_corrupted(struct task_struct *tsk) +{ + unsigned long *magic = __scs_magic(task_scs(tsk)); + unsigned long sz = task_scs_sp(tsk) - task_scs(tsk); + + return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC; +} + +#else /* CONFIG_SHADOW_CALL_STACK */ + +static inline void scs_init(void) {} +static inline void scs_task_reset(struct task_struct *tsk) {} +static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; } +static inline void scs_release(struct task_struct *tsk) {} +static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; } + +#endif /* CONFIG_SHADOW_CALL_STACK */ + +#endif /* _LINUX_SCS_H */ diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8ccd82105de8..76731230bbc5 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -221,7 +221,7 @@ struct sctp_datahdr { __be16 stream; __be16 ssn; __u32 ppid; - __u8 payload[0]; + __u8 payload[]; }; struct sctp_data_chunk { @@ -269,7 +269,7 @@ struct sctp_inithdr { __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; - __u8 params[0]; + __u8 params[]; }; struct sctp_init_chunk { @@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param { /* Section 3.3.2.1 Host Name Address (11) */ struct sctp_hostname_param { struct sctp_paramhdr param_hdr; - uint8_t hostname[0]; + uint8_t hostname[]; }; /* Section 3.3.2.1 Supported Address Types (12) */ struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; - __be16 types[0]; + __be16 types[]; }; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ @@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param { /* ADDIP Section 4.2.7 Supported Extensions Parameter */ 
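/*
 * The [0] -> [] changes in this file convert zero-length arrays to C99
 * flexible array members. sizeof() on the struct is unaffected; trailing
 * storage is still allocated explicitly, e.g. an illustrative sketch with
 * a hypothetical element count n:
 *
 *	struct sctp_supported_ext_param *p;
 *
 *	p = kmalloc(struct_size(p, chunks, n), GFP_KERNEL);
 */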
struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.1 Random */ struct sctp_random_param { struct sctp_paramhdr param_hdr; - __u8 random_val[0]; + __u8 random_val[]; }; /* AUTH Section 3.2 Chunk List */ struct sctp_chunks_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.3 HMAC Algorithm */ struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; - __be16 hmac_ids[0]; + __be16 hmac_ids[]; }; /* RFC 2960. Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): @@ -350,7 +350,7 @@ struct sctp_initack_chunk { /* Section 3.3.3.1 State Cookie (7) */ struct sctp_cookie_param { struct sctp_paramhdr p; - __u8 body[0]; + __u8 body[]; }; /* Section 3.3.3.1 Unrecognized Parameters (8) */ @@ -384,7 +384,7 @@ struct sctp_sackhdr { __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; - union sctp_sack_variable variable[0]; + union sctp_sack_variable variable[]; }; struct sctp_sack_chunk { @@ -436,7 +436,7 @@ struct sctp_shutdown_chunk { struct sctp_errhdr { __be16 cause; __be16 length; - __u8 variable[0]; + __u8 variable[]; }; struct sctp_operr_chunk { @@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip { struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_fwdtsn_skip skip[0]; + struct sctp_fwdtsn_skip skip[]; }; struct sctp_fwdtsn_chunk { @@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip { struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_ifwdtsn_skip skip[0]; + struct sctp_ifwdtsn_skip skip[]; }; struct sctp_ifwdtsn_chunk { @@ -658,7 +658,7 @@ struct sctp_addip_param { struct sctp_addiphdr { __be32 serial; - __u8 params[0]; + __u8 params[]; }; struct sctp_addip_chunk { @@ -718,7 +718,7 @@ struct sctp_addip_chunk { struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; - __u8 hmac[0]; + __u8 hmac[]; }; struct sctp_auth_chunk { @@ -733,7 +733,7 @@ struct sctp_infox { struct sctp_reconf_chunk { struct sctp_chunkhdr chunk_hdr; - __u8 params[0]; + __u8 params[]; }; struct sctp_strreset_outreq { @@ -741,13 +741,13 @@ struct sctp_strreset_outreq { __be32 request_seq; __be32 response_seq; __be32 send_reset_at_tsn; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_inreq { struct sctp_paramhdr param_hdr; __be32 request_seq; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_tsnreq { diff --git a/include/linux/security.h b/include/linux/security.h index a8d9310472df..0a0a03b36a3b 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -56,6 +56,8 @@ struct mm_struct; struct fs_context; struct fs_parameter; enum fs_value_type; +struct watch; +struct watch_notification; /* Default (no) options for the capable function */ #define CAP_OPT_NONE 0x0 @@ -140,7 +142,7 @@ extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -extern int cap_bprm_set_creds(struct linux_binprm *bprm); +extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); extern int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); extern int cap_inode_removexattr(struct dentry *dentry, const char *name); @@ -211,7 +213,7 @@ struct request_sock; #ifdef CONFIG_MMU extern int mmap_min_addr_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); #endif /* 
security_inode_init_security callback function to write xattrs */ @@ -276,7 +278,8 @@ int security_quota_on(struct dentry *dentry); int security_syslog(int type); int security_settime64(const struct timespec64 *ts, const struct timezone *tz); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); -int security_bprm_set_creds(struct linux_binprm *bprm); +int security_bprm_creds_for_exec(struct linux_binprm *bprm); +int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); int security_bprm_check(struct linux_binprm *bprm); void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); @@ -389,6 +392,8 @@ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); +int security_task_fix_setgid(struct cred *new, const struct cred *old, + int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); @@ -569,9 +574,15 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages)); } -static inline int security_bprm_set_creds(struct linux_binprm *bprm) +static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm) +{ + return 0; +} + +static inline int security_bprm_creds_from_file(struct linux_binprm *bprm, + struct file *file) { - return cap_bprm_set_creds(bprm); + return cap_bprm_creds_from_file(bprm, file); } static inline int security_bprm_check(struct linux_binprm *bprm) @@ -1027,6 +1038,13 @@ static inline int security_task_fix_setuid(struct cred *new, return cap_task_fix_setuid(new, old, flags); } +static inline int security_task_fix_setgid(struct cred *new, + const struct cred *old, + int flags) +{ + return 0; +} + static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; @@ -1275,6 +1293,28 @@ static inline int security_locked_down(enum lockdown_reason what) } #endif /* CONFIG_SECURITY */ +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n); +#else +static inline int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n) +{ + return 0; +} +#endif + +#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) +int security_watch_key(struct key *key); +#else +static inline int security_watch_key(struct key *key) +{ + return 0; +} +#endif + #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); @@ -1743,8 +1783,8 @@ static inline int security_path_chroot(const struct path *path) int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); -int security_key_permission(key_ref_t key_ref, - const struct cred *cred, unsigned perm); +int security_key_permission(key_ref_t key_ref, const struct cred *cred, + enum key_need_perm need_perm); int security_key_getsecurity(struct key *key, char **_buffer); #else @@ -1762,7 +1802,7 @@ static inline void security_key_free(struct key *key) static inline int security_key_permission(key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm 
need_perm) { return 0; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 1672cf6f7614..813614d4b71f 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -145,6 +145,25 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int); int seq_open_private(struct file *, const struct seq_operations *, int); int seq_release_private(struct inode *, struct file *); +#define DEFINE_SEQ_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + int ret = seq_open(file, &__name ## _sops); \ + if (!ret && inode->i_private) { \ + struct seq_file *seq_f = file->private_data; \ + seq_f->private = inode->i_private; \ + } \ + return ret; \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = seq_release, \ +} + #define DEFINE_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 0491d963d47e..8b97204f35a7 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -37,9 +37,25 @@ #include <linux/preempt.h> #include <linux/lockdep.h> #include <linux/compiler.h> +#include <linux/kcsan-checks.h> #include <asm/processor.h> /* + * The seqlock interface does not prescribe a precise sequence of read + * begin/retry/end. For readers, typically there is a call to + * read_seqcount_begin() and read_seqcount_retry(), however, there are more + * esoteric cases which do not follow this pattern. + * + * As a consequence, we take the following best-effort approach for raw usage + * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, + * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as + * atomics; if there is a matching read_seqcount_retry() call, no following + * memory operations are considered atomic. Usage of seqlocks via seqlock_t + * interface is not affected. + */ +#define KCSAN_SEQLOCK_REGION_MAX 1000 + +/* * Version using sequence counter only. * This can be used when code has its own mutex protecting the * updating starting before the write_seqcountbeqin() and ending @@ -115,6 +131,7 @@ repeat: cpu_relax(); goto repeat; } + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret; } @@ -131,6 +148,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret; } @@ -183,6 +201,7 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) { unsigned ret = READ_ONCE(s->sequence); smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret & ~1; } @@ -202,7 +221,8 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) */ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) { - return unlikely(s->sequence != start); + kcsan_atomic_next(0); + return unlikely(READ_ONCE(s->sequence) != start); } /** @@ -225,6 +245,7 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) static inline void raw_write_seqcount_begin(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); } @@ -233,6 +254,7 @@ static inline void raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); } /** @@ -243,6 +265,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * usual consistency guarantee. 
It is one wmb cheaper, because we can * collapse the two back-to-back wmb()s. * + * Note that writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. This is necessary because + * neither writes before and after the barrier are enclosed in a seq-writer + * critical section that would ensure readers are aware of ongoing writes. + * * seqcount_t seq; * bool X = true, Y = false; * @@ -262,18 +291,20 @@ static inline void raw_write_seqcount_end(seqcount_t *s) * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * * raw_write_seqcount_barrier(seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ static inline void raw_write_seqcount_barrier(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); } static inline int raw_read_seqcount_latch(seqcount_t *s) @@ -398,7 +429,9 @@ static inline void write_seqcount_end(seqcount_t *s) static inline void write_seqcount_invalidate(seqcount_t *s) { smp_wmb(); + kcsan_nestable_atomic_begin(); s->sequence+=2; + kcsan_nestable_atomic_end(); } typedef struct { @@ -430,11 +463,21 @@ typedef struct { */ static inline unsigned read_seqbegin(const seqlock_t *sl) { - return read_seqcount_begin(&sl->seqcount); + unsigned ret = read_seqcount_begin(&sl->seqcount); + + kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ + kcsan_flat_atomic_begin(); + return ret; } static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { + /* + * Assume not nested: read_seqretry() may be called multiple times when + * completing read critical section. 
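+	 *
+	 * The canonical reader loop this retry pairs with (illustrative):
+	 *
+	 *	do {
+	 *		seq = read_seqbegin(&sl);
+	 *		... read the protected data ...
+	 *	} while (read_seqretry(&sl, seq));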
+ */ + kcsan_flat_atomic_end(); + return read_seqcount_retry(&sl->seqcount, start); } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 92f5eba86052..9fd550e7946a 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -10,6 +10,7 @@ #include <linux/bitops.h> #include <linux/compiler.h> #include <linux/console.h> +#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/circ_buf.h> #include <linux/spinlock.h> @@ -251,6 +252,7 @@ struct uart_port { struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; + struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */ struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ }; @@ -472,5 +474,5 @@ extern int uart_handle_break(struct uart_port *port); (cflag) & CRTSCTS || \ !((cflag) & CLOCAL)) -void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf); +int uart_get_rs485_mode(struct uart_port *port); #endif /* LINUX_SERIAL_CORE_H */ diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 86281ac7c305..860e0f843c12 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page) #endif #ifndef set_mce_nospec -static inline int set_mce_nospec(unsigned long pfn) +static inline int set_mce_nospec(unsigned long pfn, bool unmap) { return 0; } diff --git a/include/linux/signal.h b/include/linux/signal.h index 05bacd2ab135..6bb1a3f0258c 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -24,6 +24,14 @@ static inline void clear_siginfo(kernel_siginfo_t *info) #define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo)) +static inline void copy_siginfo_to_external(siginfo_t *to, + const kernel_siginfo_t *from) +{ + memcpy(to, from, sizeof(*from)); + memset(((char *)to) + sizeof(struct kernel_siginfo), 0, + SI_EXPANSION_SIZE); +} + int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3a2ac7072dbb..0c0377fc00c2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1283,32 +1283,6 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); -#ifdef CONFIG_NET -int skb_flow_dissector_prog_query(const union bpf_attr *attr, - union bpf_attr __user *uattr); -int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, - struct bpf_prog *prog); - -int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr); -#else -static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr, - union bpf_attr __user *uattr) -{ - return -EOPNOTSUPP; -} - -static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, - struct bpf_prog *prog) -{ - return -EOPNOTSUPP; -} - -static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) -{ - return -EOPNOTSUPP; -} -#endif - struct bpf_flow_dissector; bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, __be16 proto, int nhoff, int hlen, unsigned int flags); @@ -3945,6 +3919,14 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) } } +static inline void 
__skb_reset_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 0; + } +} + /* Check if we need to perform checksum complete validation. * * Returns true if checksum complete is needed, false otherwise @@ -4162,10 +4144,10 @@ struct skb_ext { refcount_t refcnt; u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ u8 chunks; /* same */ - char data[0] __aligned(8); + char data[] __aligned(8); }; -struct skb_ext *__skb_ext_alloc(void); +struct skb_ext *__skb_ext_alloc(gfp_t flags); void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, struct skb_ext *ext); void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 8a709f63c5e5..08674cd14d5a 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -187,6 +187,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src, dst->sg.data[which] = src->sg.data[which]; dst->sg.data[which].length = size; dst->sg.size += size; + src->sg.size -= size; src->sg.data[which].length -= size; src->sg.data[which].offset += size; } @@ -436,4 +437,12 @@ static inline void psock_progs_drop(struct sk_psock_progs *progs) psock_set_prog(&progs->skb_verdict, NULL); } +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); + +static inline bool sk_psock_strp_enabled(struct sk_psock *psock) +{ + if (!psock) + return false; + return psock->parser.enabled; +} #endif /* _LINUX_SKMSG_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index cbc9162689d0..7ee202ad21a6 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -16,17 +16,39 @@ typedef void (*smp_call_func_t)(void *info); typedef bool (*smp_cond_func_t)(int cpu, void *info); + +enum { + CSD_FLAG_LOCK = 0x01, + + /* IRQ_WORK_flags */ + + CSD_TYPE_ASYNC = 0x00, + CSD_TYPE_SYNC = 0x10, + CSD_TYPE_IRQ_WORK = 0x20, + CSD_TYPE_TTWU = 0x30, + CSD_FLAG_TYPE_MASK = 0xF0, +}; + +/* + * structure shares (partial) layout with struct irq_work + */ struct __call_single_data { struct llist_node llist; + unsigned int flags; smp_call_func_t func; void *info; - unsigned int flags; }; /* Use __aligned() to avoid to use 2 cache lines for 1 csd */ typedef struct __call_single_data call_single_data_t __aligned(sizeof(struct __call_single_data)); +/* + * Enqueue a llist_node on the call_single_queue; be very careful, read + * flush_smp_call_function_queue() in detail. + */ +extern void __smp_call_single_queue(int cpu, struct llist_node *node); + /* total number of cpus in this system (may exceed NR_CPUS) */ extern unsigned int total_cpus; @@ -227,8 +249,8 @@ static inline int get_boot_cpu_id(void) */ extern void arch_disable_smp_support(void); -extern void arch_enable_nonboot_cpus_begin(void); -extern void arch_enable_nonboot_cpus_end(void); +extern void arch_thaw_secondary_cpus_begin(void); +extern void arch_thaw_secondary_cpus_end(void); void smp_setup_processor_id(void); diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h new file mode 100644 index 000000000000..7bab5d9a3d31 --- /dev/null +++ b/include/linux/soc/mediatek/mtk-mmsys.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. 
+ */ + +#ifndef __MTK_MMSYS_H +#define __MTK_MMSYS_H + +enum mtk_ddp_comp_id; +struct device; + +void mtk_mmsys_ddp_connect(struct device *dev, + enum mtk_ddp_comp_id cur, + enum mtk_ddp_comp_id next); + +void mtk_mmsys_ddp_disconnect(struct device *dev, + enum mtk_ddp_comp_id cur, + enum mtk_ddp_comp_id next); + +#endif /* __MTK_MMSYS_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 54338fac45cb..04d2bc97f497 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -50,7 +50,17 @@ struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ struct iov_iter msg_iter; /* data */ - void *msg_control; /* ancillary data */ + + /* + * Ancillary data. msg_control_user is the user buffer used for the + * recv* side when msg_control_is_user is set, msg_control is the kernel + * buffer used for all other cases. + */ + union { + void *msg_control; + void __user *msg_control_user; + }; + bool msg_control_is_user : 1; __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ @@ -94,7 +104,10 @@ struct cmsghdr { #define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) -#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr))) +#define CMSG_DATA(cmsg) \ + ((void *)(cmsg) + sizeof(struct cmsghdr)) +#define CMSG_USER_DATA(cmsg) \ + ((void __user *)(cmsg) + sizeof(struct cmsghdr)) #define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len)) #define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len)) diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 00f5826092e3..9c27a32df9bb 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -291,8 +291,8 @@ struct sdw_dpn_audio_mode { * implementation-defined interrupts * @max_ch: Maximum channels supported * @min_ch: Minimum channels supported - * @num_ch: Number of discrete channels supported - * @ch: Discrete channels supported + * @num_channels: Number of discrete channels supported + * @channels: Discrete channels supported * @num_ch_combinations: Number of channel combinations supported * @ch_combinations: Channel combinations supported * @modes: SDW mode supported @@ -316,8 +316,8 @@ struct sdw_dpn_prop { u32 imp_def_interrupts; u32 max_ch; u32 min_ch; - u32 num_ch; - u32 *ch; + u32 num_channels; + u32 *channels; u32 num_ch_combinations; u32 *ch_combinations; u32 modes; @@ -632,6 +632,19 @@ struct sdw_slave { #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) +/** + * struct sdw_master_device - SoundWire 'Master Device' representation + * @dev: Linux device for this Master + * @bus: Bus handle shortcut + */ +struct sdw_master_device { + struct device dev; + struct sdw_bus *bus; +}; + +#define dev_to_sdw_master_device(d) \ + container_of(d, struct sdw_master_device, dev) + struct sdw_driver { const char *name; @@ -787,8 +800,10 @@ struct sdw_master_ops { /** * struct sdw_bus - SoundWire bus - * @dev: Master linux device + * @dev: Shortcut to &bus->md->dev to avoid changing the entire code. + * @md: Master device * @link_id: Link id number, can be 0 to N, unique for each Master + * @id: bus system-wide unique id * @slaves: list of Slaves on this bus * @assigned: Bitmap for Slave device numbers. * Bit set implies used number, bit clear implies unused number. 
@@ -812,7 +827,9 @@ struct sdw_master_ops { */ struct sdw_bus { struct device *dev; + struct sdw_master_device *md; unsigned int link_id; + int id; struct list_head slaves; DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); struct mutex bus_lock; @@ -832,8 +849,9 @@ struct sdw_bus { bool multi_link; }; -int sdw_add_bus_master(struct sdw_bus *bus); -void sdw_delete_bus_master(struct sdw_bus *bus); +int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + struct fwnode_handle *fwnode); +void sdw_bus_master_delete(struct sdw_bus *bus); /** * sdw_port_config: Master or Slave Port configuration diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h index aaa7f4267c14..52eb66cd11bc 100644 --- a/include/linux/soundwire/sdw_type.h +++ b/include/linux/soundwire/sdw_type.h @@ -5,6 +5,13 @@ #define __SOUNDWIRE_TYPES_H extern struct bus_type sdw_bus_type; +extern struct device_type sdw_slave_type; +extern struct device_type sdw_master_type; + +static inline int is_sdw_slave(const struct device *dev) +{ + return dev->type == &sdw_slave_type; +} #define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver) @@ -14,7 +21,7 @@ extern struct bus_type sdw_bus_type; int __sdw_register_driver(struct sdw_driver *drv, struct module *owner); void sdw_unregister_driver(struct sdw_driver *drv); -int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); +int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env); /** * module_sdw_driver() - Helper macro for registering a Soundwire driver diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h deleted file mode 100644 index 831a5de7a0e2..000000000000 --- a/include/linux/spi/l4f00242t03.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD - * - * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> - * Based on Marek Vasut work in lms283gf05.h -*/ - -#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ -#define _INCLUDE_LINUX_SPI_L4F00242T03_H_ - -struct l4f00242t03_pdata { - unsigned int reset_gpio; - unsigned int data_enable_gpio; -}; - -#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */ diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h deleted file mode 100644 index 738a45b435f2..000000000000 --- a/include/linux/spi/mcp23s08.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -struct mcp23s08_platform_data { - /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI - * chipselect, each providing 1 gpio_chip instance with 8 gpios. - * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI - * chipselect, each providing 1 gpio_chip (port A + port B) with - * 16 gpios. - */ - u32 spi_present_mask; - - /* "base" is the number of the first GPIO or -1 for dynamic - * assignment. If there are gaps in chip addressing the GPIO - * numbers are sequential .. so for example if only slaves 0 - * and 3 are present, their GPIOs range from base to base+15 - * (or base+31 for s17 variant). - */ - unsigned base; -}; diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 38286de779e3..aac57b5b7c21 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -394,6 +394,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * for example doing DMA mapping. Called from threaded * context. * @transfer_one: transfer a single spi_transfer. 
+ * * - return 0 if the transfer is finished, * - return 1 if the transfer is still in progress. When * the driver is finished with this transfer it must diff --git a/include/linux/splice.h b/include/linux/splice.h index ebbbfea48aa0..5c47013f708e 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -82,6 +82,9 @@ extern long do_splice(struct file *in, loff_t __user *off_in, struct file *out, loff_t __user *off_out, size_t len, unsigned int flags); +extern long do_tee(struct file *in, struct file *out, size_t len, + unsigned int flags); + /* * for dynamic pipe sizing */ diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 83bd8cb475d7..b7af8cc13eda 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -64,7 +64,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; - int skip; /* input argument: How many entries to skip */ + unsigned int skip; /* input argument: How many entries to skip */ }; extern void save_stack_trace(struct stack_trace *trace); diff --git a/include/linux/stat.h b/include/linux/stat.h index 528c4baad091..56614af83d4a 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -47,6 +47,7 @@ struct kstat { struct timespec64 ctime; struct timespec64 btime; /* File creation time */ u64 blocks; + u64 mnt_id; }; #endif diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index fbafb353e9be..bd964c31d333 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -177,6 +177,8 @@ struct plat_stmmacenet_data { struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES]; struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES]; void (*fix_mac_speed)(void *priv, unsigned int speed); + int (*serdes_powerup)(struct net_device *ndev, void *priv); + void (*serdes_powerdown)(struct net_device *ndev, void *priv); int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); struct mac_device_info *(*setup)(void *priv); diff --git a/include/linux/string.h b/include/linux/string.h index 6dfbb2efa815..9b7a0632e87a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -272,6 +272,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) + +#ifdef CONFIG_KASAN +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); +extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define 
__underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define __underlying_strcat __builtin_strcat +#define __underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -279,14 +304,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_strncpy(p, q, size); + return __underlying_strncpy(p, q, size); } __FORTIFY_INLINE char *strcat(char *p, const char *q) { size_t p_size = __builtin_object_size(p, 0); if (p_size == (size_t)-1) - return __builtin_strcat(p, q); + return __underlying_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) fortify_panic(__func__); return p; @@ -300,7 +325,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) /* Work around gcc excess stack consumption issue */ if (p_size == (size_t)-1 || (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) - return __builtin_strlen(p); + return __underlying_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) fortify_panic(__func__); @@ -333,7 +358,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) __write_overflow(); if (len >= p_size) fortify_panic(__func__); - __builtin_memcpy(p, q, len); + __underlying_memcpy(p, q, len); p[len] = '\0'; } return ret; @@ -346,12 +371,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strncat(p, q, count); + return __underlying_strncat(p, q, count); p_len = strlen(p); copy_len = strnlen(q, count); if (p_size < p_len + copy_len + 1) fortify_panic(__func__); - __builtin_memcpy(p + p_len, q, copy_len); + __underlying_memcpy(p + p_len, q, copy_len); p[p_len + copy_len] = '\0'; return p; } @@ -363,7 +388,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memset(p, c, size); + return __underlying_memset(p, c, size); } __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) @@ -378,7 +403,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcpy(p, q, size); + return __underlying_memcpy(p, q, size); } __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) @@ -393,7 +418,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memmove(p, q, size); + return __underlying_memmove(p, q, size); } extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); @@ -419,7 +444,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcmp(p, q, size); + return __underlying_memcmp(p, q, size); } __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) @@ -429,7 +454,7 @@ __FORTIFY_INLINE void *memchr(const 
void *p, int c, __kernel_size_t size) __read_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memchr(p, c, size); + return __underlying_memchr(p, c, size); } void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); @@ -460,11 +485,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strcpy(p, q); + return __underlying_strcpy(p, q); memcpy(p, q, strlen(q) + 1); return p; } +/* Don't use these outside the FORTIFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy #endif /** diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 4f6b28487f28..98da816b5fc2 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -76,7 +76,7 @@ struct rpc_auth { unsigned int au_verfsize; /* size of reply verifier */ unsigned int au_ralign; /* words before UL header */ - unsigned int au_flags; + unsigned long au_flags; const struct rpc_authops *au_ops; rpc_authflavor_t au_flavor; /* pseudoflavor (note may * differ from the flavor in @@ -89,7 +89,8 @@ struct rpc_auth { }; /* rpc_auth au_flags */ -#define RPCAUTH_AUTH_DATATOUCH 0x00000002 +#define RPCAUTH_AUTH_DATATOUCH (1) +#define RPCAUTH_AUTH_UPDATE_SLACK (2) struct rpc_auth_create_args { rpc_authflavor_t pseudoflavor; diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index ca7e108248e2..02e7a5863d28 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -71,7 +71,13 @@ struct rpc_clnt { #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *cl_debugfs; /* debugfs directory */ #endif - struct rpc_xprt_iter cl_xpi; + /* cl_work is only needed after cl_xpi is no longer used, + * and the two are of similar size + */ + union { + struct rpc_xprt_iter cl_xpi; + struct work_struct cl_work; + }; const struct cred *cl_cred; }; @@ -236,4 +242,9 @@ static inline int rpc_reply_expected(struct rpc_task *task) (task->tk_msg.rpc_proc->p_decode != NULL); } +static inline void rpc_task_close_connection(struct rpc_task *task) +{ + if (task->tk_xprt) + xprt_force_disconnect(task->tk_xprt); +} #endif /* _LINUX_SUNRPC_CLNT_H */ diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index 48c1b1674cbf..bf4ac8a0268c 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -21,6 +21,7 @@ struct gss_ctx { struct gss_api_mech *mech_type; void *internal_ctx_id; + unsigned int slack, align; }; #define GSS_C_NO_BUFFER ((struct xdr_netobj) 0) @@ -66,6 +67,7 @@ u32 gss_wrap( u32 gss_unwrap( struct gss_ctx *ctx_id, int offset, + int len, struct xdr_buf *inbuf); u32 gss_delete_sec_context( struct gss_ctx **ctx_id); @@ -82,6 +84,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + struct auth_domain *domain; bool datatouch; }; @@ -126,6 +129,7 @@ struct gss_api_ops { u32 (*gss_unwrap)( struct gss_ctx *ctx_id, int offset, + int len, struct xdr_buf *buf); void (*gss_delete_sec_context)( void *internal_ctx_id); diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h index c1d77dd8ed41..e8f8ffe7448b 100644 --- a/include/linux/sunrpc/gss_krb5.h +++ 
b/include/linux/sunrpc/gss_krb5.h @@ -83,7 +83,7 @@ struct gss_krb5_enctype { u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, struct page **pages); /* v2 encryption function */ - u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, + u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *headskip, u32 *tailskip); /* v2 decryption function */ }; @@ -255,7 +255,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset, struct xdr_buf *outbuf, struct page **pages); u32 -gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, +gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len, struct xdr_buf *buf); @@ -312,7 +312,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset, struct page **pages); u32 -gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, +gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len, struct xdr_buf *buf, u32 *plainoffset, u32 *plainlen); diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index fd390894a584..386628b36bc7 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -254,6 +254,7 @@ struct svc_rqst { struct page * *rq_page_end; /* one past the last page */ struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ + struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; __be32 rq_xid; /* transmission id */ u32 rq_prog; /* program number */ @@ -299,6 +300,7 @@ struct svc_rqst { struct net *rq_bc_net; /* pointer to backchannel's * net namespace */ + void ** rq_lease_breaker; /* The v4 client breaking a lease */ }; #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 78fe2ac6dc6c..7ed82625dc0b 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -48,7 +48,6 @@ #include <linux/sunrpc/rpc_rdma.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> -#define SVCRDMA_DEBUG /* Default and maximum inline threshold sizes */ enum { @@ -160,9 +159,8 @@ struct svc_rdma_send_ctxt { }; /* svc_rdma_backchannel.c */ -extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, - __be32 *rdma_resp, - struct xdr_buf *rcvbuf); +extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt); /* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); @@ -170,6 +168,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt); extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); +extern void svc_rdma_release_rqst(struct svc_rqst *rqstp); extern int svc_rdma_recvfrom(struct svc_rqst *); /* svc_rdma_rw.c */ diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 9e1e046de176..aca35ab5cff2 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u return 0; } +static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt) +{ + return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) || + (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0); +} + int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index 
ca39a388dc22..f09c82b0a7ae 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -20,7 +20,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 771baadaee9d..b7ac7fe68306 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -28,7 +28,7 @@ struct svc_sock { /* private TCP part */ /* On-the-wire fragment header: */ - __be32 sk_reclen; + __be32 sk_marker; /* As we receive a record, this includes the length received so * far (including the fragment header): */ u32 sk_tcplen; @@ -41,12 +41,12 @@ struct svc_sock { static inline u32 svc_sock_reclen(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; + return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK; } static inline u32 svc_sock_final_rec(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; + return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT; } /* diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 01bb41908c93..22c207b2425f 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -184,6 +184,7 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) extern void xdr_shift_buf(struct xdr_buf *, size_t); extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); +extern void xdr_buf_trim(struct xdr_buf *, unsigned int); extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4fcc6fd0cbd6..b960098acfb0 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -466,6 +466,12 @@ static inline bool system_entering_hibernation(void) { return false; } static inline bool hibernation_available(void) { return false; } #endif /* CONFIG_HIBERNATION */ +#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV +int is_hibernate_resume_dev(const struct inode *); +#else +static inline int is_hibernate_resume_dev(const struct inode *i) { return 0; } +#endif + /* Hibernation and suspend events */ #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ #define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ diff --git a/include/linux/swait.h b/include/linux/swait.h index 73e06e9986d4..6a8c22b8c2a5 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -9,23 +9,10 @@ #include <asm/current.h> /* - * BROKEN wait-queues. - * - * These "simple" wait-queues are broken garbage, and should never be - * used. The comments below claim that they are "similar" to regular - * wait-queues, but the semantics are actually completely different, and - * every single user we have ever had has been buggy (or pointless). - * - * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what - * "wake_up()" does, and has led to problems. 
In other cases, it has - been fine, because there's only ever one waiter (kvm), but in that - case gthe whole "simple" wait-queue is just pointless to begin with, - since there is no "queue". Use "wake_up_process()" with a direct - pointer instead. - * - * While these are very similar to regular wait queues (wait.h) the most - * important difference is that the simple waitqueue allows for deterministic - * behaviour -- IOW it has strictly bounded IRQ and lock hold times. + * Simple waitqueues are semantically very different to regular wait queues + * (wait.h). The most important difference is that the simple waitqueue allows + * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold + * times. * * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher @@ -39,7 +26,7 @@ * sleeper state. * * - the !exclusive mode; because that leads to O(n) wakeups, everything is - * exclusive. + * exclusive. As such swake_up_one will only ever wake _one_ waiter. * * - custom wake callback functions; because you cannot give any guarantees * about random code. This also allows swait to be used in RT, such that diff --git a/include/linux/swap.h b/include/linux/swap.h index b835d8dbea0e..4c5974bb9ba9 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -183,12 +183,17 @@ enum { #define SWAP_CLUSTER_MAX 32UL #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX -#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ -#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ +/* Bit flag in swap_map */ #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ -#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ -#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ -#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */ +#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */ + +/* Special value in first swap_map */ +#define SWAP_MAP_MAX 0x3e /* Max count */ +#define SWAP_MAP_BAD 0x3f /* Note page is bad */ +#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */ + +/* Special value in each swap_map continuation */ +#define SWAP_CONT_MAX 0x7f /* Max count */ /* * We use this to track usage of a cluster. A cluster is a block of swap disk @@ -247,6 +252,7 @@ struct swap_info_struct { unsigned int inuse_pages; /* number of those currently in use */ unsigned int cluster_next; /* likely index for next allocation */ unsigned int cluster_nr; /* countdown to next cluster search */ + unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ @@ -275,7 +281,7 @@ struct swap_info_struct { */ struct work_struct discard_work; /* discard worker */ struct swap_cluster_list discard_clusters; /* discard clusters list */ - struct plist_node avail_lists[0]; /* + struct plist_node avail_lists[]; /* * entries in swap_avail_heads, one * entry per node. 
* Must be last as the number of the @@ -328,15 +334,17 @@ extern unsigned long nr_free_pagecache_pages(void); /* linux/mm/swap.c */ +extern void lru_note_cost(struct lruvec *lruvec, bool file, + unsigned int nr_pages); +extern void lru_note_cost_page(struct page *); extern void lru_cache_add(struct page *); -extern void lru_cache_add_anon(struct page *page); -extern void lru_cache_add_file(struct page *page); extern void lru_add_page_tail(struct page *page, struct page *page_tail, struct lruvec *lruvec, struct list_head *head); extern void activate_page(struct page *); extern void mark_page_accessed(struct page *); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); +extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); @@ -408,7 +416,6 @@ extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); -extern int __add_to_swap_cache(struct page *page, swp_entry_t entry); extern void __delete_from_swap_cache(struct page *, swp_entry_t entry); extern void delete_from_swap_cache(struct page *); extern void free_page_and_swap_cache(struct page *); @@ -645,11 +652,9 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) #endif #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node, - gfp_t gfp_mask); +extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask); #else -static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, - int node, gfp_t gfp_mask) +static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) { } #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 1815065d52f3..7c354c2955f5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -428,6 +428,8 @@ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); #endif asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); +asmlinkage long sys_faccessat2(int dfd, const char __user *filename, int mode, + int flags); asmlinkage long sys_chdir(const char __user *filename); asmlinkage long sys_fchdir(unsigned int fd); asmlinkage long sys_chroot(const char __user *filename); @@ -1333,11 +1335,11 @@ static inline int ksys_chmod(const char __user *filename, umode_t mode) return do_fchmodat(AT_FDCWD, filename, mode); } -extern long do_faccessat(int dfd, const char __user *filename, int mode); +long do_faccessat(int dfd, const char __user *filename, int mode, int flags); static inline long ksys_access(const char __user *filename, int mode) { - return do_faccessat(AT_FDCWD, filename, mode); + return do_faccessat(AT_FDCWD, filename, mode, 0); } extern int do_fchownat(int dfd, const char __user *filename, uid_t user, diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 02fa84493f23..50bb7f383a1b 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -44,35 +44,26 @@ struct ctl_dir; extern const int sysctl_vals[]; -typedef int proc_handler (struct ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); - -extern int proc_dostring(struct ctl_table *, int, - void __user *, size_t *, loff_t *); 
-extern int proc_dointvec(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_douintvec(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_dointvec_minmax(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_douintvec_minmax(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int proc_dointvec_jiffies(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_doulongvec_minmax(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, - void __user *, size_t *, loff_t *); -extern int proc_do_large_bitmap(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_do_static_key(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, + size_t *lenp, loff_t *ppos); + +int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, + loff_t *); +int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *, + loff_t *); +int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *, + size_t *, loff_t *); +int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_do_static_key(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); /* * Register a set of sysctl names by calling register_sysctl_table @@ -206,8 +197,17 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init(void); +void do_sysctl_args(void); + +extern int pwrsw_enabled; +extern int unaligned_enabled; +extern int unaligned_dump_stack; +extern int no_unaligned_warning; extern struct ctl_table sysctl_mount_point[]; +extern struct ctl_table random_table[]; +extern struct ctl_table firmware_config_table[]; +extern struct ctl_table epoll_table[]; #else /* CONFIG_SYSCTL */ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) @@ -236,9 +236,12 @@ static inline void setup_sysctl_set(struct ctl_table_set *p, { } +static inline void do_sysctl_args(void) +{ +} #endif /* CONFIG_SYSCTL */ -int sysctl_max_threads(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); +int sysctl_max_threads(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); #endif /* _LINUX_SYSCTL_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 80bb865b3a33..86067dbe7745 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -7,7 +7,7 @@ * 
Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * - * Please see Documentation/filesystems/sysfs.txt for more information. + * Please see Documentation/filesystems/sysfs.rst for more information. */ #ifndef _SYSFS_H_ diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 8e159e16850f..3a582ec7a2f1 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -30,10 +30,10 @@ #define SYSRQ_ENABLE_RTNICE 0x0100 struct sysrq_key_op { - void (*handler)(int); - char *help_msg; - char *action_msg; - int enable_mask; + void (* const handler)(int); + const char * const help_msg; + const char * const action_msg; + const int enable_mask; }; #ifdef CONFIG_MAGIC_SYSRQ @@ -45,9 +45,9 @@ struct sysrq_key_op { void handle_sysrq(int key); void __handle_sysrq(int key, bool check_mask); -int register_sysrq_key(int key, struct sysrq_key_op *op); -int unregister_sysrq_key(int key, struct sysrq_key_op *op); -struct sysrq_key_op *__sysrq_get_key_op(int key); +int register_sysrq_key(int key, const struct sysrq_key_op *op); +int unregister_sysrq_key(int key, const struct sysrq_key_op *op); +extern const struct sysrq_key_op *__sysrq_reboot_op; int sysrq_toggle_support(int enable_mask); int sysrq_mask(void); @@ -62,12 +62,12 @@ static inline void __handle_sysrq(int key, bool check_mask) { } -static inline int register_sysrq_key(int key, struct sysrq_key_op *op) +static inline int register_sysrq_key(int key, const struct sysrq_key_op *op) { return -EINVAL; } -static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op) +static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op) { return -EINVAL; } diff --git a/include/linux/tboot.h b/include/linux/tboot.h index 5424bc6feac8..c7e424766360 100644 --- a/include/linux/tboot.h +++ b/include/linux/tboot.h @@ -121,13 +121,7 @@ struct tboot { #define TBOOT_UUID {0xff, 0x8d, 0x3c, 0x66, 0xb3, 0xe8, 0x82, 0x4b, 0xbf,\ 0xaa, 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8} -extern struct tboot *tboot; - -static inline int tboot_enabled(void) -{ - return tboot != NULL; -} - +bool tboot_enabled(void); extern void tboot_probe(void); extern void tboot_shutdown(u32 shutdown_type); extern struct acpi_table_header *tboot_get_dmar_table( diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 421c99c12291..9aac824c523c 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -78,47 +78,6 @@ struct tcp_sack_block { #define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */ #define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/ -#if IS_ENABLED(CONFIG_MPTCP) -struct mptcp_options_received { - u64 sndr_key; - u64 rcvr_key; - u64 data_ack; - u64 data_seq; - u32 subflow_seq; - u16 data_len; - u16 mp_capable : 1, - mp_join : 1, - dss : 1, - add_addr : 1, - rm_addr : 1, - family : 4, - echo : 1, - backup : 1; - u32 token; - u32 nonce; - u64 thmac; - u8 hmac[20]; - u8 join_id; - u8 use_map:1, - dsn64:1, - data_fin:1, - use_ack:1, - ack64:1, - mpc_map:1, - __unused:2; - u8 addr_id; - u8 rm_id; - union { - struct in_addr addr; -#if IS_ENABLED(CONFIG_MPTCP_IPV6) - struct in6_addr addr6; -#endif - }; - u64 ahmac; - u16 port; -}; -#endif - struct tcp_options_received { /* PAWS/RTTM data */ int ts_recent_stamp;/* Time we stored ts_recent (for aging) */ @@ -136,9 +95,6 @@ struct tcp_options_received { u8 num_sacks; /* Number of SACK blocks */ u16 user_mss; /* mss requested by user in ioctl */ u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ -#if IS_ENABLED(CONFIG_MPTCP) - struct 
mptcp_options_received mptcp; -#endif }; static inline void tcp_clear_options(struct tcp_options_received *rx_opt) @@ -148,13 +104,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt) #if IS_ENABLED(CONFIG_SMC) rx_opt->smc_ok = 0; #endif -#if IS_ENABLED(CONFIG_MPTCP) - rx_opt->mptcp.mp_capable = 0; - rx_opt->mptcp.mp_join = 0; - rx_opt->mptcp.add_addr = 0; - rx_opt->mptcp.rm_addr = 0; - rx_opt->mptcp.dss = 0; -#endif } /* This is the max number of SACKS that we'll generate and process. It's safe @@ -171,6 +120,9 @@ struct tcp_request_sock { u64 snt_synack; /* first SYNACK sent time */ bool tfo_listener; bool is_mptcp; +#if IS_ENABLED(CONFIG_MPTCP) + bool drop_req; +#endif u32 txhash; u32 rcv_isn; u32 snt_isn; @@ -268,6 +220,7 @@ struct tcp_sock { } rack; u16 advmss; /* Advertised MSS */ u8 compressed_ack; + u8 dup_ack_counter; u32 chrono_start; /* Start time in jiffies of a TCP chrono */ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ @@ -544,4 +497,13 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, int shiftlen); +void tcp_sock_set_cork(struct sock *sk, bool on); +int tcp_sock_set_keepcnt(struct sock *sk, int val); +int tcp_sock_set_keepidle(struct sock *sk, int val); +int tcp_sock_set_keepintvl(struct sock *sk, int val); +void tcp_sock_set_nodelay(struct sock *sk); +void tcp_sock_set_quickack(struct sock *sk, int val); +int tcp_sock_set_syncnt(struct sock *sk, int val); +void tcp_sock_set_user_timeout(struct sock *sk, u32 val); + #endif /* _LINUX_TCP_H */ diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 1412e9cc79ce..d074302989dd 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -26,6 +26,7 @@ #define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ +#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ struct device; struct tee_device; @@ -166,6 +167,22 @@ int tee_device_register(struct tee_device *teedev); void tee_device_unregister(struct tee_device *teedev); /** + * tee_session_calc_client_uuid() - Calculates client UUID for session + * @uuid: Resulting UUID + * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) + * @connection_data: Connection data for opening session + * + * Based on connection method calculates UUIDv5 based client UUID. + * + * For group based logins verifies that calling process has specified + * credentials. 
+ * + * @return < 0 on failure + */ +int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, + const u8 connection_data[TEE_IOCTL_UUID_LEN]); + +/** * struct tee_shm - shared memory object * @ctx: context using the object * @paddr: physical address of the shared memory diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c91b1e344d56..216185bb3014 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -32,20 +32,10 @@ /* use value, which < 0K, to indicate an invalid/uninitialized temperature */ #define THERMAL_TEMP_INVALID -274000 -/* Default Thermal Governor */ -#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) -#define DEFAULT_THERMAL_GOVERNOR "step_wise" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE) -#define DEFAULT_THERMAL_GOVERNOR "fair_share" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) -#define DEFAULT_THERMAL_GOVERNOR "user_space" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) -#define DEFAULT_THERMAL_GOVERNOR "power_allocator" -#endif - struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; +struct thermal_attr; enum thermal_device_mode { THERMAL_DEVICE_DISABLED = 0, @@ -130,11 +120,6 @@ struct thermal_cooling_device { struct list_head node; }; -struct thermal_attr { - struct device_attribute attr; - char name[THERMAL_NAME_LENGTH]; -}; - /** * struct thermal_zone_device - structure for a thermal zone * @id: unique id number for each thermal zone @@ -347,21 +332,6 @@ struct thermal_zone_of_device_ops { int (*set_trip_temp)(void *, int, int); }; -/** - * struct thermal_trip - representation of a point in temperature domain - * @np: pointer to struct device_node that this trip point was created from - * @temperature: temperature value in miliCelsius - * @hysteresis: relative hysteresis in miliCelsius - * @type: trip point type - */ - -struct thermal_trip { - struct device_node *np; - int temperature; - int hysteresis; - enum thermal_trip_type type; -}; - /* Function declarations */ #ifdef CONFIG_THERMAL_OF int thermal_zone_of_get_sensor_id(struct device_node *tz_np, @@ -413,19 +383,7 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, #endif -#if IS_ENABLED(CONFIG_THERMAL) -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ - return cdev->ops->get_requested_power && cdev->ops->state2power && - cdev->ops->power2state; -} - -int power_actor_get_max_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *max_power); -int power_actor_get_min_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *min_power); -int power_actor_set_power(struct thermal_cooling_device *, - struct thermal_instance *, u32); +#ifdef CONFIG_THERMAL struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); @@ -439,7 +397,6 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); -void thermal_zone_set_trips(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); @@ -457,24 +414,9 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -int 
get_tz_trend(struct thermal_zone_device *, int); -struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, - struct thermal_cooling_device *, int); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); #else -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ return false; } -static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, u32 *max_power) -{ return 0; } -static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, - u32 *min_power) -{ return -ENODEV; } -static inline int power_actor_set_power(struct thermal_cooling_device *cdev, - struct thermal_instance *tz, u32 power) -{ return 0; } static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, @@ -484,21 +426,6 @@ static inline struct thermal_zone_device *thermal_zone_device_register( static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } -static inline int thermal_zone_bind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, - unsigned int weight) -{ return -ENODEV; } -static inline int thermal_zone_unbind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev) -{ return -ENODEV; } -static inline void thermal_zone_device_update(struct thermal_zone_device *tz, - enum thermal_notify_event event) -{ } -static inline void thermal_zone_set_trips(struct thermal_zone_device *tz) -{ } static inline struct thermal_cooling_device * thermal_cooling_device_register(char *type, void *devdata, const struct thermal_cooling_device_ops *ops) @@ -530,12 +457,7 @@ static inline int thermal_zone_get_slope( static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) -{ return -ENODEV; } -static inline struct thermal_instance * -get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int trip) -{ return ERR_PTR(-ENODEV); } + static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) { } static inline void thermal_notify_framework(struct thermal_zone_device *tz, diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index ece782ef5466..ff397c0d5c07 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -80,7 +80,7 @@ struct tb { int index; enum tb_security_level security_level; size_t nboot_acl; - unsigned long privdata[0]; + unsigned long privdata[]; }; extern struct bus_type tb_bus_type; diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h index eb6cbdf10e50..44a7f9169ac6 100644 --- a/include/linux/ti_wilink_st.h +++ b/include/linux/ti_wilink_st.h @@ -295,7 +295,7 @@ struct bts_header { u32 magic; u32 version; u8 future[24]; - u8 actions[0]; + u8 actions[]; } __attribute__ ((packed)); /** @@ -305,7 +305,7 @@ struct bts_header { struct bts_action { u16 type; u16 size; - u8 data[0]; + u8 data[]; } __attribute__ ((packed)); struct bts_action_send { @@ -315,7 +315,7 @@ struct bts_action_send { struct bts_action_wait { u32 msec; u32 size; - u8 data[0]; + u8 data[]; } __attribute__ ((packed)); struct bts_action_delay { diff --git 
a/include/linux/tifm.h b/include/linux/tifm.h index 299cbb8c63bb..44073d06710f 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -124,7 +124,7 @@ struct tifm_adapter { int (*has_ms_pif)(struct tifm_adapter *fm, struct tifm_dev *sock); - struct tifm_dev *sockets[0]; + struct tifm_dev *sockets[]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, diff --git a/include/linux/timer.h b/include/linux/timer.h index 0dc19a8c39c9..07910ae5ddd9 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -201,8 +201,7 @@ struct ctl_table; extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); #endif unsigned long __round_jiffies(unsigned long j, int cpu); diff --git a/include/linux/torture.h b/include/linux/torture.h index 6241f59e2d6f..629b66e6c161 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -89,7 +89,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #ifdef CONFIG_PREEMPTION #define torture_preempt_schedule() preempt_schedule() #else -#define torture_preempt_schedule() +#define torture_preempt_schedule() do { } while (0) #endif #endif /* __LINUX_TORTURE_H */ diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 131ea1bad458..4f8c90c93c29 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -28,7 +28,7 @@ struct tcpa_event { u32 event_type; u8 pcr_value[20]; /* SHA1 */ u32 event_size; - u8 event_data[0]; + u8 event_data[]; }; enum tcpa_event_types { @@ -55,7 +55,7 @@ enum tcpa_event_types { struct tcpa_pc_event { u32 event_id; u32 event_size; - u8 event_data[0]; + u8 event_data[]; }; enum tcpa_pc_event_ids { @@ -97,12 +97,12 @@ struct tcg_pcr_event { u32 event_type; u8 digest[20]; u32 event_size; - u8 event[0]; + u8 event[]; } __packed; struct tcg_event_field { u32 event_size; - u8 event[0]; + u8 event[]; } __packed; struct tcg_pcr_event2_head { diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 1fb11daa5c53..a1fecf311621 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -156,8 +156,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * Note, the proto and args passed in includes "__data" as the first parameter. * The reason for this is to handle the "void" prototype. If a tracepoint * has a "void" prototype, then it is invalid to declare a function - * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just - * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto". + * as "(void *, void)". */ #define __DO_TRACE(tp, proto, args, cond, rcuidle) \ do { \ @@ -373,25 +372,6 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) # define __tracepoint_string #endif -/* - * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype - * (void). "void" is a special value in a function prototype and can - * not be combined with other arguments. Since the DECLARE_TRACE() - * macro adds a data element at the beginning of the prototype, - * we need a way to differentiate "(void *data, proto)" from - * "(void *data, void)". The second prototype is invalid. - * - * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype - * and "void *__data" as the callback prototype. - * - * DECLARE_TRACE() passes "proto" as the tracepoint protoype and - * "void *__data, proto" as the callback prototype. 
- */ -#define DECLARE_TRACE_NOARGS(name) \ - __DECLARE_TRACE(name, void, , \ - cpu_online(raw_smp_processor_id()), \ - void *__data, __data) - #define DECLARE_TRACE(name, proto, args) \ __DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), \ cpu_online(raw_smp_processor_id()), \ diff --git a/include/linux/tty.h b/include/linux/tty.h index bd5fe0e907e8..a99e9b8e4e31 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -66,7 +66,7 @@ struct tty_buffer { int read; int flags; /* Data points here */ - unsigned long data[0]; + unsigned long data[]; }; /* Values for .flags field of tty_buffer */ diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index 9de5c10293f5..c6abb79501b3 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -3,33 +3,36 @@ #define _LINUX_U64_STATS_SYNC_H /* - * To properly implement 64bits network statistics on 32bit and 64bit hosts, - * we provide a synchronization point, that is a noop on 64bit or UP kernels. + * Protect against 64-bit values tearing on 32-bit architectures. This is + * typically used for statistics read/update in different subsystems. * * Key points : - * 1) Use a seqcount on SMP 32bits, with low overhead. - * 2) Whole thing is a noop on 64bit arches or UP kernels. - * 3) Write side must ensure mutual exclusion or one seqcount update could + * + * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP. + * - The whole thing is a no-op on 64-bit architectures. + * + * Usage constraints: + * + * 1) Write side must ensure mutual exclusion, or one seqcount update could * be lost, thus blocking readers forever. - * If this synchronization point is not a mutex, but a spinlock or - * spinlock_bh() or disable_bh() : - * 3.1) Write side should not sleep. - * 3.2) Write side should not allow preemption. - * 3.3) If applicable, interrupts should be disabled. * - * 4) If reader fetches several counters, there is no guarantee the whole values - * are consistent (remember point 1) : this is a noop on 64bit arches anyway) + * 2) Write side must disable preemption, or a seqcount reader can preempt the + * writer and also spin forever. + * + * 3) Write side must use the _irqsave() variant if other writers, or a reader, + * can be invoked from an IRQ context. * - * 5) readers are allowed to sleep or be preempted/interrupted : They perform - * pure reads. But if they have to fetch many values, it's better to not allow - * preemptions/interruptions to avoid many retries. + * 4) If reader fetches several counters, there is no guarantee the whole values + * are consistent w.r.t. each other (remember point #2: seqcounts are not + * used for 64bit architectures). * - * 6) If counter might be written by an interrupt, readers should block interrupts. - * (On UP, there is no seqcount_t protection, a reader allowing interrupts could - * read partial values) + * 5) Readers are allowed to sleep or be preempted/interrupted: they perform + * pure reads. * - * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and - * u64_stats_fetch_retry_irq() helpers + * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats + * might be updated from a hardirq or softirq context (remember point #1: + * seqcounts are not used for UP kernels). 32-bit UP stat readers could read + * corrupted 64-bit values otherwise. 
* * Usage : * diff --git a/include/linux/uacce.h b/include/linux/uacce.h index 0e215e6d0534..454c2f6672d7 100644 --- a/include/linux/uacce.h +++ b/include/linux/uacce.h @@ -68,19 +68,21 @@ enum uacce_q_state { * @uacce: pointer to uacce * @priv: private pointer * @wait: wait queue head - * @list: index into uacce_mm - * @uacce_mm: the corresponding mm + * @list: index into uacce queues list * @qfrs: pointer of qfr regions * @state: queue state machine + * @pasid: pasid associated to the mm + * @handle: iommu_sva handle returned by iommu_sva_bind_device() */ struct uacce_queue { struct uacce_device *uacce; void *priv; wait_queue_head_t wait; struct list_head list; - struct uacce_mm *uacce_mm; struct uacce_qfile_region *qfrs[UACCE_MAX_REGION]; enum uacce_q_state state; + int pasid; + struct iommu_sva *handle; }; /** @@ -96,8 +98,8 @@ struct uacce_queue { * @cdev: cdev of the uacce * @dev: dev of the uacce * @priv: private pointer of the uacce - * @mm_list: list head of uacce_mm->list - * @mm_lock: lock for mm_list + * @queues: list of queues + * @queues_lock: lock for queues list * @inode: core vfs */ struct uacce_device { @@ -112,27 +114,9 @@ struct uacce_device { struct cdev *cdev; struct device dev; void *priv; - struct list_head mm_list; - struct mutex mm_lock; - struct inode *inode; -}; - -/** - * struct uacce_mm - keep track of queues bound to a process - * @list: index into uacce_device - * @queues: list of queues - * @mm: the mm struct - * @lock: protects the list of queues - * @pasid: pasid of the uacce_mm - * @handle: iommu_sva handle return from iommu_sva_bind_device - */ -struct uacce_mm { - struct list_head list; struct list_head queues; - struct mm_struct *mm; - struct mutex lock; - int pasid; - struct iommu_sva *handle; + struct mutex queues_lock; + struct inode *inode; }; #if IS_ENABLED(CONFIG_UACCE) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 67f016010aad..0a76ddc07d59 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -2,9 +2,9 @@ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ +#include <linux/instrumented.h> #include <linux/sched.h> #include <linux/thread_info.h> -#include <linux/kasan-checks.h> #define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) @@ -58,7 +58,7 @@ static __always_inline __must_check unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -67,7 +67,7 @@ static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -88,7 +88,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -97,7 +97,7 @@ static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -109,7 +109,7 @@ _copy_from_user(void *to, 
const void __user *from, unsigned long n) unsigned long res = n; might_fault(); if (likely(access_ok(from, n))) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } if (unlikely(res)) @@ -127,7 +127,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (access_ok(to, n)) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; @@ -301,72 +301,33 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, return 0; } -/* - * probe_kernel_read(): safely attempt to read from a location - * @dst: pointer to the buffer that shall take the data - * @src: address to read from - * @size: size of the data chunk - * - * Safely read from address @src to the buffer at @dst. If a kernel fault - * happens, handle that and return -EFAULT. - */ -extern long probe_kernel_read(void *dst, const void *src, size_t size); -extern long probe_kernel_read_strict(void *dst, const void *src, size_t size); -extern long __probe_kernel_read(void *dst, const void *src, size_t size); +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); -/* - * probe_user_read(): safely attempt to read from a location in user space - * @dst: pointer to the buffer that shall take the data - * @src: address to read from - * @size: size of the data chunk - * - * Safely read from address @src to the buffer at @dst. If a kernel fault - * happens, handle that and return -EFAULT. - */ -extern long probe_user_read(void *dst, const void __user *src, size_t size); -extern long __probe_user_read(void *dst, const void __user *src, size_t size); +long copy_from_kernel_nofault(void *dst, const void *src, size_t size); +long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size); -/* - * probe_kernel_write(): safely attempt to write to a location - * @dst: address to write to - * @src: pointer to the data that shall be written - * @size: size of the data chunk - * - * Safely write to address @dst from the buffer at @src. If a kernel fault - * happens, handle that and return -EFAULT. - */ -extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); -extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); +long copy_from_user_nofault(void *dst, const void __user *src, size_t size); +long notrace copy_to_user_nofault(void __user *dst, const void *src, + size_t size); -/* - * probe_user_write(): safely attempt to write to a location in user space - * @dst: address to write to - * @src: pointer to the data that shall be written - * @size: size of the data chunk - * - * Safely write to address @dst from the buffer at @src. If a kernel fault - * happens, handle that and return -EFAULT. 
- */ -extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); -extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); +long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, + long count); -extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); -extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, - long count); -extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); -extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, - long count); -extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); +long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, + long count); +long strnlen_user_nofault(const void __user *unsafe_addr, long count); /** - * probe_kernel_address(): safely attempt to read from a location - * @addr: address to read from - * @retval: read into this variable + * get_kernel_nofault(): safely attempt to read from a location + * @val: read into this variable + * @ptr: address to read from * * Returns 0 on success, or -EFAULT. */ -#define probe_kernel_address(addr, retval) \ - probe_kernel_read(&retval, addr, sizeof(retval)) +#define get_kernel_nofault(val, ptr) ({ \ + const typeof(val) *__gk_ptr = (ptr); \ + copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\ +}) #ifndef user_access_begin #define user_access_begin(ptr,len) access_ok(ptr, len) @@ -378,6 +339,14 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif +#ifndef user_write_access_begin +#define user_write_access_begin user_access_begin +#define user_write_access_end user_access_end +#endif +#ifndef user_read_access_begin +#define user_read_access_begin user_access_begin +#define user_read_access_end user_access_end +#endif #ifdef CONFIG_HARDENED_USERCOPY void usercopy_warn(const char *name, const char *detail, bool to_user, diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index edd89b7c8f18..54167a2d28ea 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -67,6 +67,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 #define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2 #define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3 +#define CI_HDRC_CONTROLLER_VBUS_EVENT 4 int (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; struct usb_otg_caps ci_otg_caps; diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 8675e145ea8b..2040696d75b6 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *); int usb_interface_id(struct usb_configuration *, struct usb_function *); +int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep, u8 alt); + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 9411c08a5c7e..6a178177e4c9 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -42,6 +42,8 @@ struct usb_ep; * @num_mapped_sgs: number of SG entries mapped to DMA (internal) * @length: Length of that data * @stream_id: The stream id, when USB3.0 bulk streams are being used + * @is_last: Indicates 
if this is the last request of a stream_id before + switching to a different stream (required for DWC3 controllers). * @no_interrupt: If true, hints that no completion irq is needed. * Helpful sometimes with deep request queues that are handled * directly by DMA controllers. @@ -104,6 +106,7 @@ struct usb_request { unsigned num_mapped_sgs; unsigned stream_id:16; + unsigned is_last:1; unsigned no_interrupt:1; unsigned zero:1; unsigned short_not_ok:1; @@ -373,6 +376,7 @@ struct usb_gadget_ops { * @connected: True if gadget is connected. * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag * indicates that it supports LPM as per the LPM ECN & errata. + * @irq: the interrupt number for device controller. * * Gadgets have a mostly-portable "gadget driver" implementing device * functions, handling all usb configurations and interfaces. Gadget @@ -427,6 +431,7 @@ struct usb_gadget { unsigned deactivated:1; unsigned connected:1; unsigned lpm_capable:1; + int irq; }; #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) @@ -773,6 +778,9 @@ struct usb_gadget_string_container { /* put descriptor for string with that id into buf (buflen >= 256) */ int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf); +/* check if the given language identifier is valid */ +bool usb_validate_langid(u16 langid); + /*-------------------------------------------------------------------------*/ /* utility to simplify managing config descriptors */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index e12105ed3834..3dbb42c637c1 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -479,7 +479,8 @@ extern void usb_hcd_platform_shutdown(struct platform_device *dev); struct pci_dev; struct pci_device_id; extern int usb_hcd_pci_probe(struct pci_dev *dev, - const struct pci_device_id *id); + const struct pci_device_id *id, + const struct hc_driver *driver); extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index b00a2642a9cd..5daa1c49761c 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -254,6 +254,7 @@ int typec_set_mode(struct typec_port *port, int mode); void *typec_get_drvdata(struct typec_port *port); +int typec_find_orientation(const char *name); int typec_find_port_power_role(const char *name); int typec_find_power_role(const char *name); int typec_find_port_data_role(const char *name); diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 733acfb7ef84..239db794357c 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -18,6 +18,16 @@ struct vdpa_callback { }; /** + * vDPA notification area + * @addr: base address of the notification area + * @size: size of the notification area + */ +struct vdpa_notification_area { + resource_size_t addr; + resource_size_t size; +}; + +/** * vDPA device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA @@ -73,6 +83,10 @@ struct vdpa_device { * @vdev: vdpa device * @idx: virtqueue index * Returns virtqueue state (last_avail_idx) + * @get_vq_notification: Get the notification area for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * Returns the notification area * @get_vq_align: Get the virtqueue align requirement * for the device * @vdev: vdpa device @@ -162,9 +176,11 @@ struct vdpa_config_ops { bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); int 
(*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + struct vdpa_notification_area + (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* Device ops */ - u16 (*get_vq_align)(struct vdpa_device *vdev); + u32 (*get_vq_align)(struct vdpa_device *vdev); u64 (*get_features)(struct vdpa_device *vdev); int (*set_features)(struct vdpa_device *vdev, u64 features); void (*set_config_cb)(struct vdpa_device *vdev, diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index 9aced11e9000..1eaaa93c37bf 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -1,5 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VERMAGIC_H +#define _LINUX_VERMAGIC_H + +#ifndef INCLUDE_VERMAGIC +#error "This header can be included from kernel/module.c or *.mod.c only" +#endif + #include <generated/utsrelease.h> +#include <asm/vermagic.h> /* Simple sanity version stamp for modules. */ #ifdef CONFIG_SMP @@ -24,9 +32,6 @@ #else #define MODULE_VERMAGIC_MODVERSIONS "" #endif -#ifndef MODULE_ARCH_VERMAGIC -#define MODULE_ARCH_VERMAGIC "" -#endif #ifdef RANDSTRUCT_PLUGIN #include <generated/randomize_layout_hash.h> #define MODULE_RANDSTRUCT_PLUGIN "RANDSTRUCT_PLUGIN_" RANDSTRUCT_HASHED_SEED @@ -41,3 +46,4 @@ MODULE_ARCH_VERMAGIC \ MODULE_RANDSTRUCT_PLUGIN +#endif /* _LINUX_VERMAGIC_H */ diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index 0e130b5077a5..2f9dd072f11f 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -10,38 +10,8 @@ #include <linux/device.h> #include <linux/regmap.h> -#define VEXPRESS_SITE_MB 0 -#define VEXPRESS_SITE_DB1 1 -#define VEXPRESS_SITE_DB2 2 -#define VEXPRESS_SITE_MASTER 0xf - -/* Config infrastructure */ - -void vexpress_config_set_master(u32 site); -u32 vexpress_config_get_master(void); - -void vexpress_config_lock(void *arg); -void vexpress_config_unlock(void *arg); - -int vexpress_config_get_topo(struct device_node *node, u32 *site, - u32 *position, u32 *dcc); - -/* Config bridge API */ - -struct vexpress_config_bridge_ops { - struct regmap * (*regmap_init)(struct device *dev, void *context); - void (*regmap_exit)(struct regmap *regmap, void *context); -}; - -struct device *vexpress_config_bridge_register(struct device *parent, - struct vexpress_config_bridge_ops *ops, void *context); - /* Config regmap API */ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); -/* Platform control */ - -void vexpress_flags_set(u32 data); - #endif diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 5d92ee15d098..38d3c6a8dc7e 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -76,7 +76,9 @@ struct vfio_iommu_driver_ops { struct iommu_group *group); void (*detach_group)(void *iommu_data, struct iommu_group *group); - int (*pin_pages)(void *iommu_data, unsigned long *user_pfn, + int (*pin_pages)(void *iommu_data, + struct iommu_group *group, + unsigned long *user_pfn, int npage, int prot, unsigned long *phys_pfn); int (*unpin_pages)(void *iommu_data, diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 15f906e4a748..a493eac08393 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -9,7 +9,6 @@ #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/gfp.h> -#include <linux/vringh.h> /** * virtqueue - a queue to register buffers for sending or receiving.
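The get_vq_notification operation added to vdpa.h above lets a vDPA parent driver report where a virtqueue's doorbell lives, so that vhost-vdpa can expose the region to userspace with mmap(). Below is a minimal sketch of such an implementation; the example_vdpa structure, its doorbell_base field, and the one-page-per-virtqueue stride are illustrative assumptions, not part of this merge.

struct example_vdpa {
	struct vdpa_device vdpa;	/* embedded vdpa device */
	resource_size_t doorbell_base;	/* assumed: start of the doorbell BAR region */
};

/* Report one page-aligned doorbell page per virtqueue (assumed layout). */
static struct vdpa_notification_area
example_get_vq_notification(struct vdpa_device *vdev, u16 idx)
{
	struct example_vdpa *ev = container_of(vdev, struct example_vdpa, vdpa);
	struct vdpa_notification_area area;

	area.addr = ev->doorbell_base + idx * PAGE_SIZE;
	area.size = PAGE_SIZE;
	return area;
}

static const struct vdpa_config_ops example_vdpa_ops = {
	/* mandatory virtqueue and device ops omitted for brevity */
	.get_vq_notification = example_get_vq_notification,
};

Returning a page-aligned, page-sized area is what makes the later userspace mapping safe; a device whose doorbell registers are not page-aligned can simply leave the hook unimplemented and rely on the normal kick path.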
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 0d1fe9297ac6..e8a924eeea3d 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -3,6 +3,8 @@ #define _LINUX_VIRTIO_NET_H #include <linux/if_vlan.h> +#include <uapi/linux/tcp.h> +#include <uapi/linux/udp.h> #include <uapi/linux/virtio_net.h> static inline int virtio_net_hdr_set_proto(struct sk_buff *skb, @@ -28,17 +30,26 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, bool little_endian) { unsigned int gso_type = 0; + unsigned int thlen = 0; + unsigned int p_off = 0; + unsigned int ip_proto; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { case VIRTIO_NET_HDR_GSO_TCPV4: gso_type = SKB_GSO_TCPV4; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; + ip_proto = IPPROTO_TCP; + thlen = sizeof(struct tcphdr); break; case VIRTIO_NET_HDR_GSO_UDP: gso_type = SKB_GSO_UDP; + ip_proto = IPPROTO_UDP; + thlen = sizeof(struct udphdr); break; default: return -EINVAL; @@ -57,16 +68,23 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (!skb_partial_csum_set(skb, start, off)) return -EINVAL; + + p_off = skb_transport_offset(skb) + thlen; + if (p_off > skb_headlen(skb)) + return -EINVAL; } else { /* gso packets without NEEDS_CSUM do not set transport_offset. * probe and drop if it does not match one of the above types. */ if (gso_type && skb->network_header) { + struct flow_keys_basic keys; + if (!skb->protocol) virtio_net_hdr_set_proto(skb, hdr); retry: - skb_probe_transport_header(skb); - if (!skb_transport_header_was_set(skb)) { + if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys, + NULL, 0, 0, 0, + 0)) { /* UFO does not specify ipv4 or 6: try both */ if (gso_type & SKB_GSO_UDP && skb->protocol == htons(ETH_P_IP)) { @@ -75,18 +93,33 @@ retry: } return -EINVAL; } + + p_off = keys.control.thoff + thlen; + if (p_off > skb_headlen(skb) || + keys.basic.ip_proto != ip_proto) + return -EINVAL; + + skb_set_transport_header(skb, keys.control.thoff); + } else if (gso_type) { + p_off = thlen; + if (p_off > skb_headlen(skb)) + return -EINVAL; } } if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); + struct skb_shared_info *shinfo = skb_shinfo(skb); - skb_shinfo(skb)->gso_size = gso_size; - skb_shinfo(skb)->gso_type = gso_type; + /* Too small packets are not really GSO ones. */ + if (skb->len - p_off > gso_size) { + shinfo->gso_size = gso_size; + shinfo->gso_type = gso_type; - /* Header must be checked, and gso_segs computed.
*/ + shinfo->gso_type |= SKB_GSO_DODGY; + shinfo->gso_segs = 0; + } } return 0; diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 71c81e0dc8f2..dc636b727179 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -48,6 +48,7 @@ struct virtio_vsock_pkt { u32 len; u32 off; bool reply; + bool tap_delivered; }; struct virtio_vsock_pkt_info { diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ffef0f279747..24fc7c3ae7d6 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -35,6 +35,10 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_DIRECT_THROTTLE, + PGSCAN_ANON, + PGSCAN_FILE, + PGSTEAL_ANON, + PGSTEAL_FILE, #ifdef CONFIG_NUMA PGSCAN_ZONE_RECLAIM_FAILED, #endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 0507a162ccd0..48bb681e6c2a 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -88,8 +88,7 @@ struct vmap_area { * Highlevel APIs for driver use */ extern void vm_unmap_ram(const void *mem, unsigned int count); -extern void *vm_map_ram(struct page **pages, unsigned int count, - int node, pgprot_t prot); +extern void *vm_map_ram(struct page **pages, unsigned int count, int node); extern void vm_unmap_aliases(void); #ifdef CONFIG_MMU @@ -107,26 +106,16 @@ extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); -extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags); extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller); -#ifndef CONFIG_MMU -extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); -static inline void *__vmalloc_node_flags_caller(unsigned long size, int node, - gfp_t flags, void *caller) -{ - return __vmalloc_node_flags(size, node, flags); -} -#else -extern void *__vmalloc_node_flags_caller(unsigned long size, - int node, gfp_t flags, void *caller); -#endif +void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, + int node, const void *caller); extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); @@ -137,12 +126,26 @@ extern void vunmap(const void *addr); extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr, void *kaddr, - unsigned long size); + unsigned long pgoff, unsigned long size); extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); -void vmalloc_sync_mappings(void); -void vmalloc_sync_unmappings(void); + +/* + * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values + * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings() + * needs to be called. + */ +#ifndef ARCH_PAGE_TABLE_SYNC_MASK +#define ARCH_PAGE_TABLE_SYNC_MASK 0 +#endif + +/* + * There is no default implementation for arch_sync_kernel_mappings(). The + * compiler is relied upon to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK + * is 0.
+ */ +void arch_sync_kernel_mappings(unsigned long start, unsigned long end); /* * Lowlevel-APIs (not for driver use!) @@ -161,8 +164,6 @@ static inline size_t get_vm_area_size(const struct vm_struct *area) extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); extern struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller); -extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, - unsigned long start, unsigned long end); extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, @@ -170,11 +171,11 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); -extern int map_vm_area(struct vm_struct *area, pgprot_t prot, - struct page **pages); #ifdef CONFIG_MMU extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); +int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, + struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); static inline void set_vm_flush_reset_perms(void *addr) @@ -191,14 +192,12 @@ map_kernel_range_noflush(unsigned long start, unsigned long size, { return size >> PAGE_SHIFT; } +#define map_kernel_range map_kernel_range_noflush static inline void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { } -static inline void -unmap_kernel_range(unsigned long addr, unsigned long size) -{ -} +#define unmap_kernel_range unmap_kernel_range_noflush static inline void set_vm_flush_reset_perms(void *addr) { } diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 292485f3d24d..aa961088c551 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -16,8 +16,8 @@ extern int sysctl_stat_interval; #define DISABLE_NUMA_STAT 0 extern int sysctl_vm_numa_stat; DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key); -extern int sysctl_vm_numa_stat_handler(struct ctl_table *table, - int write, void __user *buffer, size_t *length, loff_t *ppos); +int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, + void *buffer, size_t *length, loff_t *ppos); #endif struct reclaim_stat { @@ -26,9 +26,11 @@ struct reclaim_stat { unsigned nr_congested; unsigned nr_writeback; unsigned nr_immediate; + unsigned nr_pageout; unsigned nr_activate[2]; unsigned nr_ref_keep; unsigned nr_unmap_fail; + unsigned nr_lazyfree_fail; }; enum writeback_stat_item { @@ -274,8 +276,8 @@ void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); struct ctl_table; -int vmstat_refresh(struct ctl_table *, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); +int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, + loff_t *ppos); void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); diff --git a/include/linux/vringh.h b/include/linux/vringh.h index bd0503ca6f8f..59bd50f99291 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -14,8 +14,10 @@ #include <linux/virtio_byteorder.h> #include <linux/uio.h> #include <linux/slab.h> +#if IS_REACHABLE(CONFIG_VHOST_IOTLB) #include <linux/dma-direction.h> #include <linux/vhost_iotlb.h> +#endif #include <asm/barrier.h> /* virtio_ring with information needed for host access. 
*/ @@ -103,9 +105,9 @@ struct vringh_kiov { /* Helpers for userspace vrings. */ int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, - struct vring_desc __user *desc, - struct vring_avail __user *avail, - struct vring_used __user *used); + vring_desc_t __user *desc, + vring_avail_t __user *avail, + vring_used_t __user *used); static inline void vringh_iov_init(struct vringh_iov *iov, struct iovec *iovec, unsigned num) @@ -254,6 +256,8 @@ static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val) return __cpu_to_virtio64(vringh_is_little_endian(vrh), val); } +#if IS_REACHABLE(CONFIG_VHOST_IOTLB) + void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb); int vringh_init_iotlb(struct vringh *vrh, u64 features, @@ -284,4 +288,6 @@ void vringh_notify_disable_iotlb(struct vringh *vrh); int vringh_need_notify_iotlb(struct vringh *vrh); +#endif /* CONFIG_VHOST_IOTLB */ + #endif /* _LINUX_VRINGH_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index feeb6be5cad6..898c890fc153 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -1149,4 +1149,6 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i (wait)->flags = 0; \ } while (0) +bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg); + #endif /* _LINUX_WAIT_H */ diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h new file mode 100644 index 000000000000..5e08db2adc31 --- /dev/null +++ b/include/linux/watch_queue.h @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* User-mappable watch queue + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * See Documentation/watch_queue.rst + */ + +#ifndef _LINUX_WATCH_QUEUE_H +#define _LINUX_WATCH_QUEUE_H + +#include <uapi/linux/watch_queue.h> +#include <linux/kref.h> +#include <linux/rcupdate.h> + +#ifdef CONFIG_WATCH_QUEUE + +struct cred; + +struct watch_type_filter { + enum watch_notification_type type; + __u32 subtype_filter[1]; /* Bitmask of subtypes to filter on */ + __u32 info_filter; /* Filter on watch_notification::info */ + __u32 info_mask; /* Mask of relevant bits in info_filter */ +}; + +struct watch_filter { + union { + struct rcu_head rcu; + unsigned long type_filter[2]; /* Bitmask of accepted types */ + }; + u32 nr_filters; /* Number of filters */ + struct watch_type_filter filters[]; +}; + +struct watch_queue { + struct rcu_head rcu; + struct watch_filter __rcu *filter; + struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */ + struct hlist_head watches; /* Contributory watches */ + struct page **notes; /* Preallocated notifications */ + unsigned long *notes_bitmap; /* Allocation bitmap for notes */ + struct kref usage; /* Object usage count */ + spinlock_t lock; + unsigned int nr_notes; /* Number of notes */ + unsigned int nr_pages; /* Number of pages in notes[] */ + bool defunct; /* T when queues closed */ +}; + +/* + * Representation of a watch on an object. 
+ */ +struct watch { + union { + struct rcu_head rcu; + u32 info_id; /* ID to be OR'd in to info field */ + }; + struct watch_queue __rcu *queue; /* Queue to post events to */ + struct hlist_node queue_node; /* Link in queue->watches */ + struct watch_list __rcu *watch_list; + struct hlist_node list_node; /* Link in watch_list->watchers */ + const struct cred *cred; /* Creds of the owner of the watch */ + void *private; /* Private data for the watched object */ + u64 id; /* Internal identifier */ + struct kref usage; /* Object usage count */ +}; + +/* + * List of watches on an object. + */ +struct watch_list { + struct rcu_head rcu; + struct hlist_head watchers; + void (*release_watch)(struct watch *); + spinlock_t lock; +}; + +extern void __post_watch_notification(struct watch_list *, + struct watch_notification *, + const struct cred *, + u64); +extern struct watch_queue *get_watch_queue(int); +extern void put_watch_queue(struct watch_queue *); +extern void init_watch(struct watch *, struct watch_queue *); +extern int add_watch_to_object(struct watch *, struct watch_list *); +extern int remove_watch_from_object(struct watch_list *, struct watch_queue *, u64, bool); +extern long watch_queue_set_size(struct pipe_inode_info *, unsigned int); +extern long watch_queue_set_filter(struct pipe_inode_info *, + struct watch_notification_filter __user *); +extern int watch_queue_init(struct pipe_inode_info *); +extern void watch_queue_clear(struct watch_queue *); + +static inline void init_watch_list(struct watch_list *wlist, + void (*release_watch)(struct watch *)) +{ + INIT_HLIST_HEAD(&wlist->watchers); + spin_lock_init(&wlist->lock); + wlist->release_watch = release_watch; +} + +static inline void post_watch_notification(struct watch_list *wlist, + struct watch_notification *n, + const struct cred *cred, + u64 id) +{ + if (unlikely(wlist)) + __post_watch_notification(wlist, n, cred, id); +} + +static inline void remove_watch_list(struct watch_list *wlist, u64 id) +{ + if (wlist) { + remove_watch_from_object(wlist, NULL, id, true); + kfree_rcu(wlist, rcu); + } +} + +/** + * watch_sizeof - Calculate the information part of the size of a watch record, + * given the structure size. + */ +#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT) + +#endif + +#endif /* _LINUX_WATCH_QUEUE_H */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 417d9f37077a..1464ce6ffa31 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -37,15 +37,15 @@ struct watchdog_governor; * * The watchdog_ops structure contains a list of low-level operations * that control a watchdog device. It also contains the module that owns - * these operations. The start and stop function are mandatory, all other + * these operations. The start function is mandatory, all other * functions are optional. */ struct watchdog_ops { struct module *owner; /* mandatory operations */ int (*start)(struct watchdog_device *); - int (*stop)(struct watchdog_device *); /* optional operations */ + int (*stop)(struct watchdog_device *); int (*ping)(struct watchdog_device *); unsigned int (*status)(struct watchdog_device *); int (*set_timeout)(struct watchdog_device *, unsigned int); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 8b505d22fc0e..26de0cae2a0a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -62,7 +62,7 @@ enum { WORK_CPU_UNBOUND = NR_CPUS, /* - * Reserve 7 bits off of pwq pointer w/ debugobjects turned off. 
+ * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. * This makes pwqs aligned to 256 bytes and allows 15 workqueue * flush colors. */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index a19d845dd7eb..8e5c5bb16e2d 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -197,6 +197,7 @@ void wakeup_flusher_threads(enum wb_reason reason); void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi, enum wb_reason reason); void inode_wait_for_writeback(struct inode *inode); +void inode_io_list_del(struct inode *inode); /* writeback.h requires fs.h; it, too, is not included from here. */ static inline void wait_on_inode(struct inode *inode) @@ -362,24 +363,18 @@ extern int vm_highmem_is_dirtyable; extern int block_dump; extern int laptop_mode; -extern int dirty_background_ratio_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int dirty_background_bytes_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int dirty_ratio_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int dirty_bytes_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int dirty_background_ratio_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_background_bytes_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_ratio_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_bytes_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); int dirtytime_interval_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); - -struct ctl_table; -int dirty_writeback_centisecs_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 14c893433139..b4d70e7568b2 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -576,7 +576,7 @@ void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. - * Return: The entry which used to be at this index. + * Return: The old entry at this index or xa_err() if an error happened. */ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) @@ -602,7 +602,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. - * Return: The entry which used to be at this index. + * Return: The old entry at this index or xa_err() if an error happened. 
*/ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 4cf6e11f4a3c..47eaa34f8761 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -73,7 +73,7 @@ struct simple_xattr { struct list_head list; char *name; size_t size; - char value[0]; + char value[]; }; /* diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 2219cce81ca4..0fdbf653b173 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -20,7 +20,7 @@ * zsmalloc mapping modes * * NOTE: These only make a difference when a mapped object spans pages. - * They also have no effect when PGTABLE_MAPPING is selected. + * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected. */ enum zs_mapmode { ZS_MM_RW, /* normal read-write mapping */ diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index 38956969fd12..b1c839734124 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h @@ -2,7 +2,7 @@ /* * cec-notifier.h - notify CEC drivers of physical address changes * - * Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk> + * Copyright 2016 Russell King. * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h index 88c8b016eb09..483bc4769fe9 100644 --- a/include/media/cec-pin.h +++ b/include/media/cec-pin.h @@ -13,7 +13,8 @@ /** * struct cec_pin_ops - low-level CEC pin operations - * @read: read the CEC pin. Return true if high, false if low. + * @read: read the CEC pin. Returns > 0 if high, 0 if low, or an error + * if negative. * @low: drive the CEC pin low. * @high: stop driving the CEC pin. The pull-up will drive the pin * high, unless someone else is driving the pin low. @@ -22,13 +23,10 @@ * @free: optional. Free any allocated resources. Called when the * adapter is deleted. * @status: optional, log status information. - * @read_hpd: read the HPD pin. Return true if high, false if low or - * an error if negative. If NULL or -ENOTTY is returned, - * then this is not supported. - * @read_5v: read the 5V pin. Return true if high, false if low or - * an error if negative. If NULL or -ENOTTY is returned, - * then this is not supported. - * + * @read_hpd: optional. Read the HPD pin. Returns > 0 if high, 0 if low or + * an error if negative. + * @read_5v: optional. Read the 5V pin. Returns > 0 if high, 0 if low or + * an error if negative. * @received: optional. High-level CEC message callback. Allows the driver * to process CEC messages. * @@ -36,7 +34,7 @@ * cec pin framework to manipulate the CEC pin. 
*/ struct cec_pin_ops { - bool (*read)(struct cec_adapter *adap); + int (*read)(struct cec_adapter *adap); void (*low)(struct cec_adapter *adap); void (*high)(struct cec_adapter *adap); bool (*enable_irq)(struct cec_adapter *adap); diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h index 800d473b03c4..d37cb74b769c 100644 --- a/include/media/dvb-usb-ids.h +++ b/include/media/dvb-usb-ids.h @@ -56,6 +56,7 @@ #define USB_VID_REALTEK 0x0bda #define USB_VID_TECHNOTREND 0x0b48 #define USB_VID_TERRATEC 0x0ccd +#define USB_VID_TERRATEC_2 0x153b #define USB_VID_TELESTAR 0x10b9 #define USB_VID_VISIONPLUS 0x13d3 #define USB_VID_SONY 0x1415 @@ -96,7 +97,7 @@ #define USB_PID_AFATECH_AF9035_9035 0x9035 #define USB_PID_TREKSTOR_DVBT 0x901b #define USB_PID_TREKSTOR_TERRES_2_0 0xC803 -#define USB_VID_ALINK_DTU 0xf170 +#define USB_PID_ALINK_DTU 0xf170 #define USB_PID_ANSONIC_DVBT_USB 0x6000 #define USB_PID_ANYSEE 0x861f #define USB_PID_AZUREWAVE_AD_TU700 0x3237 @@ -280,6 +281,8 @@ #define USB_PID_TERRATEC_CINERGY_S2_R2 0x00b0 #define USB_PID_TERRATEC_CINERGY_S2_R3 0x0102 #define USB_PID_TERRATEC_CINERGY_S2_R4 0x0105 +#define USB_PID_TERRATEC_CINERGY_S2_1 0x1181 +#define USB_PID_TERRATEC_CINERGY_S2_2 0x1182 #define USB_PID_TERRATEC_H7 0x10b4 #define USB_PID_TERRATEC_H7_2 0x10a3 #define USB_PID_TERRATEC_H7_3 0x10a5 diff --git a/include/media/h264-ctrls.h b/include/media/h264-ctrls.h index 1c6ff7d63bca..080fd1293c42 100644 --- a/include/media/h264-ctrls.h +++ b/include/media/h264-ctrls.h @@ -13,6 +13,12 @@ #include <linux/videodev2.h> +/* + * Maximum DPB size, as specified by section 'A.3.1 Level limits + * common to the Baseline, Main, and Extended profiles'. + */ +#define V4L2_H264_NUM_DPB_ENTRIES 16 + /* Our pixel format isn't stable at the moment */ #define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */ @@ -201,7 +207,7 @@ struct v4l2_h264_dpb_entry { #define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01 struct v4l2_ctrl_h264_decode_params { - struct v4l2_h264_dpb_entry dpb[16]; + struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]; __u16 num_slices; __u16 nal_ref_idc; __s32 top_field_order_cnt; diff --git a/include/media/media-entity.h b/include/media/media-entity.h index 8cb2c504a05c..cde80ad029b7 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -212,7 +212,8 @@ struct media_pad { * mutex held. 
*/ struct media_entity_operations { - int (*get_fwnode_pad)(struct fwnode_endpoint *endpoint); + int (*get_fwnode_pad)(struct media_entity *entity, + struct fwnode_endpoint *endpoint); int (*link_setup)(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags); diff --git a/include/media/rc-map.h b/include/media/rc-map.h index 0ce896f10202..7dbb91c601a7 100644 --- a/include/media/rc-map.h +++ b/include/media/rc-map.h @@ -41,36 +41,85 @@ #define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32) #define RC_PROTO_BIT_XBOX_DVD BIT_ULL(RC_PROTO_XBOX_DVD) -/* All rc protocols for which we have decoders */ +#if IS_ENABLED(CONFIG_IR_RC5_DECODER) +#define __RC_PROTO_RC5_CODEC \ + (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | RC_PROTO_BIT_RC5_SZ) +#else +#define __RC_PROTO_RC5_CODEC 0 +#endif + +#if IS_ENABLED(CONFIG_IR_JVC_DECODER) +#define __RC_PROTO_JVC_CODEC RC_PROTO_BIT_JVC +#else +#define __RC_PROTO_JVC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SONY_DECODER) +#define __RC_PROTO_SONY_CODEC \ + (RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | RC_PROTO_BIT_SONY20) +#else +#define __RC_PROTO_SONY_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_NEC_DECODER) +#define __RC_PROTO_NEC_CODEC \ + (RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32) +#else +#define __RC_PROTO_NEC_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SANYO_DECODER) +#define __RC_PROTO_SANYO_CODEC RC_PROTO_BIT_SANYO +#else +#define __RC_PROTO_SANYO_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_MCE_KBD_DECODER) +#define __RC_PROTO_MCE_KBD_CODEC \ + (RC_PROTO_BIT_MCIR2_KBD | RC_PROTO_BIT_MCIR2_MSE) +#else +#define __RC_PROTO_MCE_KBD_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RC6_DECODER) +#define __RC_PROTO_RC6_CODEC \ + (RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \ + RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \ + RC_PROTO_BIT_RC6_MCE) +#else +#define __RC_PROTO_RC6_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_SHARP_DECODER) +#define __RC_PROTO_SHARP_CODEC RC_PROTO_BIT_SHARP +#else +#define __RC_PROTO_SHARP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_XMP_DECODER) +#define __RC_PROTO_XMP_CODEC RC_PROTO_BIT_XMP +#else +#define __RC_PROTO_XMP_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_IMON_DECODER) +#define __RC_PROTO_IMON_CODEC RC_PROTO_BIT_IMON +#else +#define __RC_PROTO_IMON_CODEC 0 +#endif +#if IS_ENABLED(CONFIG_IR_RCMM_DECODER) +#define __RC_PROTO_RCMM_CODEC \ + (RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32) +#else +#define __RC_PROTO_RCMM_CODEC 0 +#endif + +/* All kernel-based codecs have encoders and decoders */ #define RC_PROTO_BIT_ALL_IR_DECODER \ - (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \ - RC_PROTO_BIT_RC5_SZ | RC_PROTO_BIT_JVC | \ - RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | \ - RC_PROTO_BIT_SONY20 | RC_PROTO_BIT_NEC | \ - RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | \ - RC_PROTO_BIT_SANYO | RC_PROTO_BIT_MCIR2_KBD | \ - RC_PROTO_BIT_MCIR2_MSE | \ - RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \ - RC_PROTO_BIT_RC6_6A_24 | RC_PROTO_BIT_RC6_6A_32 | \ - RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \ - RC_PROTO_BIT_XMP | RC_PROTO_BIT_IMON | \ - RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | \ - RC_PROTO_BIT_RCMM32) + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) #define RC_PROTO_BIT_ALL_IR_ENCODER \ - (RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \ - 
RC_PROTO_BIT_RC5_SZ | RC_PROTO_BIT_JVC | \ - RC_PROTO_BIT_SONY12 | RC_PROTO_BIT_SONY15 | \ - RC_PROTO_BIT_SONY20 | RC_PROTO_BIT_NEC | \ - RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32 | \ - RC_PROTO_BIT_SANYO | RC_PROTO_BIT_MCIR2_KBD | \ - RC_PROTO_BIT_MCIR2_MSE | \ - RC_PROTO_BIT_RC6_0 | RC_PROTO_BIT_RC6_6A_20 | \ - RC_PROTO_BIT_RC6_6A_24 | \ - RC_PROTO_BIT_RC6_6A_32 | RC_PROTO_BIT_RC6_MCE | \ - RC_PROTO_BIT_SHARP | RC_PROTO_BIT_IMON | \ - RC_PROTO_BIT_RCMM12 | RC_PROTO_BIT_RCMM24 | \ - RC_PROTO_BIT_RCMM32) + (__RC_PROTO_RC5_CODEC | __RC_PROTO_JVC_CODEC | __RC_PROTO_SONY_CODEC | \ + __RC_PROTO_NEC_CODEC | __RC_PROTO_SANYO_CODEC | \ + __RC_PROTO_MCE_KBD_CODEC | __RC_PROTO_RC6_CODEC | \ + __RC_PROTO_SHARP_CODEC | __RC_PROTO_XMP_CODEC | \ + __RC_PROTO_IMON_CODEC | __RC_PROTO_RCMM_CODEC) #define RC_SCANCODE_UNKNOWN(x) (x) #define RC_SCANCODE_OTHER(x) (x) diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 7db9e719a583..f40e2cbb21d3 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -25,14 +25,15 @@ /* forward references */ struct file; +struct poll_table_struct; +struct v4l2_ctrl; struct v4l2_ctrl_handler; struct v4l2_ctrl_helper; -struct v4l2_ctrl; -struct video_device; +struct v4l2_fh; +struct v4l2_fwnode_device_properties; struct v4l2_subdev; struct v4l2_subscribed_event; -struct v4l2_fh; -struct poll_table_struct; +struct video_device; /** * union v4l2_ctrl_ptr - A pointer to a control value. @@ -685,7 +686,9 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl, * @p_def: The control's default value. * * Same as v4l2_ctrl_new_std(), but with support for compound controls, thanks - * to the @p_def field. + * to the @p_def field. Use v4l2_ctrl_ptr_create() to create @p_def from a + * pointer. Use v4l2_ctrl_ptr_create(NULL) if the default value of the + * compound control should be all zeroes. + * */ struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl, @@ -1113,45 +1116,54 @@ static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s) } /** - * __v4l2_ctrl_s_ctrl_area() - Unlocked variant of v4l2_ctrl_s_ctrl_area(). + * __v4l2_ctrl_s_ctrl_compound() - Unlocked variant to set a compound control * - * @ctrl: The control. - * @area: The new area. + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload. * - * This sets the control's new area safely by going through the control - * framework. This function assumes the control's handler is already locked, - * allowing it to be used from within the &v4l2_ctrl_ops functions. + * This sets the control's new compound payload safely by going through the + * control framework. This function assumes the control's handler is already + * locked, allowing it to be used from within the &v4l2_ctrl_ops functions. * - * This function is for area type controls only. + * This function is for compound type controls only. */ -int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl, - const struct v4l2_area *area); +int __v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, const void *p); /** - * v4l2_ctrl_s_ctrl_area() - Helper function to set a control's area value - * from within a driver. + * v4l2_ctrl_s_ctrl_compound() - Helper function to set a compound control + * from within a driver. * - * @ctrl: The control. - * @area: The new area. + * @ctrl: The control. + * @type: The type of the data. + * @p: The new compound payload.
* - * This sets the control's new area safely by going through the control - * framework. This function will lock the control's handler, so it cannot be - * used from within the &v4l2_ctrl_ops functions. + * This sets the control's new compound payload safely by going through the + * control framework. This function will lock the control's handler, so it + * cannot be used from within the &v4l2_ctrl_ops functions. * - * This function is for area type controls only. + * This function is for compound type controls only. */ -static inline int v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl, - const struct v4l2_area *area) +static inline int v4l2_ctrl_s_ctrl_compound(struct v4l2_ctrl *ctrl, + enum v4l2_ctrl_type type, + const void *p) { int rval; v4l2_ctrl_lock(ctrl); - rval = __v4l2_ctrl_s_ctrl_area(ctrl, area); + rval = __v4l2_ctrl_s_ctrl_compound(ctrl, type, p); v4l2_ctrl_unlock(ctrl); return rval; } +/* Helper defines for area type controls */ +#define __v4l2_ctrl_s_ctrl_area(ctrl, area) \ + __v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) +#define v4l2_ctrl_s_ctrl_area(ctrl, area) \ + v4l2_ctrl_s_ctrl_compound((ctrl), V4L2_CTRL_TYPE_AREA, (area)) + /* Internal helper functions that deal with control events. */ extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops; @@ -1417,4 +1429,29 @@ int v4l2_ctrl_subdev_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh, */ int v4l2_ctrl_subdev_log_status(struct v4l2_subdev *sd); +/** + * v4l2_ctrl_new_fwnode_properties() - Register controls for the device + * properties + * + * @hdl: pointer to &struct v4l2_ctrl_handler to register controls on + * @ctrl_ops: pointer to &struct v4l2_ctrl_ops to register controls with + * @p: pointer to &struct v4l2_fwnode_device_properties + * + * This function registers controls associated with device properties, using the + * property values contained in the @p parameter, if the property has been set to + * a value. + * + * Currently the following v4l2 controls are parsed and registered: + * - V4L2_CID_CAMERA_ORIENTATION + * - V4L2_CID_CAMERA_SENSOR_ROTATION + * + * Controls already registered by the caller with the @hdl control handler are + * not overwritten. Callers should register the controls they want to handle + * themselves before calling this function. + * + * Return: 0 on success, a negative error code on failure. + */ +int v4l2_ctrl_new_fwnode_properties(struct v4l2_ctrl_handler *hdl, + const struct v4l2_ctrl_ops *ctrl_ops, + const struct v4l2_fwnode_device_properties *p); #endif diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 4602c15ff878..ad2d41952442 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -82,11 +82,18 @@ struct v4l2_ctrl_handler; * but the old crop API will still work as expected in order to preserve * backwards compatibility. * Never set this flag for new drivers. + * @V4L2_FL_SUBDEV_RO_DEVNODE: + * indicates that the video device node is registered in read-only mode. + * The flag only applies to device nodes registered for sub-devices; it is + * set by the core when the sub-devices device nodes are registered with + * v4l2_device_register_ro_subdev_nodes() and used by the sub-device ioctl + * handler to restrict access to some ioctl calls.
*/ enum v4l2_video_device_flags { V4L2_FL_REGISTERED = 0, V4L2_FL_USES_V4L2_FH = 1, V4L2_FL_QUIRK_INVERTED_CROP = 2, + V4L2_FL_SUBDEV_RO_DEVNODE = 3, }; /* Priority helper functions */ diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h index 7c912b7d2870..64ec4de948e9 100644 --- a/include/media/v4l2-device.h +++ b/include/media/v4l2-device.h @@ -174,14 +174,56 @@ int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, void v4l2_device_unregister_subdev(struct v4l2_subdev *sd); /** - * v4l2_device_register_subdev_nodes - Registers device nodes for all subdevs - * of the v4l2 device that are marked with - * the %V4L2_SUBDEV_FL_HAS_DEVNODE flag. + * __v4l2_device_register_subdev_nodes - Registers device nodes for + * all subdevs of the v4l2 device that are marked with the + * %V4L2_SUBDEV_FL_HAS_DEVNODE flag. * * @v4l2_dev: pointer to struct v4l2_device + * @read_only: subdevices read-only flag. True to register the subdevices + * device nodes in read-only mode, false to allow full access to the + * subdevice userspace API. */ int __must_check -v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev); +__v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev, + bool read_only); + +/** + * v4l2_device_register_subdev_nodes - Registers subdevices device nodes with + * unrestricted access to the subdevice userspace operations + * + * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation + * for more details. + * + * @v4l2_dev: pointer to struct v4l2_device + */ +static inline int __must_check +v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev) +{ +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + return __v4l2_device_register_subdev_nodes(v4l2_dev, false); +#else + return 0; +#endif +} + +/** + * v4l2_device_register_ro_subdev_nodes - Registers subdevices device nodes + * in read-only mode + * + * Internally calls __v4l2_device_register_subdev_nodes(). See its documentation + * for more details. + * + * @v4l2_dev: pointer to struct v4l2_device + */ +static inline int __must_check +v4l2_device_register_ro_subdev_nodes(struct v4l2_device *v4l2_dev) +{ +#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API) + return __v4l2_device_register_subdev_nodes(v4l2_dev, true); +#else + return 0; +#endif +} /** * v4l2_subdev_notify - Sends a notification to v4l2_device. diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h index 53b4dbb4ae8e..b5b3e00c8e6a 100644 --- a/include/media/v4l2-fh.h +++ b/include/media/v4l2-fh.h @@ -53,9 +53,7 @@ struct v4l2_fh { unsigned int navailable; u32 sequence; -#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV) struct v4l2_m2m_ctx *m2m_ctx; -#endif }; /** diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h index dd82d6d9764e..c47b70636e42 100644 --- a/include/media/v4l2-fwnode.h +++ b/include/media/v4l2-fwnode.h @@ -20,7 +20,6 @@ #include <linux/types.h> #include <media/v4l2-mediabus.h> -#include <media/v4l2-subdev.h> struct fwnode_handle; struct v4l2_async_notifier; @@ -110,6 +109,36 @@ struct v4l2_fwnode_endpoint { }; /** + * V4L2_FWNODE_PROPERTY_UNSET - identify a non-initialized property + * + * All properties in &struct v4l2_fwnode_device_properties are initialized + * to this value.
+ */ +#define V4L2_FWNODE_PROPERTY_UNSET (-1U) + +/** + * enum v4l2_fwnode_orientation - possible device orientation + * @V4L2_FWNODE_ORIENTATION_FRONT: device installed on the front side + * @V4L2_FWNODE_ORIENTATION_BACK: device installed on the back side + * @V4L2_FWNODE_ORIENTATION_EXTERNAL: device externally located + */ +enum v4l2_fwnode_orientation { + V4L2_FWNODE_ORIENTATION_FRONT, + V4L2_FWNODE_ORIENTATION_BACK, + V4L2_FWNODE_ORIENTATION_EXTERNAL +}; + +/** + * struct v4l2_fwnode_device_properties - fwnode device properties + * @orientation: device orientation. See &enum v4l2_fwnode_orientation + * @rotation: device rotation + */ +struct v4l2_fwnode_device_properties { + enum v4l2_fwnode_orientation orientation; + unsigned int rotation; +}; + +/** * struct v4l2_fwnode_link - a link between two endpoints * @local_node: pointer to device_node of this endpoint * @local_port: identifier of the port this endpoint belongs to @@ -355,6 +384,23 @@ int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode, struct v4l2_fwnode_connector *connector); /** + * v4l2_fwnode_device_parse() - parse fwnode device properties + * @dev: pointer to &struct device + * @props: pointer to &struct v4l2_fwnode_device_properties where to store the + * parsed properties values + * + * This function parses and validates the V4L2 fwnode device properties from the + * firmware interface, and fills the @struct v4l2_fwnode_device_properties + * provided by the caller. + * + * Return: + * % 0 on success + * %-EINVAL if a parsed property value is not valid + */ +int v4l2_fwnode_device_parse(struct device *dev, + struct v4l2_fwnode_device_properties *props); + +/** * typedef parse_endpoint_func - Driver's callback function to be called on * each V4L2 fwnode endpoint. * @@ -490,43 +536,6 @@ v4l2_async_notifier_parse_fwnode_endpoints_by_port(struct device *dev, int v4l2_async_notifier_parse_fwnode_sensor_common(struct device *dev, struct v4l2_async_notifier *notifier); -/** - * v4l2_async_register_fwnode_subdev - registers a sub-device to the - * asynchronous sub-device framework - * and parses fwnode endpoints - * - * @sd: pointer to struct &v4l2_subdev - * @asd_struct_size: size of the driver's async sub-device struct, including - * sizeof(struct v4l2_async_subdev). The &struct - * v4l2_async_subdev shall be the first member of - * the driver's async sub-device struct, i.e. both - * begin at the same memory address. - * @ports: array of port id's to parse for fwnode endpoints. If NULL, will - * parse all ports owned by the sub-device. - * @num_ports: number of ports in @ports array. Ignored if @ports is NULL. - * @parse_endpoint: Driver's callback function called on each V4L2 fwnode - * endpoint. Optional. - * - * This function is just like v4l2_async_register_subdev() with the - * exception that calling it will also allocate a notifier for the - * sub-device, parse the sub-device's firmware node endpoints using - * v4l2_async_notifier_parse_fwnode_endpoints() or - * v4l2_async_notifier_parse_fwnode_endpoints_by_port(), and - * registers the sub-device notifier. The sub-device is similarly - * unregistered by calling v4l2_async_unregister_subdev(). - * - * While registered, the subdev module is marked as in-use. - * - * An error is returned if the module is no longer loaded on any attempts - * to register it. 
- */ -int -v4l2_async_register_fwnode_subdev(struct v4l2_subdev *sd, - size_t asd_struct_size, - unsigned int *ports, - unsigned int num_ports, - parse_endpoint_func parse_endpoint); - /* Helper macros to access the connector links. */ /** v4l2_connector_last_link - Helper macro to get the first diff --git a/include/media/v4l2-h264.h b/include/media/v4l2-h264.h new file mode 100644 index 000000000000..bc9ebb560ccf --- /dev/null +++ b/include/media/v4l2-h264.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Helper functions for H264 codecs. + * + * Copyright (c) 2019 Collabora, Ltd. + * + * Author: Boris Brezillon <boris.brezillon@collabora.com> + */ + +#ifndef _MEDIA_V4L2_H264_H +#define _MEDIA_V4L2_H264_H + +#include <media/h264-ctrls.h> + +/** + * struct v4l2_h264_reflist_builder - Reference list builder object + * + * @refs.pic_order_count: reference picture order count + * @refs.frame_num: reference frame number + * @refs.pic_num: reference picture number + * @refs.longterm: set to true for a long term reference + * @refs: array of references + * @cur_pic_order_count: picture order count of the frame being decoded + * @unordered_reflist: unordered list of references. Will be used to generate + * ordered P/B0/B1 lists + * @num_valid: number of valid references in the refs array + * + * This object stores the context of the P/B0/B1 reference list builder. + * This procedure is described in section '8.2.4 Decoding process for reference + * picture lists construction' of the H264 spec. + */ +struct v4l2_h264_reflist_builder { + struct { + s32 pic_order_count; + int frame_num; + u16 pic_num; + u16 longterm : 1; + } refs[V4L2_H264_NUM_DPB_ENTRIES]; + s32 cur_pic_order_count; + u8 unordered_reflist[V4L2_H264_NUM_DPB_ENTRIES]; + u8 num_valid; +}; + +void +v4l2_h264_init_reflist_builder(struct v4l2_h264_reflist_builder *b, + const struct v4l2_ctrl_h264_decode_params *dec_params, + const struct v4l2_ctrl_h264_slice_params *slice_params, + const struct v4l2_ctrl_h264_sps *sps, + const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]); + +/** + * v4l2_h264_build_b_ref_lists() - Build the B0/B1 reference lists + * + * @builder: reference list builder context + * @b0_reflist: 16-byte array used to store the B0 reference list. Each entry + * is an index in the DPB + * @b1_reflist: 16-byte array used to store the B1 reference list. Each entry + * is an index in the DPB + * + * This function builds the B0/B1 reference lists. This procedure is described + * in section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass B0/B1 reference lists to the hardware. + */ +void +v4l2_h264_build_b_ref_lists(const struct v4l2_h264_reflist_builder *builder, + u8 *b0_reflist, u8 *b1_reflist); + +/** + * v4l2_h264_build_p_ref_list() - Build the P reference list + * + * @builder: reference list builder context + * @reflist: 16-byte array used to store the P reference list. Each entry + * is an index in the DPB + * + * This function builds the P reference list. This procedure is described in + * section '8.2.4 Decoding process for reference picture lists construction' + * of the H264 spec. This function can be used by H264 decoder drivers that + * need to pass a P reference list to the hardware.
+ */ +void +v4l2_h264_build_p_ref_list(const struct v4l2_h264_reflist_builder *builder, + u8 *reflist); + +#endif /* _MEDIA_V4L2_H264_H */ diff --git a/include/media/v4l2-image-sizes.h b/include/media/v4l2-image-sizes.h index 450f4f5d3d6a..24a7a0bb59ab 100644 --- a/include/media/v4l2-image-sizes.h +++ b/include/media/v4l2-image-sizes.h @@ -10,6 +10,12 @@ #define CIF_WIDTH 352 #define CIF_HEIGHT 288 +#define HD_720_WIDTH 1280 +#define HD_720_HEIGHT 720 + +#define HD_1080_WIDTH 1920 +#define HD_1080_HEIGHT 1080 + #define QCIF_WIDTH 176 #define QCIF_HEIGHT 144 diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h new file mode 100644 index 000000000000..ddba2a56c321 --- /dev/null +++ b/include/media/v4l2-jpeg.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * V4L2 JPEG helpers header + * + * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de> + * + * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) + */ + +#ifndef _V4L2_JPEG_H +#define _V4L2_JPEG_H + +#include <linux/v4l2-controls.h> + +#define V4L2_JPEG_MAX_COMPONENTS 4 +#define V4L2_JPEG_MAX_TABLES 4 + +/** + * struct v4l2_jpeg_reference - reference into the JPEG buffer + * @start: pointer to the start of the referenced segment or table + * @length: size of the referenced segment or table + * + * When referencing marker segments, start points right after the marker code, + * and length is the size of the segment parameters, excluding the marker code. + */ +struct v4l2_jpeg_reference { + u8 *start; + size_t length; +}; + +/* B.2.2 Frame header syntax */ + +/** + * struct v4l2_jpeg_frame_component_spec - frame component-specification + * @component_identifier: C[i] + * @horizontal_sampling_factor: H[i] + * @vertical_sampling_factor: V[i] + * @quantization_table_selector: quantization table destination selector Tq[i] + */ +struct v4l2_jpeg_frame_component_spec { + u8 component_identifier; + u8 horizontal_sampling_factor; + u8 vertical_sampling_factor; + u8 quantization_table_selector; +}; + +/** + * struct v4l2_jpeg_frame_header - JPEG frame header + * @height: Y + * @width: X + * @precision: P + * @num_components: Nf + * @component: component-specification, see v4l2_jpeg_frame_component_spec + * @subsampling: decoded subsampling from component-specification + */ +struct v4l2_jpeg_frame_header { + u16 height; + u16 width; + u8 precision; + u8 num_components; + struct v4l2_jpeg_frame_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + enum v4l2_jpeg_chroma_subsampling subsampling; +}; + +/* B.2.3 Scan header syntax */ + +/** + * struct v4l2_jpeg_scan_component_spec - scan component-specification + * @component_selector: Cs[j] + * @dc_entropy_coding_table_selector: Td[j] + * @ac_entropy_coding_table_selector: Ta[j] + */ +struct v4l2_jpeg_scan_component_spec { + u8 component_selector; + u8 dc_entropy_coding_table_selector; + u8 ac_entropy_coding_table_selector; +}; + +/** + * struct v4l2_jpeg_scan_header - JPEG scan header + * @num_components: Ns + * @component: component-specification, see v4l2_jpeg_scan_component_spec + */ +struct v4l2_jpeg_scan_header { + u8 num_components; /* Ns */ + struct v4l2_jpeg_scan_component_spec component[V4L2_JPEG_MAX_COMPONENTS]; + /* Ss, Se, Ah, and Al are not used by any driver */ +}; + +/** + * struct v4l2_jpeg_header - parsed JPEG header + * @sof: pointer to frame header and size + * @sos: pointer to scan header and size + * @dht: pointers to huffman tables and sizes + * @dqt: pointers to quantization tables and sizes + * @frame: parsed frame header + *
@scan: pointer to parsed scan header, optional + * @quantization_tables: references to four quantization tables, optional + * @huffman_tables: references to four Huffman tables in DC0, DC1, AC0, AC1 + * order, optional + * @restart_interval: number of MCU per restart interval, Ri + * @ecs_offset: buffer offset in bytes to the entropy coded segment + * + * When this structure is passed to v4l2_jpeg_parse_header, the optional scan, + * quantization_tables, and huffman_tables pointers must be initialized to NULL + * or point at valid memory. + */ +struct v4l2_jpeg_header { + struct v4l2_jpeg_reference sof; + struct v4l2_jpeg_reference sos; + unsigned int num_dht; + struct v4l2_jpeg_reference dht[V4L2_JPEG_MAX_TABLES]; + unsigned int num_dqt; + struct v4l2_jpeg_reference dqt[V4L2_JPEG_MAX_TABLES]; + + struct v4l2_jpeg_frame_header frame; + struct v4l2_jpeg_scan_header *scan; + struct v4l2_jpeg_reference *quantization_tables; + struct v4l2_jpeg_reference *huffman_tables; + u16 restart_interval; + size_t ecs_offset; +}; + +int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out); + +int v4l2_jpeg_parse_frame_header(void *buf, size_t len, + struct v4l2_jpeg_frame_header *frame_header); +int v4l2_jpeg_parse_scan_header(void *buf, size_t len, + struct v4l2_jpeg_scan_header *scan_header); +int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision, + struct v4l2_jpeg_reference *q_tables); +int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len, + struct v4l2_jpeg_reference *huffman_tables); + +#endif diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index 5e73eb8e28f6..246eed398648 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h @@ -12,6 +12,7 @@ #include <media/media-device.h> #include <media/v4l2-dev.h> +#include <media/v4l2-subdev.h> #include <linux/types.h> /* We don't need to include pci.h or usb.h here */ @@ -84,6 +85,53 @@ void v4l_disable_media_source(struct video_device *vdev); */ int v4l_vb2q_enable_media_source(struct vb2_queue *q); +/** + * v4l2_create_fwnode_links_to_pad - Create fwnode-based links from a + * source subdev to a sink subdev pad. + * + * @src_sd - pointer to a source subdev + * @sink - pointer to a subdev sink pad + * + * This function searches for fwnode endpoint connections from a source + * subdevice to a single sink pad, and if suitable connections are found, + * translates them into media links to that pad. The function can be + * called by the sink subdevice, in its v4l2-async notifier subdev bound + * callback, to create links from a bound source subdevice. + * + * .. note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, + struct media_pad *sink); + +/** + * v4l2_create_fwnode_links - Create fwnode-based links from a source + * subdev to a sink subdev. + * + * @src_sd - pointer to a source subdevice + * @sink_sd - pointer to a sink subdevice + * + * This function searches for any and all fwnode endpoint connections + * between source and sink subdevices, and translates them into media + * links. The function can be called by the sink subdevice, in its + * v4l2-async notifier subdev bound callback, to create all links from + * a bound source subdevice. + * + * .. 
note:: + * + * Any sink subdevice that calls this function must implement the + * .get_fwnode_pad media operation in order to verify endpoints passed + * to the sink are owned by the sink. + * + * Return 0 on success or a negative error code on failure. + */ +int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd, + struct v4l2_subdev *sink_sd); /** * v4l2_pipeline_pm_get - Increase the use count of a pipeline diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index a4848de59852..f7fe78a6f65a 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -1028,6 +1028,23 @@ static inline void *v4l2_get_subdev_hostdata(const struct v4l2_subdev *sd) #ifdef CONFIG_MEDIA_CONTROLLER /** + * v4l2_subdev_get_fwnode_pad_1_to_1 - Get pad number from a subdev fwnode + * endpoint, assuming 1:1 port:pad + * + * @entity - Pointer to the subdev entity + * @endpoint - Pointer to a parsed fwnode endpoint + * + * This function can be used as the .get_fwnode_pad operation for + * subdevices that map port numbers and pad indexes 1:1. If the endpoint + * is owned by the subdevice, the function returns the endpoint port + * number. + * + * Returns the endpoint port number on success or a negative error code. + */ +int v4l2_subdev_get_fwnode_pad_1_to_1(struct media_entity *entity, + struct fwnode_endpoint *endpoint); + +/** * v4l2_subdev_link_validate_default - validates a media link * * @sd: pointer to &struct v4l2_subdev diff --git a/include/media/videobuf2-dma-contig.h b/include/media/videobuf2-dma-contig.h index 5604818d137e..5be313cbf7d7 100644 --- a/include/media/videobuf2-dma-contig.h +++ b/include/media/videobuf2-dma-contig.h @@ -25,7 +25,7 @@ vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no) } int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size); -void vb2_dma_contig_clear_max_seg_size(struct device *dev); +static inline void vb2_dma_contig_clear_max_seg_size(struct device *dev) { } extern const struct vb2_mem_ops vb2_dma_contig_memops; diff --git a/include/net/act_api.h b/include/net/act_api.h index c24d7643548e..8c3934880670 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -75,7 +75,8 @@ static inline void tcf_tm_dump(struct tcf_t *dtm, const struct tcf_t *stm) { dtm->install = jiffies_to_clock_t(jiffies - stm->install); dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); - dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse); + dtm->firstuse = stm->firstuse ? 
+ jiffies_to_clock_t(jiffies - stm->firstuse) : 0; dtm->expires = jiffies_to_clock_t(stm->expires); } @@ -193,7 +194,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack); int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind, - int ref); + int ref, bool terse); int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); diff --git a/include/net/addrconf.h b/include/net/addrconf.h index e0eabe58aa8b..fdb07105384c 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -6,8 +6,6 @@ #define RTR_SOLICITATION_INTERVAL (4*HZ) #define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */ -#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */ - #define TEMP_VALID_LIFETIME (7*86400) #define TEMP_PREFERRED_LIFETIME (86400) #define REGEN_MAX_RETRY (3) diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index 04e97bab6f28..91eacbdcf33d 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, struct sockaddr_rxrpc *); -u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *); +u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *); int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, rxrpc_user_attach_call_t, unsigned long, gfp_t, unsigned int); @@ -72,4 +72,6 @@ bool rxrpc_kernel_call_is_complete(struct rxrpc_call *); void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *, unsigned long); +int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val); + #endif /* _NET_RXRPC_H */ diff --git a/include/net/bareudp.h b/include/net/bareudp.h index cb03f6f15956..dc65a0d71d9b 100644 --- a/include/net/bareudp.h +++ b/include/net/bareudp.h @@ -5,6 +5,7 @@ #include <linux/types.h> #include <linux/skbuff.h> +#include <net/rtnetlink.h> struct bareudp_conf { __be16 ethertype; @@ -17,4 +18,10 @@ struct net_device *bareudp_dev_create(struct net *net, const char *name, u8 name_assign_type, struct bareudp_conf *info); +static inline bool netif_is_bareudp(const struct net_device *dev) +{ + return dev->rtnl_link_ops && + !strcmp(dev->rtnl_link_ops->kind, "bareudp"); +} + #endif diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 1576353a2773..18190055374c 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -139,12 +139,26 @@ struct bt_voice { #define BT_PHY_LE_CODED_TX 0x00002000 #define BT_PHY_LE_CODED_RX 0x00004000 +#define BT_MODE 15 + +#define BT_MODE_BASIC 0x00 +#define BT_MODE_ERTM 0x01 +#define BT_MODE_STREAMING 0x02 +#define BT_MODE_LE_FLOWCTL 0x03 +#define BT_MODE_EXT_FLOWCTL 0x04 + __printf(1, 2) void bt_info(const char *fmt, ...); __printf(1, 2) void bt_warn(const char *fmt, ...); __printf(1, 2) void bt_err(const char *fmt, ...); +#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG) +void bt_dbg_set(bool enable); +bool bt_dbg_get(void); +__printf(1, 2) +void bt_dbg(const char *fmt, ...); +#endif __printf(1, 2) void bt_warn_ratelimited(const char *fmt, ...); __printf(1, 2) @@ -153,7 +167,12 @@ void bt_err_ratelimited(const char *fmt, ...); #define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__) #define BT_WARN(fmt, ...) 
bt_warn(fmt "\n", ##__VA_ARGS__) #define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__) + +#if IS_ENABLED(CONFIG_BT_FEATURE_DEBUG) +#define BT_DBG(fmt, ...) bt_dbg(fmt "\n", ##__VA_ARGS__) +#else #define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__) +#endif #define bt_dev_info(hdev, fmt, ...) \ BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__) diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 5f60e135aeb6..16ab6ce87883 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -53,6 +53,9 @@ #define HCI_NOTIFY_CONN_ADD 1 #define HCI_NOTIFY_CONN_DEL 2 #define HCI_NOTIFY_VOICE_SETTING 3 +#define HCI_NOTIFY_ENABLE_SCO_CVSD 4 +#define HCI_NOTIFY_ENABLE_SCO_TRANSP 5 +#define HCI_NOTIFY_DISABLE_SCO 6 /* HCI bus types */ #define HCI_VIRTUAL 0 @@ -65,6 +68,7 @@ #define HCI_SPI 7 #define HCI_I2C 8 #define HCI_SMD 9 +#define HCI_VIRTIO 10 /* HCI controller types */ #define HCI_PRIMARY 0x00 @@ -214,6 +218,15 @@ enum { * This quirk must be set before hci_register_dev is called. */ HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + + /* When this quirk is set, the controller has validated that + * LE states reported through the HCI_LE_READ_SUPPORTED_STATES are + * valid. This mechanism is necessary as many controllers have + * been seen has having trouble initiating a connectable + * advertisement despite the state combination being reported as + * supported. + */ + HCI_QUIRK_VALID_LE_STATES, }; /* HCI device flags */ @@ -245,6 +258,7 @@ enum { HCI_MGMT_DEV_CLASS_EVENTS, HCI_MGMT_LOCAL_NAME_EVENTS, HCI_MGMT_OOB_DATA_EVENTS, + HCI_MGMT_EXP_FEATURE_EVENTS, }; /* @@ -294,6 +308,7 @@ enum { HCI_FORCE_STATIC_ADDR, HCI_LL_RPA_RESOLUTION, HCI_CMD_PENDING, + HCI_FORCE_NO_MITM, __HCI_NUM_FLAGS, }; @@ -455,12 +470,11 @@ enum { #define HCI_LE_SLAVE_FEATURES 0x08 #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 -#define HCI_LE_PHY_2M 0x01 -#define HCI_LE_PHY_CODED 0x08 -#define HCI_LE_EXT_ADV 0x10 +#define HCI_LE_LL_PRIVACY 0x40 #define HCI_LE_EXT_SCAN_POLICY 0x80 #define HCI_LE_PHY_2M 0x01 #define HCI_LE_PHY_CODED 0x08 +#define HCI_LE_EXT_ADV 0x10 #define HCI_LE_CHAN_SEL_ALG2 0x40 #define HCI_LE_CIS_MASTER 0x10 #define HCI_LE_CIS_SLAVE 0x20 @@ -1272,6 +1286,13 @@ struct hci_rp_read_data_block_size { #define HCI_OP_READ_LOCAL_CODECS 0x100b +#define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c +struct hci_rp_read_local_pairing_opts { + __u8 status; + __u8 pairing_opts; + __u8 max_key_size; +} __packed; + #define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b struct hci_rp_read_page_scan_activity { __u8 status; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index d4e28773d378..cdd4f1db8670 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -110,7 +110,7 @@ enum suspend_tasks { enum suspended_state { BT_RUNNING = 0, BT_SUSPEND_DISCONNECT, - BT_SUSPEND_COMPLETE, + BT_SUSPEND_CONFIGURE_WAKE, }; struct hci_conn_hash { @@ -312,6 +312,8 @@ struct hci_dev { __u16 conn_info_max_age; __u16 auth_payload_timeout; __u8 min_enc_key_size; + __u8 max_enc_key_size; + __u8 pairing_opts; __u8 ssp_debug_mode; __u8 hw_error_code; __u32 clock; @@ -484,6 +486,11 @@ struct hci_dev { struct led_trigger *power_led; #endif +#if IS_ENABLED(CONFIG_BT_MSFTEXT) + __u16 msft_opcode; + void *msft_data; +#endif + int (*open)(struct hci_dev *hdev); int (*close)(struct hci_dev *hdev); int (*flush)(struct hci_dev *hdev); @@ -496,6 +503,7 @@ struct hci_dev { int (*set_diag)(struct hci_dev *hdev, bool enable); int (*set_bdaddr)(struct hci_dev *hdev, 
const bdaddr_t *bdaddr); void (*cmd_timeout)(struct hci_dev *hdev); + bool (*prevent_wake)(struct hci_dev *hdev); }; #define HCI_PHY_HANDLE(handle) (handle & 0xff) @@ -638,6 +646,7 @@ extern struct mutex hci_cb_list_lock; do { \ hci_dev_clear_flag(hdev, HCI_LE_SCAN); \ hci_dev_clear_flag(hdev, HCI_LE_ADV); \ + hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);\ hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); \ } while (0) @@ -1116,6 +1125,14 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); __printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...); __printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...); + +static inline void hci_set_msft_opcode(struct hci_dev *hdev, __u16 opcode) +{ +#if IS_ENABLED(CONFIG_BT_MSFTEXT) + hdev->msft_opcode = opcode; +#endif +} + int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_do_close(struct hci_dev *hdev); @@ -1364,10 +1381,26 @@ static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status) conn->security_cfm_cb(conn, status); } -static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, - __u8 encrypt) +static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status) { struct hci_cb *cb; + __u8 encrypt; + + if (conn->state == BT_CONFIG) { + if (status) + conn->state = BT_CONNECTED; + + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + return; + } + + if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + encrypt = 0x00; + else if (test_bit(HCI_CONN_AES_CCM, &conn->flags)) + encrypt = 0x02; + else + encrypt = 0x01; if (conn->sec_level == BT_SECURITY_SDP) conn->sec_level = BT_SECURITY_LOW; @@ -1538,6 +1571,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event); #define HCI_MGMT_NO_HDEV BIT(1) #define HCI_MGMT_UNTRUSTED BIT(2) #define HCI_MGMT_UNCONFIGURED BIT(3) +#define HCI_MGMT_HDEV_OPTIONAL BIT(4) struct hci_mgmt_handler { int (*func) (struct sock *sk, struct hci_dev *hdev, void *data, diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index dada14d0622c..8f1e6a7a2df8 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -499,7 +499,7 @@ struct l2cap_ecred_conn_req { __le16 mtu; __le16 mps; __le16 credits; - __le16 scid[0]; + __le16 scid[]; } __packed; struct l2cap_ecred_conn_rsp { @@ -507,13 +507,13 @@ struct l2cap_ecred_conn_rsp { __le16 mps; __le16 credits; __le16 result; - __le16 dcid[0]; + __le16 dcid[]; }; struct l2cap_ecred_reconf_req { __le16 mtu; __le16 mps; - __le16 scid[0]; + __le16 scid[]; } __packed; #define L2CAP_RECONF_SUCCESS 0x0000 diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index f41cd87550dc..16e0d87bd8fa 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -70,14 +70,14 @@ struct mgmt_rp_read_version { struct mgmt_rp_read_commands { __le16 num_commands; __le16 num_events; - __le16 opcodes[0]; + __le16 opcodes[]; } __packed; #define MGMT_OP_READ_INDEX_LIST 0x0003 #define MGMT_READ_INDEX_LIST_SIZE 0 struct mgmt_rp_read_index_list { __le16 num_controllers; - __le16 index[0]; + __le16 index[]; } __packed; /* Reserve one extra byte for names in management messages so that they @@ -183,7 +183,7 @@ struct mgmt_link_key_info { struct mgmt_cp_load_link_keys { __u8 debug_keys; __le16 key_count; - struct mgmt_link_key_info keys[0]; + struct mgmt_link_key_info keys[]; } __packed; #define MGMT_LOAD_LINK_KEYS_SIZE 3 @@ -206,7 +206,7 @@ struct mgmt_ltk_info { #define 
MGMT_OP_LOAD_LONG_TERM_KEYS 0x0013 struct mgmt_cp_load_long_term_keys { __le16 key_count; - struct mgmt_ltk_info keys[0]; + struct mgmt_ltk_info keys[]; } __packed; #define MGMT_LOAD_LONG_TERM_KEYS_SIZE 2 @@ -223,7 +223,7 @@ struct mgmt_rp_disconnect { #define MGMT_GET_CONNECTIONS_SIZE 0 struct mgmt_rp_get_connections { __le16 conn_count; - struct mgmt_addr_info addr[0]; + struct mgmt_addr_info addr[]; } __packed; #define MGMT_OP_PIN_CODE_REPLY 0x0016 @@ -413,7 +413,7 @@ struct mgmt_irk_info { #define MGMT_OP_LOAD_IRKS 0x0030 struct mgmt_cp_load_irks { __le16 irk_count; - struct mgmt_irk_info irks[0]; + struct mgmt_irk_info irks[]; } __packed; #define MGMT_LOAD_IRKS_SIZE 2 @@ -465,7 +465,7 @@ struct mgmt_conn_param { #define MGMT_OP_LOAD_CONN_PARAM 0x0035 struct mgmt_cp_load_conn_param { __le16 param_count; - struct mgmt_conn_param params[0]; + struct mgmt_conn_param params[]; } __packed; #define MGMT_LOAD_CONN_PARAM_SIZE 2 @@ -473,7 +473,7 @@ struct mgmt_cp_load_conn_param { #define MGMT_READ_UNCONF_INDEX_LIST_SIZE 0 struct mgmt_rp_read_unconf_index_list { __le16 num_controllers; - __le16 index[0]; + __le16 index[]; } __packed; #define MGMT_OPTION_EXTERNAL_CONFIG 0x00000001 @@ -504,7 +504,7 @@ struct mgmt_cp_start_service_discovery { __u8 type; __s8 rssi; __le16 uuid_count; - __u8 uuids[0][16]; + __u8 uuids[][16]; } __packed; #define MGMT_START_SERVICE_DISCOVERY_SIZE 4 @@ -516,7 +516,7 @@ struct mgmt_cp_read_local_oob_ext_data { struct mgmt_rp_read_local_oob_ext_data { __u8 type; __le16 eir_len; - __u8 eir[0]; + __u8 eir[]; } __packed; #define MGMT_OP_READ_EXT_INDEX_LIST 0x003C @@ -527,7 +527,7 @@ struct mgmt_rp_read_ext_index_list { __le16 index; __u8 type; __u8 bus; - } entry[0]; + } entry[]; } __packed; #define MGMT_OP_READ_ADV_FEATURES 0x0003D @@ -538,7 +538,7 @@ struct mgmt_rp_read_adv_features { __u8 max_scan_rsp_len; __u8 max_instances; __u8 num_instances; - __u8 instance[0]; + __u8 instance[]; } __packed; #define MGMT_OP_ADD_ADVERTISING 0x003E @@ -549,7 +549,7 @@ struct mgmt_cp_add_advertising { __le16 timeout; __u8 adv_data_len; __u8 scan_rsp_len; - __u8 data[0]; + __u8 data[]; } __packed; #define MGMT_ADD_ADVERTISING_SIZE 11 struct mgmt_rp_add_advertising { @@ -603,7 +603,7 @@ struct mgmt_rp_read_ext_info { __le32 supported_settings; __le32 current_settings; __le16 eir_len; - __u8 eir[0]; + __u8 eir[]; } __packed; #define MGMT_OP_SET_APPEARANCE 0x0043 @@ -668,17 +668,45 @@ struct mgmt_blocked_key_info { struct mgmt_cp_set_blocked_keys { __le16 key_count; - struct mgmt_blocked_key_info keys[0]; + struct mgmt_blocked_key_info keys[]; } __packed; #define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2 #define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047 +#define MGMT_OP_READ_SECURITY_INFO 0x0048 +#define MGMT_READ_SECURITY_INFO_SIZE 0 +struct mgmt_rp_read_security_info { + __le16 sec_len; + __u8 sec[]; +} __packed; + +#define MGMT_OP_READ_EXP_FEATURES_INFO 0x0049 +#define MGMT_READ_EXP_FEATURES_INFO_SIZE 0 +struct mgmt_rp_read_exp_features_info { + __le16 feature_count; + struct { + __u8 uuid[16]; + __le32 flags; + } features[]; +} __packed; + +#define MGMT_OP_SET_EXP_FEATURE 0x004a +struct mgmt_cp_set_exp_feature { + __u8 uuid[16]; + __u8 param[]; +} __packed; +#define MGMT_SET_EXP_FEATURE_SIZE 16 +struct mgmt_rp_set_exp_feature { + __u8 uuid[16]; + __le32 flags; +} __packed; + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; __u8 status; - __u8 data[0]; + __u8 data[]; } __packed; #define MGMT_EV_CMD_STATUS 0x0002 @@ -726,7 +754,7 @@ struct mgmt_ev_device_connected { struct 
mgmt_addr_info addr; __le32 flags; __le16 eir_len; - __u8 eir[0]; + __u8 eir[]; } __packed; #define MGMT_DEV_DISCONN_UNKNOWN 0x00 @@ -781,7 +809,7 @@ struct mgmt_ev_device_found { __s8 rssi; __le32 flags; __le16 eir_len; - __u8 eir[0]; + __u8 eir[]; } __packed; #define MGMT_EV_DISCOVERING 0x0013 @@ -876,7 +904,7 @@ struct mgmt_ev_ext_index { struct mgmt_ev_local_oob_data_updated { __u8 type; __le16 eir_len; - __u8 eir[0]; + __u8 eir[]; } __packed; #define MGMT_EV_ADVERTISING_ADDED 0x0023 @@ -892,10 +920,16 @@ struct mgmt_ev_advertising_removed { #define MGMT_EV_EXT_INFO_CHANGED 0x0025 struct mgmt_ev_ext_info_changed { __le16 eir_len; - __u8 eir[0]; + __u8 eir[]; } __packed; #define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026 struct mgmt_ev_phy_configuration_changed { __le32 selected_phys; } __packed; + +#define MGMT_EV_EXP_FEATURE_CHANGED 0x0027 +struct mgmt_ev_exp_feature_changed { + __u8 uuid[16]; + __le32 flags; +} __packed; diff --git a/include/net/bond_alb.h b/include/net/bond_alb.h index b3504fcd773d..f6af76c87a6c 100644 --- a/include/net/bond_alb.h +++ b/include/net/bond_alb.h @@ -158,6 +158,10 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave); int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev); int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev); +struct slave *bond_xmit_alb_slave_get(struct bonding *bond, + struct sk_buff *skb); +struct slave *bond_xmit_tlb_slave_get(struct bonding *bond, + struct sk_buff *skb); void bond_alb_monitor(struct work_struct *); int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr); void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id); diff --git a/include/net/bonding.h b/include/net/bonding.h index dc2ce31a1f52..aa854a9c01e2 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -200,7 +200,8 @@ struct bonding { struct slave __rcu *curr_active_slave; struct slave __rcu *current_arp_slave; struct slave __rcu *primary_slave; - struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */ + struct bond_up_slave __rcu *usable_slaves; + struct bond_up_slave __rcu *all_slaves; bool force_primary; s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ int (*recv_probe)(const struct sk_buff *, struct bonding *, @@ -237,7 +238,6 @@ struct bonding { struct dentry *debug_dir; #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; - struct lock_class_key stats_lock_key; }; #define bond_slave_get_rcu(dev) \ @@ -505,18 +505,17 @@ static inline unsigned long slave_last_rx(struct bonding *bond, } #ifdef CONFIG_NET_POLL_CONTROLLER -static inline void bond_netpoll_send_skb(const struct slave *slave, +static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave, struct sk_buff *skb) { - struct netpoll *np = slave->np; - - if (np) - netpoll_send_skb(np, skb); + return netpoll_send_skb(slave->np, skb); } #else -static inline void bond_netpoll_send_skb(const struct slave *slave, +static inline netdev_tx_t bond_netpoll_send_skb(const struct slave *slave, struct sk_buff *skb) { + BUG(); + return NETDEV_TX_OK; } #endif @@ -610,7 +609,7 @@ struct bond_net { }; int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); -void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); +netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device 
*slave_dev); int bond_create(struct net *net, const char *name); int bond_create_sysfs(struct bond_net *net); void bond_destroy_sysfs(struct bond_net *net); @@ -743,10 +742,11 @@ extern struct bond_parm_tbl ad_select_tbl[]; /* exported from bond_netlink.c */ extern struct rtnl_link_ops bond_link_ops; -static inline void bond_tx_drop(struct net_device *dev, struct sk_buff *skb) +static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb) { atomic_long_inc(&dev->tx_dropped); dev_kfree_skb_any(skb); + return NET_XMIT_DROP; } #endif /* _NET_BONDING_H */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index c78bd4ff9e33..fc7e8807838d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -128,6 +128,7 @@ enum ieee80211_channel_flags { * with cfg80211. * * @center_freq: center frequency in MHz + * @freq_offset: offset from @center_freq, in KHz * @hw_value: hardware-specific value for the channel * @flags: channel flags from &enum ieee80211_channel_flags. * @orig_flags: channel flags at registration time, used by regulatory @@ -149,6 +150,7 @@ enum ieee80211_channel_flags { struct ieee80211_channel { enum nl80211_band band; u32 center_freq; + u16 freq_offset; u16 hw_value; u32 flags; int max_antenna_gain; @@ -352,10 +354,13 @@ struct ieee80211_sta_he_cap { * * @types_mask: interface types mask * @he_cap: holds the HE capabilities + * @he_6ghz_capa: HE 6 GHz capabilities, must be filled in for a + * 6 GHz band channel (and 0 may be a valid value). */ struct ieee80211_sband_iftype_data { u16 types_mask; struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; }; /** @@ -508,6 +513,26 @@ ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband) } /** + * ieee80211_get_he_6ghz_capa - return HE 6 GHz capabilities + * @sband: the sband to search for the STA on + * @iftype: the iftype to search for + * + * Return: the 6GHz capabilities + */ +static inline __le16 +ieee80211_get_he_6ghz_capa(const struct ieee80211_supported_band *sband, + enum nl80211_iftype iftype) +{ + const struct ieee80211_sband_iftype_data *data = + ieee80211_get_sband_iftype_data(sband, iftype); + + if (WARN_ON(!data || !data->he_cap.has_he)) + return 0; + + return data->he_6ghz_capa.capa; +} + +/** * wiphy_read_of_freq_limits - read frequency limits from device tree * * @wiphy: the wireless device to get extra limits for @@ -617,6 +642,7 @@ struct key_params { * If edmg is requested (i.e. the .channels member is non-zero), * chan will define the primary channel and all other * parameters are ignored.
+ * @freq1_offset: offset from @center_freq1, in KHz */ struct cfg80211_chan_def { struct ieee80211_channel *chan; @@ -624,8 +650,22 @@ struct cfg80211_chan_def { u32 center_freq1; u32 center_freq2; struct ieee80211_edmg edmg; + u16 freq1_offset; +}; + +/* + * cfg80211_bitrate_mask - masks for bitrate control + */ +struct cfg80211_bitrate_mask { + struct { + u32 legacy; + u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN]; + u16 vht_mcs[NL80211_VHT_NSS_MAX]; + enum nl80211_txrate_gi gi; + } control[NUM_NL80211_BANDS]; }; + /** * struct cfg80211_tid_cfg - TID specific configuration * @config_override: Flag to notify driver to reset TID configuration @@ -636,17 +676,23 @@ struct cfg80211_chan_def { * @noack: noack configuration value for the TID * @retry_long: retry count value * @retry_short: retry count value - * @ampdu: Enable/Disable aggregation + * @ampdu: Enable/Disable MPDU aggregation * @rtscts: Enable/Disable RTS/CTS + * @amsdu: Enable/Disable MSDU aggregation + * @txrate_type: Tx bitrate mask type + * @txrate_mask: Tx bitrate to be applied for the TID */ struct cfg80211_tid_cfg { bool config_override; u8 tids; - u32 mask; + u64 mask; enum nl80211_tid_config noack; u8 retry_long, retry_short; enum nl80211_tid_config ampdu; enum nl80211_tid_config rtscts; + enum nl80211_tid_config amsdu; + enum nl80211_tx_rate_setting txrate_type; + struct cfg80211_bitrate_mask txrate_mask; }; /** @@ -713,6 +759,7 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1, return (chandef1->chan == chandef2->chan && chandef1->width == chandef2->width && chandef1->center_freq1 == chandef2->center_freq1 && + chandef1->freq1_offset == chandef2->freq1_offset && chandef1->center_freq2 == chandef2->center_freq2); } @@ -905,6 +952,8 @@ struct survey_info { * protocol frames. * @control_port_over_nl80211: TRUE if userspace expects to exchange control * port frames over NL80211 instead of the network interface. 
+ * @control_port_no_preauth: disables pre-auth rx over the nl80211 control + * port for mac80211 * @wep_keys: static WEP keys, if not NULL points to an array of * CFG80211_MAX_WEP_KEYS WEP keys * @wep_tx_key: key index (0..3) of the default TX static WEP key @@ -998,18 +1047,6 @@ struct cfg80211_acl_data { struct mac_address mac_addrs[]; }; -/* - * cfg80211_bitrate_mask - masks for bitrate control - */ -struct cfg80211_bitrate_mask { - struct { - u32 legacy; - u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN]; - u16 vht_mcs[NL80211_VHT_NSS_MAX]; - enum nl80211_txrate_gi gi; - } control[NUM_NL80211_BANDS]; -}; - /** * enum cfg80211_ap_settings_flags - AP settings flags * @@ -1052,6 +1089,7 @@ enum cfg80211_ap_settings_flags { * @ht_required: stations must support HT * @vht_required: stations must support VHT * @twt_responder: Enable Target Wait Time + * @he_required: stations must support HE * @flags: flags, as defined in enum cfg80211_ap_settings_flags * @he_obss_pd: OBSS Packet Detection settings * @he_bss_color: BSS Color settings @@ -1081,7 +1119,7 @@ struct cfg80211_ap_settings { const struct ieee80211_vht_cap *vht_cap; const struct ieee80211_he_cap_elem *he_cap; const struct ieee80211_he_operation *he_oper; - bool ht_required, vht_required; + bool ht_required, vht_required, he_required; bool twt_responder; u32 flags; struct ieee80211_he_obss_pd he_obss_pd; @@ -1222,6 +1260,8 @@ struct sta_txpwr { * @he_capa: HE capabilities of station * @he_capa_len: the length of the HE capabilities * @airtime_weight: airtime scheduler weight for this station + * @txpwr: transmit power for an associated station + * @he_6ghz_capa: HE 6 GHz Band capabilities of station */ struct station_parameters { const u8 *supported_rates; @@ -1254,6 +1294,7 @@ struct station_parameters { u8 he_capa_len; u16 airtime_weight; struct sta_txpwr txpwr; + const struct ieee80211_he_6ghz_capa *he_6ghz_capa; }; /** @@ -2026,7 +2067,7 @@ struct cfg80211_scan_request { bool no_cck; /* keep last */ - struct ieee80211_channel *channels[0]; + struct ieee80211_channel *channels[]; }; static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask) @@ -2172,7 +2213,7 @@ struct cfg80211_sched_scan_request { struct list_head list; /* keep last */ - struct ieee80211_channel *channels[0]; + struct ieee80211_channel *channels[]; }; /** @@ -2294,7 +2335,7 @@ struct cfg80211_bss { u8 bssid_index; u8 max_bssid_indicator; - u8 priv[0] __aligned(sizeof(void *)); + u8 priv[] __aligned(sizeof(void *)); }; /** @@ -2895,12 +2936,17 @@ struct cfg80211_wowlan_wakeup { /** * struct cfg80211_gtk_rekey_data - rekey data - * @kek: key encryption key (NL80211_KEK_LEN bytes) - * @kck: key confirmation key (NL80211_KCK_LEN bytes) + * @kek: key encryption key (@kek_len bytes) + * @kck: key confirmation key (@kck_len bytes) * @replay_ctr: replay counter (NL80211_REPLAY_CTR_LEN bytes) + * @kek_len: length of kek + * @kck_len: length of kck + * @akm: akm (oui, id) */ struct cfg80211_gtk_rekey_data { const u8 *kek, *kck, *replay_ctr; + u32 akm; + u8 kek_len, kck_len; }; /** @@ -3382,6 +3428,21 @@ struct cfg80211_update_owe_info { }; /** + * struct mgmt_frame_regs - management frame registrations data + * @global_stypes: bitmap of management frame subtypes registered + * for the entire device + * @interface_stypes: bitmap of management frame subtypes registered + * for the given interface + * @global_mcast_stypes: mcast RX is needed globally for these subtypes + * @interface_mcast_stypes: mcast RX is needed on this interface + * for these subtypes + */ +struct
mgmt_frame_regs { + u32 global_stypes, interface_stypes; + u32 global_mcast_stypes, interface_mcast_stypes; +}; + +/** * struct cfg80211_ops - backend description for wireless configuration * * This struct is registered by fullmac card drivers and/or wireless stacks @@ -3605,8 +3666,8 @@ struct cfg80211_update_owe_info { * The driver should not call cfg80211_sched_scan_stopped() for a requested * stop (when this method returns 0). * - * @mgmt_frame_register: Notify driver that a management frame type was - * registered. The callback is allowed to sleep. + * @update_mgmt_frame_registrations: Notify the driver that management frame + * registrations were updated. The callback is allowed to sleep. * * @set_antenna: Set antenna configuration (tx_ant, rx_ant) on the device. * Parameters are bitmaps of allowed antennas to use for TX/RX. Drivers may @@ -3929,9 +3990,9 @@ struct cfg80211_ops { struct net_device *dev, u32 rate, u32 pkts, u32 intvl); - void (*mgmt_frame_register)(struct wiphy *wiphy, - struct wireless_dev *wdev, - u16 frame_type, bool reg); + void (*update_mgmt_frame_registrations)(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct mgmt_frame_regs *upd); int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant); int (*get_antenna)(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant); @@ -4043,7 +4104,8 @@ struct cfg80211_ops { struct net_device *dev, const u8 *buf, size_t len, const u8 *dest, const __be16 proto, - const bool noencrypt); + const bool noencrypt, + u64 *cookie); int (*get_ftm_responder_stats)(struct wiphy *wiphy, struct net_device *dev, @@ -4109,9 +4171,10 @@ struct cfg80211_ops { * beaconing mode (AP, IBSS, Mesh, ...). * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation * before connection. + * @WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK: The device supports bigger kek and kck keys */ enum wiphy_flags { - /* use hole at 0 */ + WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0), /* use hole at 1 */ /* use hole at 2 */ WIPHY_FLAG_NETNS_OK = BIT(3), @@ -4666,6 +4729,9 @@ struct wiphy_iftype_akm_suites { * @txq_memory_limit: configuration internal TX queue memory limit * @txq_quantum: configuration of internal TX queue scheduler quantum * + * @tx_queue_len: allow setting transmit queue len for drivers not using + * wake_tx_queue + * * @support_mbssid: can HW support association with nontransmitted AP * @support_only_he_mbssid: don't parse MBSSID elements if it is not * HE AP, in order to avoid compatibility issues. 
@@ -4681,6 +4747,10 @@ struct wiphy_iftype_akm_suites { * supported by the driver for each peer * @tid_config_support.max_retry: maximum supported retry count for * long/short retry configuration + * + * @max_data_retry_count: maximum supported per TID retry count for + * configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and + * %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes */ struct wiphy { /* assign these fields before you register the wiphy */ @@ -4819,7 +4889,7 @@ struct wiphy { u8 max_data_retry_count; - char priv[0] __aligned(NETDEV_ALIGN); + char priv[] __aligned(NETDEV_ALIGN); }; static inline struct net *wiphy_net(struct wiphy *wiphy) @@ -5005,6 +5075,8 @@ struct cfg80211_cqm_config; * by cfg80211 on change_interface * @mgmt_registrations: list of registrations for management frames * @mgmt_registrations_lock: lock for the list + * @mgmt_registrations_need_update: mgmt registrations were updated, + * need to propagate the update to the driver * @mtx: mutex used to lock data in this struct, may be used by drivers * and some API functions require it held * @beacon_interval: beacon interval used on this device for transmitting @@ -5035,6 +5107,8 @@ struct cfg80211_cqm_config; * @pmsr_list: (private) peer measurement requests * @pmsr_lock: (private) peer measurements requests/results lock * @pmsr_free_wk: (private) peer measurements cleanup work + * @unprot_beacon_reported: (private) timestamp of last + * unprotected beacon report */ struct wireless_dev { struct wiphy *wiphy; @@ -5048,6 +5122,7 @@ struct wireless_dev { struct list_head mgmt_registrations; spinlock_t mgmt_registrations_lock; + u8 mgmt_registrations_need_update:1; struct mutex mtx; @@ -5111,6 +5186,8 @@ struct wireless_dev { struct list_head pmsr_list; spinlock_t pmsr_lock; struct work_struct pmsr_free_wk; + + unsigned long unprot_beacon_reported; }; static inline u8 *wdev_address(struct wireless_dev *wdev) @@ -5146,29 +5223,106 @@ static inline void *wdev_priv(struct wireless_dev *wdev) */ /** + * ieee80211_channel_equal - compare two struct ieee80211_channel + * + * @a: 1st struct ieee80211_channel + * @b: 2nd struct ieee80211_channel + * Return: true if center frequency of @a == @b + */ +static inline bool +ieee80211_channel_equal(struct ieee80211_channel *a, + struct ieee80211_channel *b) +{ + return (a->center_freq == b->center_freq && + a->freq_offset == b->freq_offset); +} + +/** + * ieee80211_channel_to_khz - convert ieee80211_channel to frequency in KHz + * @chan: struct ieee80211_channel to convert + * Return: The corresponding frequency (in KHz) + */ +static inline u32 +ieee80211_channel_to_khz(const struct ieee80211_channel *chan) +{ + return MHZ_TO_KHZ(chan->center_freq) + chan->freq_offset; +} + +/** + * ieee80211_channel_to_freq_khz - convert channel number to frequency + * @chan: channel number + * @band: band, necessary due to channel number overlap + * Return: The corresponding frequency (in KHz), or 0 if the conversion failed. + */ +u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band); + +/** * ieee80211_channel_to_frequency - convert channel number to frequency * @chan: channel number * @band: band, necessary due to channel number overlap * Return: The corresponding frequency (in MHz), or 0 if the conversion failed. 
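 *
 * A brief composition sketch for the KHz helpers introduced above (the
 * values are illustrative, not part of this patch): for 2.4 GHz channel 6,
 *
 *	u32 khz = ieee80211_channel_to_freq_khz(6, NL80211_BAND_2GHZ);
 *	int chan = ieee80211_freq_khz_to_channel(khz);
 *
 * yields khz == 2437000 and chan == 6; the MHz variants below simply
 * wrap these calls in MHZ_TO_KHZ()/KHZ_TO_MHZ().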
*/ -int ieee80211_channel_to_frequency(int chan, enum nl80211_band band); +static inline int +ieee80211_channel_to_frequency(int chan, enum nl80211_band band) +{ + return KHZ_TO_MHZ(ieee80211_channel_to_freq_khz(chan, band)); +} + +/** + * ieee80211_freq_khz_to_channel - convert frequency to channel number + * @freq: center frequency in KHz + * Return: The corresponding channel, or 0 if the conversion failed. + */ +int ieee80211_freq_khz_to_channel(u32 freq); /** * ieee80211_frequency_to_channel - convert frequency to channel number - * @freq: center frequency + * @freq: center frequency in MHz * Return: The corresponding channel, or 0 if the conversion failed. */ -int ieee80211_frequency_to_channel(int freq); +static inline int +ieee80211_frequency_to_channel(int freq) +{ + return ieee80211_freq_khz_to_channel(MHZ_TO_KHZ(freq)); +} + +/** + * ieee80211_get_channel_khz - get channel struct from wiphy for specified + * frequency + * @wiphy: the struct wiphy to get the channel for + * @freq: the center frequency (in KHz) of the channel + * Return: The channel struct from @wiphy at @freq. + */ +struct ieee80211_channel * +ieee80211_get_channel_khz(struct wiphy *wiphy, u32 freq); /** * ieee80211_get_channel - get channel struct from wiphy for specified frequency * * @wiphy: the struct wiphy to get the channel for - * @freq: the center frequency of the channel - * + * @freq: the center frequency (in MHz) of the channel * Return: The channel struct from @wiphy at @freq. */ -struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq); +static inline struct ieee80211_channel * +ieee80211_get_channel(struct wiphy *wiphy, int freq) +{ + return ieee80211_get_channel_khz(wiphy, MHZ_TO_KHZ(freq)); +} + +/** + * cfg80211_channel_is_psc - Check if the channel is a 6 GHz PSC + * @chan: control channel to check + * + * The Preferred Scanning Channels (PSC) are defined in + * Draft IEEE P802.11ax/D5.0, 26.17.2.3.3 + */ +static inline bool cfg80211_channel_is_psc(struct ieee80211_channel *chan) +{ + if (chan->band != NL80211_BAND_6GHZ) + return false; + + return ieee80211_frequency_to_channel(chan->center_freq) % 16 == 5; +} /** * ieee80211_get_response_rate - get basic rate for a given rate @@ -5201,7 +5355,7 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband, * Radiotap parsing functions -- for controlled injection support * * Implemented in net/wireless/radiotap.c - * Documentation in Documentation/networking/radiotap-headers.txt + * Documentation in Documentation/networking/radiotap-headers.rst */ struct radiotap_align_size { @@ -6125,12 +6279,16 @@ void cfg80211_tx_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); /** * cfg80211_rx_unprot_mlme_mgmt - notification of unprotected mlme mgmt frame * @dev: network device - * @buf: deauthentication frame (header + body) + * @buf: received management frame (header + body) * @len: length of the frame data * * This function is called whenever a received deauthentication or disassociation * frame has been dropped in station mode because of MFP being used but the - * frame was not protected. This function may sleep. + * frame was not protected. This is also used to notify reception of a Beacon + * frame that was dropped because it did not include a valid MME MIC while + * beacon protection was enabled (BIGTK configured in station mode). + * + * This function may sleep.
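+ *
+ * A minimal driver-side sketch (the buffer variables are illustrative,
+ * not from this patch): a driver that drops an unprotected Beacon while
+ * beacon protection is enabled could report it as
+ *
+ *	cfg80211_rx_unprot_mlme_mgmt(dev, beacon_buf, beacon_len);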
*/ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf, size_t len); @@ -6884,6 +7042,26 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, gfp_t gfp); /** + * cfg80211_rx_mgmt_khz - notification of received, unprocessed management frame + * @wdev: wireless device receiving the frame + * @freq: Frequency on which the frame was received in KHz + * @sig_dbm: signal strength in dBm, or 0 if unknown + * @buf: Management frame (header + body) + * @len: length of the frame data + * @flags: flags, as defined in enum nl80211_rxmgmt_flags + * + * This function is called whenever an Action frame is received for a station + * mode interface, but is not processed in kernel. + * + * Return: %true if a user space application has registered for this frame. + * For action frames, that makes it responsible for rejecting unrecognized + * action frames; %false otherwise, in which case for action frames the + * driver is responsible for rejecting the frame. + */ +bool cfg80211_rx_mgmt_khz(struct wireless_dev *wdev, int freq, int sig_dbm, + const u8 *buf, size_t len, u32 flags); + +/** * cfg80211_rx_mgmt - notification of received, unprocessed management frame * @wdev: wireless device receiving the frame * @freq: Frequency on which the frame was received in MHz @@ -6900,8 +7078,13 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr, * action frames; %false otherwise, in which case for action frames the * driver is responsible for rejecting the frame. */ -bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, - const u8 *buf, size_t len, u32 flags); +static inline bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, + int sig_dbm, const u8 *buf, size_t len, + u32 flags) +{ + return cfg80211_rx_mgmt_khz(wdev, MHZ_TO_KHZ(freq), sig_dbm, buf, len, + flags); +} /** * cfg80211_mgmt_tx_status - notification of TX status for management frame @@ -6919,6 +7102,23 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm, void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie, const u8 *buf, size_t len, bool ack, gfp_t gfp); +/** + * cfg80211_control_port_tx_status - notification of TX status for control + * port frames + * @wdev: wireless device receiving the frame + * @cookie: Cookie returned by cfg80211_ops::tx_control_port() + * @buf: Data frame (header + body) + * @len: length of the frame data + * @ack: Whether frame was acknowledged + * @gfp: context flags + * + * This function is called whenever a control port frame was requested to be + * transmitted with cfg80211_ops::tx_control_port() to report the TX status of + * the transmission attempt. + */ +void cfg80211_control_port_tx_status(struct wireless_dev *wdev, u64 cookie, + const u8 *buf, size_t len, bool ack, + gfp_t gfp); /** * cfg80211_rx_control_port - notification about a received control port frame @@ -7100,6 +7300,21 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, bool is_valid_ack_signal, gfp_t gfp); /** + * cfg80211_report_obss_beacon_khz - report beacon from other APs + * @wiphy: The wiphy that received the beacon + * @frame: the frame + * @len: length of the frame + * @freq: frequency the frame was received on in KHz + * @sig_dbm: signal strength in dBm, or 0 if unknown + * + * Use this function to report to userspace when a beacon was + * received. It is not useful to call this when there is no + * netdev that is in AP/GO mode. 
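+ *
+ * A rough usage sketch (arguments illustrative): a beacon heard on
+ * 2.4 GHz channel 1 could be reported either directly in KHz or via
+ * the MHz wrapper below, e.g.
+ *
+ *	cfg80211_report_obss_beacon_khz(wiphy, frame, len,
+ *					MHZ_TO_KHZ(2412), sig_dbm);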
+ */ +void cfg80211_report_obss_beacon_khz(struct wiphy *wiphy, const u8 *frame, + size_t len, int freq, int sig_dbm); + +/** * cfg80211_report_obss_beacon - report beacon from other APs * @wiphy: The wiphy that received the beacon * @frame: the frame @@ -7111,9 +7326,13 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr, * received. It is not useful to call this when there is no * netdev that is in AP/GO mode. */ -void cfg80211_report_obss_beacon(struct wiphy *wiphy, - const u8 *frame, size_t len, - int freq, int sig_dbm); +static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy, + const u8 *frame, size_t len, + int freq, int sig_dbm) +{ + cfg80211_report_obss_beacon_khz(wiphy, frame, len, MHZ_TO_KHZ(freq), + sig_dbm); +} /** * cfg80211_reg_can_beacon - check if beaconing is allowed @@ -7192,6 +7411,19 @@ bool ieee80211_operating_class_to_band(u8 operating_class, bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef, u8 *op_class); +/** + * ieee80211_chandef_to_khz - convert chandef to frequency in KHz + * + * @chandef: the chandef to convert + * + * Returns the center frequency of chandef (1st segment) in KHz. + */ +static inline u32 +ieee80211_chandef_to_khz(const struct cfg80211_chan_def *chandef) +{ + return MHZ_TO_KHZ(chandef->center_freq1) + chandef->freq1_offset; +} + /* * cfg80211_tdls_oper_request - request userspace to perform TDLS operation * @dev: the device on which the operation is requested diff --git a/include/net/checksum.h b/include/net/checksum.h index 97bf4885a962..46754ba9d7b7 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -26,13 +26,9 @@ static inline __wsum csum_and_copy_from_user (const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { - if (access_ok(src, len)) - return csum_partial_copy_from_user(src, dst, len, sum, err_ptr); - - if (len) + if (copy_from_user(dst, src, len)) *err_ptr = -EFAULT; - - return sum; + return csum_partial(dst, len, sum); } #endif @@ -42,10 +38,8 @@ static __inline__ __wsum csum_and_copy_to_user { sum = csum_partial(src, len, sum); - if (access_ok(dst, len)) { - if (copy_to_user(dst, src, len) == 0) - return sum; - } + if (copy_to_user(dst, src, len) == 0) + return sum; if (len) *err_ptr = -EFAULT; diff --git a/include/net/compat.h b/include/net/compat.h index e341260642fe..f241666117d8 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -30,6 +30,24 @@ struct compat_cmsghdr { compat_int_t cmsg_type; }; +struct compat_rtentry { + u32 rt_pad1; + struct sockaddr rt_dst; /* target address */ + struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ + struct sockaddr rt_genmask; /* target network mask (IP) */ + unsigned short rt_flags; + short rt_pad2; + u32 rt_pad3; + unsigned char rt_tos; + unsigned char rt_class; + short rt_pad4; + short rt_metric; /* +1 for binary compatibility! 
*/ + compat_uptr_t rt_dev; /* forcing the device at add */ + u32 rt_mtu; /* per route MTU/Window */ + u32 rt_window; /* Window clamping */ + unsigned short rt_irtt; /* Initial RTT */ +}; + #else /* defined(CONFIG_COMPAT) */ /* * To avoid compiler warnings: @@ -49,11 +67,28 @@ int put_cmsg_compat(struct msghdr*, int, int, int, void *); int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int); -int compat_mc_setsockopt(struct sock *, int, int, char __user *, unsigned int, - int (*)(struct sock *, int, int, char __user *, - unsigned int)); -int compat_mc_getsockopt(struct sock *, int, int, char __user *, int __user *, - int (*)(struct sock *, int, int, char __user *, - int __user *)); +struct compat_group_req { + __u32 gr_interface; + struct __kernel_sockaddr_storage gr_group + __aligned(4); +} __packed; + +struct compat_group_source_req { + __u32 gsr_interface; + struct __kernel_sockaddr_storage gsr_group + __aligned(4); + struct __kernel_sockaddr_storage gsr_source + __aligned(4); +} __packed; + +struct compat_group_filter { + __u32 gf_interface; + struct __kernel_sockaddr_storage gf_group + __aligned(4); + __u32 gf_fmode; + __u32 gf_numsrc; + struct __kernel_sockaddr_storage gf_slist[1] + __aligned(4); +} __packed; #endif /* NET_COMPAT_H */ diff --git a/include/net/devlink.h b/include/net/devlink.h index 8ffc1b5cd89b..1df6dfec26c2 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -645,6 +645,50 @@ enum devlink_trap_generic_id { DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC, DEVLINK_TRAP_GENERIC_ID_INGRESS_FLOW_ACTION_DROP, DEVLINK_TRAP_GENERIC_ID_EGRESS_FLOW_ACTION_DROP, + DEVLINK_TRAP_GENERIC_ID_STP, + DEVLINK_TRAP_GENERIC_ID_LACP, + DEVLINK_TRAP_GENERIC_ID_LLDP, + DEVLINK_TRAP_GENERIC_ID_IGMP_QUERY, + DEVLINK_TRAP_GENERIC_ID_IGMP_V1_REPORT, + DEVLINK_TRAP_GENERIC_ID_IGMP_V2_REPORT, + DEVLINK_TRAP_GENERIC_ID_IGMP_V3_REPORT, + DEVLINK_TRAP_GENERIC_ID_IGMP_V2_LEAVE, + DEVLINK_TRAP_GENERIC_ID_MLD_QUERY, + DEVLINK_TRAP_GENERIC_ID_MLD_V1_REPORT, + DEVLINK_TRAP_GENERIC_ID_MLD_V2_REPORT, + DEVLINK_TRAP_GENERIC_ID_MLD_V1_DONE, + DEVLINK_TRAP_GENERIC_ID_IPV4_DHCP, + DEVLINK_TRAP_GENERIC_ID_IPV6_DHCP, + DEVLINK_TRAP_GENERIC_ID_ARP_REQUEST, + DEVLINK_TRAP_GENERIC_ID_ARP_RESPONSE, + DEVLINK_TRAP_GENERIC_ID_ARP_OVERLAY, + DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_SOLICIT, + DEVLINK_TRAP_GENERIC_ID_IPV6_NEIGH_ADVERT, + DEVLINK_TRAP_GENERIC_ID_IPV4_BFD, + DEVLINK_TRAP_GENERIC_ID_IPV6_BFD, + DEVLINK_TRAP_GENERIC_ID_IPV4_OSPF, + DEVLINK_TRAP_GENERIC_ID_IPV6_OSPF, + DEVLINK_TRAP_GENERIC_ID_IPV4_BGP, + DEVLINK_TRAP_GENERIC_ID_IPV6_BGP, + DEVLINK_TRAP_GENERIC_ID_IPV4_VRRP, + DEVLINK_TRAP_GENERIC_ID_IPV6_VRRP, + DEVLINK_TRAP_GENERIC_ID_IPV4_PIM, + DEVLINK_TRAP_GENERIC_ID_IPV6_PIM, + DEVLINK_TRAP_GENERIC_ID_UC_LB, + DEVLINK_TRAP_GENERIC_ID_LOCAL_ROUTE, + DEVLINK_TRAP_GENERIC_ID_EXTERNAL_ROUTE, + DEVLINK_TRAP_GENERIC_ID_IPV6_UC_DIP_LINK_LOCAL_SCOPE, + DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_NODES, + DEVLINK_TRAP_GENERIC_ID_IPV6_DIP_ALL_ROUTERS, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_SOLICIT, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ADVERT, + DEVLINK_TRAP_GENERIC_ID_IPV6_REDIRECT, + DEVLINK_TRAP_GENERIC_ID_IPV4_ROUTER_ALERT, + DEVLINK_TRAP_GENERIC_ID_IPV6_ROUTER_ALERT, + DEVLINK_TRAP_GENERIC_ID_PTP_EVENT, + DEVLINK_TRAP_GENERIC_ID_PTP_GENERAL, + DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE, + DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP, /* Add new generic trap IDs above */ __DEVLINK_TRAP_GENERIC_ID_MAX, @@ -657,9 +701,28 @@ enum devlink_trap_generic_id { enum 
devlink_trap_group_generic_id { DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS, DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS, + DEVLINK_TRAP_GROUP_GENERIC_ID_L3_EXCEPTIONS, DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS, DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS, DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_DROPS, + DEVLINK_TRAP_GROUP_GENERIC_ID_STP, + DEVLINK_TRAP_GROUP_GENERIC_ID_LACP, + DEVLINK_TRAP_GROUP_GENERIC_ID_LLDP, + DEVLINK_TRAP_GROUP_GENERIC_ID_MC_SNOOPING, + DEVLINK_TRAP_GROUP_GENERIC_ID_DHCP, + DEVLINK_TRAP_GROUP_GENERIC_ID_NEIGH_DISCOVERY, + DEVLINK_TRAP_GROUP_GENERIC_ID_BFD, + DEVLINK_TRAP_GROUP_GENERIC_ID_OSPF, + DEVLINK_TRAP_GROUP_GENERIC_ID_BGP, + DEVLINK_TRAP_GROUP_GENERIC_ID_VRRP, + DEVLINK_TRAP_GROUP_GENERIC_ID_PIM, + DEVLINK_TRAP_GROUP_GENERIC_ID_UC_LB, + DEVLINK_TRAP_GROUP_GENERIC_ID_LOCAL_DELIVERY, + DEVLINK_TRAP_GROUP_GENERIC_ID_IPV6, + DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_EVENT, + DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE, + DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP, /* Add new generic trap group IDs above */ __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX, @@ -725,17 +788,143 @@ enum devlink_trap_group_generic_id { "ingress_flow_action_drop" #define DEVLINK_TRAP_GENERIC_NAME_EGRESS_FLOW_ACTION_DROP \ "egress_flow_action_drop" +#define DEVLINK_TRAP_GENERIC_NAME_STP \ + "stp" +#define DEVLINK_TRAP_GENERIC_NAME_LACP \ + "lacp" +#define DEVLINK_TRAP_GENERIC_NAME_LLDP \ + "lldp" +#define DEVLINK_TRAP_GENERIC_NAME_IGMP_QUERY \ + "igmp_query" +#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V1_REPORT \ + "igmp_v1_report" +#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V2_REPORT \ + "igmp_v2_report" +#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V3_REPORT \ + "igmp_v3_report" +#define DEVLINK_TRAP_GENERIC_NAME_IGMP_V2_LEAVE \ + "igmp_v2_leave" +#define DEVLINK_TRAP_GENERIC_NAME_MLD_QUERY \ + "mld_query" +#define DEVLINK_TRAP_GENERIC_NAME_MLD_V1_REPORT \ + "mld_v1_report" +#define DEVLINK_TRAP_GENERIC_NAME_MLD_V2_REPORT \ + "mld_v2_report" +#define DEVLINK_TRAP_GENERIC_NAME_MLD_V1_DONE \ + "mld_v1_done" +#define DEVLINK_TRAP_GENERIC_NAME_IPV4_DHCP \ + "ipv4_dhcp" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DHCP \ + "ipv6_dhcp" +#define DEVLINK_TRAP_GENERIC_NAME_ARP_REQUEST \ + "arp_request" +#define DEVLINK_TRAP_GENERIC_NAME_ARP_RESPONSE \ + "arp_response" +#define DEVLINK_TRAP_GENERIC_NAME_ARP_OVERLAY \ + "arp_overlay" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_NEIGH_SOLICIT \ + "ipv6_neigh_solicit" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_NEIGH_ADVERT \ + "ipv6_neigh_advert" +#define DEVLINK_TRAP_GENERIC_NAME_IPV4_BFD \ + "ipv4_bfd" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_BFD \ + "ipv6_bfd" +#define DEVLINK_TRAP_GENERIC_NAME_IPV4_OSPF \ + "ipv4_ospf" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_OSPF \ + "ipv6_ospf" +#define DEVLINK_TRAP_GENERIC_NAME_IPV4_BGP \ + "ipv4_bgp" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_BGP \ + "ipv6_bgp" +#define DEVLINK_TRAP_GENERIC_NAME_IPV4_VRRP \ + "ipv4_vrrp" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_VRRP \ + "ipv6_vrrp" +#define DEVLINK_TRAP_GENERIC_NAME_IPV4_PIM \ + "ipv4_pim" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_PIM \ + "ipv6_pim" +#define DEVLINK_TRAP_GENERIC_NAME_UC_LB \ + "uc_loopback" +#define DEVLINK_TRAP_GENERIC_NAME_LOCAL_ROUTE \ + "local_route" +#define DEVLINK_TRAP_GENERIC_NAME_EXTERNAL_ROUTE \ + "external_route" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_UC_DIP_LINK_LOCAL_SCOPE \ + "ipv6_uc_dip_link_local_scope" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DIP_ALL_NODES \ + "ipv6_dip_all_nodes" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_DIP_ALL_ROUTERS \ + 
"ipv6_dip_all_routers" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_SOLICIT \ + "ipv6_router_solicit" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_ADVERT \ + "ipv6_router_advert" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_REDIRECT \ + "ipv6_redirect" +#define DEVLINK_TRAP_GENERIC_NAME_IPV4_ROUTER_ALERT \ + "ipv4_router_alert" +#define DEVLINK_TRAP_GENERIC_NAME_IPV6_ROUTER_ALERT \ + "ipv6_router_alert" +#define DEVLINK_TRAP_GENERIC_NAME_PTP_EVENT \ + "ptp_event" +#define DEVLINK_TRAP_GENERIC_NAME_PTP_GENERAL \ + "ptp_general" +#define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_SAMPLE \ + "flow_action_sample" +#define DEVLINK_TRAP_GENERIC_NAME_FLOW_ACTION_TRAP \ + "flow_action_trap" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \ "l2_drops" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_DROPS \ "l3_drops" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L3_EXCEPTIONS \ + "l3_exceptions" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_BUFFER_DROPS \ "buffer_drops" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \ "tunnel_drops" #define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_DROPS \ "acl_drops" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_STP \ + "stp" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LACP \ + "lacp" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LLDP \ + "lldp" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_MC_SNOOPING \ + "mc_snooping" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_DHCP \ + "dhcp" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_NEIGH_DISCOVERY \ + "neigh_discovery" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BFD \ + "bfd" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_OSPF \ + "ospf" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BGP \ + "bgp" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_VRRP \ + "vrrp" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PIM \ + "pim" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_UC_LB \ + "uc_loopback" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_LOCAL_DELIVERY \ + "local_delivery" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_IPV6 \ + "ipv6" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_EVENT \ + "ptp_event" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PTP_GENERAL \ + "ptp_general" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_SAMPLE \ + "acl_sample" +#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_TRAP \ + "acl_trap" #define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \ _metadata_cap) \ diff --git a/include/net/dsa.h b/include/net/dsa.h index fb3f9222f2a1..50389772c597 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -282,6 +282,13 @@ struct dsa_switch { */ bool vlan_filtering_is_global; + /* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges + * that have vlan_filtering=0. All drivers should ideally set this (and + * then the option would get removed), but it is unknown whether this + * would break things or not. + */ + bool configure_vlan_while_not_filtering; + /* In case vlan_filtering_is_global is set, the VLAN awareness state * should be retrieved from here and not from the per-port settings. 
*/ @@ -574,10 +581,12 @@ struct dsa_switch_ops { /* * Cross-chip operations */ - int (*crosschip_bridge_join)(struct dsa_switch *ds, int sw_index, - int port, struct net_device *br); - void (*crosschip_bridge_leave)(struct dsa_switch *ds, int sw_index, - int port, struct net_device *br); + int (*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index, + int sw_index, int port, + struct net_device *br); + void (*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index, + int sw_index, int port, + struct net_device *br); /* * PTP functionality @@ -637,6 +646,7 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds, void *occ_get_priv); void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds, u64 resource_id); +struct dsa_port *dsa_port_from_netdev(struct net_device *netdev); struct dsa_devlink_priv { struct dsa_switch *ds; @@ -650,7 +660,7 @@ struct dsa_switch_driver { struct net_device *dsa_dev_to_net_device(struct device *dev); /* Keep inline for faster access in hot path */ -static inline bool netdev_uses_dsa(struct net_device *dev) +static inline bool netdev_uses_dsa(const struct net_device *dev) { #if IS_ENABLED(CONFIG_NET_DSA) return dev->dsa_ptr && dev->dsa_ptr->rcv; @@ -669,6 +679,7 @@ static inline bool dsa_can_decode(const struct sk_buff *skb, void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds); +struct dsa_switch *dsa_switch_find(int tree_index, int sw_index); #ifdef CONFIG_PM_SLEEP int dsa_switch_suspend(struct dsa_switch *ds); int dsa_switch_resume(struct dsa_switch *ds); diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index 443863c7b8da..88ff7bb2bb9b 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -53,9 +53,11 @@ static inline int dst_entries_get_slow(struct dst_ops *dst) return percpu_counter_sum_positive(&dst->pcpuc_entries); } +#define DST_PERCPU_COUNTER_BATCH 32 static inline void dst_entries_add(struct dst_ops *dst, int val) { - percpu_counter_add(&dst->pcpuc_entries, val); + percpu_counter_add_batch(&dst->pcpuc_entries, val, + DST_PERCPU_COUNTER_BATCH); } static inline int dst_entries_init(struct dst_ops *dst) diff --git a/include/net/erspan.h b/include/net/erspan.h index b39643ef4c95..0d9e86bd9893 100644 --- a/include/net/erspan.h +++ b/include/net/erspan.h @@ -2,7 +2,19 @@ #define __LINUX_ERSPAN_H /* - * GRE header for ERSPAN encapsulation (8 octets [34:41]) -- 8 bytes + * GRE header for ERSPAN type I encapsulation (4 octets [34:37]) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |0|0|0|0|0|00000|000000000|00000| Protocol Type for ERSPAN | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The Type I ERSPAN frame format is based on the barebones IP + GRE + * encapsulation (as described above) on top of the raw mirrored frame. + * There is no extra ERSPAN header. 
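+ *
+ * A sketch of the effect on erspan_hdr_len(), updated further below
+ * (the version variable is illustrative):
+ *
+ *	int md_len = erspan_hdr_len(version);
+ *
+ * now yields 0 for version 0 (type I), versus the 8- and 12-byte
+ * ERSPAN headers of types II and III.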
+ * + * + * GRE header for ERSPAN type II and III encapsulation (8 octets [34:41]) * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -43,7 +55,7 @@ * | Platform Specific Info | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * - * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB + * GRE proto ERSPAN type I/II = 0x88BE, type III = 0x22EB */ #include <uapi/linux/erspan.h> @@ -139,6 +151,9 @@ static inline u8 get_hwid(const struct erspan_md2 *md2) static inline int erspan_hdr_len(int version) { + if (version == 0) + return 0; + return sizeof(struct erspan_base_hdr) + (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE); } diff --git a/include/net/espintcp.h b/include/net/espintcp.h index dd7026a00066..0335bbd76552 100644 --- a/include/net/espintcp.h +++ b/include/net/espintcp.h @@ -25,6 +25,7 @@ struct espintcp_ctx { struct espintcp_msg partial; void (*saved_data_ready)(struct sock *sk); void (*saved_write_space)(struct sock *sk); + void (*saved_destruct)(struct sock *sk); struct work_struct work; bool tx_running; }; diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 628383915827..a7eba43fe4e4 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -8,6 +8,8 @@ #include <linux/string.h> #include <uapi/linux/if_ether.h> +struct bpf_prog; +struct net; struct sk_buff; /** @@ -59,13 +61,25 @@ struct flow_dissector_key_vlan { __be16 vlan_tpid; }; -struct flow_dissector_key_mpls { +struct flow_dissector_mpls_lse { u32 mpls_ttl:8, mpls_bos:1, mpls_tc:3, mpls_label:20; }; +#define FLOW_DIS_MPLS_MAX 7 +struct flow_dissector_key_mpls { + struct flow_dissector_mpls_lse ls[FLOW_DIS_MPLS_MAX]; /* Label Stack */ + u8 used_lses; /* One bit set for each Label Stack Entry in use */ +}; + +static inline void dissector_set_mpls_lse(struct flow_dissector_key_mpls *mpls, + int lse_index) +{ + mpls->used_lses |= 1 << lse_index; +} + #define FLOW_DIS_TUN_OPTS_MAX 255 /** * struct flow_dissector_key_enc_opts: @@ -357,4 +371,8 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control, memset(key_basic, 0, sizeof(*key_basic)); } +#ifdef CONFIG_BPF_SYSCALL +int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog); +#endif /* CONFIG_BPF_SYSCALL */ + #endif diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 3619c6acf60f..f2c8311a0433 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -147,6 +147,7 @@ enum flow_action_id { FLOW_ACTION_MPLS_PUSH, FLOW_ACTION_MPLS_POP, FLOW_ACTION_MPLS_MANGLE, + FLOW_ACTION_GATE, NUM_FLOW_ACTIONS, }; @@ -166,15 +167,20 @@ enum flow_action_mangle_base { enum flow_action_hw_stats_bit { FLOW_ACTION_HW_STATS_IMMEDIATE_BIT, FLOW_ACTION_HW_STATS_DELAYED_BIT, + FLOW_ACTION_HW_STATS_DISABLED_BIT, + + FLOW_ACTION_HW_STATS_NUM_BITS }; enum flow_action_hw_stats { - FLOW_ACTION_HW_STATS_DISABLED = 0, FLOW_ACTION_HW_STATS_IMMEDIATE = BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT), FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT), FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE | FLOW_ACTION_HW_STATS_DELAYED, + FLOW_ACTION_HW_STATS_DISABLED = + BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT), + FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1, }; typedef void (*action_destr)(void *priv); @@ -255,6 +261,15 @@ struct flow_action_entry { u8 bos; u8 ttl; } mpls_mangle; + struct { + u32 index; + s32 prio; + u64 basetime;
+ u64 cycletime; + u64 cycletimeext; + u32 num_entries; + struct action_gate_entry *entries; + } gate; }; struct flow_action_cookie *cookie; /* user defined action cookie */ }; @@ -325,9 +340,14 @@ __flow_action_hw_stats_check(const struct flow_action *action, return true; if (!flow_action_mixed_hw_stats_check(action, extack)) return false; + action_entry = flow_action_first_entry_get(action); + + /* Zero is not a legal value for hw_stats, catch anyone passing it */ + WARN_ON_ONCE(!action_entry->hw_stats); + if (!check_allow_bit && - action_entry->hw_stats != FLOW_ACTION_HW_STATS_ANY) { + ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) { NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\""); return false; } else if (check_allow_bit && @@ -423,6 +443,16 @@ enum tc_setup_type; typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data, void *cb_priv); +struct flow_block_cb; + +struct flow_block_indr { + struct list_head list; + struct net_device *dev; + enum flow_block_binder_type binder_type; + void *data; + void (*cleanup)(struct flow_block_cb *block_cb); +}; + struct flow_block_cb { struct list_head driver_list; struct list_head list; @@ -430,6 +460,7 @@ struct flow_block_cb { void *cb_ident; void *cb_priv; void (*release)(void *cb_priv); + struct flow_block_indr indr; unsigned int refcnt; }; @@ -503,37 +534,12 @@ static inline void flow_block_init(struct flow_block *flow_block) typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv, enum tc_setup_type type, void *type_data); -typedef void flow_indr_block_cmd_t(struct net_device *dev, - flow_indr_block_bind_cb_t *cb, void *cb_priv, - enum flow_block_command command); - -struct flow_indr_block_entry { - flow_indr_block_cmd_t *cb; - struct list_head list; -}; - -void flow_indr_add_block_cb(struct flow_indr_block_entry *entry); - -void flow_indr_del_block_cb(struct flow_indr_block_entry *entry); - -int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, - flow_indr_block_bind_cb_t *cb, - void *cb_ident); - -void __flow_indr_block_cb_unregister(struct net_device *dev, - flow_indr_block_bind_cb_t *cb, - void *cb_ident); - -int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv, - flow_indr_block_bind_cb_t *cb, void *cb_ident); - -void flow_indr_block_cb_unregister(struct net_device *dev, - flow_indr_block_bind_cb_t *cb, - void *cb_ident); - -void flow_indr_block_call(struct net_device *dev, - struct flow_block_offload *bo, - enum flow_block_command command, - enum tc_setup_type type); +int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv); +void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv, + flow_setup_cb_t *setup_cb); +int flow_indr_dev_setup_offload(struct net_device *dev, + enum tc_setup_type type, void *data, + struct flow_block_offload *bo, + void (*cleanup)(struct flow_block_cb *block_cb)); #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index a01981d7108f..8bf5906073bc 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -78,7 +78,7 @@ struct inet6_ifaddr { struct ip6_sf_socklist { unsigned int sl_max; unsigned int sl_count; - struct in6_addr sl_addr[0]; + struct in6_addr sl_addr[]; }; #define IP6_SFLSIZE(count) (sizeof(struct ip6_sf_socklist) + \ @@ -190,7 +190,6 @@ struct inet6_dev { int dead; u32 desync_factor; - u8 rndid[8]; struct list_head tempaddr_list; struct in6_addr token; diff --git a/include/net/inet_common.h 
b/include/net/inet_common.h index ae2ba897675c..cb2818862919 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -35,8 +35,14 @@ int inet_shutdown(struct socket *sock, int how); int inet_listen(struct socket *sock, int backlog); void inet_sock_destruct(struct sock *sk); int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); +/* Don't allocate port at this moment, defer to connect. */ +#define BIND_FORCE_ADDRESS_NO_PORT (1 << 0) +/* Grab and release socket lock. */ +#define BIND_WITH_LOCK (1 << 1) +/* Called from BPF program. */ +#define BIND_FROM_BPF (1 << 2) int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, - bool force_bind_address_no_port, bool with_lock); + u32 flags); int inet_getname(struct socket *sock, struct sockaddr *uaddr, int peer); int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index a3f076befa4f..e5b388f5fa20 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -287,6 +287,13 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req); +static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk) +{ + /* The below has to be done to allow calling inet_csk_destroy_sock */ + sock_set_flag(sk, SOCK_DEAD); + percpu_counter_inc(sk->sk_prot->orphan_count); +} + void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index c8e2bebd8d93..0f0d1efe06dd 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h @@ -99,6 +99,20 @@ static inline int IP_ECN_set_ce(struct iphdr *iph) return 1; } +static inline int IP_ECN_set_ect1(struct iphdr *iph) +{ + u32 check = (__force u32)iph->check; + + if ((iph->tos & INET_ECN_MASK) != INET_ECN_ECT_0) + return 0; + + check += (__force u16)htons(0x100); + + iph->check = (__force __sum16)(check + (check>=0xFFFF)); + iph->tos ^= INET_ECN_MASK; + return 1; +} + static inline void IP_ECN_clear(struct iphdr *iph) { iph->tos &= ~INET_ECN_MASK; @@ -134,6 +148,22 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) return 1; } +static inline int IP6_ECN_set_ect1(struct sk_buff *skb, struct ipv6hdr *iph) +{ + __be32 from, to; + + if ((ipv6_get_dsfield(iph) & INET_ECN_MASK) != INET_ECN_ECT_0) + return 0; + + from = *(__be32 *)iph; + to = from ^ htonl(INET_ECN_MASK << 20); + *(__be32 *)iph = to; + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), + (__force __wsum)to); + return 1; +} + static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner) { dscp &= ~INET_ECN_MASK; @@ -159,6 +189,25 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb) return 0; } +static inline int INET_ECN_set_ect1(struct sk_buff *skb) +{ + switch (skb->protocol) { + case cpu_to_be16(ETH_P_IP): + if (skb_network_header(skb) + sizeof(struct iphdr) <= + skb_tail_pointer(skb)) + return IP_ECN_set_ect1(ip_hdr(skb)); + break; + + case cpu_to_be16(ETH_P_IPV6): + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= + skb_tail_pointer(skb)) + return IP6_ECN_set_ect1(skb, ipv6_hdr(skb)); + break; + } + + return 0; +} + /* * RFC 6040 4.2 * To decapsulate the inner header at the tunnel egress, a compliant 
@@ -208,8 +257,12 @@ static inline int INET_ECN_decapsulate(struct sk_buff *skb, int rc; rc = __INET_ECN_decapsulate(outer, inner, &set_ce); - if (!rc && set_ce) - INET_ECN_set_ce(skb); + if (!rc) { + if (set_ce) + INET_ECN_set_ce(skb); + else if ((outer & INET_ECN_MASK) == INET_ECN_ECT_1) + INET_ECN_set_ect1(skb); + } return rc; } diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index ad64ba6a057f..92560974ea67 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -185,6 +185,12 @@ static inline spinlock_t *inet_ehash_lockp( int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo); +static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h) +{ + kfree(h->lhash2); + h->lhash2 = NULL; +} + static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) { kvfree(hashinfo->ehash_locks); diff --git a/include/net/ip.h b/include/net/ip.h index 5b317c9f4470..04ebe7bf54c6 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -765,4 +765,10 @@ static inline bool inetdev_valid_mtu(unsigned int mtu) return likely(mtu >= IPV4_MIN_MTU); } +void ip_sock_set_freebind(struct sock *sk); +int ip_sock_set_mtu_discover(struct sock *sk, int val); +void ip_sock_set_pktinfo(struct sock *sk); +void ip_sock_set_recverr(struct sock *sk); +void ip_sock_set_tos(struct sock *sk, int val); + #endif /* _IP_H */ diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 80262d2980f5..3f615a29766e 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -65,6 +65,7 @@ struct fib6_config { struct nl_info fc_nlinfo; struct nlattr *fc_encap; u16 fc_encap_type; + bool fc_is_fdb; }; struct fib6_node { @@ -203,6 +204,7 @@ struct fib6_info { struct rt6_info { struct dst_entry dst; struct fib6_info __rcu *from; + int sernum; struct rt6key rt6i_dst; struct rt6key rt6i_src; @@ -291,6 +293,9 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt) struct fib6_info *from; u32 cookie = 0; + if (rt->sernum) + return rt->sernum; + rcu_read_lock(); from = rcu_dereference(rt->from); @@ -540,6 +545,13 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric) return !!(f6i->fib6_metrics->metrics[RTAX_LOCK - 1] & (1 << metric)); } +#if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL) +struct bpf_iter__ipv6_route { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct fib6_info *, rt); +}; +#endif + #ifdef CONFIG_IPV6_MULTIPLE_TABLES static inline bool fib6_has_custom_rules(const struct net *net) { diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index f7543c095b33..2a5277758379 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -118,12 +118,13 @@ void ip6_route_init_special_entries(void); int ip6_route_init(void); void ip6_route_cleanup(void); -int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg); +int ipv6_route_ioctl(struct net *net, unsigned int cmd, + struct in6_rtmsg *rtmsg); int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, struct netlink_ext_ack *extack); int ip6_ins_rt(struct net *net, struct fib6_info *f6i); -int ip6_del_rt(struct net *net, struct fib6_info *f6i); +int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify); void rt6_flush_exceptions(struct fib6_info *f6i); void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, @@ -254,6 +255,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, return rt->rt6i_flags & RTF_ANYCAST || (rt->rt6i_dst.plen < 127 && + !(rt->rt6i_flags 
& (RTF_GATEWAY | RTF_NONEXTHOP)) && ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 59e0d4e99f94..2ec062aaa978 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -257,7 +257,6 @@ struct fib_dump_filter { u32 table_id; /* filter_set is an optimization that an entry is set */ bool filter_set; - bool dump_all_families; bool dump_routes; bool dump_exceptions; unsigned char protocol; @@ -448,6 +447,16 @@ static inline int fib_num_tclassid_users(struct net *net) #endif int fib_unmerge(struct net *net); +static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc, +const struct net_device *dev) +{ + if (nhc->nhc_dev == dev || + l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) + return true; + + return false; +} + /* Exported by fib_semantics.c */ int ip_fib_check_default(__be32 gw, struct net_device *dev); int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); @@ -480,6 +489,8 @@ void fib_nh_common_release(struct fib_nh_common *nhc); void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri); void fib_trie_init(void); struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); +bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, + const struct flowi4 *flp); static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 236503a50759..076e5d7db7d3 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -269,7 +269,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const u8 proto, int tunnel_hlen); -int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 1bf8065fe871..5e65bf2fd32d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -908,7 +908,6 @@ static inline int ip6_default_np_autolabel(struct net *net) } } #else -static inline void ip6_set_txhash(struct sock *sk) { } static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, __be32 flowlabel, bool autolabel, struct flowi6 *fl6) @@ -1116,6 +1115,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); int inet6_getname(struct socket *sock, struct sockaddr *uaddr, int peer); int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); +int inet6_compat_ioctl(struct socket *sock, unsigned int cmd, + unsigned long arg); int inet6_hash_connect(struct inet_timewait_death_row *death_row, struct sock *sk); @@ -1135,9 +1136,10 @@ struct group_filter; int ip6_mc_source(int add, int omode, struct sock *sk, struct group_source_req *pgsr); -int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf); +int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf, + struct sockaddr_storage *list); int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, - struct group_filter __user *optval, int __user *optlen); + struct sockaddr_storage __user *p); #ifdef CONFIG_PROC_FS int ac6_proc_init(struct net *net); @@ -1175,4 
+1177,96 @@ int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, const struct in6_addr *addr, unsigned int mode); int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); + +static inline int ip6_sock_set_v6only(struct sock *sk) +{ + if (inet_sk(sk)->inet_num) + return -EINVAL; + lock_sock(sk); + sk->sk_ipv6only = true; + release_sock(sk); + return 0; +} + +static inline void ip6_sock_set_recverr(struct sock *sk) +{ + lock_sock(sk); + inet6_sk(sk)->recverr = true; + release_sock(sk); +} + +static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val) +{ + unsigned int pref = 0; + unsigned int prefmask = ~0; + + /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ + switch (val & (IPV6_PREFER_SRC_PUBLIC | + IPV6_PREFER_SRC_TMP | + IPV6_PREFER_SRC_PUBTMP_DEFAULT)) { + case IPV6_PREFER_SRC_PUBLIC: + pref |= IPV6_PREFER_SRC_PUBLIC; + prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | + IPV6_PREFER_SRC_TMP); + break; + case IPV6_PREFER_SRC_TMP: + pref |= IPV6_PREFER_SRC_TMP; + prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | + IPV6_PREFER_SRC_TMP); + break; + case IPV6_PREFER_SRC_PUBTMP_DEFAULT: + prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | + IPV6_PREFER_SRC_TMP); + break; + case 0: + break; + default: + return -EINVAL; + } + + /* check HOME/COA conflicts */ + switch (val & (IPV6_PREFER_SRC_HOME | IPV6_PREFER_SRC_COA)) { + case IPV6_PREFER_SRC_HOME: + prefmask &= ~IPV6_PREFER_SRC_COA; + break; + case IPV6_PREFER_SRC_COA: + pref |= IPV6_PREFER_SRC_COA; + break; + case 0: + break; + default: + return -EINVAL; + } + + /* check CGA/NONCGA conflicts */ + switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) { + case IPV6_PREFER_SRC_CGA: + case IPV6_PREFER_SRC_NONCGA: + case 0: + break; + default: + return -EINVAL; + } + + inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref; + return 0; +} + +static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val) +{ + int ret; + + lock_sock(sk); + ret = __ip6_sock_set_addr_preferences(sk, val); + release_sock(sk); + return ret; +} + +static inline void ip6_sock_set_recvpktinfo(struct sock *sk) +{ + lock_sock(sk); + inet6_sk(sk)->rxopt.bits.rxinfo = true; + release_sock(sk); +} + #endif /* _NET_IPV6_H */ diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index 3e7d2c0e79ca..d7a7f7c81e7b 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -48,7 +48,7 @@ struct ipv6_stub { struct netlink_ext_ack *extack); void (*fib6_nh_release)(struct fib6_nh *fib6_nh); void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt); - int (*ip6_del_rt)(struct net *net, struct fib6_info *rt); + int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify); void (*fib6_rt_update)(struct net *net, struct fib6_info *rt, struct nl_info *info); @@ -56,6 +56,12 @@ struct ipv6_stub { void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr, const struct in6_addr *solicited_addr, bool router, bool solicited, bool override, bool inc_opt); +#if IS_ENABLED(CONFIG_XFRM) + void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu); + int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb); + int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type); +#endif struct neigh_table *nd_tbl; }; extern const struct ipv6_stub *ipv6_stub __read_mostly; @@ -63,7 +69,7 @@ extern const struct ipv6_stub *ipv6_stub __read_mostly; /* A stub used by bpf helpers. 
Similarly ugly as ipv6_stub */
struct ipv6_bpf_stub {
	int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
-			  bool force_bind_address_no_port, bool with_lock);
+			  u32 flags);
	struct sock *(*udp6_lib_lookup)(struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr, __be16 dport,
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index 14a490246be9..9259ce2b22f3 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -158,12 +158,4 @@ struct iucv_sock_list {
	atomic_t autobind_name;
};

-__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
-			poll_table *wait);
-void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
-void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
-void iucv_accept_unlink(struct sock *sk);
-struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
-
#endif /* __IUCV_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index b6b4de0e4b5e..11d5610d2ad5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7,7 +7,7 @@
 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
 */

#ifndef MAC80211_H
@@ -230,7 +230,7 @@ struct ieee80211_chanctx_conf {
	bool radar_enabled;

-	u8 drv_priv[0] __aligned(sizeof(void *));
+	u8 drv_priv[] __aligned(sizeof(void *));
};

/**
@@ -508,6 +508,7 @@ struct ieee80211_ftm_responder_params {
 *	mode only, set if the AP advertises TWT responder role)
 * @twt_responder: does this BSS support TWT responder (relevant for managed
 *	mode only, set if the AP advertises TWT responder role)
+ * @twt_protected: does this BSS support protected TWT frames
 * @assoc: association status
 * @ibss_joined: indicates whether this station is part of an IBSS
 *	or not
@@ -603,7 +604,7 @@ struct ieee80211_ftm_responder_params {
 *	nontransmitted BSSIDs
 * @profile_periodicity: the least number of beacon frames need to be received
 *	in order to discover all the nontransmitted BSSIDs in the set.
- * @he_operation: HE operation information of the AP we are connected to
+ * @he_oper: HE operation information of the AP we are connected to
 * @he_obss_pd: OBSS Packet Detection parameters.
 * @he_bss_color: BSS coloring settings, if BSS supports HE
 */
@@ -618,6 +619,7 @@ struct ieee80211_bss_conf {
	bool he_support;
	bool twt_requester;
	bool twt_responder;
+	bool twt_protected;

	/* association related data */
	bool assoc, ibss_joined;
	bool ibss_creator;
@@ -666,7 +668,10 @@ struct ieee80211_bss_conf {
	u8 bssid_indicator;
	bool ema_ap;
	u8 profile_periodicity;
-	struct ieee80211_he_operation he_operation;
+	struct {
+		u32 params;
+		u16 nss_set;
+	} he_oper;
	struct ieee80211_he_obss_pd he_obss_pd;
	struct cfg80211_he_bss_color he_bss_color;
};

@@ -818,6 +823,8 @@ enum mac80211_tx_info_flags {
 * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
 * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
 * @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
+ * @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation
+ *	(header conversion)
 *
 * These flags are used in tx_info->control.flags.
 */
@@ -1333,6 +1340,7 @@ enum mac80211_rx_encoding {
 * @freq: frequency the radio was tuned to when receiving this frame, in MHz
 *	This field must be set for management frames, but isn't strictly needed
 *	for data (other) frames - for those it only affects radiotap reporting.
+ * @freq_offset: @freq has a positive offset of 500 kHz.
 * @signal: signal strength when receiving this frame, either in dBm, in dB or
 *	unspecified depending on the hardware capabilities flags
 *	@IEEE80211_HW_SIGNAL_*
@@ -1363,7 +1371,7 @@ struct ieee80211_rx_status {
	u32 device_timestamp;
	u32 ampdu_reference;
	u32 flag;
-	u16 freq;
+	u16 freq: 13, freq_offset: 1;
	u8 enc_flags;
	u8 encoding:2, bw:3, he_ru:3;
	u8 he_gi:2, he_dcm:1;
@@ -1379,6 +1387,13 @@ struct ieee80211_rx_status {
	u8 zero_length_psdu_type;
};

+static inline u32
+ieee80211_rx_status_to_khz(struct ieee80211_rx_status *rx_status)
+{
+	return MHZ_TO_KHZ(rx_status->freq) +
+	       (rx_status->freq_offset ? 500 : 0);
+}
+
/**
 * struct ieee80211_vendor_radiotap - vendor radiotap data information
 * @present: presence bitmap for this vendor namespace
@@ -1620,6 +1635,8 @@ enum ieee80211_vif_flags {
 *	monitor interface (if that is requested.)
 * @probe_req_reg: probe requests should be reported to mac80211 for this
 *	interface.
+ * @rx_mcast_action_reg: multicast Action frames should be reported to mac80211
+ *	for this interface.
 * @drv_priv: data area for driver use, will always be aligned to
 *	sizeof(void \*).
 * @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
@@ -1647,12 +1664,13 @@ struct ieee80211_vif {
	struct dentry *debugfs_dir;
#endif

-	unsigned int probe_req_reg;
+	bool probe_req_reg;
+	bool rx_mcast_action_reg;

	bool txqs_stopped[IEEE80211_NUM_ACS];

	/* must be last */
-	u8 drv_priv[0] __aligned(sizeof(void *));
+	u8 drv_priv[] __aligned(sizeof(void *));
};

static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
@@ -1780,7 +1798,7 @@ struct ieee80211_key_conf {
	s8 keyidx;
	u16 flags;
	u8 keylen;
-	u8 key[0];
+	u8 key[];
};

#define IEEE80211_MAX_PN_LEN	16

@@ -1959,6 +1977,7 @@ struct ieee80211_sta_txpwr {
 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
 * @he_cap: HE capabilities of this STA
+ * @he_6ghz_capa: on 6 GHz, holds the HE 6 GHz band capabilities
 * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
 *	that this station is allowed to transmit to us.
 *	Can be modified by driver.
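A minimal sketch (not from the patch) of what the new split freq/freq_offset encoding yields through ieee80211_rx_status_to_khz(); the helper name and the S1G-style 902.5 MHz centre frequency are assumed values for illustration:

static u32 example_rx_center_khz(void)
{
	struct ieee80211_rx_status status = {};

	status.freq = 902;	/* whole MHz, now a 13-bit field */
	status.freq_offset = 1;	/* S1G channels can sit 500 kHz off a whole MHz */

	/* 902 * 1000 + 500 == 902500 kHz */
	return ieee80211_rx_status_to_khz(&status);
}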
@@ -1998,6 +2017,7 @@ struct ieee80211_sta { struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; struct ieee80211_sta_he_cap he_cap; + struct ieee80211_he_6ghz_capa he_6ghz_capa; u16 max_rx_aggregation_subframes; bool wme; u8 uapsd_queues; @@ -2035,7 +2055,7 @@ struct ieee80211_sta { struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1]; /* must be last */ - u8 drv_priv[0] __aligned(sizeof(void *)); + u8 drv_priv[] __aligned(sizeof(void *)); }; /** @@ -2081,7 +2101,7 @@ struct ieee80211_txq { u8 ac; /* must be last */ - u8 drv_priv[0] __aligned(sizeof(void *)); + u8 drv_priv[] __aligned(sizeof(void *)); }; /** @@ -3091,6 +3111,8 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); * @FIF_PSPOLL: pass PS Poll frames * * @FIF_PROBE_REQ: pass probe request frames + * + * @FIF_MCAST_ACTION: pass multicast Action frames */ enum ieee80211_filter_flags { FIF_ALLMULTI = 1<<1, @@ -3101,6 +3123,7 @@ enum ieee80211_filter_flags { FIF_OTHER_BSS = 1<<6, FIF_PSPOLL = 1<<7, FIF_PROBE_REQ = 1<<8, + FIF_MCAST_ACTION = 1<<9, }; /** @@ -3117,7 +3140,10 @@ enum ieee80211_filter_flags { * @IEEE80211_AMPDU_RX_START: start RX aggregation * @IEEE80211_AMPDU_RX_STOP: stop RX aggregation * @IEEE80211_AMPDU_TX_START: start TX aggregation, the driver must either - * call ieee80211_start_tx_ba_cb_irqsafe() or return the special + * call ieee80211_start_tx_ba_cb_irqsafe() or + * call ieee80211_start_tx_ba_cb_irqsafe() with status + * %IEEE80211_AMPDU_TX_START_DELAY_ADDBA to delay addba after + * ieee80211_start_tx_ba_cb_irqsafe is called, or just return the special * status %IEEE80211_AMPDU_TX_START_IMMEDIATE. * @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational * @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting @@ -3143,6 +3169,7 @@ enum ieee80211_ampdu_mlme_action { }; #define IEEE80211_AMPDU_TX_START_IMMEDIATE 1 +#define IEEE80211_AMPDU_TX_START_DELAY_ADDBA 2 /** * struct ieee80211_ampdu_params - AMPDU action parameters @@ -6007,7 +6034,9 @@ enum rate_control_capabilities { struct rate_control_ops { unsigned long capa; const char *name; - void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir); + void *(*alloc)(struct ieee80211_hw *hw); + void (*add_debugfs)(struct ieee80211_hw *hw, void *priv, + struct dentry *debugfsdir); void (*free)(void *priv); void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp); diff --git a/include/net/mpls.h b/include/net/mpls.h index ccaf238e8ea7..0bb7944e7b08 100644 --- a/include/net/mpls.h +++ b/include/net/mpls.h @@ -8,6 +8,7 @@ #include <linux/if_ether.h> #include <linux/netdevice.h> +#include <linux/mpls.h> #define MPLS_HLEN 4 @@ -25,4 +26,20 @@ static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb) { return (struct mpls_shim_hdr *)skb_network_header(skb); } + +static inline struct mpls_shim_hdr mpls_entry_encode(u32 label, + unsigned int ttl, + unsigned int tc, + bool bos) +{ + struct mpls_shim_hdr result; + + result.label_stack_entry = + cpu_to_be32((label << MPLS_LS_LABEL_SHIFT) | + (tc << MPLS_LS_TC_SHIFT) | + (bos ? 
(1 << MPLS_LS_S_SHIFT) : 0) | + (ttl << MPLS_LS_TTL_SHIFT)); + return result; +} + #endif diff --git a/include/net/mptcp.h b/include/net/mptcp.h index 0e7c5471010b..46d0487d2b22 100644 --- a/include/net/mptcp.h +++ b/include/net/mptcp.h @@ -16,7 +16,10 @@ struct seq_file; /* MPTCP sk_buff extension data */ struct mptcp_ext { - u64 data_ack; + union { + u64 data_ack; + u32 data_ack32; + }; u64 data_seq; u32 subflow_seq; u16 data_len; @@ -68,11 +71,14 @@ static inline bool rsk_is_mptcp(const struct request_sock *req) return tcp_rsk(req)->is_mptcp; } -void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr, - int opsize, struct tcp_options_received *opt_rx); +static inline bool rsk_drop_req(const struct request_sock *req) +{ + return tcp_rsk(req)->is_mptcp && tcp_rsk(req)->drop_req; +} + +void mptcp_space(const struct sock *ssk, int *space, int *full_space); bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb, unsigned int *size, struct mptcp_out_options *opts); -void mptcp_rcv_synsent(struct sock *sk); bool mptcp_synack_options(const struct request_sock *req, unsigned int *size, struct mptcp_out_options *opts); bool mptcp_established_options(struct sock *sk, struct sk_buff *skb, @@ -123,8 +129,6 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, skb_ext_find(from, SKB_EXT_MPTCP)); } -bool mptcp_sk_is_subflow(const struct sock *sk); - void mptcp_seq_show(struct seq_file *seq); #else @@ -142,6 +146,11 @@ static inline bool rsk_is_mptcp(const struct request_sock *req) return false; } +static inline bool rsk_drop_req(const struct request_sock *req) +{ + return false; +} + static inline void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr, int opsize, struct tcp_options_received *opt_rx) @@ -192,11 +201,7 @@ static inline bool mptcp_skb_can_collapse(const struct sk_buff *to, return true; } -static inline bool mptcp_sk_is_subflow(const struct sock *sk) -{ - return false; -} - +static inline void mptcp_space(const struct sock *ssk, int *s, int *fs) { } static inline void mptcp_seq_show(struct seq_file *seq) { } #endif /* CONFIG_MPTCP */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 7d107113f988..9205a76d967a 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -41,7 +41,7 @@ enum { ND_OPT_DNSSL = 31, /* RFC6106 */ ND_OPT_6CO = 34, /* RFC6775 */ ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */ - ND_OPT_PREF64 = 38, /* RFC-ietf-6man-ra-pref64-09 */ + ND_OPT_PREF64 = 38, /* RFC8781 */ __ND_OPT_MAX }; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index e1476775769c..81ee17594c32 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -392,13 +392,12 @@ void *neigh_seq_next(struct seq_file *, void *, loff_t *); void neigh_seq_stop(struct seq_file *, void *); int neigh_proc_dointvec(struct ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, - void __user *buffer, + void *buffer, size_t *lenp, loff_t *ppos); int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, proc_handler *proc_handler); diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index ab96fb59131c..2ee5901bec7a 100644 --- a/include/net/net_namespace.h +++ 
b/include/net/net_namespace.h @@ -33,6 +33,7 @@ #include <net/netns/mpls.h> #include <net/netns/can.h> #include <net/netns/xdp.h> +#include <net/netns/bpf.h> #include <linux/ns_common.h> #include <linux/idr.h> #include <linux/skbuff.h> @@ -162,7 +163,8 @@ struct net { #endif struct net_generic __rcu *gen; - struct bpf_prog __rcu *flow_dissector_prog; + /* Used to store attached BPF programs */ + struct netns_bpf bpf; /* Note : following structs are cache line aligned */ #ifdef CONFIG_XFRM @@ -437,6 +439,13 @@ static inline int rt_genid_ipv4(const struct net *net) return atomic_read(&net->ipv4.rt_genid); } +#if IS_ENABLED(CONFIG_IPV6) +static inline int rt_genid_ipv6(const struct net *net) +{ + return atomic_read(&net->ipv6.fib6_sernum); +} +#endif + static inline void rt_genid_bump_ipv4(struct net *net) { atomic_inc(&net->ipv4.rt_genid); diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 9f551f3b69c6..90690e37a56f 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -87,7 +87,7 @@ struct nf_conn { struct hlist_node nat_bysource; #endif /* all members below initialized via memset */ - u8 __nfct_init_offset[0]; + struct { } __nfct_init_offset; /* If we were expected by an expectation, this will be it */ struct nf_conn *master; diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 4cad1f0a327a..88186b95b3c2 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -42,7 +42,8 @@ struct nf_conntrack_l4proto { /* Calculate tuple nlattr size */ unsigned int (*nlattr_tuple_size)(void); int (*nlattr_to_tuple)(struct nlattr *tb[], - struct nf_conntrack_tuple *t); + struct nf_conntrack_tuple *t, + u_int32_t flags); const struct nla_policy *nla_policy; struct { @@ -152,7 +153,8 @@ const struct nf_conntrack_l4proto *nf_ct_l4proto_find(u8 l4proto); int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple); int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[], - struct nf_conntrack_tuple *t); + struct nf_conntrack_tuple *t, + u_int32_t flags); unsigned int nf_ct_port_nlattr_tuple_size(void); extern const struct nla_policy nf_ct_port_nla_policy[]; diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 6bf69652f57d..16e8b2f8d006 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -127,6 +127,7 @@ enum nf_flow_flags { NF_FLOW_HW_DYING, NF_FLOW_HW_DEAD, NF_FLOW_HW_REFRESH, + NF_FLOW_HW_PENDING, }; enum flow_offload_type { @@ -160,10 +161,51 @@ struct nf_flow_route { struct flow_offload *flow_offload_alloc(struct nf_conn *ct); void flow_offload_free(struct flow_offload *flow); -int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table, - flow_setup_cb_t *cb, void *cb_priv); -void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, - flow_setup_cb_t *cb, void *cb_priv); +static inline int +nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table, + flow_setup_cb_t *cb, void *cb_priv) +{ + struct flow_block *block = &flow_table->flow_block; + struct flow_block_cb *block_cb; + int err = 0; + + down_write(&flow_table->flow_block_lock); + block_cb = flow_block_cb_lookup(block, cb, cb_priv); + if (block_cb) { + err = -EEXIST; + goto unlock; + } + + block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL); + if (IS_ERR(block_cb)) { + err = PTR_ERR(block_cb); + goto unlock; + } + + 
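	/* Not in the patch: on success, publish the new callback on this
	 * flow table's block list while flow_block_lock is still held.
	 */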
list_add_tail(&block_cb->list, &block->cb_list); + +unlock: + up_write(&flow_table->flow_block_lock); + return err; +} + +static inline void +nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table, + flow_setup_cb_t *cb, void *cb_priv) +{ + struct flow_block *block = &flow_table->flow_block; + struct flow_block_cb *block_cb; + + down_write(&flow_table->flow_block_lock); + block_cb = flow_block_cb_lookup(block, cb, cb_priv); + if (block_cb) { + list_del(&block_cb->list); + flow_block_cb_free(block_cb); + } else { + WARN_ON(true); + } + up_write(&flow_table->flow_block_lock); +} int flow_offload_route_init(struct flow_offload *flow, const struct nf_flow_route *route); @@ -174,6 +216,8 @@ void flow_offload_refresh(struct nf_flowtable *flow_table, struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table, struct flow_offload_tuple *tuple); +void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable, + struct net_device *dev); void nf_flow_table_cleanup(struct net_device *dev); int nf_flow_table_init(struct nf_flowtable *flow_table); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6eb627b3c99b..6f0f6fca9ac3 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -243,6 +243,10 @@ struct nft_set_elem { u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; struct nft_data val; } key_end; + union { + u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; + struct nft_data val; + } data; void *priv; }; @@ -901,7 +905,7 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext, { struct nft_expr *expr; - if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) { + if (__nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) { expr = nft_set_ext_expr(ext); expr->ops->eval(expr, regs, pkt); } @@ -998,6 +1002,7 @@ struct nft_stats { struct nft_hook { struct list_head list; + bool inactive; struct nf_hook_ops ops; struct rcu_head rcu; }; @@ -1477,10 +1482,16 @@ struct nft_trans_obj { struct nft_trans_flowtable { struct nft_flowtable *flowtable; + bool update; + struct list_head hook_list; }; #define nft_trans_flowtable(trans) \ (((struct nft_trans_flowtable *)trans->data)->flowtable) +#define nft_trans_flowtable_update(trans) \ + (((struct nft_trans_flowtable *)trans->data)->update) +#define nft_trans_flowtable_hooks(trans) \ + (((struct nft_trans_flowtable *)trans->data)->hook_list) int __init nft_chain_filter_init(void); void nft_chain_filter_fini(void); diff --git a/include/net/netlink.h b/include/net/netlink.h index 67c57d6942e3..c0411f14fb53 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -182,19 +182,28 @@ enum { NLA_BITFIELD32, NLA_REJECT, NLA_EXACT_LEN, - NLA_EXACT_LEN_WARN, NLA_MIN_LEN, __NLA_TYPE_MAX, }; #define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1) +struct netlink_range_validation { + u64 min, max; +}; + +struct netlink_range_validation_signed { + s64 min, max; +}; + enum nla_policy_validation { NLA_VALIDATE_NONE, NLA_VALIDATE_RANGE, NLA_VALIDATE_MIN, NLA_VALIDATE_MAX, + NLA_VALIDATE_RANGE_PTR, NLA_VALIDATE_FUNCTION, + NLA_VALIDATE_WARN_TOO_LONG, }; /** @@ -217,7 +226,7 @@ enum nla_policy_validation { * NLA_NESTED, * NLA_NESTED_ARRAY Length verification is done by checking len of * nested header (or empty); len field is used if - * validation_data is also used, for the max attr + * nested_policy is also used, for the max attr * number in the nested policy. 
* NLA_U8, NLA_U16, * NLA_U32, NLA_U64, @@ -228,34 +237,32 @@ enum nla_policy_validation { * just like "All other" * NLA_BITFIELD32 Unused * NLA_REJECT Unused - * NLA_EXACT_LEN Attribute must have exactly this length, otherwise - * it is rejected. - * NLA_EXACT_LEN_WARN Attribute should have exactly this length, a warning - * is logged if it is longer, shorter is rejected. + * NLA_EXACT_LEN Attribute should have exactly this length, otherwise + * it is rejected or warned about, the latter happening + * if and only if the `validation_type' is set to + * NLA_VALIDATE_WARN_TOO_LONG. * NLA_MIN_LEN Minimum length of attribute payload * All other Minimum length of attribute payload * - * Meaning of `validation_data' field: + * Meaning of validation union: * NLA_BITFIELD32 This is a 32-bit bitmap/bitselector attribute and - * validation data must point to a u32 value of valid - * flags - * NLA_REJECT This attribute is always rejected and validation data + * `bitfield32_valid' is the u32 value of valid flags + * NLA_REJECT This attribute is always rejected and `reject_message' * may point to a string to report as the error instead * of the generic one in extended ACK. - * NLA_NESTED Points to a nested policy to validate, must also set - * `len' to the max attribute number. + * NLA_NESTED `nested_policy' to a nested policy to validate, must + * also set `len' to the max attribute number. Use the + * provided NLA_POLICY_NESTED() macro. * Note that nla_parse() will validate, but of course not * parse, the nested sub-policies. - * NLA_NESTED_ARRAY Points to a nested policy to validate, must also set - * `len' to the max attribute number. The difference to - * NLA_NESTED is the structure - NLA_NESTED has the - * nested attributes directly inside, while an array has - * the nested attributes at another level down and the - * attributes directly in the nesting don't matter. - * All other Unused - but note that it's a union - * - * Meaning of `min' and `max' fields, use via NLA_POLICY_MIN, NLA_POLICY_MAX - * and NLA_POLICY_RANGE: + * NLA_NESTED_ARRAY `nested_policy' points to a nested policy to validate, + * must also set `len' to the max attribute number. Use + * the provided NLA_POLICY_NESTED_ARRAY() macro. + * The difference to NLA_NESTED is the structure: + * NLA_NESTED has the nested attributes directly inside + * while an array has the nested attributes at another + * level down and the attribute types directly in the + * nesting don't matter. * NLA_U8, * NLA_U16, * NLA_U32, @@ -263,29 +270,47 @@ enum nla_policy_validation { * NLA_S8, * NLA_S16, * NLA_S32, - * NLA_S64 These are used depending on the validation_type - * field, if that is min/max/range then the minimum, - * maximum and both are used (respectively) to check + * NLA_S64 The `min' and `max' fields are used depending on the + * validation_type field, if that is min/max/range then + * the min, max or both are used (respectively) to check * the value of the integer attribute. * Note that in the interest of code simplicity and * struct size both limits are s16, so you cannot * enforce a range that doesn't fall within the range * of s16 - do that as usual in the code instead. + * Use the NLA_POLICY_MIN(), NLA_POLICY_MAX() and + * NLA_POLICY_RANGE() macros. + * NLA_U8, + * NLA_U16, + * NLA_U32, + * NLA_U64 If the validation_type field instead is set to + * NLA_VALIDATE_RANGE_PTR, `range' must be a pointer + * to a struct netlink_range_validation that indicates + * the min/max values. + * Use NLA_POLICY_FULL_RANGE(). 
+ * NLA_S8, + * NLA_S16, + * NLA_S32, + * NLA_S64 If the validation_type field instead is set to + * NLA_VALIDATE_RANGE_PTR, `range_signed' must be a + * pointer to a struct netlink_range_validation_signed + * that indicates the min/max values. + * Use NLA_POLICY_FULL_RANGE_SIGNED(). * All other Unused - but note that it's a union * * Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN: - * NLA_BINARY Validation function called for the attribute, - * not compatible with use of the validation_data - * as in NLA_BITFIELD32, NLA_REJECT, NLA_NESTED and - * NLA_NESTED_ARRAY. + * NLA_BINARY Validation function called for the attribute. * All other Unused - but note that it's a union * * Example: + * + * static const u32 myvalidflags = 0xff231023; + * * static const struct nla_policy my_policy[ATTR_MAX+1] = { * [ATTR_FOO] = { .type = NLA_U16 }, * [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ }, * [ATTR_BAZ] = { .type = NLA_EXACT_LEN, .len = sizeof(struct mystruct) }, - * [ATTR_GOO] = { .type = NLA_BITFIELD32, .validation_data = &myvalidflags }, + * [ATTR_GOO] = NLA_POLICY_BITFIELD32(myvalidflags), * }; */ struct nla_policy { @@ -293,7 +318,11 @@ struct nla_policy { u8 validation_type; u16 len; union { - const void *validation_data; + const u32 bitfield32_valid; + const char *reject_message; + const struct nla_policy *nested_policy; + struct netlink_range_validation *range; + struct netlink_range_validation_signed *range_signed; struct { s16 min, max; }; @@ -321,28 +350,39 @@ struct nla_policy { }; #define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len } -#define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \ - .len = _len } +#define NLA_POLICY_EXACT_LEN_WARN(_len) \ + { .type = NLA_EXACT_LEN, .len = _len, \ + .validation_type = NLA_VALIDATE_WARN_TOO_LONG, } #define NLA_POLICY_MIN_LEN(_len) { .type = NLA_MIN_LEN, .len = _len } #define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN) #define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN) #define _NLA_POLICY_NESTED(maxattr, policy) \ - { .type = NLA_NESTED, .validation_data = policy, .len = maxattr } + { .type = NLA_NESTED, .nested_policy = policy, .len = maxattr } #define _NLA_POLICY_NESTED_ARRAY(maxattr, policy) \ - { .type = NLA_NESTED_ARRAY, .validation_data = policy, .len = maxattr } + { .type = NLA_NESTED_ARRAY, .nested_policy = policy, .len = maxattr } #define NLA_POLICY_NESTED(policy) \ _NLA_POLICY_NESTED(ARRAY_SIZE(policy) - 1, policy) #define NLA_POLICY_NESTED_ARRAY(policy) \ _NLA_POLICY_NESTED_ARRAY(ARRAY_SIZE(policy) - 1, policy) +#define NLA_POLICY_BITFIELD32(valid) \ + { .type = NLA_BITFIELD32, .bitfield32_valid = valid } #define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition)) +#define NLA_ENSURE_UINT_TYPE(tp) \ + (__NLA_ENSURE(tp == NLA_U8 || tp == NLA_U16 || \ + tp == NLA_U32 || tp == NLA_U64 || \ + tp == NLA_MSECS) + tp) +#define NLA_ENSURE_SINT_TYPE(tp) \ + (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_S16 || \ + tp == NLA_S32 || tp == NLA_S64) + tp) #define NLA_ENSURE_INT_TYPE(tp) \ (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \ tp == NLA_S16 || tp == NLA_U16 || \ tp == NLA_S32 || tp == NLA_U32 || \ - tp == NLA_S64 || tp == NLA_U64) + tp) + tp == NLA_S64 || tp == NLA_U64 || \ + tp == NLA_MSECS) + tp) #define NLA_ENSURE_NO_VALIDATION_PTR(tp) \ (__NLA_ENSURE(tp != NLA_BITFIELD32 && \ tp != NLA_REJECT && \ @@ -356,6 +396,18 @@ struct nla_policy { .max = _max \ } +#define NLA_POLICY_FULL_RANGE(tp, _range) { \ + .type = NLA_ENSURE_UINT_TYPE(tp), \ + .validation_type = 
NLA_VALIDATE_RANGE_PTR, \ + .range = _range, \ +} + +#define NLA_POLICY_FULL_RANGE_SIGNED(tp, _range) { \ + .type = NLA_ENSURE_SINT_TYPE(tp), \ + .validation_type = NLA_VALIDATE_RANGE_PTR, \ + .range_signed = _range, \ +} + #define NLA_POLICY_MIN(tp, _min) { \ .type = NLA_ENSURE_INT_TYPE(tp), \ .validation_type = NLA_VALIDATE_MIN, \ @@ -1876,4 +1928,15 @@ static inline bool nla_is_last(const struct nlattr *nla, int rem) return nla->nla_len == rem; } +void nla_get_range_unsigned(const struct nla_policy *pt, + struct netlink_range_validation *range); +void nla_get_range_signed(const struct nla_policy *pt, + struct netlink_range_validation_signed *range); + +int netlink_policy_dump_start(const struct nla_policy *policy, + unsigned int maxtype, + unsigned long *state); +bool netlink_policy_dump_loop(unsigned long *state); +int netlink_policy_dump_write(struct sk_buff *skb, unsigned long state); + #endif diff --git a/include/net/netns/bpf.h b/include/net/netns/bpf.h new file mode 100644 index 000000000000..a8dce2a380c8 --- /dev/null +++ b/include/net/netns/bpf.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BPF programs attached to network namespace + */ + +#ifndef __NETNS_BPF_H__ +#define __NETNS_BPF_H__ + +#include <linux/bpf-netns.h> + +struct bpf_prog; + +struct netns_bpf { + struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE]; + struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE]; +}; + +#endif /* __NETNS_BPF_H__ */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 154b8f01499b..9e36738c1fe1 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -111,6 +111,8 @@ struct netns_ipv4 { int sysctl_tcp_early_demux; int sysctl_udp_early_demux; + int sysctl_nexthop_compat_mode; + int sysctl_fwmark_reflect; int sysctl_tcp_fwmark_accept; #ifdef CONFIG_NET_L3_MASTER_DEV @@ -171,6 +173,7 @@ struct netns_ipv4 { int sysctl_tcp_rmem[3]; int sysctl_tcp_comp_sack_nr; unsigned long sysctl_tcp_comp_sack_delay_ns; + unsigned long sysctl_tcp_comp_sack_slack_ns; struct inet_timewait_death_row tcp_death_row; int sysctl_max_syn_backlog; int sysctl_tcp_fastopen; diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h index c712ee5eebd9..1937476c94a0 100644 --- a/include/net/netns/nexthop.h +++ b/include/net/netns/nexthop.h @@ -14,5 +14,6 @@ struct netns_nexthop { unsigned int seq; /* protected by rtnl_mutex */ u32 last_id_allocated; + struct atomic_notifier_head notifier_chain; }; #endif diff --git a/include/net/nexthop.h b/include/net/nexthop.h index c440ccc861fc..3a4f9e3b91a5 100644 --- a/include/net/nexthop.h +++ b/include/net/nexthop.h @@ -10,6 +10,7 @@ #define __LINUX_NEXTHOP_H #include <linux/netdevice.h> +#include <linux/notifier.h> #include <linux/route.h> #include <linux/types.h> #include <net/ip_fib.h> @@ -26,6 +27,7 @@ struct nh_config { u8 nh_family; u8 nh_protocol; u8 nh_blackhole; + u8 nh_fdb; u32 nh_flags; int nh_ifindex; @@ -52,6 +54,7 @@ struct nh_info { u8 family; bool reject_nh; + bool fdb_nh; union { struct fib_nh_common fib_nhc; @@ -70,8 +73,10 @@ struct nh_grp_entry { }; struct nh_group { + struct nh_group *spare; /* spare group for removals */ u16 num_nh; bool mpath; + bool fdb_nh; bool has_v4; struct nh_grp_entry nh_entries[]; }; @@ -80,6 +85,7 @@ struct nexthop { struct rb_node rb_node; /* entry on netns rbtree */ struct list_head fi_list; /* v4 entries using nh */ struct list_head f6i_list; /* v6 entries using nh */ + struct list_head fdb_list; /* fdb entries using this nh */ struct list_head grp_list; /* nh group 
entries using this nh */ struct net *net; @@ -98,6 +104,17 @@ struct nexthop { }; }; +enum nexthop_event_type { + NEXTHOP_EVENT_ADD, + NEXTHOP_EVENT_DEL +}; + +int call_nexthop_notifier(struct notifier_block *nb, struct net *net, + enum nexthop_event_type event_type, + struct nexthop *nh); +int register_nexthop_notifier(struct net *net, struct notifier_block *nb); +int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb); + /* caller is holding rcu or rtnl; no reference taken to nexthop */ struct nexthop *nexthop_find_by_id(struct net *net, u32 id); void nexthop_free_rcu(struct rcu_head *head); @@ -119,6 +136,32 @@ static inline bool nexthop_cmp(const struct nexthop *nh1, return nh1 == nh2; } +static inline bool nexthop_is_fdb(const struct nexthop *nh) +{ + if (nh->is_group) { + const struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + return nh_grp->fdb_nh; + } else { + const struct nh_info *nhi; + + nhi = rcu_dereference_rtnl(nh->nh_info); + return nhi->fdb_nh; + } +} + +static inline bool nexthop_has_v4(const struct nexthop *nh) +{ + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + return nh_grp->has_v4; + } + return false; +} + static inline bool nexthop_is_multipath(const struct nexthop *nh) { if (nh->is_group) { @@ -136,21 +179,20 @@ static inline unsigned int nexthop_num_path(const struct nexthop *nh) { unsigned int rc = 1; - if (nexthop_is_multipath(nh)) { + if (nh->is_group) { struct nh_group *nh_grp; nh_grp = rcu_dereference_rtnl(nh->nh_grp); - rc = nh_grp->num_nh; + if (nh_grp->mpath) + rc = nh_grp->num_nh; } return rc; } static inline -struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) +struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel) { - const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); - /* for_nexthops macros in fib_semantics.c grabs a pointer to * the nexthop before checking nhsel */ @@ -185,12 +227,14 @@ static inline bool nexthop_is_blackhole(const struct nexthop *nh) { const struct nh_info *nhi; - if (nexthop_is_multipath(nh)) { - if (nexthop_num_path(nh) > 1) - return false; - nh = nexthop_mpath_select(nh, 0); - if (!nh) + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + if (nh_grp->num_nh > 1) return false; + + nh = nh_grp->nh_entries[0].nh; } nhi = rcu_dereference_rtnl(nh->nh_info); @@ -216,16 +260,79 @@ struct fib_nh_common *nexthop_fib_nhc(struct nexthop *nh, int nhsel) BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); - if (nexthop_is_multipath(nh)) { - nh = nexthop_mpath_select(nh, nhsel); - if (!nh) - return NULL; + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + if (nh_grp->mpath) { + nh = nexthop_mpath_select(nh_grp, nhsel); + if (!nh) + return NULL; + } } nhi = rcu_dereference_rtnl(nh->nh_info); return &nhi->fib_nhc; } +/* called from fib_table_lookup with rcu_lock */ +static inline +struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh, + int fib_flags, + const struct flowi4 *flp, + int *nhsel) +{ + struct nh_info *nhi; + + if (nh->is_group) { + struct nh_group *nhg = rcu_dereference(nh->nh_grp); + int i; + + for (i = 0; i < nhg->num_nh; i++) { + struct nexthop *nhe = nhg->nh_entries[i].nh; + + nhi = rcu_dereference(nhe->nh_info); + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { + *nhsel = i; + return &nhi->fib_nhc; + } + } + } 
else { + nhi = rcu_dereference(nh->nh_info); + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { + *nhsel = 0; + return &nhi->fib_nhc; + } + } + + return NULL; +} + +static inline bool nexthop_uses_dev(const struct nexthop *nh, + const struct net_device *dev) +{ + struct nh_info *nhi; + + if (nh->is_group) { + struct nh_group *nhg = rcu_dereference(nh->nh_grp); + int i; + + for (i = 0; i < nhg->num_nh; i++) { + struct nexthop *nhe = nhg->nh_entries[i].nh; + + nhi = rcu_dereference(nhe->nh_info); + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) + return true; + } + } else { + nhi = rcu_dereference(nh->nh_info); + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) + return true; + } + + return false; +} + static inline unsigned int fib_info_num_path(const struct fib_info *fi) { if (unlikely(fi->nh)) @@ -263,8 +370,11 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh) { struct nh_info *nhi; - if (nexthop_is_multipath(nh)) { - nh = nexthop_mpath_select(nh, 0); + if (nh->is_group) { + struct nh_group *nh_grp; + + nh_grp = rcu_dereference_rtnl(nh->nh_grp); + nh = nexthop_mpath_select(nh_grp, 0); if (!nh) return NULL; } @@ -304,4 +414,32 @@ static inline void nexthop_path_fib6_result(struct fib6_result *res, int hash) int nexthop_for_each_fib6_nh(struct nexthop *nh, int (*cb)(struct fib6_nh *nh, void *arg), void *arg); + +static inline int nexthop_get_family(struct nexthop *nh) +{ + struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info); + + return nhi->family; +} + +static inline +struct fib_nh_common *nexthop_fdb_nhc(struct nexthop *nh) +{ + struct nh_info *nhi = rcu_dereference_rtnl(nh->nh_info); + + return &nhi->fib_nhc; +} + +static inline struct fib_nh_common *nexthop_path_fdb_result(struct nexthop *nh, + int hash) +{ + struct nh_info *nhi; + struct nexthop *nhp; + + nhp = nexthop_select_path(nh, hash); + if (unlikely(!nhp)) + return NULL; + nhi = rcu_dereference(nhp->nh_info); + return &nhi->fib_nhc; +} #endif diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 04aa0649f3b0..ed65619cbc47 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -325,6 +325,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, void tcf_exts_destroy(struct tcf_exts *exts); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src); int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); +int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts); int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts); /** diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 3469750df0f4..09a3099886e5 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -231,13 +231,6 @@ struct ieee80211_regdomain { struct ieee80211_reg_rule reg_rules[]; }; -#define MHZ_TO_KHZ(freq) ((freq) * 1000) -#define KHZ_TO_MHZ(freq) ((freq) / 1000) -#define DBI_TO_MBI(gain) ((gain) * 100) -#define MBI_TO_DBI(gain) ((gain) / 100) -#define DBM_TO_MBM(gain) ((gain) * 100) -#define MBM_TO_DBM(gain) ((gain) / 100) - #define REG_RULE_EXT(start, end, bw, gain, eirp, dfs_cac, reg_flags) \ { \ .freq_range.start_freq_khz = MHZ_TO_KHZ(start), \ diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 25d2ec4c8f00..c510b03b9751 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -330,6 +330,10 @@ struct tcf_proto_ops { int (*dump)(struct net*, struct tcf_proto*, void *, struct sk_buff *skb, struct tcmsg*, bool); + int (*terse_dump)(struct net *net, + struct tcf_proto *tp, void *fh, + struct sk_buff 
*skb, + struct tcmsg *t, bool rtnl_held); int (*tmplt_dump)(struct sk_buff *skb, struct net *net, void *tmplt_priv); @@ -407,6 +411,7 @@ struct tcf_block { struct mutex lock; struct list_head chain_list; u32 index; /* block index for shared blocks */ + u32 classid; /* which class this block belongs to */ refcount_t refcnt; struct net *net; struct Qdisc *q; @@ -710,11 +715,6 @@ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i) } } -static inline void qdisc_reset_all_tx(struct net_device *dev) -{ - qdisc_reset_all_tx_gt(dev, 0); -} - /* Are all TX queues of the device empty? */ static inline bool qdisc_all_tx_empty(const struct net_device *dev) { diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 3ab5c6bbb90b..f8bcb75bb044 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -615,4 +615,11 @@ static inline bool sctp_newsk_ready(const struct sock *sk) return sock_flag(sk, SOCK_DEAD) || sk->sk_socket; } +static inline void sctp_sock_set_nodelay(struct sock *sk) +{ + lock_sock(sk); + sctp_sk(sk)->nodelay = true; + release_sock(sk); +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index 0b032b92da0b..994e984eef32 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h @@ -80,7 +80,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( struct sctp_chunk *chunk, gfp_t gfp); -void sctp_ulpevent_nofity_peer_addr_change(struct sctp_transport *transport, +void sctp_ulpevent_notify_peer_addr_change(struct sctp_transport *transport, int state, int error); struct sctp_ulpevent *sctp_ulpevent_make_remote_error( diff --git a/include/net/seg6.h b/include/net/seg6.h index 640724b35273..9d19c15e8545 100644 --- a/include/net/seg6.h +++ b/include/net/seg6.h @@ -57,7 +57,7 @@ extern void seg6_iptunnel_exit(void); extern int seg6_local_init(void); extern void seg6_local_exit(void); -extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len); +extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced); extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto); extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh); diff --git a/include/net/sock.h b/include/net/sock.h index 6d84784d33fa..c53cc42b5ab9 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1156,7 +1156,9 @@ struct proto { int (*sendpage)(struct sock *sk, struct page *page, int offset, size_t size, int flags); int (*bind)(struct sock *sk, - struct sockaddr *uaddr, int addr_len); + struct sockaddr *addr, int addr_len); + int (*bind_add)(struct sock *sk, + struct sockaddr *addr, int addr_len); int (*backlog_rcv) (struct sock *sk, struct sk_buff *skb); @@ -2553,9 +2555,9 @@ sk_is_refcounted(struct sock *sk) } /** - * skb_steal_sock - * @skb to steal the socket from - * @refcounted is set to true if the socket is reference-counted + * skb_steal_sock - steal a socket from an sk_buff + * @skb: sk_buff to steal the socket from + * @refcounted: is set to true if the socket is reference-counted */ static inline struct sock * skb_steal_sock(struct sk_buff *skb, bool *refcounted) @@ -2688,4 +2690,16 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif) void sock_def_readable(struct sock *sk); +int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk); +void sock_enable_timestamps(struct sock *sk); +void sock_no_linger(struct sock *sk); +void sock_set_keepalive(struct sock *sk); +void sock_set_priority(struct sock *sk, 
u32 priority); +void sock_set_rcvbuf(struct sock *sk, int val); +void sock_set_reuseaddr(struct sock *sk); +void sock_set_reuseport(struct sock *sk); +void sock_set_sndtimeo(struct sock *sk, s64 secs); + +int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len); + #endif /* _SOCK_H */ diff --git a/include/net/switchdev.h b/include/net/switchdev.h index aee86a189432..b8c059b4e06d 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -40,6 +40,10 @@ enum switchdev_attr_id { SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING, SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED, SWITCHDEV_ATTR_ID_BRIDGE_MROUTER, +#if IS_ENABLED(CONFIG_BRIDGE_MRP) + SWITCHDEV_ATTR_ID_MRP_PORT_STATE, + SWITCHDEV_ATTR_ID_MRP_PORT_ROLE, +#endif }; struct switchdev_attr { @@ -55,6 +59,10 @@ struct switchdev_attr { clock_t ageing_time; /* BRIDGE_AGEING_TIME */ bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */ bool mc_disabled; /* MC_DISABLED */ +#if IS_ENABLED(CONFIG_BRIDGE_MRP) + u8 mrp_port_state; /* MRP_PORT_STATE */ + u8 mrp_port_role; /* MRP_PORT_ROLE */ +#endif } u; }; @@ -63,6 +71,12 @@ enum switchdev_obj_id { SWITCHDEV_OBJ_ID_PORT_VLAN, SWITCHDEV_OBJ_ID_PORT_MDB, SWITCHDEV_OBJ_ID_HOST_MDB, +#if IS_ENABLED(CONFIG_BRIDGE_MRP) + SWITCHDEV_OBJ_ID_MRP, + SWITCHDEV_OBJ_ID_RING_TEST_MRP, + SWITCHDEV_OBJ_ID_RING_ROLE_MRP, + SWITCHDEV_OBJ_ID_RING_STATE_MRP, +#endif }; struct switchdev_obj { @@ -94,6 +108,55 @@ struct switchdev_obj_port_mdb { #define SWITCHDEV_OBJ_PORT_MDB(OBJ) \ container_of((OBJ), struct switchdev_obj_port_mdb, obj) + +#if IS_ENABLED(CONFIG_BRIDGE_MRP) +/* SWITCHDEV_OBJ_ID_MRP */ +struct switchdev_obj_mrp { + struct switchdev_obj obj; + struct net_device *p_port; + struct net_device *s_port; + u32 ring_id; + u16 prio; +}; + +#define SWITCHDEV_OBJ_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_mrp, obj) + +/* SWITCHDEV_OBJ_ID_RING_TEST_MRP */ +struct switchdev_obj_ring_test_mrp { + struct switchdev_obj obj; + /* The value is in us; a value of 0 means stop */ + u32 interval; + u8 max_miss; + u32 ring_id; + u32 period; + bool monitor; +}; + +#define SWITCHDEV_OBJ_RING_TEST_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_ring_test_mrp, obj) + +/* SWITCHDEV_OBJ_ID_RING_ROLE_MRP */ +struct switchdev_obj_ring_role_mrp { + struct switchdev_obj obj; + u8 ring_role; + u32 ring_id; +}; + +#define SWITCHDEV_OBJ_RING_ROLE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_ring_role_mrp, obj) + +struct switchdev_obj_ring_state_mrp { + struct switchdev_obj obj; + u8 ring_state; + u32 ring_id; +}; + +#define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \ + container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj) + +#endif + typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); enum switchdev_notifier_type { diff --git a/include/net/tc_act/tc_ct.h b/include/net/tc_act/tc_ct.h index 79654bcb9a29..8250d6f0a462 100644 --- a/include/net/tc_act/tc_ct.h +++ b/include/net/tc_act/tc_ct.h @@ -66,7 +66,16 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a) #endif /* CONFIG_NF_CONNTRACK */ #if IS_ENABLED(CONFIG_NET_ACT_CT) -void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie); +static inline void +tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) +{ + enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK; + struct nf_conn *ct; + + ct = (struct nf_conn *)(cookie & NFCT_PTRMASK); + nf_conntrack_get(&ct->ct_general); + nf_ct_set(skb, ct, ctinfo); +} #else static inline void tcf_ct_flow_table_restore_skb(struct sk_buff *skb,
unsigned long cookie) { } diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h new file mode 100644 index 000000000000..8bc6be81a7ad --- /dev/null +++ b/include/net/tc_act/tc_gate.h @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright 2020 NXP */ + +#ifndef __NET_TC_GATE_H +#define __NET_TC_GATE_H + +#include <net/act_api.h> +#include <linux/tc_act/tc_gate.h> + +struct action_gate_entry { + u8 gate_state; + u32 interval; + s32 ipv; + s32 maxoctets; +}; + +struct tcfg_gate_entry { + int index; + u8 gate_state; + u32 interval; + s32 ipv; + s32 maxoctets; + struct list_head list; +}; + +struct tcf_gate_params { + s32 tcfg_priority; + u64 tcfg_basetime; + u64 tcfg_cycletime; + u64 tcfg_cycletime_ext; + u32 tcfg_flags; + s32 tcfg_clockid; + size_t num_entries; + struct list_head entries; +}; + +#define GATE_ACT_GATE_OPEN BIT(0) +#define GATE_ACT_PENDING BIT(1) + +struct tcf_gate { + struct tc_action common; + struct tcf_gate_params param; + u8 current_gate_status; + ktime_t current_close_time; + u32 current_entry_octets; + s32 current_max_octets; + struct tcfg_gate_entry *next_entry; + struct hrtimer hitimer; + enum tk_offsets tk_offset; +}; + +#define to_gate(a) ((struct tcf_gate *)a) + +static inline bool is_tcf_gate(const struct tc_action *a) +{ +#ifdef CONFIG_NET_CLS_ACT + if (a->ops && a->ops->id == TCA_ID_GATE) + return true; +#endif + return false; +} + +static inline u32 tcf_gate_index(const struct tc_action *a) +{ + return a->tcfa_index; +} + +static inline s32 tcf_gate_prio(const struct tc_action *a) +{ + s32 tcfg_prio; + + tcfg_prio = to_gate(a)->param.tcfg_priority; + + return tcfg_prio; +} + +static inline u64 tcf_gate_basetime(const struct tc_action *a) +{ + u64 tcfg_basetime; + + tcfg_basetime = to_gate(a)->param.tcfg_basetime; + + return tcfg_basetime; +} + +static inline u64 tcf_gate_cycletime(const struct tc_action *a) +{ + u64 tcfg_cycletime; + + tcfg_cycletime = to_gate(a)->param.tcfg_cycletime; + + return tcfg_cycletime; +} + +static inline u64 tcf_gate_cycletimeext(const struct tc_action *a) +{ + u64 tcfg_cycletimeext; + + tcfg_cycletimeext = to_gate(a)->param.tcfg_cycletime_ext; + + return tcfg_cycletimeext; +} + +static inline u32 tcf_gate_num_entries(const struct tc_action *a) +{ + u32 num_entries; + + num_entries = to_gate(a)->param.num_entries; + + return num_entries; +} + +static inline struct action_gate_entry + *tcf_gate_get_list(const struct tc_action *a) +{ + struct action_gate_entry *oe; + struct tcf_gate_params *p; + struct tcfg_gate_entry *entry; + u32 num_entries; + int i = 0; + + p = &to_gate(a)->param; + num_entries = p->num_entries; + + list_for_each_entry(entry, &p->entries, list) + i++; + + if (i != num_entries) + return NULL; + + oe = kcalloc(num_entries, sizeof(*oe), GFP_ATOMIC); + if (!oe) + return NULL; + + i = 0; + list_for_each_entry(entry, &p->entries, list) { + oe[i].gate_state = entry->gate_state; + oe[i].interval = entry->interval; + oe[i].ipv = entry->ipv; + oe[i].maxoctets = entry->maxoctets; + i++; + } + + return oe; +} +#endif diff --git a/include/net/tcp.h b/include/net/tcp.h index 5fa9eacd965a..4de9485f73d9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -23,7 +23,6 @@ #include <linux/cache.h> #include <linux/percpu.h> #include <linux/skbuff.h> -#include <linux/cryptohash.h> #include <linux/kref.h> #include <linux/ktime.h> @@ -51,7 +50,7 @@ extern struct inet_hashinfo tcp_hashinfo; extern struct percpu_counter tcp_orphan_count; void tcp_time_wait(struct sock *sk, int state, int 
timeo); -#define MAX_TCP_HEADER (128 + MAX_HEADER) +#define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER) #define MAX_TCP_OPTION_SPACE 40 #define TCP_MIN_SND_MSS 48 #define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE) @@ -126,6 +125,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); * to combine FIN-WAIT-2 timeout with * TIME-WAIT timer. */ +#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */ #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */ #if HZ >= 100 @@ -436,6 +436,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops, void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); void tcp_req_err(struct sock *sk, u32 seq, bool abort); +void tcp_ld_RTO_revert(struct sock *sk, u32 seq); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(const struct sock *sk, struct request_sock *req, @@ -660,7 +661,6 @@ void tcp_initialize_rcv_mss(struct sock *sk); int tcp_mtu_to_mss(struct sock *sk, int pmtu); int tcp_mss_to_mtu(struct sock *sk, int mss); void tcp_mtup_init(struct sock *sk); -void tcp_init_buffer_space(struct sock *sk); static inline void tcp_bound_rto(const struct sock *sk) { @@ -1288,26 +1288,22 @@ static inline bool tcp_needs_internal_pacing(const struct sock *sk) return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED; } -/* Return in jiffies the delay before one skb is sent. - * If @skb is NULL, we look at EDT for next packet being sent on the socket. +/* Estimates in how many jiffies next packet for this flow can be sent. + * Scheduling a retransmit timer too early would be silly. */ -static inline unsigned long tcp_pacing_delay(const struct sock *sk, - const struct sk_buff *skb) +static inline unsigned long tcp_pacing_delay(const struct sock *sk) { - s64 pacing_delay = skb ? skb->tstamp : tcp_sk(sk)->tcp_wstamp_ns; + s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache; - pacing_delay -= tcp_sk(sk)->tcp_clock_cache; - - return pacing_delay > 0 ? nsecs_to_jiffies(pacing_delay) : 0; + return delay > 0 ? nsecs_to_jiffies(delay) : 0; } static inline void tcp_reset_xmit_timer(struct sock *sk, const int what, unsigned long when, - const unsigned long max_when, - const struct sk_buff *skb) + const unsigned long max_when) { - inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk, skb), + inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk), max_when); } @@ -1335,8 +1331,7 @@ static inline void tcp_check_probe_timer(struct sock *sk) { if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, - tcp_probe0_base(sk), TCP_RTO_MAX, - NULL); + tcp_probe0_base(sk), TCP_RTO_MAX); } static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) @@ -1376,7 +1371,6 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) rx_opt->num_sacks = 0; } -u32 tcp_default_init_rwnd(u32 mss); void tcp_cwnd_restart(struct sock *sk, s32 delta); static inline void tcp_slow_start_after_idle_check(struct sock *sk) @@ -1421,6 +1415,19 @@ static inline int tcp_full_space(const struct sock *sk) return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); } +/* We provision sk_rcvbuf around 200% of sk_rcvlowat. + * If 87.5 % (7/8) of the space has been consumed, we want to override + * SO_RCVLOWAT constraint, since we are receiving skbs with too small + * len/truesize ratio. 
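+ * + * A worked example: with sk_rcvbuf = 65536 the threshold computed below is + * 65536 - (65536 >> 3) = 57344 bytes, i.e. pressure is signalled once more + * than 7/8 of the buffer has been consumed.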
+ */ +static inline bool tcp_rmem_pressure(const struct sock *sk) +{ + int rcvbuf = READ_ONCE(sk->sk_rcvbuf); + int threshold = rcvbuf - (rcvbuf >> 3); + + return atomic_read(&sk->sk_rmem_alloc) > threshold; +} + extern void tcp_openreq_init_rwin(struct request_sock *req, const struct sock *sk_listener, const struct dst_entry *dst); diff --git a/include/net/tls.h b/include/net/tls.h index bf9eb4823933..3212d3c214a9 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -135,6 +135,8 @@ struct tls_sw_context_tx { struct tls_rec *open_rec; struct list_head tx_list; atomic_t encrypt_pending; + /* protect crypto_wait with encrypt_pending */ + spinlock_t encrypt_compl_lock; int async_notify; u8 async_capable:1; @@ -155,6 +157,8 @@ struct tls_sw_context_rx { u8 async_capable:1; u8 decrypted:1; atomic_t decrypt_pending; + /* protect crypto_wait with decrypt_pending */ + spinlock_t decrypt_compl_lock; bool async_notify; }; @@ -567,6 +571,15 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk) return !!tls_sw_ctx_tx(ctx); } +static inline bool tls_sw_has_ctx_rx(const struct sock *sk) +{ + struct tls_context *ctx = tls_get_ctx(sk); + + if (!ctx) + return false; + return !!tls_sw_ctx_rx(ctx); +} + void tls_sw_write_space(struct sock *sk, struct tls_context *ctx); void tls_device_write_space(struct sock *sk, struct tls_context *ctx); @@ -594,12 +607,22 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction) #endif /* The TLS context is valid until sk_destruct is called */ +#define RESYNC_REQ (1 << 0) +#define RESYNC_REQ_FORCE (1 << 1) static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) { struct tls_context *tls_ctx = tls_get_ctx(sk); struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1); + atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); +} + +static inline void tls_offload_rx_force_resync_request(struct sock *sk) +{ + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + + atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE); } static inline void diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 4b1f95e08307..e7312ceb2794 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -143,14 +143,12 @@ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb __be16 df, __be16 src_port, __be16 dst_port, bool xnet, bool nocheck); -#if IS_ENABLED(CONFIG_IPV6) int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, __be32 label, __be16 src_port, __be16 dst_port, bool nocheck); -#endif void udp_tunnel_sock_release(struct socket *sock); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 373aadcfea21..3a41627cbdfe 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -7,6 +7,7 @@ #include <net/dst_metadata.h> #include <net/rtnetlink.h> #include <net/switchdev.h> +#include <net/nexthop.h> #define IANA_VXLAN_UDP_PORT 4789 @@ -487,4 +488,28 @@ static inline void vxlan_flag_attr_error(int attrtype, #undef VXLAN_FLAG } +static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh, + int hash, + struct vxlan_rdst *rdst) +{ + struct fib_nh_common *nhc; + + nhc = nexthop_path_fdb_result(nh, hash); + if (unlikely(!nhc)) + return false; + + switch (nhc->nhc_gw_family) { + case AF_INET: + rdst->remote_ip.sin.sin_addr.s_addr = nhc->nhc_gw.ipv4; + rdst->remote_ip.sa.sa_family = AF_INET; + break; + case AF_INET6: + rdst->remote_ip.sin6.sin6_addr = nhc->nhc_gw.ipv6; + rdst->remote_ip.sa.sa_family = AF_INET6; + break; + } + + return true; +} + #endif diff --git a/include/net/xdp.h b/include/net/xdp.h index 40c6d3398458..609f819ed08b 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -6,6 +6,8 @@ #ifndef __LINUX_NET_XDP_H__ #define __LINUX_NET_XDP_H__ +#include <linux/skbuff.h> /* skb_shared_info */ + /** * DOC: XDP RX-queue information * @@ -37,7 +39,7 @@ enum xdp_mem_type { MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */ MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */ MEM_TYPE_PAGE_POOL, - MEM_TYPE_ZERO_COPY, + MEM_TYPE_XSK_BUFF_POOL, MEM_TYPE_MAX, }; @@ -52,10 +54,6 @@ struct xdp_mem_info { struct page_pool; -struct zero_copy_allocator { - void (*free)(struct zero_copy_allocator *zca, unsigned long handle); -}; - struct xdp_rxq_info { struct net_device *dev; u32 queue_index; @@ -63,20 +61,36 @@ struct xdp_rxq_info { struct xdp_mem_info mem; } ____cacheline_aligned; /* perf critical, avoid false-sharing */ +struct xdp_txq_info { + struct net_device *dev; +}; + struct xdp_buff { void *data; void *data_end; void *data_meta; void *data_hard_start; - unsigned long handle; struct xdp_rxq_info *rxq; + struct xdp_txq_info *txq; + u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom */ }; +/* Reserve memory area at the end of the data area. + * + * This macro reserves tailroom in the XDP buffer by limiting the + * XDP/BPF data access to data_hard_end. Note that the same area (and size) + * is used for XDP_PASS, when constructing the SKB via build_skb(). + */ +#define xdp_data_hard_end(xdp) \ + ((xdp)->data_hard_start + (xdp)->frame_sz - \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + struct xdp_frame { void *data; u16 len; u16 headroom; - u16 metasize; + u32 metasize:8; + u32 frame_sz:24; /* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time, * while mem info is valid on remote CPU.
*/ @@ -91,17 +105,31 @@ static inline void xdp_scrub_frame(struct xdp_frame *frame) frame->dev_rx = NULL; } +/* Avoids inlining WARN macro in fast-path */ +void xdp_warn(const char *msg, const char *func, const int line); +#define XDP_WARN(msg) xdp_warn(msg, __func__, __LINE__) + struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); +static inline +void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp) +{ + xdp->data_hard_start = frame->data - frame->headroom - sizeof(*frame); + xdp->data = frame->data; + xdp->data_end = frame->data + frame->len; + xdp->data_meta = frame->data - frame->metasize; + xdp->frame_sz = frame->frame_sz; +} + /* Convert xdp_buff to xdp_frame */ static inline -struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) +struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp) { struct xdp_frame *xdp_frame; int metasize; int headroom; - if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) + if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) return xdp_convert_zc_to_xdp_frame(xdp); /* Assure headroom is available for storing info */ @@ -111,6 +139,12 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) if (unlikely((headroom - metasize) < sizeof(*xdp_frame))) return NULL; + /* Catch if driver didn't reserve tailroom for skb_shared_info */ + if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) { + XDP_WARN("Driver BUG: missing reserved tailroom"); + return NULL; + } + /* Store info in top of packet */ xdp_frame = xdp->data_hard_start; @@ -118,6 +152,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) xdp_frame->len = xdp->data_end - xdp->data; xdp_frame->headroom = headroom - sizeof(*xdp_frame); xdp_frame->metasize = metasize; + xdp_frame->frame_sz = xdp->frame_sz; /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */ xdp_frame->mem = xdp->rxq->mem; @@ -181,4 +216,6 @@ bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, void xdp_attachment_setup(struct xdp_attachment_info *info, struct netdev_bpf *bpf); +#define DEV_MAP_BULK_SIZE 16 + #endif /* __LINUX_NET_XDP_H__ */ diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index e86ec48ef627..96bfc5f5f24e 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -15,42 +15,16 @@ struct net_device; struct xsk_queue; - -/* Masks for xdp_umem_page flags. - * The low 12-bits of the addr will be 0 since this is the page address, so we - * can use them for flags. - */ -#define XSK_NEXT_PG_CONTIG_SHIFT 0 -#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT) - -struct xdp_umem_page { - void *addr; - dma_addr_t dma; -}; - -struct xdp_umem_fq_reuse { - u32 nentries; - u32 length; - u64 handles[]; -}; - -/* Flags for the umem flags field. - * - * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public - * flags. See inlude/uapi/include/linux/if_xdp.h. 
- */ -#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1) +struct xdp_buff; struct xdp_umem { struct xsk_queue *fq; struct xsk_queue *cq; - struct xdp_umem_page *pages; - u64 chunk_mask; + struct xsk_buff_pool *pool; u64 size; u32 headroom; - u32 chunk_size_nohr; + u32 chunk_size; struct user_struct *user; - unsigned long address; refcount_t users; struct work_struct work; struct page **pgs; @@ -60,28 +34,17 @@ struct xdp_umem { u8 flags; int id; struct net_device *dev; - struct xdp_umem_fq_reuse *fq_reuse; bool zc; - spinlock_t xsk_list_lock; - struct list_head xsk_list; + spinlock_t xsk_tx_list_lock; + struct list_head xsk_tx_list; }; -/* Nodes are linked in the struct xdp_sock map_list field, and used to - * track which maps a certain socket reside in. - */ - struct xsk_map { struct bpf_map map; spinlock_t lock; /* Synchronize map updates */ struct xdp_sock *xsk_map[]; }; -struct xsk_map_node { - struct list_head node; - struct xsk_map *map; - struct xdp_sock **map_entry; -}; - struct xdp_sock { /* struct sock must be the first member of struct xdp_sock */ struct sock sk; @@ -112,32 +75,9 @@ struct xdp_sock { spinlock_t map_list_lock; }; -struct xdp_buff; #ifdef CONFIG_XDP_SOCKETS -int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); -bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs); -/* Used from netdev driver */ -bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt); -bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr); -void xsk_umem_release_addr(struct xdp_umem *umem); -void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); -bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc); -void xsk_umem_consume_tx_done(struct xdp_umem *umem); -struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries); -struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, - struct xdp_umem_fq_reuse *newq); -void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); -struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); -void xsk_set_rx_need_wakeup(struct xdp_umem *umem); -void xsk_set_tx_need_wakeup(struct xdp_umem *umem); -void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); -void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); -bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); -void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, - struct xdp_sock **map_entry); -int xsk_map_inc(struct xsk_map *map); -void xsk_map_put(struct xsk_map *map); +int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp); void __xsk_map_flush(void); @@ -154,219 +94,13 @@ static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, return xs; } -static inline u64 xsk_umem_extract_addr(u64 addr) -{ - return addr & XSK_UNALIGNED_BUF_ADDR_MASK; -} - -static inline u64 xsk_umem_extract_offset(u64 addr) -{ - return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; -} - -static inline u64 xsk_umem_add_offset_to_addr(u64 addr) -{ - return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr); -} - -static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) -{ - unsigned long page_addr; - - addr = xsk_umem_add_offset_to_addr(addr); - page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr; - - return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK); -} - -static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) -{ - addr = xsk_umem_add_offset_to_addr(addr); - - return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK); 
-} - -/* Reuse-queue aware version of FILL queue helpers */ -static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) -{ - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; - - if (rq->length >= cnt) - return true; - - return xsk_umem_has_addrs(umem, cnt - rq->length); -} - -static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) -{ - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; - - if (!rq->length) - return xsk_umem_peek_addr(umem, addr); - - *addr = rq->handles[rq->length - 1]; - return addr; -} - -static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem) -{ - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; - - if (!rq->length) - xsk_umem_release_addr(umem); - else - rq->length--; -} - -static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) -{ - struct xdp_umem_fq_reuse *rq = umem->fq_reuse; - - rq->handles[rq->length++] = addr; -} - -/* Handle the offset appropriately depending on aligned or unaligned mode. - * For unaligned mode, we store the offset in the upper 16-bits of the address. - * For aligned mode, we simply add the offset to the address. - */ -static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address, - u64 offset) -{ - if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) - return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); - else - return address + offset; -} #else + static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { return -ENOTSUPP; } -static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) -{ - return false; -} - -static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt) -{ - return false; -} - -static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) -{ - return NULL; -} - -static inline void xsk_umem_release_addr(struct xdp_umem *umem) -{ -} - -static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) -{ -} - -static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, - struct xdp_desc *desc) -{ - return false; -} - -static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) -{ -} - -static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries) -{ - return NULL; -} - -static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap( - struct xdp_umem *umem, - struct xdp_umem_fq_reuse *newq) -{ - return NULL; -} -static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq) -{ -} - -static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, - u16 queue_id) -{ - return NULL; -} - -static inline u64 xsk_umem_extract_addr(u64 addr) -{ - return 0; -} - -static inline u64 xsk_umem_extract_offset(u64 addr) -{ - return 0; -} - -static inline u64 xsk_umem_add_offset_to_addr(u64 addr) -{ - return 0; -} - -static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) -{ - return NULL; -} - -static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) -{ - return 0; -} - -static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) -{ - return false; -} - -static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) -{ - return NULL; -} - -static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem) -{ -} - -static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) -{ -} - -static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) -{ -} - -static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) -{ -} - -static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) -{ -} - -static inline void 
xsk_clear_tx_need_wakeup(struct xdp_umem *umem) -{ -} - -static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) -{ - return false; -} - -static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle, - u64 offset) -{ - return 0; -} - static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp) { return -EOPNOTSUPP; @@ -381,6 +115,7 @@ static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, { return NULL; } + #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h new file mode 100644 index 000000000000..ccf848f7efa4 --- /dev/null +++ b/include/net/xdp_sock_drv.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Interface for implementing AF_XDP zero-copy support in drivers. + * Copyright(c) 2020 Intel Corporation. + */ + +#ifndef _LINUX_XDP_SOCK_DRV_H +#define _LINUX_XDP_SOCK_DRV_H + +#include <net/xdp_sock.h> +#include <net/xsk_buff_pool.h> + +#ifdef CONFIG_XDP_SOCKETS + +void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); +bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc); +void xsk_umem_consume_tx_done(struct xdp_umem *umem); +struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); +void xsk_set_rx_need_wakeup(struct xdp_umem *umem); +void xsk_set_tx_need_wakeup(struct xdp_umem *umem); +void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); +void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); +bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); + +static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem) +{ + return XDP_PACKET_HEADROOM + umem->headroom; +} + +static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem) +{ + return umem->chunk_size; +} + +static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem); +} + +static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem, + struct xdp_rxq_info *rxq) +{ + xp_set_rxq_info(umem->pool, rxq); +} + +static inline void xsk_buff_dma_unmap(struct xdp_umem *umem, + unsigned long attrs) +{ + xp_dma_unmap(umem->pool, attrs); +} + +static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev, + unsigned long attrs) +{ + return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs); +} + +static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); + + return xp_get_dma(xskb); +} + +static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); + + return xp_get_frame_dma(xskb); +} + +static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem) +{ + return xp_alloc(umem->pool); +} + +static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count) +{ + return xp_can_alloc(umem->pool, count); +} + +static inline void xsk_buff_free(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp); + + xp_free(xskb); +} + +static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr) +{ + return xp_raw_get_dma(umem->pool, addr); +} + +static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr) +{ + return xp_raw_get_data(umem->pool, addr); +} + +static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) +{ + struct xdp_buff_xsk *xskb = container_of(xdp, struct 
xdp_buff_xsk, xdp); + + xp_dma_sync_for_cpu(xskb); +} + +static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem, + dma_addr_t dma, + size_t size) +{ + xp_dma_sync_for_device(umem->pool, dma, size); +} + +#else + +static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) +{ +} + +static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, + struct xdp_desc *desc) +{ + return false; +} + +static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem) +{ +} + +static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, + u16 queue_id) +{ + return NULL; +} + +static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) +{ + return false; +} + +static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem) +{ + return 0; +} + +static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem) +{ + return 0; +} + +static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem) +{ + return 0; +} + +static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem, + struct xdp_rxq_info *rxq) +{ +} + +static inline void xsk_buff_dma_unmap(struct xdp_umem *umem, + unsigned long attrs) +{ +} + +static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev, + unsigned long attrs) +{ + return 0; +} + +static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp) +{ + return 0; +} + +static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp) +{ + return 0; +} + +static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem) +{ + return NULL; +} + +static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count) +{ + return false; +} + +static inline void xsk_buff_free(struct xdp_buff *xdp) +{ +} + +static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr) +{ + return 0; +} + +static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr) +{ + return NULL; +} + +static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp) +{ +} + +static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem, + dma_addr_t dma, + size_t size) +{ +} + +#endif /* CONFIG_XDP_SOCKETS */ + +#endif /* _LINUX_XDP_SOCK_DRV_H */ diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 8f71c111e65a..094fe682f5d7 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -361,11 +361,6 @@ struct xfrm_state_afinfo { const struct xfrm_type *type_dstopts; int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); - int (*output_finish)(struct sock *sk, struct sk_buff *skb); - int (*extract_input)(struct xfrm_state *x, - struct sk_buff *skb); - int (*extract_output)(struct xfrm_state *x, - struct sk_buff *skb); int (*transport_finish)(struct sk_buff *skb, int async); void (*local_error)(struct sk_buff *skb, u32 mtu); @@ -1406,6 +1401,8 @@ struct xfrm4_protocol { struct xfrm6_protocol { int (*handler)(struct sk_buff *skb); + int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type); int (*cb_handler)(struct sk_buff *skb, int err); int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info); @@ -1562,7 +1559,6 @@ int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff 
*skb); #endif void xfrm_local_error(struct sk_buff *skb, int mtu); -int xfrm4_extract_header(struct sk_buff *skb); int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); @@ -1578,7 +1574,6 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) return xfrm_input(skb, nexthdr, spi, 0); } -int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb); int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); @@ -1586,10 +1581,11 @@ int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char prot int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); void xfrm4_local_error(struct sk_buff *skb, u32 mtu); -int xfrm6_extract_header(struct sk_buff *skb); int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, struct ip6_tnl *t); +int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, + int encap_type); int xfrm6_transport_finish(struct sk_buff *skb, int async); int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t); int xfrm6_rcv(struct sk_buff *skb); @@ -1602,14 +1598,15 @@ int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family); __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); -int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb); int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr); #ifdef CONFIG_XFRM +void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); +int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen); #else @@ -1992,4 +1989,20 @@ static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x, return 0; } + +#if IS_ENABLED(CONFIG_IPV6) +static inline bool xfrm6_local_dontfrag(const struct sock *sk) +{ + int proto; + + if (!sk || sk->sk_family != AF_INET6) + return false; + + proto = sk->sk_protocol; + if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) + return inet6_sk(sk)->dontfrag; + + return false; +} +#endif #endif /* _NET_XFRM_H */ diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h new file mode 100644 index 000000000000..a4ff226505c9 --- /dev/null +++ b/include/net/xsk_buff_pool.h @@ -0,0 +1,140 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2020 Intel Corporation. 
*/ + +#ifndef XSK_BUFF_POOL_H_ +#define XSK_BUFF_POOL_H_ + +#include <linux/if_xdp.h> +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <net/xdp.h> + +struct xsk_buff_pool; +struct xdp_rxq_info; +struct xsk_queue; +struct xdp_desc; +struct device; +struct page; + +struct xdp_buff_xsk { + struct xdp_buff xdp; + dma_addr_t dma; + dma_addr_t frame_dma; + struct xsk_buff_pool *pool; + bool unaligned; + u64 orig_addr; + struct list_head free_list_node; +}; + +struct xsk_buff_pool { + struct xsk_queue *fq; + struct list_head free_list; + dma_addr_t *dma_pages; + struct xdp_buff_xsk *heads; + u64 chunk_mask; + u64 addrs_cnt; + u32 free_list_cnt; + u32 dma_pages_cnt; + u32 heads_cnt; + u32 free_heads_cnt; + u32 headroom; + u32 chunk_size; + u32 frame_len; + bool cheap_dma; + bool unaligned; + void *addrs; + struct device *dev; + struct xdp_buff_xsk *free_heads[]; +}; + +/* AF_XDP core. */ +struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks, + u32 chunk_size, u32 headroom, u64 size, + bool unaligned); +void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq); +void xp_destroy(struct xsk_buff_pool *pool); +void xp_release(struct xdp_buff_xsk *xskb); + +/* AF_XDP, and XDP core. */ +void xp_free(struct xdp_buff_xsk *xskb); + +/* AF_XDP ZC drivers, via xdp_sock_drv.h */ +void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq); +int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev, + unsigned long attrs, struct page **pages, u32 nr_pages); +void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs); +struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool); +bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count); +void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr); +dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr); +static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb) +{ + return xskb->dma; +} + +static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb) +{ + return xskb->frame_dma; +} + +void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb); +static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb) +{ + if (xskb->pool->cheap_dma) + return; + + xp_dma_sync_for_cpu_slow(xskb); +} + +void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma, + size_t size); +static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool, + dma_addr_t dma, size_t size) +{ + if (pool->cheap_dma) + return; + + xp_dma_sync_for_device_slow(pool, dma, size); +} + +/* Masks for xdp_umem_page flags. + * The low 12-bits of the addr will be 0 since this is the page address, so we + * can use them for flags.
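+ * + * For example, a page-aligned address such as 0x2000 has bits 0-11 clear, + * so bit 0 can carry XSK_NEXT_PG_CONTIG_MASK without disturbing the address + * itself.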
+ */ +#define XSK_NEXT_PG_CONTIG_SHIFT 0 +#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT) + +static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool, + u64 addr, u32 len) +{ + bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE; + + if (pool->dma_pages_cnt && cross_pg) { + return !(pool->dma_pages[addr >> PAGE_SHIFT] & + XSK_NEXT_PG_CONTIG_MASK); + } + return false; +} + +static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr) +{ + return addr & pool->chunk_mask; +} + +static inline u64 xp_unaligned_extract_addr(u64 addr) +{ + return addr & XSK_UNALIGNED_BUF_ADDR_MASK; +} + +static inline u64 xp_unaligned_extract_offset(u64 addr) +{ + return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; +} + +static inline u64 xp_unaligned_add_offset_to_addr(u64 addr) +{ + return xp_unaligned_extract_addr(addr) + + xp_unaligned_extract_offset(addr); +} + +#endif /* XSK_BUFF_POOL_H_ */ diff --git a/include/pcmcia/cistpl.h b/include/pcmcia/cistpl.h index 59a011101e0e..749320cc9aba 100644 --- a/include/pcmcia/cistpl.h +++ b/include/pcmcia/cistpl.h @@ -161,7 +161,7 @@ typedef struct cistpl_funcid_t { typedef struct cistpl_funce_t { u_char type; - u_char data[0]; + u_char data[]; } cistpl_funce_t; /*====================================================================== @@ -255,7 +255,7 @@ typedef struct cistpl_data_serv_t { u_char escape; u_char encrypt; u_char misc_features; - u_char ccitt_code[0]; + u_char ccitt_code[]; } cistpl_data_serv_t; typedef struct cistpl_fax_serv_t { @@ -265,7 +265,7 @@ typedef struct cistpl_fax_serv_t { u_char encrypt; u_char features_0; u_char features_1; - u_char ccitt_code[0]; + u_char ccitt_code[]; } cistpl_fax_serv_t; typedef struct cistpl_voice_serv_t { diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 058cfbc2b37f..0f1ea5f2d01c 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h @@ -11,6 +11,7 @@ #include <rdma/ib_mad.h> #include <rdma/ib_sa.h> +#include <rdma/rdma_cm.h> /* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */ extern struct class cm_class; @@ -115,6 +116,7 @@ struct ib_cm_req_event_param { unsigned int retry_count:3; unsigned int rnr_retry_count:3; unsigned int srq:1; + struct rdma_ucm_ece ece; }; struct ib_cm_rep_event_param { @@ -129,6 +131,7 @@ struct ib_cm_rep_event_param { unsigned int flow_control:1; unsigned int rnr_retry_count:3; unsigned int srq:1; + struct rdma_ucm_ece ece; }; enum ib_cm_rej_reason { @@ -164,7 +167,8 @@ enum ib_cm_rej_reason { IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30, IB_CM_REJ_INVALID_CLASS_VERSION = 31, IB_CM_REJ_INVALID_FLOW_LABEL = 32, - IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33 + IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33, + IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED = 35, }; struct ib_cm_rej_event_param { @@ -369,6 +373,7 @@ struct ib_cm_req_param { u8 rnr_retry_count; u8 max_cm_retries; u8 srq; + struct rdma_ucm_ece ece; }; /** @@ -392,6 +397,7 @@ struct ib_cm_rep_param { u8 flow_control; u8 rnr_retry_count; u8 srq; + struct rdma_ucm_ece ece; }; /** @@ -546,6 +552,7 @@ struct ib_cm_sidr_rep_param { u8 info_length; const void *private_data; u8 private_data_len; + struct rdma_ucm_ece ece; }; /** diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h deleted file mode 100644 index 2fd9bfb6d648..000000000000 --- a/include/rdma/ib_fmr_pool.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Copyright (c) 2004 Topspin Corporation. All rights reserved. - * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 
- * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#if !defined(IB_FMR_POOL_H) -#define IB_FMR_POOL_H - -#include <rdma/ib_verbs.h> - -struct ib_fmr_pool; - -/** - * struct ib_fmr_pool_param - Parameters for creating FMR pool - * @max_pages_per_fmr:Maximum number of pages per map request. - * @page_shift: Log2 of sizeof "pages" mapped by this fmr - * @access:Access flags for FMRs in pool. - * @pool_size:Number of FMRs to allocate for pool. - * @dirty_watermark:Flush is triggered when @dirty_watermark dirty - * FMRs are present. - * @flush_function:Callback called when unmapped FMRs are flushed and - * more FMRs are possibly available for mapping - * @flush_arg:Context passed to user's flush function. - * @cache:If set, FMRs may be reused after unmapping for identical map - * requests. - */ -struct ib_fmr_pool_param { - int max_pages_per_fmr; - int page_shift; - enum ib_access_flags access; - int pool_size; - int dirty_watermark; - void (*flush_function)(struct ib_fmr_pool *pool, - void *arg); - void *flush_arg; - unsigned cache:1; -}; - -struct ib_pool_fmr { - struct ib_fmr *fmr; - struct ib_fmr_pool *pool; - struct list_head list; - struct hlist_node cache_node; - int ref_count; - int remap_count; - u64 io_virtual_address; - int page_list_len; - u64 page_list[]; -}; - -struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, - struct ib_fmr_pool_param *params); - -void ib_destroy_fmr_pool(struct ib_fmr_pool *pool); - -int ib_flush_fmr_pool(struct ib_fmr_pool *pool); - -struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle, - u64 *page_list, - int list_len, - u64 io_virtual_address); - -void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr); - -#endif /* IB_FMR_POOL_H */ diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 4e62650e2127..8c093fc1bb9f 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -559,20 +559,6 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent, struct ib_mad_send_wc *mad_send_wc); /** - * ib_mad_snoop_handler - Callback handler for snooping sent MADs. - * @mad_agent: MAD agent that snooped the MAD. - * @send_buf: send MAD data buffer. - * @mad_send_wc: Work completion information on the sent MAD. 
Valid - * only for snooping that occurs on a send completion. - * - * Clients snooping MADs should not modify data referenced by the @send_buf - * or @mad_send_wc. - */ -typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent, - struct ib_mad_send_buf *send_buf, - struct ib_mad_send_wc *mad_send_wc); - -/** * ib_mad_recv_handler - callback handler for a received MAD. * @mad_agent: MAD agent requesting the received MAD. * @send_buf: Send buffer if found, else NULL @@ -581,8 +567,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent, * MADs received in response to a send request operation will be handed to * the user before the send operation completes. All data buffers given * to registered agents through this routine are owned by the receiving - * client, except for snooping agents. Clients snooping MADs should not - * modify the data referenced by @mad_recv_wc. + * client. */ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent, struct ib_mad_send_buf *send_buf, @@ -595,7 +580,6 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent, * @mr: Memory region for system memory usable for DMA. * @recv_handler: Callback handler for a received MAD. * @send_handler: Callback handler for a sent MAD. - * @snoop_handler: Callback handler for snooped sent MADs. * @context: User-specified context associated with this registration. * @hi_tid: Access layer assigned transaction ID for this client. * Unsolicited MADs sent by this client will have the upper 32-bits @@ -612,7 +596,6 @@ struct ib_mad_agent { struct ib_qp *qp; ib_mad_recv_handler recv_handler; ib_mad_send_handler send_handler; - ib_mad_snoop_handler snoop_handler; void *context; u32 hi_tid; u32 flags; @@ -720,36 +703,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, ib_mad_recv_handler recv_handler, void *context, u32 registration_flags); - -enum ib_mad_snoop_flags { - /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/ - /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/ - IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2), - /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/ - IB_MAD_SNOOP_RECVS = (1<<4) - /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/ - /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/ -}; - -/** - * ib_register_mad_snoop - Register to snoop sent and received MADs. - * @device: The device to register with. - * @port_num: The port on the specified device to use. - * @qp_type: Specifies which QP traffic to snoop. Must be either - * IB_QPT_SMI or IB_QPT_GSI. - * @mad_snoop_flags: Specifies information where snooping occurs. - * @send_handler: The callback routine invoked for a snooped send. - * @recv_handler: The callback routine invoked for a snooped receive. - * @context: User specified context associated with the registration. - */ -struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, - u8 port_num, - enum ib_qp_type qp_type, - int mad_snoop_flags, - ib_mad_snoop_handler snoop_handler, - ib_mad_recv_handler recv_handler, - void *context); - /** * ib_unregister_mad_agent - Unregisters a client from using MAD services. * @mad_agent: Corresponding MAD registration request to deregister. 
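The hunks above drop MAD snooping without a replacement: ib_register_mad_snoop() and the ib_mad_snoop_handler callback are gone, so a client that wants to observe MADs must now own them through an agent it registers itself. A minimal sketch of that surviving path, assuming the ib_register_mad_agent() signature in this tree; the my_* names are illustrative and error handling is trimmed:

#include <rdma/ib_mad.h>

/* Buffers handed to the recv handler are owned by this client and must
 * be returned to the MAD layer once processed.
 */
static void my_recv_handler(struct ib_mad_agent *agent,
			    struct ib_mad_send_buf *send_buf,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	ib_free_recv_mad(mad_recv_wc);
}

static int my_client_add(struct ib_device *ibdev)
{
	struct ib_mad_agent *agent;

	/* NULL mad_reg_req: only responses to this agent's own sends are
	 * delivered; pass a populated struct ib_mad_reg_req to claim a
	 * management class.
	 */
	agent = ib_register_mad_agent(ibdev, 1 /* port */, IB_QPT_GSI,
				      NULL /* mad_reg_req */,
				      0 /* rmpp_version */,
				      NULL /* send_handler */,
				      my_recv_handler,
				      NULL /* context */,
				      0 /* registration_flags */);
	if (IS_ERR(agent))
		return PTR_ERR(agent);

	/* struct ib_client::add now returns int (see the ib_verbs.h hunk
	 * below), so a registration failure can be propagated to the core.
	 */
	return 0;
}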
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index bbc5cfb57cd2..ef2f3986c493 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -100,7 +100,8 @@ void ibdev_notice(const struct ib_device *ibdev, const char *format, ...); __printf(2, 3) __cold void ibdev_info(const struct ib_device *ibdev, const char *format, ...); -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define ibdev_dbg(__dev, format, args...) \ dynamic_ibdev_dbg(__dev, format, ##args) #else @@ -133,7 +134,8 @@ do { \ #define ibdev_info_ratelimited(ibdev, fmt, ...) \ ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define ibdev_dbg_ratelimited(ibdev, fmt, ...) \ do { \ @@ -305,7 +307,7 @@ enum ib_device_cap_flags { IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */ IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), - IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35), + IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35), /* The device supports padding incoming writes to cacheline. */ IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36), IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37), @@ -430,8 +432,6 @@ struct ib_device_attr { int max_mcast_qp_attach; int max_total_mcast_qp_attach; int max_ah; - int max_fmr; - int max_map_per_fmr; int max_srq; int max_srq_wr; int max_srq_sge; @@ -462,6 +462,11 @@ enum ib_mtu { IB_MTU_4096 = 5 }; +enum opa_mtu { + OPA_MTU_8192 = 6, + OPA_MTU_10240 = 7 +}; + static inline int ib_mtu_enum_to_int(enum ib_mtu mtu) { switch (mtu) { @@ -488,6 +493,28 @@ static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) return IB_MTU_256; } +static inline int opa_mtu_enum_to_int(enum opa_mtu mtu) +{ + switch (mtu) { + case OPA_MTU_8192: + return 8192; + case OPA_MTU_10240: + return 10240; + default: + return(ib_mtu_enum_to_int((enum ib_mtu)mtu)); + } +} + +static inline enum opa_mtu opa_mtu_int_to_enum(int mtu) +{ + if (mtu >= 10240) + return OPA_MTU_10240; + else if (mtu >= 8192) + return OPA_MTU_8192; + else + return ((enum opa_mtu)ib_mtu_int_to_enum(mtu)); +} + enum ib_port_state { IB_PORT_NOP = 0, IB_PORT_DOWN = 1, @@ -651,6 +678,7 @@ struct ib_port_attr { enum ib_port_state state; enum ib_mtu max_mtu; enum ib_mtu active_mtu; + u32 phys_mtu; int gid_tbl_len; unsigned int ip_gids:1; /* This is the value from PortInfo CapabilityMask, defined by IBA */ @@ -880,6 +908,12 @@ struct ib_mr_status { */ __attribute_const__ enum ib_rate mult_to_ib_rate(int mult); +struct rdma_ah_init_attr { + struct rdma_ah_attr *ah_attr; + u32 flags; + struct net_device *xmit_slave; +}; + enum rdma_ah_attr_type { RDMA_AH_ATTR_TYPE_UNDEFINED, RDMA_AH_ATTR_TYPE_IB, @@ -1006,9 +1040,9 @@ enum ib_cq_notify_flags { }; enum ib_srq_type { - IB_SRQT_BASIC, - IB_SRQT_XRC, - IB_SRQT_TM, + IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC, + IB_SRQT_XRC = IB_UVERBS_SRQT_XRC, + IB_SRQT_TM = IB_UVERBS_SRQT_TM, }; static inline bool ib_srq_has_cq(enum ib_srq_type srq_type) @@ -1077,16 +1111,16 @@ enum ib_qp_type { IB_QPT_SMI, IB_QPT_GSI, - IB_QPT_RC, - IB_QPT_UC, - IB_QPT_UD, + IB_QPT_RC = IB_UVERBS_QPT_RC, + IB_QPT_UC = IB_UVERBS_QPT_UC, + IB_QPT_UD = IB_UVERBS_QPT_UD, IB_QPT_RAW_IPV6, IB_QPT_RAW_ETHERTYPE, - IB_QPT_RAW_PACKET = 8, - IB_QPT_XRC_INI = 9, - IB_QPT_XRC_TGT, + 
IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET, + IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI, + IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT, IB_QPT_MAX, - IB_QPT_DRIVER = 0xFF, + IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER, /* Reserve a range for qp types internal to the low level driver. * These qp types will not be visible at the IB core layer, so the * IB_QPT_MAX usages should not be affected in the core layer @@ -1105,17 +1139,21 @@ enum ib_qp_type { enum ib_qp_create_flags { IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0, - IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, + IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, IB_QP_CREATE_CROSS_CHANNEL = 1 << 2, IB_QP_CREATE_MANAGED_SEND = 1 << 3, IB_QP_CREATE_MANAGED_RECV = 1 << 4, IB_QP_CREATE_NETIF_QP = 1 << 5, IB_QP_CREATE_INTEGRITY_EN = 1 << 6, - /* FREE = 1 << 7, */ - IB_QP_CREATE_SCATTER_FCS = 1 << 8, - IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9, + IB_QP_CREATE_NETDEV_USE = 1 << 7, + IB_QP_CREATE_SCATTER_FCS = + IB_UVERBS_QP_CREATE_SCATTER_FCS, + IB_QP_CREATE_CVLAN_STRIPPING = + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING, IB_QP_CREATE_SOURCE_QPN = 1 << 10, - IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11, + IB_QP_CREATE_PCI_WRITE_END_PADDING = + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING, /* reserve bits 26-31 for low level drivers' internal use */ IB_QP_CREATE_RESERVED_START = 1 << 26, IB_QP_CREATE_RESERVED_END = 1 << 31, @@ -1267,6 +1305,7 @@ struct ib_qp_attr { u8 alt_port_num; u8 alt_timeout; u32 rate_limit; + struct net_device *xmit_slave; }; enum ib_wr_opcode { @@ -1436,12 +1475,6 @@ enum ib_mr_rereg_flags { IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1) }; -struct ib_fmr_attr { - int max_pages; - int max_maps; - u8 page_shift; -}; - struct ib_umem; enum rdma_remove_reason { @@ -1456,6 +1489,11 @@ enum rdma_remove_reason { RDMA_REMOVE_DRIVER_REMOVE, /* uobj is being cleaned-up before being committed */ RDMA_REMOVE_ABORT, + /* + * uobj has been fully created, with the uobj->object set, but is being + * cleaned up before being committed + */ + RDMA_REMOVE_ABORT_HWOBJ, }; struct ib_rdmacg_object { @@ -1544,10 +1582,12 @@ struct ib_ah { typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); enum ib_poll_context { - IB_POLL_DIRECT, /* caller context, no hw completions */ IB_POLL_SOFTIRQ, /* poll from softirq context */ IB_POLL_WORKQUEUE, /* poll from workqueue */ IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */ + IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE, + + IB_POLL_DIRECT, /* caller context, no hw completions */ }; struct ib_cq { @@ -1557,9 +1597,11 @@ void (*event_handler)(struct ib_event *, void *); void *cq_context; int cqe; + unsigned int cqe_used; atomic_t usecnt; /* count number of work queues */ enum ib_poll_context poll_ctx; struct ib_wc *wc; + struct list_head pool_entry; union { struct irq_poll iop; struct work_struct work; @@ -1569,7 +1611,9 @@ struct ib_cq { /* updated only by trace points */ ktime_t timestamp; - bool interrupt; + u8 interrupt:1; + u8 shared:1; + unsigned int comp_vector; /* * Implementation details of the RDMA core, don't use in drivers: @@ -1614,7 +1658,7 @@ enum ib_raw_packet_caps { }; enum ib_wq_type { - IB_WQT_RQ + IB_WQT_RQ = IB_UVERBS_WQT_RQ, }; enum ib_wq_state { @@ -1637,10 +1681,11 @@ struct ib_wq { }; enum ib_wq_flags { - IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, - IB_WQ_FLAGS_SCATTER_FCS = 1 << 1, - IB_WQ_FLAGS_DELAY_DROP = 1 << 2, - IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3, + IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING, + IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS, + IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP, + IB_WQ_FLAGS_PCI_WRITE_END_PADDING = + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING, }; struct ib_wq_init_attr { @@ -1804,14 +1849,6 @@ struct ib_mw { enum ib_mw_type type; }; -struct ib_fmr { - struct ib_device *device; - struct ib_pd *pd; - struct list_head list; - u32 lkey; - u32 rkey; -}; - /* Supported steering options */ enum ib_flow_attr_type { /* steering according to rule specifications */ @@ -2198,6 +2235,7 @@ struct rdma_netdev { void *clnt_priv; struct ib_device *hca; u8 port_num; + int mtu; /* * cleanup function must be specified. @@ -2403,8 +2441,8 @@ struct ib_device_ops { void (*disassociate_ucontext)(struct ib_ucontext *ibcontext); int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata); void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata); - int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr, - u32 flags, struct ib_udata *udata); + int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr, + struct ib_udata *udata); int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); void (*destroy_ah)(struct ib_ah *ah, u32 flags); @@ -2453,12 +2491,6 @@ struct ib_device_ops { struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type, struct ib_udata *udata); int (*dealloc_mw)(struct ib_mw *mw); - struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr); - int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len, - u64 iova); - int (*unmap_fmr)(struct list_head *fmr_list); - int (*dealloc_fmr)(struct ib_fmr *fmr); int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid); struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device, @@ -2687,6 +2719,10 @@ struct ib_device { #endif u32 index; + + spinlock_t cq_pools_lock; + struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1]; + struct rdma_restrack_root *res; const struct uapi_definition *driver_def; @@ -2709,12 +2745,13 @@ struct ib_device { /* Used by iWarp CM */ char iw_ifname[IFNAMSIZ]; u32 iw_driver_flags; + u32 lag_flags; }; struct ib_client_nl_info; struct ib_client { const char *name; - void (*add) (struct ib_device *); + int (*add)(struct ib_device *ibdev); void (*remove)(struct ib_device *, void *client_data); void (*rename)(struct ib_device *dev, void *client_data); int (*get_nl_info)(struct ib_device *ibdev, void *client_data, @@ -3355,6 +3392,55 @@ static inline unsigned int rdma_find_pg_bit(unsigned long addr, return __fls(pgsz); } +/** + * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not. + * @device: Device + * @port_num: 1 based Port number + * + * Return true if port is an Intel OPA port, false if not + */ +static inline bool rdma_core_cap_opa_port(struct ib_device *device, + u32 port_num) +{ + return (device->port_data[port_num].immutable.core_cap_flags & + RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA; +} + +/** + * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value. + * @device: Device + * @port: Port number + * @mtu: enum value of MTU + * + * Return the MTU size supported by the port as an integer value. Will return + * -1 if enum value of mtu is not supported.
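+ * + * For example, on an OPA port mtu = OPA_MTU_8192 (6) yields 8192, while the + * same value on a plain IB port is not a valid enum ib_mtu and yields -1.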
+ */ +static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port, + int mtu) +{ + if (rdma_core_cap_opa_port(device, port)) + return opa_mtu_enum_to_int((enum opa_mtu)mtu); + else + return ib_mtu_enum_to_int((enum ib_mtu)mtu); +} + +/** + * rdma_mtu_from_attr - Return the mtu of the port from the port attribute. + * @device: Device + * @port_num: Port number + * @attr: port attribute + * + * Return the MTU size supported by the port as an integer value. + */ +static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port, + struct ib_port_attr *attr) +{ + if (rdma_core_cap_opa_port(device, port)) + return attr->phys_mtu; + else + return ib_mtu_enum_to_int(attr->max_mtu); +} + int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port, int state); int ib_get_vf_config(struct ib_device *device, int vf, u8 port, @@ -3551,21 +3637,18 @@ static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags) return rdma_destroy_ah_user(ah, flags, NULL); } -/** - * ib_create_srq - Creates a SRQ associated with the specified protection - * domain. - * @pd: The protection domain associated with the SRQ. - * @srq_init_attr: A list of initial attributes required to create the - * SRQ. If SRQ creation succeeds, then the attributes are updated to - * the actual capabilities of the created SRQ. - * - * srq_attr->max_wr and srq_attr->max_sge are read the determine the - * requested size of the SRQ, and set to the actual values allocated - * on return. If ib_create_srq() succeeds, then max_wr and max_sge - * will always be at least as large as the requested values. - */ -struct ib_srq *ib_create_srq(struct ib_pd *pd, - struct ib_srq_init_attr *srq_init_attr); +struct ib_srq *ib_create_srq_user(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_usrq_object *uobject, + struct ib_udata *udata); +static inline struct ib_srq * +ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr) +{ + if (!pd->device->ops.create_srq) + return ERR_PTR(-EOPNOTSUPP); + + return ib_create_srq_user(pd, srq_init_attr, NULL, NULL); +} /** * ib_modify_srq - Modifies the attributes for the specified SRQ. @@ -3816,6 +3899,8 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev, * ib_free_cq_user - Free kernel/user CQ * @cq: The CQ to free * @udata: Valid user data or NULL for kernel objects + * + * NOTE: This function shouldn't be called on shared CQs. */ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata); @@ -3941,6 +4026,12 @@ static inline int ib_req_notify_cq(struct ib_cq *cq, return cq->device->ops.req_notify_cq(cq, flags); } +struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe, + int comp_vector_hint, + enum ib_poll_context poll_ctx); + +void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe); + /** * ib_req_ncomp_notif - Request completion notification when there are * at least the specified number of unreaped completions on the CQ. @@ -4209,45 +4300,6 @@ static inline u32 ib_inc_rkey(u32 rkey) } /** - * ib_alloc_fmr - Allocates a unmapped fast memory region. - * @pd: The protection domain associated with the unmapped region. - * @mr_access_flags: Specifies the memory access rights. - * @fmr_attr: Attributes of the unmapped region. - * - * A fast memory region must be mapped before it can be used as part of - * a work request. - */ -struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd, - int mr_access_flags, - struct ib_fmr_attr *fmr_attr); - -/** - * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region. 
- * @fmr: The fast memory region to associate with the pages. - * @page_list: An array of physical pages to map to the fast memory region. - * @list_len: The number of pages in page_list. - * @iova: The I/O virtual address to use with the mapped region. - */ -static inline int ib_map_phys_fmr(struct ib_fmr *fmr, - u64 *page_list, int list_len, - u64 iova) -{ - return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova); -} - -/** - * ib_unmap_fmr - Removes the mapping from a list of fast memory regions. - * @fmr_list: A linked list of fast memory regions to unmap. - */ -int ib_unmap_fmr(struct list_head *fmr_list); - -/** - * ib_dealloc_fmr - Deallocates a fast memory region. - * @fmr: The fast memory region to deallocate. - */ -int ib_dealloc_fmr(struct ib_fmr *fmr); - -/** * ib_attach_mcast - Attaches the specified QP to a multicast group. * @qp: QP to attach to the multicast group. The QP must be type * IB_QPT_UD. @@ -4701,4 +4753,48 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device) bool rdma_dev_access_netns(const struct ib_device *device, const struct net *net); + +#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) +#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) + +/** + * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based + * on the flow_label + * + * This function will convert the 20 bit flow_label input to a valid 14 bit + * RoCE v2 UDP src port value. All RoCE V2 drivers should use this same + * convention. + */ +static inline u16 rdma_flow_label_to_udp_sport(u32 fl) +{ + u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000; + + fl_low ^= fl_high >> 14; + return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN); +} + +/** + * rdma_calc_flow_label - generate an RDMA symmetric flow label value based on + * local and remote qpn values + * + * This function folds the multiplication result of the two 24 bit qpn + * fields and converts it to a 20 bit result. + * + * This function will create a symmetric flow_label value based on the local + * and remote qpn values. This will allow both the requester and responder + * to calculate the same flow_label for a given connection. + * + * This helper function should be used by the driver in case the upper layer + * provides a zero flow_label value. This is to improve entropy of RDMA + * traffic in the network. 
+ */ +static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn) +{ + u64 v = (u64)lqpn * rqpn; + + v ^= v >> 20; + v ^= v >> 40; + + return (u32)(v & IB_GRH_FLOWLABEL_MASK); +} #endif /* IB_VERBS_H */ diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h index 269904425d3f..960c86bec76c 100644 --- a/include/rdma/ibta_vol1_c12.h +++ b/include/rdma/ibta_vol1_c12.h @@ -38,6 +38,7 @@ /* Table 106 REQ Message Contents */ #define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32) +#define CM_REQ_VENDOR_ID CM_FIELD32_LOC(struct cm_req_msg, 5, 24) #define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8) #define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16) #define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32) @@ -119,8 +120,11 @@ CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184); #define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32) #define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32) #define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24) +#define CM_REP_VENDOR_ID_H CM_FIELD8_LOC(struct cm_rep_msg, 15, 8) #define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24) +#define CM_REP_VENDOR_ID_M CM_FIELD8_LOC(struct cm_rep_msg, 19, 8) #define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24) +#define CM_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_rep_msg, 23, 8) #define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8) #define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8) #define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5) @@ -201,7 +205,9 @@ CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728); #define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8) #define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH \ CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8) +#define CM_SIDR_REP_VENDOR_ID_H CM_FIELD16_LOC(struct cm_sidr_rep_msg, 6, 16) #define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24) +#define CM_SIDR_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_sidr_rep_msg, 11, 8) #define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12) #define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32) #define CM_SIDR_REP_ADDITIONAL_INFORMATION \ diff --git a/include/rdma/lag.h b/include/rdma/lag.h new file mode 100644 index 000000000000..7c06ec9b2eef --- /dev/null +++ b/include/rdma/lag.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* + * Copyright (c) 2020 Mellanox Technologies. All rights reserved. + */ + +#ifndef _RDMA_LAG_H_ +#define _RDMA_LAG_H_ + +#include <net/lag.h> + +struct ib_device; +struct rdma_ah_attr; + +enum rdma_lag_flags { + RDMA_LAG_FLAGS_HASH_ALL_SLAVES = 1 << 0 +}; + +void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave); +struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device, + struct rdma_ah_attr *ah_attr, + gfp_t flags); + +#endif /* _RDMA_LAG_H_ */ diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h index bdbfe25d3854..0d9e6d74c385 100644 --- a/include/rdma/opa_port_info.h +++ b/include/rdma/opa_port_info.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017 Intel Corporation. All rights reserved. + * Copyright (c) 2014-2020 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -139,14 +139,6 @@ #define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1) #define OPA_CAP_MASK3_IsVLrSupported (1 << 0) -/** - * new MTU values - */ -enum { - OPA_MTU_8192 = 6, - OPA_MTU_10240 = 7, -}; - enum { OPA_PORT_PHYS_CONF_DISCONNECTED = 0, OPA_PORT_PHYS_CONF_STANDARD = 1, diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h index e90b149fe92a..6f244e759b4f 100644 --- a/include/rdma/opa_vnic.h +++ b/include/rdma/opa_vnic.h @@ -1,7 +1,7 @@ #ifndef _OPA_VNIC_H #define _OPA_VNIC_H /* - * Copyright(c) 2017 Intel Corporation. + * Copyright(c) 2017 - 2020 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -132,7 +132,7 @@ struct opa_vnic_stats { static inline bool rdma_cap_opa_vnic(struct ib_device *device) { return !!(device->attrs.device_cap_flags & - IB_DEVICE_RDMA_NETDEV_OPA_VNIC); + IB_DEVICE_RDMA_NETDEV_OPA); } #endif /* _OPA_VNIC_H */ diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 71f48cfdc24c..939d7abe026f 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -111,6 +111,7 @@ struct rdma_cm_event { struct rdma_conn_param conn; struct rdma_ud_param ud; } param; + struct rdma_ucm_ece ece; }; struct rdma_cm_id; @@ -264,6 +265,9 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, */ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); +int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, + struct rdma_ucm_ece *ece); + /** * rdma_listen - This function is called by the passive side to * listen for incoming connection requests. @@ -276,6 +280,9 @@ int rdma_listen(struct rdma_cm_id *id, int backlog); int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, const char *caller); +int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param, + const char *caller, struct rdma_ucm_ece *ece); + /** * rdma_accept - Called to accept a connection request or response. * @id: Connection identifier associated with the request. @@ -313,7 +320,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event); * rdma_reject - Called to reject a connection request or response. */ int rdma_reject(struct rdma_cm_id *id, const void *private_data, - u8 private_data_len); + u8 private_data_len, u8 reason); /** * rdma_disconnect - This function disconnects the associated QP and @@ -390,14 +397,6 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr); const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, int reason); /** - * rdma_is_consumer_reject - return true if the consumer rejected the connect - * request. - * @id: Communication identifier that received the REJECT event. - * @reason: Value returned in the REJECT event status field. - */ -bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason); - -/** * rdma_consumer_reject_data - return the consumer reject private data and * length, if any. * @id: Communication identifier that received the REJECT event. diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 5fc10108703a..c4369a6c2951 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -2,7 +2,7 @@ #define DEF_RDMAVT_INCQP_H /* - * Copyright(c) 2016 - 2019 Intel Corporation. + * Copyright(c) 2016 - 2020 Intel Corporation. * * This file is provided under a dual BSD/GPLv2 license. 
When using or * redistributing this file, you may do so under either license. @@ -69,6 +69,33 @@ #define RVT_R_COMM_EST 0x10 /* + * If a packet's QP[23:16] bits match this value, then it is + * a PSM packet and the hardware will expect a KDETH header + * following the BTH. + */ +#define RVT_KDETH_QP_PREFIX 0x80 +#define RVT_KDETH_QP_SUFFIX 0xffff +#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000 +#define RVT_KDETH_QP_PREFIX_SHIFT 16 +#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \ + RVT_KDETH_QP_PREFIX_SHIFT) +#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX) + +/* + * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this + * prefix value, then it is an AIP packet with a DETH containing the entropy + * value in byte 4 following the BTH. + */ +#define RVT_AIP_QP_PREFIX 0x81 +#define RVT_AIP_QP_SUFFIX 0xffff +#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000 +#define RVT_AIP_QP_PREFIX_SHIFT 16 +#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \ + RVT_AIP_QP_PREFIX_SHIFT) +#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT) +#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1) + +/* * Bit definitions for s_flags. * * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled @@ -440,7 +467,7 @@ struct rvt_qp { /* * This sge list MUST be last. Do not add anything below here. */ - struct rvt_sge r_sg_list[0] /* verified SGEs */ + struct rvt_sge r_sg_list[] /* verified SGEs */ ____cacheline_aligned_in_smp; }; diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h index 9f3b1e004046..86de10ea30af 100644 --- a/include/rdma/uverbs_ioctl.h +++ b/include/rdma/uverbs_ioctl.h @@ -420,9 +420,9 @@ struct uapi_definition { .scope = UAPI_SCOPE_OBJECT, \ .needs_fn_offset = \ offsetof(struct ib_device_ops, ibdev_fn) + \ - BUILD_BUG_ON_ZERO( \ - sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \ - sizeof(void *)), \ + BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \ + ibdev_fn) != \ + sizeof(void *)), \ } /* @@ -435,9 +435,9 @@ struct uapi_definition { .scope = UAPI_SCOPE_METHOD, \ .needs_fn_offset = \ offsetof(struct ib_device_ops, ibdev_fn) + \ - BUILD_BUG_ON_ZERO( \ - sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \ - sizeof(void *)), \ + BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \ + ibdev_fn) != \ + sizeof(void *)), \ } /* Call a function to determine if the entire object is supported or not */ @@ -491,8 +491,7 @@ struct uapi_definition { */ #define UVERBS_ATTR_STRUCT(_type, _last) \ .zero_trailing = 1, \ - UVERBS_ATTR_SIZE(((uintptr_t)(&((_type *)0)->_last + 1)), \ - sizeof(_type)) + UVERBS_ATTR_SIZE(offsetofend(_type, _last), sizeof(_type)) /* * Specifies at least min_len bytes must be passed in, but the amount can be * larger, up to the protocol maximum size. No check for zeroing is done. @@ -737,6 +736,9 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx) return attr->ptr_attr.len; } +void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *attrs_bundle, + u16 idx); + /* * uverbs_attr_ptr_get_array_size() - Get array size pointer by a ptr * attribute. 
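The uverbs_ioctl.h hunks above replace open-coded NULL-pointer arithmetic with the kernel's sizeof_field() and offsetofend() helpers. As a sanity check, here is a small standalone C sketch (hypothetical userspace code, not part of the patch; the macro bodies mirror the kernel's include/linux/stddef.h definitions) showing that the helpers compute exactly the same values as the expressions they replace:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* minimal stand-ins for the kernel helpers used by the new macros */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER))

struct demo {			/* hypothetical layout for illustration */
	uint32_t first;
	uint64_t last;
};

int main(void)
{
	/* offsetofend() matches the old UVERBS_ATTR_STRUCT() expression
	 * (uintptr_t)(&((_type *)0)->_last + 1) */
	assert(offsetofend(struct demo, last) ==
	       (uintptr_t)(&((struct demo *)0)->last + 1));
	/* sizeof_field() matches sizeof(((struct demo *)0)->last) as used
	 * by the old UAPI_DEF_*_NEEDS_FN forms */
	assert(sizeof_field(struct demo, last) ==
	       sizeof(((struct demo *)0)->last));
	return 0;
}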
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index 1b28ce1aba07..bf0392ae15eb 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -88,7 +88,7 @@ struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj, static inline void uobj_put_destroy(struct ib_uobject *uobj) { - rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE); + rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_DESTROY); } static inline void uobj_put_read(struct ib_uobject *uobj) @@ -107,7 +107,7 @@ static inline void uobj_put_write(struct ib_uobject *uobj) static inline void uobj_alloc_abort(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs) { - rdma_alloc_abort_uobject(uobj, attrs); + rdma_alloc_abort_uobject(uobj, attrs, false); } static inline struct ib_uobject * diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h index f1cbdae67250..c15b298aa62f 100644 --- a/include/rdma/uverbs_types.h +++ b/include/rdma/uverbs_types.h @@ -139,7 +139,8 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj, struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj, struct uverbs_attr_bundle *attrs); void rdma_alloc_abort_uobject(struct ib_uobject *uobj, - struct uverbs_attr_bundle *attrs); + struct uverbs_attr_bundle *attrs, + bool hw_obj_valid); void rdma_alloc_commit_uobject(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs); diff --git a/include/scsi/sas.h b/include/scsi/sas.h index a5d8ae49198c..4726c1bbec65 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -324,7 +324,7 @@ struct ssp_response_iu { __be32 response_data_len; u8 resp_data[0]; - u8 sense_data[0]; + u8 sense_data[]; } __attribute__ ((packed)); struct ssp_command_iu { @@ -346,7 +346,7 @@ struct ssp_command_iu { u8 add_cdb_len:6; u8 cdb[16]; - u8 add_cdb[0]; + u8 add_cdb[]; } __attribute__ ((packed)); struct xfer_rdy_iu { @@ -555,7 +555,7 @@ struct ssp_response_iu { __be32 response_data_len; u8 resp_data[0]; - u8 sense_data[0]; + u8 sense_data[]; } __attribute__ ((packed)); struct ssp_command_iu { @@ -577,7 +577,7 @@ struct ssp_command_iu { u8 _r_c:2; u8 cdb[16]; - u8 add_cdb[0]; + u8 add_cdb[]; } __attribute__ ((packed)); struct xfer_rdy_iu { diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 80ac89e47b47..e76bac4d14c5 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -68,7 +68,6 @@ struct scsi_pointer { struct scsi_cmnd { struct scsi_request req; struct scsi_device *device; - struct list_head list; /* scsi_cmnd participates in queue lists */ struct list_head eh_entry; /* entry for the host eh_cmd_q */ struct delayed_work abort_work; @@ -142,6 +141,7 @@ struct scsi_cmnd { unsigned long state; /* Command completion state */ unsigned char tag; /* SCSI-II queued command tag */ + unsigned int extra_len; /* length of alignment and padding */ }; /* diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index c3cba2aaf934..bc5909033d13 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -229,6 +229,9 @@ struct scsi_device { struct scsi_device_handler *handler; void *handler_data; + size_t dma_drain_len; + void *dma_drain_buf; + unsigned char access_state; struct mutex state_mutex; enum scsi_device_state sdev_state; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 822e8cda8d9b..46ef8cccc982 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -271,6 +271,13 @@ struct scsi_host_template { int (* map_queues)(struct Scsi_Host 
*shost); /* + * Check if scatterlists need to be padded for DMA draining. + * + * Status: OPTIONAL + */ + bool (* dma_need_drain)(struct request *rq); + + /* * This function determines the BIOS parameters for a given * harddisk. These tend to be numbers that are made up by * the host adapter. Parameters: diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index 7800e12ee042..3025aca3c358 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -10,6 +10,7 @@ #include <linux/of_device.h> struct rpi_firmware; +struct pci_dev; enum rpi_firmware_property_status { RPI_FIRMWARE_STATUS_REQUEST = 0, @@ -90,7 +91,7 @@ enum rpi_firmware_property_tag { RPI_FIRMWARE_SET_PERIPH_REG = 0x00038045, RPI_FIRMWARE_GET_POE_HAT_VAL = 0x00030049, RPI_FIRMWARE_SET_POE_HAT_VAL = 0x00030050, - + RPI_FIRMWARE_NOTIFY_XHCI_RESET = 0x00030058, /* Dispmanx TAGS */ RPI_FIRMWARE_FRAMEBUFFER_ALLOCATE = 0x00040001, @@ -141,6 +142,7 @@ int rpi_firmware_property(struct rpi_firmware *fw, int rpi_firmware_property_list(struct rpi_firmware *fw, void *data, size_t tag_size); struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node); +int rpi_firmware_init_vl805(struct pci_dev *pdev); #else static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, void *data, size_t len) @@ -158,6 +160,11 @@ static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware { return NULL; } + +static inline int rpi_firmware_init_vl805(struct pci_dev *pdev) +{ + return 0; +} #endif #endif /* __SOC_RASPBERRY_FIRMWARE_H__ */ diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h index e282ac01ec08..3feddfec9f87 100644 --- a/include/soc/fsl/qe/qe.h +++ b/include/soc/fsl/qe/qe.h @@ -307,7 +307,7 @@ struct qe_firmware { u8 revision; /* The microcode version revision */ u8 padding; /* Reserved, for alignment */ u8 reserved[4]; /* Reserved, for future expansion */ - } __attribute__ ((packed)) microcode[1]; + } __packed microcode[]; /* All microcode binaries should be located here */ /* CRC32 should be located here, after the microcode binaries */ } __attribute__ ((packed)); diff --git a/include/soc/imx/cpu.h b/include/soc/imx/cpu.h new file mode 100644 index 000000000000..42d6aeb951fa --- /dev/null +++ b/include/soc/imx/cpu.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef __IMX_CPU_H__ +#define __IMX_CPU_H__ + +#define MXC_CPU_MX1 1 +#define MXC_CPU_MX21 21 +#define MXC_CPU_MX25 25 +#define MXC_CPU_MX27 27 +#define MXC_CPU_MX31 31 +#define MXC_CPU_MX35 35 +#define MXC_CPU_MX51 51 +#define MXC_CPU_MX53 53 +#define MXC_CPU_IMX6SL 0x60 +#define MXC_CPU_IMX6DL 0x61 +#define MXC_CPU_IMX6SX 0x62 +#define MXC_CPU_IMX6Q 0x63 +#define MXC_CPU_IMX6UL 0x64 +#define MXC_CPU_IMX6ULL 0x65 +/* virtual cpu id for i.mx6ulz */ +#define MXC_CPU_IMX6ULZ 0x6b +#define MXC_CPU_IMX6SLL 0x67 +#define MXC_CPU_IMX7D 0x72 +#define MXC_CPU_IMX7ULP 0xff + +#define MXC_CPU_VFx10 0x010 +#define MXC_CPU_VF500 0x500 +#define MXC_CPU_VF510 (MXC_CPU_VF500 | MXC_CPU_VFx10) +#define MXC_CPU_VF600 0x600 +#define MXC_CPU_VF610 (MXC_CPU_VF600 | MXC_CPU_VFx10) + +#ifndef __ASSEMBLY__ +extern unsigned int __mxc_cpu_type; +#endif + +#endif diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index ebffcb36a7e3..4953e9994df3 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -92,6 +92,8 @@ #define OCELOT_SPEED_100 2 #define OCELOT_SPEED_10 3 +#define OCELOT_PTP_PINS_NUM 4 + #define 
TARGET_OFFSET 24 #define REG_MASK GENMASK(TARGET_OFFSET - 1, 0) #define REG(reg, offset) [reg & REG_MASK] = offset @@ -385,6 +387,8 @@ enum ocelot_reg { PTP_PIN_TOD_SEC_MSB, PTP_PIN_TOD_SEC_LSB, PTP_PIN_TOD_NSEC, + PTP_PIN_WF_HIGH_PERIOD, + PTP_PIN_WF_LOW_PERIOD, PTP_CFG_MISC, PTP_CLK_CFG_ADJ_CFG, PTP_CLK_CFG_ADJ_FREQ, @@ -440,10 +444,11 @@ enum ocelot_regfield { REGFIELD_MAX }; -enum ocelot_clk_pins { - ALT_PPS_PIN = 1, - EXT_CLK_PIN, - ALT_LDST_PIN, +enum ocelot_ptp_pins { + PTP_PIN_0, + PTP_PIN_1, + PTP_PIN_2, + PTP_PIN_3, TOD_ACC_PIN }; @@ -476,6 +481,8 @@ struct ocelot_port { void __iomem *regs; + bool vlan_aware; + /* Ingress default VLAN (pvid) */ u16 pvid; @@ -500,6 +507,7 @@ struct ocelot { unsigned int num_stats; int shared_queue_sz; + int num_mact_rows; struct net_device *hw_bridge_dev; u16 bridge_mask; @@ -547,6 +555,7 @@ struct ocelot { struct mutex ptp_lock; /* Protects the PTP clock */ spinlock_t ptp_clock_lock; + struct ptp_pin_desc ptp_pins[OCELOT_PTP_PINS_NUM]; }; struct ocelot_policer { @@ -610,7 +619,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port, int ocelot_fdb_dump(struct ocelot *ocelot, int port, dsa_fdb_dump_cb_t *cb, void *data); int ocelot_fdb_add(struct ocelot *ocelot, int port, - const unsigned char *addr, u16 vid, bool vlan_aware); + const unsigned char *addr, u16 vid); int ocelot_fdb_del(struct ocelot *ocelot, int port, const unsigned char *addr, u16 vid); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, @@ -618,7 +627,6 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid); int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr); int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr); -int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, struct sk_buff *skb); void ocelot_get_txtstamp(struct ocelot *ocelot); diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h new file mode 100644 index 000000000000..4a6b2f71b6b2 --- /dev/null +++ b/include/soc/mscc/ocelot_ptp.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Microsemi Ocelot Switch driver + * + * License: Dual MIT/GPL + * Copyright (c) 2017 Microsemi Corporation + * Copyright 2020 NXP + */ + +#ifndef _MSCC_OCELOT_PTP_H_ +#define _MSCC_OCELOT_PTP_H_ + +#include <linux/ptp_clock_kernel.h> +#include <soc/mscc/ocelot.h> + +#define PTP_PIN_CFG_RSZ 0x20 +#define PTP_PIN_TOD_SEC_MSB_RSZ PTP_PIN_CFG_RSZ +#define PTP_PIN_TOD_SEC_LSB_RSZ PTP_PIN_CFG_RSZ +#define PTP_PIN_TOD_NSEC_RSZ PTP_PIN_CFG_RSZ +#define PTP_PIN_WF_HIGH_PERIOD_RSZ PTP_PIN_CFG_RSZ +#define PTP_PIN_WF_LOW_PERIOD_RSZ PTP_PIN_CFG_RSZ + +#define PTP_PIN_CFG_DOM BIT(0) +#define PTP_PIN_CFG_SYNC BIT(2) +#define PTP_PIN_CFG_ACTION(x) ((x) << 3) +#define PTP_PIN_CFG_ACTION_MASK PTP_PIN_CFG_ACTION(0x7) + +enum { + PTP_PIN_ACTION_IDLE = 0, + PTP_PIN_ACTION_LOAD, + PTP_PIN_ACTION_SAVE, + PTP_PIN_ACTION_CLOCK, + PTP_PIN_ACTION_DELTA, + PTP_PIN_ACTION_NOSYNC, + PTP_PIN_ACTION_SYNC, +}; + +#define PTP_CFG_MISC_PTP_EN BIT(2) + +#define PSEC_PER_SEC 1000000000000LL + +#define PTP_CFG_CLK_ADJ_CFG_ENA BIT(0) +#define PTP_CFG_CLK_ADJ_CFG_DIR BIT(1) + +#define PTP_CFG_CLK_ADJ_FREQ_NS BIT(30) + +int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); +int ocelot_ptp_settime64(struct ptp_clock_info *ptp, + const struct timespec64 *ts); +int 
ocelot_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta); +int ocelot_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm); +int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan); +int ocelot_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on); +int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info); +int ocelot_deinit_timestamp(struct ocelot *ocelot); +#endif diff --git a/include/soc/qcom/cmd-db.h b/include/soc/qcom/cmd-db.h index af9722223925..c8bb56e6852a 100644 --- a/include/soc/qcom/cmd-db.h +++ b/include/soc/qcom/cmd-db.h @@ -4,6 +4,7 @@ #ifndef __QCOM_COMMAND_DB_H__ #define __QCOM_COMMAND_DB_H__ +#include <linux/err.h> enum cmd_db_hw_type { CMD_DB_HW_INVALID = 0, diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h index 0dd52b0a5c1b..361cb64246f7 100644 --- a/include/soc/tegra/pmc.h +++ b/include/soc/tegra/pmc.h @@ -168,7 +168,6 @@ int tegra_io_pad_power_disable(enum tegra_io_pad id); int tegra_io_rail_power_on(unsigned int id); int tegra_io_rail_power_off(unsigned int id); -enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void); void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode); void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode); @@ -220,11 +219,6 @@ static inline int tegra_io_rail_power_off(unsigned int id) return -ENOSYS; } -static inline enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void) -{ - return TEGRA_SUSPEND_NONE; -} - static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode) { } @@ -235,4 +229,13 @@ static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode) #endif /* CONFIG_SOC_TEGRA_PMC */ +#if defined(CONFIG_SOC_TEGRA_PMC) && defined(CONFIG_PM_SLEEP) +enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void); +#else +static inline enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void) +{ + return TEGRA_SUSPEND_NONE; +} +#endif + #endif /* __SOC_TEGRA_PMC_H__ */ diff --git a/include/sound/control.h b/include/sound/control.h index 11feeee31e35..aeaed2a05bae 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -75,7 +75,7 @@ struct snd_kcontrol { unsigned long private_value; void *private_data; void (*private_free)(struct snd_kcontrol *kcontrol); - struct snd_kcontrol_volatile vd[0]; /* volatile data */ + struct snd_kcontrol_volatile vd[]; /* volatile data */ }; #define snd_kcontrol(n) list_entry(n, struct snd_kcontrol, list) diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 3ee8036f5436..d16a4229209b 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -288,6 +288,10 @@ struct hda_codec { #define dev_to_hda_codec(_dev) container_of(_dev, struct hda_codec, core.dev) #define hda_codec_dev(_dev) (&(_dev)->core.dev) +#define hdac_to_hda_priv(_hdac) \ + container_of(_hdac, struct hdac_hda_priv, codec.core) +#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core) + #define list_for_each_codec(c, bus) \ list_for_each_entry(c, &(bus)->core.codec_list, core.list) #define list_for_each_codec_safe(c, n, bus) \ @@ -362,13 +366,6 @@ struct hda_verb { void snd_hda_sequence_write(struct hda_codec *codec, const struct hda_verb *seq); -/* unsolicited event */ -static inline void -snd_hda_queue_unsol_event(struct hda_bus *bus, u32 res, u32 res_ex) -{ - snd_hdac_bus_queue_event(&bus->core, res, res_ex); -} - /* cached write */ static inline int snd_hda_codec_write_cache(struct hda_codec *codec, hda_nid_t nid, @@ 
-494,6 +491,11 @@ void snd_hda_update_power_acct(struct hda_codec *codec); static inline void snd_hda_set_power_save(struct hda_bus *bus, int delay) {} #endif +static inline bool hda_codec_need_resume(struct hda_codec *codec) +{ + return !codec->relaxed_resume && codec->jacktbl.used; +} + #ifdef CONFIG_SND_HDA_PATCH_LOADER /* * patch firmware diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index affedc2801c4..c1f78d9a6e47 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -207,8 +207,8 @@ static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; } static inline void snd_hdac_enter_pm(struct hdac_device *codec) {} static inline void snd_hdac_leave_pm(struct hdac_device *codec) {} -static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return 0; } -static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return 1; } +static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return false; } +static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return true; } #endif /* @@ -364,16 +364,16 @@ struct hdac_bus { /* link management */ struct list_head hlink_list; bool cmd_dma_state; + + /* factor used to derive STRIPE control value */ + unsigned int sdo_limit; }; int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev, const struct hdac_bus_ops *ops); void snd_hdac_bus_exit(struct hdac_bus *bus); -int snd_hdac_bus_exec_verb(struct hdac_bus *bus, unsigned int addr, - unsigned int cmd, unsigned int *res); int snd_hdac_bus_exec_verb_unlocked(struct hdac_bus *bus, unsigned int addr, unsigned int cmd, unsigned int *res); -void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex); static inline void snd_hdac_codec_link_up(struct hdac_device *codec) { diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h index f657fd8fc0ad..743c2f442280 100644 --- a/include/sound/intel-nhlt.h +++ b/include/sound/intel-nhlt.h @@ -50,7 +50,7 @@ enum nhlt_device_type { struct nhlt_specific_cfg { u32 size; - u8 caps[0]; + u8 caps[]; } __packed; struct nhlt_fmt_cfg { @@ -60,7 +60,7 @@ struct nhlt_fmt_cfg { struct nhlt_fmt { u8 fmt_count; - struct nhlt_fmt_cfg fmt_config[0]; + struct nhlt_fmt_cfg fmt_config[]; } __packed; struct nhlt_endpoint { @@ -80,7 +80,7 @@ struct nhlt_endpoint { struct nhlt_acpi_table { struct acpi_table_header header; u8 endpoint_count; - struct nhlt_endpoint desc[0]; + struct nhlt_endpoint desc[]; } __packed; struct nhlt_resource_desc { diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index a36b7227a15a..334842daa904 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -61,6 +61,7 @@ struct snd_rawmidi_runtime { size_t avail_min; /* min avail for wakeup */ size_t avail; /* max used buffer for wakeup */ size_t xruns; /* over/underruns counter */ + int buffer_ref; /* buffer reference count */ /* misc */ spinlock_t lock; wait_queue_head_t sleep; diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 392e953d561e..d2e9e3b4d7ea 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 +/* SPDX-License-Identifier: GPL-2.0-only * * Copyright (C) 2013-15, Intel Corporation. All rights reserved. 
*/ diff --git a/include/sound/soc-card.h b/include/sound/soc-card.h new file mode 100644 index 000000000000..4f2cc4fb56b7 --- /dev/null +++ b/include/sound/soc-card.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * soc-card.h + * + * Copyright (C) 2019 Renesas Electronics Corp. + * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + */ +#ifndef __SOC_CARD_H +#define __SOC_CARD_H + +enum snd_soc_card_subclass { + SND_SOC_CARD_CLASS_INIT = 0, + SND_SOC_CARD_CLASS_RUNTIME = 1, +}; + +struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card, + const char *name); +int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type, + struct snd_soc_jack *jack, + struct snd_soc_jack_pin *pins, unsigned int num_pins); + +int snd_soc_card_suspend_pre(struct snd_soc_card *card); +int snd_soc_card_suspend_post(struct snd_soc_card *card); +int snd_soc_card_resume_pre(struct snd_soc_card *card); +int snd_soc_card_resume_post(struct snd_soc_card *card); + +int snd_soc_card_probe(struct snd_soc_card *card); +int snd_soc_card_late_probe(struct snd_soc_card *card); +int snd_soc_card_remove(struct snd_soc_card *card); + +int snd_soc_card_set_bias_level(struct snd_soc_card *card, + struct snd_soc_dapm_context *dapm, + enum snd_soc_bias_level level); +int snd_soc_card_set_bias_level_post(struct snd_soc_card *card, + struct snd_soc_dapm_context *dapm, + enum snd_soc_bias_level level); + +int snd_soc_card_add_dai_link(struct snd_soc_card *card, + struct snd_soc_dai_link *dai_link); +void snd_soc_card_remove_dai_link(struct snd_soc_card *card, + struct snd_soc_dai_link *dai_link); + +/* device driver data */ +static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, + void *data) +{ + card->drvdata = data; +} + +static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card) +{ + return card->drvdata; +} + +static inline +struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card, + const char *dai_name) +{ + struct snd_soc_pcm_runtime *rtd; + + for_each_card_rtds(card, rtd) { + if (!strcmp(asoc_rtd_to_codec(rtd, 0)->name, dai_name)) + return asoc_rtd_to_codec(rtd, 0); + } + + return NULL; +} + +#endif /* __SOC_CARD_H */ diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h index 154d02fbbfed..5663891148e3 100644 --- a/include/sound/soc-component.h +++ b/include/sound/soc-component.h @@ -25,6 +25,44 @@ order++) /* component interface */ +struct snd_compress_ops { + int (*open)(struct snd_soc_component *component, + struct snd_compr_stream *stream); + int (*free)(struct snd_soc_component *component, + struct snd_compr_stream *stream); + int (*set_params)(struct snd_soc_component *component, + struct snd_compr_stream *stream, + struct snd_compr_params *params); + int (*get_params)(struct snd_soc_component *component, + struct snd_compr_stream *stream, + struct snd_codec *params); + int (*set_metadata)(struct snd_soc_component *component, + struct snd_compr_stream *stream, + struct snd_compr_metadata *metadata); + int (*get_metadata)(struct snd_soc_component *component, + struct snd_compr_stream *stream, + struct snd_compr_metadata *metadata); + int (*trigger)(struct snd_soc_component *component, + struct snd_compr_stream *stream, int cmd); + int (*pointer)(struct snd_soc_component *component, + struct snd_compr_stream *stream, + struct snd_compr_tstamp *tstamp); + int (*copy)(struct snd_soc_component *component, + struct snd_compr_stream *stream, char __user *buf, + size_t count); + int (*mmap)(struct 
snd_soc_component *component, + struct snd_compr_stream *stream, + struct vm_area_struct *vma); + int (*ack)(struct snd_soc_component *component, + struct snd_compr_stream *stream, size_t bytes); + int (*get_caps)(struct snd_soc_component *component, + struct snd_compr_stream *stream, + struct snd_compr_caps *caps); + int (*get_codec_caps)(struct snd_soc_component *component, + struct snd_compr_stream *stream, + struct snd_compr_codec_caps *codec); +}; + struct snd_soc_component_driver { const char *name; @@ -108,7 +146,7 @@ struct snd_soc_component_driver { struct snd_pcm_substream *substream, struct vm_area_struct *vma); - const struct snd_compr_ops *compr_ops; + const struct snd_compress_ops *compress_ops; /* probe ordering - for components with runtime dependencies */ int probe_order; @@ -351,10 +389,10 @@ static inline void *snd_soc_component_get_drvdata(struct snd_soc_component *c) return dev_get_drvdata(c->dev); } -static inline bool snd_soc_component_is_active( - struct snd_soc_component *component) +static inline unsigned int +snd_soc_component_active(struct snd_soc_component *component) { - return component->active != 0; + return component->active; } /* component pin */ diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index d4825b82c7a3..212257e84fac 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -154,21 +154,59 @@ int snd_soc_dai_startup(struct snd_soc_dai *dai, struct snd_pcm_substream *substream); void snd_soc_dai_shutdown(struct snd_soc_dai *dai, struct snd_pcm_substream *substream); -int snd_soc_dai_prepare(struct snd_soc_dai *dai, - struct snd_pcm_substream *substream); -int snd_soc_dai_trigger(struct snd_soc_dai *dai, - struct snd_pcm_substream *substream, int cmd); -int snd_soc_dai_bespoke_trigger(struct snd_soc_dai *dai, - struct snd_pcm_substream *substream, int cmd); snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai, struct snd_pcm_substream *substream); void snd_soc_dai_suspend(struct snd_soc_dai *dai); void snd_soc_dai_resume(struct snd_soc_dai *dai); -int snd_soc_dai_probe(struct snd_soc_dai *dai); -int snd_soc_dai_remove(struct snd_soc_dai *dai); int snd_soc_dai_compress_new(struct snd_soc_dai *dai, struct snd_soc_pcm_runtime *rtd, int num); bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream); +void snd_soc_dai_action(struct snd_soc_dai *dai, + int stream, int action); +static inline void snd_soc_dai_activate(struct snd_soc_dai *dai, + int stream) +{ + snd_soc_dai_action(dai, stream, 1); +} +static inline void snd_soc_dai_deactivate(struct snd_soc_dai *dai, + int stream) +{ + snd_soc_dai_action(dai, stream, -1); +} +int snd_soc_dai_active(struct snd_soc_dai *dai); + +int snd_soc_pcm_dai_probe(struct snd_soc_pcm_runtime *rtd, int order); +int snd_soc_pcm_dai_remove(struct snd_soc_pcm_runtime *rtd, int order); +int snd_soc_pcm_dai_new(struct snd_soc_pcm_runtime *rtd); +int snd_soc_pcm_dai_prepare(struct snd_pcm_substream *substream); +int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd); +int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream, + int cmd); + +int snd_soc_dai_compr_startup(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream); +void snd_soc_dai_compr_shutdown(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream); +int snd_soc_dai_compr_trigger(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream, int cmd); +int snd_soc_dai_compr_set_params(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream, + struct snd_compr_params 
*params); +int snd_soc_dai_compr_get_params(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream, + struct snd_codec *params); +int snd_soc_dai_compr_ack(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream, + size_t bytes); +int snd_soc_dai_compr_pointer(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream, + struct snd_compr_tstamp *tstamp); +int snd_soc_dai_compr_set_metadata(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream, + struct snd_compr_metadata *metadata); +int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai, + struct snd_compr_stream *cstream, + struct snd_compr_metadata *metadata); struct snd_soc_dai_ops { /* @@ -326,8 +364,6 @@ struct snd_soc_dai { /* DAI runtime info */ unsigned int stream_active[SNDRV_PCM_STREAM_LAST + 1]; /* usage count */ - unsigned int active; - struct snd_soc_dapm_widget *playback_widget; struct snd_soc_dapm_widget *capture_widget; @@ -351,7 +387,6 @@ struct snd_soc_dai { /* bit field */ unsigned int probed:1; - unsigned int started[SNDRV_PCM_STREAM_LAST + 1]; }; static inline struct snd_soc_pcm_stream * @@ -444,4 +479,10 @@ static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai, return ERR_PTR(-ENOTSUPP); } +static inline unsigned int +snd_soc_dai_stream_active(struct snd_soc_dai *dai, int stream) +{ + return dai->stream_active[stream]; +} + #endif diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 08495f8d86dc..cc3dcb815282 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h @@ -689,7 +689,7 @@ struct snd_soc_dapm_context { /* A list of widgets associated with an object, typically a snd_kcontrol */ struct snd_soc_dapm_widget_list { int num_widgets; - struct snd_soc_dapm_widget *widgets[0]; + struct snd_soc_dapm_widget *widgets[]; }; #define for_each_dapm_widgets(list, i, widget) \ diff --git a/include/sound/soc-link.h b/include/sound/soc-link.h new file mode 100644 index 000000000000..3dd6e33e94ec --- /dev/null +++ b/include/sound/soc-link.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * soc-link.h + * + * Copyright (C) 2019 Renesas Electronics Corp. 
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> + */ +#ifndef __SOC_LINK_H +#define __SOC_LINK_H + +int snd_soc_link_init(struct snd_soc_pcm_runtime *rtd); +int snd_soc_link_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, + struct snd_pcm_hw_params *params); + +int snd_soc_link_startup(struct snd_pcm_substream *substream); +void snd_soc_link_shutdown(struct snd_pcm_substream *substream); +int snd_soc_link_prepare(struct snd_pcm_substream *substream); +int snd_soc_link_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params); +void snd_soc_link_hw_free(struct snd_pcm_substream *substream); +int snd_soc_link_trigger(struct snd_pcm_substream *substream, int cmd); + +int snd_soc_link_compr_startup(struct snd_compr_stream *cstream); +void snd_soc_link_compr_shutdown(struct snd_compr_stream *cstream); +int snd_soc_link_compr_set_params(struct snd_compr_stream *cstream); + +#endif /* __SOC_LINK_H */ diff --git a/include/sound/soc.h b/include/sound/soc.h index 13458e4fbb13..ef5dd28e10a9 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -414,11 +414,6 @@ enum snd_soc_pcm_subclass { SND_SOC_PCM_CLASS_BE = 1, }; -enum snd_soc_card_subclass { - SND_SOC_CARD_CLASS_INIT = 0, - SND_SOC_CARD_CLASS_RUNTIME = 1, -}; - int snd_soc_register_card(struct snd_soc_card *card); int snd_soc_unregister_card(struct snd_soc_card *card); int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card); @@ -468,8 +463,19 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card, struct snd_soc_dai_link *dai_link); bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd); -void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream); -void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream); + +void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd, + int stream, int action); +static inline void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, + int stream) +{ + snd_soc_runtime_action(rtd, stream, 1); +} +static inline void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, + int stream) +{ + snd_soc_runtime_action(rtd, stream, -1); +} int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd, struct snd_pcm_hardware *hw, int stream); @@ -498,10 +504,6 @@ int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, const struct snd_pcm_hardware *hw); /* Jack reporting */ -int snd_soc_card_jack_new(struct snd_soc_card *card, const char *id, int type, - struct snd_soc_jack *jack, struct snd_soc_jack_pin *pins, - unsigned int num_pins); - void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask); int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count, struct snd_soc_jack_pin *pins); @@ -571,8 +573,6 @@ static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops) struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template, void *data, const char *long_name, const char *prefix); -struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card, - const char *name); int snd_soc_add_component_controls(struct snd_soc_component *component, const struct snd_kcontrol_new *controls, unsigned int num_controls); int snd_soc_add_card_controls(struct snd_soc_card *soc_card, @@ -806,7 +806,7 @@ struct snd_soc_dai_link { const struct snd_soc_compr_ops *compr_ops; /* Mark this pcm with non atomic ops */ - bool nonatomic; + unsigned int nonatomic:1; /* For unidirectional dai links */ unsigned int playback_only:1; 
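Before the snd_soc_card changes continue below, a hedged sketch of how a machine driver might use the soc-card.h helpers introduced above; every my_* identifier is hypothetical and error handling is trimmed for brevity:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/jack.h>
#include <sound/soc.h>

struct my_card_priv {				/* hypothetical */
	struct snd_soc_jack headset_jack;
};

static int my_card_late_probe(struct snd_soc_card *card)
{
	struct my_card_priv *priv = snd_soc_card_get_drvdata(card);

	/* register a jack once the card is fully instantiated */
	return snd_soc_card_jack_new(card, "Headset", SND_JACK_HEADSET,
				     &priv->headset_jack, NULL, 0);
}

static struct snd_soc_card my_card = {
	.name = "my-card",			/* hypothetical */
	.owner = THIS_MODULE,
	.late_probe = my_card_late_probe,
};

static int my_platform_probe(struct platform_device *pdev)
{
	struct my_card_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	my_card.dev = &pdev->dev;
	snd_soc_card_set_drvdata(&my_card, priv);

	return devm_snd_soc_register_card(&pdev->dev, &my_card);
}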
@@ -1002,9 +1002,6 @@ struct snd_soc_card { spinlock_t dpcm_lock; - bool instantiated; - bool topology_shortname_created; - int (*probe)(struct snd_soc_card *card); int (*late_probe)(struct snd_soc_card *card); int (*remove)(struct snd_soc_card *card); @@ -1065,8 +1062,6 @@ struct snd_soc_card { int num_of_dapm_widgets; const struct snd_soc_dapm_route *of_dapm_routes; int num_of_dapm_routes; - bool fully_routed; - bool disable_route_checks; /* lists of probed devices belonging to this card */ struct list_head component_dev_list; @@ -1093,6 +1088,13 @@ struct snd_soc_card { #endif u32 pop_time; + /* bit field */ + unsigned int instantiated:1; + unsigned int topology_shortname_created:1; + unsigned int fully_routed:1; + unsigned int disable_route_checks:1; + unsigned int probed:1; + void *drvdata; }; #define for_each_card_prelinks(card, i, link) \ @@ -1143,16 +1145,21 @@ struct snd_soc_pcm_runtime { /* runtime devices */ struct snd_pcm *pcm; struct snd_compr *compr; - struct snd_soc_dai *codec_dai; - struct snd_soc_dai *cpu_dai; - struct snd_soc_dai **dais; - struct snd_soc_dai **codec_dais; + /* + * dais = cpu_dai + codec_dai + * see + * soc_new_pcm_runtime() + * asoc_rtd_to_cpu() + * asoc_rtd_to_codec() + */ + struct snd_soc_dai **dais; unsigned int num_codecs; - - struct snd_soc_dai **cpu_dais; unsigned int num_cpus; + struct snd_soc_dapm_widget *playback_widget; + struct snd_soc_dapm_widget *capture_widget; + struct delayed_work delayed_work; void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd); #ifdef CONFIG_DEBUG_FS @@ -1167,28 +1174,28 @@ struct snd_soc_pcm_runtime { unsigned int fe_compr:1; /* for Dynamic PCM */ int num_components; - struct snd_soc_component *components[0]; /* CPU/Codec/Platform */ + struct snd_soc_component *components[]; /* CPU/Codec/Platform */ }; /* see soc_new_pcm_runtime() */ #define asoc_rtd_to_cpu(rtd, n) (rtd)->dais[n] #define asoc_rtd_to_codec(rtd, n) (rtd)->dais[n + (rtd)->num_cpus] #define for_each_rtd_components(rtd, i, component) \ - for ((i) = 0; \ + for ((i) = 0, component = NULL; \ ((i) < rtd->num_components) && ((component) = rtd->components[i]);\ (i)++) #define for_each_rtd_cpu_dais(rtd, i, dai) \ for ((i) = 0; \ - ((i) < rtd->num_cpus) && ((dai) = rtd->cpu_dais[i]); \ + ((i) < rtd->num_cpus) && ((dai) = asoc_rtd_to_cpu(rtd, i)); \ (i)++) #define for_each_rtd_cpu_dais_rollback(rtd, i, dai) \ - for (; (--(i) >= 0) && ((dai) = rtd->cpu_dais[i]);) + for (; (--(i) >= 0) && ((dai) = asoc_rtd_to_cpu(rtd, i));) #define for_each_rtd_codec_dais(rtd, i, dai) \ for ((i) = 0; \ - ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \ + ((i) < rtd->num_codecs) && ((dai) = asoc_rtd_to_codec(rtd, i)); \ (i)++) #define for_each_rtd_codec_dais_rollback(rtd, i, dai) \ - for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);) + for (; (--(i) >= 0) && ((dai) = asoc_rtd_to_codec(rtd, i));) #define for_each_rtd_dais(rtd, i, dai) \ for ((i) = 0; \ ((i) < (rtd)->num_cpus + (rtd)->num_codecs) && \ @@ -1249,29 +1256,16 @@ struct soc_enum { #endif }; -/* device driver data */ - -static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, - void *data) -{ - card->drvdata = data; -} - -static inline void *snd_soc_card_get_drvdata(struct snd_soc_card *card) -{ - return card->drvdata; -} - static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc) { if (mc->reg == mc->rreg && mc->shift == mc->rshift) - return 0; + return false; /* * mc->reg == mc->rreg && mc->shift != mc->rshift, or * mc->reg != mc->rreg means that the control is * stereo 
(bits in one register or in two registers) */ - return 1; + return true; } static inline unsigned int snd_soc_enum_val_to_item(struct soc_enum *e, @@ -1375,20 +1369,6 @@ struct snd_soc_dai *snd_soc_find_dai( #include <sound/soc-dai.h> static inline -struct snd_soc_dai *snd_soc_card_get_codec_dai(struct snd_soc_card *card, - const char *dai_name) -{ - struct snd_soc_pcm_runtime *rtd; - - list_for_each_entry(rtd, &card->rtd_list, list) { - if (!strcmp(rtd->codec_dai->name, dai_name)) - return rtd->codec_dai; - } - - return NULL; -} - -static inline int snd_soc_fixup_dai_links_platform_name(struct snd_soc_card *card, const char *platform_name) { @@ -1433,5 +1413,6 @@ static inline void snd_soc_dapm_mutex_unlock(struct snd_soc_dapm_context *dapm) } #include <sound/soc-component.h> +#include <sound/soc-card.h> #endif diff --git a/include/sound/sof.h b/include/sound/sof.h index a0cbca021230..f3e716c8ce1c 100644 --- a/include/sound/sof.h +++ b/include/sound/sof.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -27,6 +27,9 @@ struct snd_sof_pdata { struct device *dev; + /* indicate how many first bytes shouldn't be loaded into DSP memory. */ + size_t fw_offset; + /* * notification callback used if the hardware initialization * can take time or is handled in a workqueue. This callback diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h index 21044eb5f377..fd3a30fcf756 100644 --- a/include/sound/sof/channel_map.h +++ b/include/sound/sof/channel_map.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h index 6080ea0facd7..7379a33d7247 100644 --- a/include/sound/sof/control.h +++ b/include/sound/sof/control.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h index ff9088dcc6f2..ca8325353d41 100644 --- a/include/sound/sof/dai-imx.h +++ b/include/sound/sof/dai-imx.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * Copyright 2019 NXP * diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index 04e48227f542..136adf6686e2 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -49,6 +49,9 @@ /* bclk idle */ #define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_IDLE_HIGH BIT(5) +/* DMIC max. 
four controllers for eight microphone channels */ +#define SOF_DAI_INTEL_DMIC_NUM_CTRL 4 + /* SSP Configuration Request - SOF_IPC_DAI_SSP_CONFIG */ struct sof_ipc_dai_ssp_params { struct sof_ipc_hdr hdr; @@ -85,15 +88,19 @@ struct sof_ipc_dai_ssp_params { struct sof_ipc_dai_hda_params { struct sof_ipc_hdr hdr; uint32_t link_dma_ch; + uint32_t rate; + uint32_t channels; } __packed; /* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */ struct sof_ipc_dai_alh_params { struct sof_ipc_hdr hdr; uint32_t stream_id; + uint32_t rate; + uint32_t channels; /* reserved for future use */ - uint32_t reserved[15]; + uint32_t reserved[13]; } __packed; /* DMIC Configuration Request - SOF_IPC_DAI_DMIC_CONFIG */ @@ -135,7 +142,7 @@ struct sof_ipc_dai_dmic_pdm_ctrl { * version number used in configuration data is checked vs. version used by * device driver src/drivers/dmic.c need to match. It is incremented from * initial value 1 if updates done for the to driver would alter the operation - * of the microhone. + * of the microphone. * * Note: The microphone clock (pdmclk_min, pdmclk_max, duty_min, duty_max) * parameters need to be set as defined in microphone data sheet. E.g. clock @@ -170,12 +177,13 @@ struct sof_ipc_dai_dmic_params { uint32_t fifo_fs; /**< FIFO sample rate in Hz (8000..96000) */ uint32_t reserved_1; /**< Reserved */ uint16_t fifo_bits; /**< FIFO word length (16 or 32) */ - uint16_t reserved_2; /**< Reserved */ + uint16_t fifo_bits_b; /**< Deprecated since firmware ABI 3.0.1 */ uint16_t duty_min; /**< Min. mic clock duty cycle in % (20..80) */ uint16_t duty_max; /**< Max. mic clock duty cycle in % (min..80) */ - uint32_t num_pdm_active; /**< Number of active pdm controllers */ + uint32_t num_pdm_active; /**< Number of active pdm controllers. */ + /**< Range is 1..SOF_DAI_INTEL_DMIC_NUM_CTRL */ uint32_t wake_up_time; /**< Time from clock start to data (us) */ uint32_t min_clock_on_time; /**< Min. time that clk is kept on (us) */ @@ -184,8 +192,8 @@ struct sof_ipc_dai_dmic_params { /* reserved for future use */ uint32_t reserved[5]; - /**< variable number of pdm controller config */ - struct sof_ipc_dai_dmic_pdm_ctrl pdm[0]; + /**< PDM controllers configuration */ + struct sof_ipc_dai_dmic_pdm_ctrl pdm[SOF_DAI_INTEL_DMIC_NUM_CTRL]; } __packed; #endif diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index 2565edd336f1..34f135adf8ec 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h new file mode 100644 index 000000000000..04359cda92dc --- /dev/null +++ b/include/sound/sof/ext_manifest.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * Copyright(c) 2020 Intel Corporation. All rights reserved. + */ + +/* + * Extended manifest is a place to store metadata about firmware, known during + * compilation time - for example firmware version or used compiler. + * Given information are read on host side before firmware startup. + * This part of output binary is not signed. 
+ */ + +#ifndef __SOF_FIRMWARE_EXT_MANIFEST_H__ +#define __SOF_FIRMWARE_EXT_MANIFEST_H__ + +#include <linux/bits.h> +#include <linux/compiler.h> +#include <linux/types.h> +#include <sound/sof/info.h> + +/* In ASCII `XMan` */ +#define SOF_EXT_MAN_MAGIC_NUMBER 0x6e614d58 + +/* Build u32 number in format MMmmmppp */ +#define SOF_EXT_MAN_BUILD_VERSION(MAJOR, MINOR, PATH) ((uint32_t)( \ + ((MAJOR) << 24) | \ + ((MINOR) << 12) | \ + (PATH))) + +/* check extended manifest version consistency */ +#define SOF_EXT_MAN_VERSION_INCOMPATIBLE(host_ver, cli_ver) ( \ + ((host_ver) & GENMASK(31, 24)) != \ + ((cli_ver) & GENMASK(31, 24))) + +/* used extended manifest header version */ +#define SOF_EXT_MAN_VERSION SOF_EXT_MAN_BUILD_VERSION(1, 0, 0) + +/* extended manifest header, deleting any field breaks backward compatibility */ +struct sof_ext_man_header { + uint32_t magic; /*< identification number, */ + /*< EXT_MAN_MAGIC_NUMBER */ + uint32_t full_size; /*< [bytes] full size of ext_man, */ + /*< (header + content + padding) */ + uint32_t header_size; /*< [bytes] makes header extensionable, */ + /*< after append new field to ext_man header */ + /*< then backward compatible won't be lost */ + uint32_t header_version; /*< value of EXT_MAN_VERSION */ + /*< not related with following content */ + + /* just after this header should be list of ext_man_elem_* elements */ +} __packed; + +/* Now define extended manifest elements */ + +/* Extended manifest elements types */ +enum sof_ext_man_elem_type { + SOF_EXT_MAN_ELEM_FW_VERSION = 0, + SOF_EXT_MAN_ELEM_WINDOW = SOF_IPC_EXT_WINDOW, + SOF_EXT_MAN_ELEM_CC_VERSION = SOF_IPC_EXT_CC_INFO, +}; + +/* extended manifest element header */ +struct sof_ext_man_elem_header { + uint32_t type; /*< SOF_EXT_MAN_ELEM_ */ + uint32_t size; /*< in bytes, including header size */ + + /* just after this header should be type dependent content */ +} __packed; + +/* FW version */ +struct sof_ext_man_fw_version { + struct sof_ext_man_elem_header hdr; + /* use sof_ipc struct because of code re-use */ + struct sof_ipc_fw_version version; + uint32_t flags; +} __packed; + +/* extended data memory windows for IPC, trace and debug */ +struct sof_ext_man_window { + struct sof_ext_man_elem_header hdr; + /* use sof_ipc struct because of code re-use */ + struct sof_ipc_window ipc_window; +} __packed; + +/* Used C compiler description */ +struct sof_ext_man_cc_version { + struct sof_ext_man_elem_header hdr; + /* use sof_ipc struct because of code re-use */ + struct sof_ipc_cc_version cc_version; +} __packed; + +#endif /* __SOF_FIRMWARE_EXT_MANIFEST_H__ */ diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h index b79479575cc8..2d35997ace40 100644 --- a/include/sound/sof/header.h +++ b/include/sound/sof/header.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h index 438a11fcf272..5a55ba8b7e56 100644 --- a/include/sound/sof/info.h +++ b/include/sound/sof/info.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
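Before the info.h hunk below, a minimal host-side sketch (an assumption for illustration, not SOF driver code) of walking the extended manifest laid out in ext_manifest.h above: read the header, verify the magic and the major version byte (the same check SOF_EXT_MAN_VERSION_INCOMPATIBLE performs), then iterate the sof_ext_man_elem_header chain until full_size is consumed:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XMAN_MAGIC	0x6e614d58		/* "XMan" in ASCII */
#define XMAN_MAJOR(v)	(((v) >> 24) & 0xff)	/* MMmmmppp format */

struct ext_man_header {		/* mirrors struct sof_ext_man_header */
	uint32_t magic, full_size, header_size, header_version;
};

struct ext_man_elem_header {	/* mirrors struct sof_ext_man_elem_header */
	uint32_t type, size;
};

static void walk_ext_man(const uint8_t *buf, size_t len)
{
	struct ext_man_header hdr;
	size_t off;

	if (len < sizeof(hdr))
		return;
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.magic != XMAN_MAGIC || hdr.full_size > len ||
	    XMAN_MAJOR(hdr.header_version) != 1)
		return;		/* missing, truncated or incompatible */

	/* elements follow the header; using header_size (not sizeof(hdr))
	 * keeps this forward compatible if header fields are appended */
	for (off = hdr.header_size;
	     off + sizeof(struct ext_man_elem_header) <= hdr.full_size;) {
		struct ext_man_elem_header elem;

		memcpy(&elem, buf + off, sizeof(elem));
		if (elem.size < sizeof(elem) ||
		    off + elem.size > hdr.full_size)
			break;	/* malformed element, stop walking */
		printf("ext_man element type %u, %u bytes\n",
		       (unsigned int)elem.type, (unsigned int)elem.size);
		off += elem.size;
	}
}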
@@ -31,6 +31,8 @@ enum sof_ipc_ext_data { SOF_IPC_EXT_UNUSED = 0, SOF_IPC_EXT_WINDOW = 1, SOF_IPC_EXT_CC_INFO = 2, + SOF_IPC_EXT_PROBE_INFO = 3, + SOF_IPC_EXT_USER_ABI_INFO = 4, }; /* FW version - SOF_IPC_GLB_VERSION */ @@ -109,9 +111,27 @@ struct sof_ipc_cc_version { /* reserved for future use */ uint32_t reserved[4]; - char name[16]; /* null terminated compiler name */ - char optim[4]; /* null terminated compiler -O flag value */ - char desc[]; /* null terminated compiler description */ + uint8_t name[16]; /* null terminated compiler name */ + uint8_t optim[4]; /* null terminated compiler -O flag value */ + uint8_t desc[32]; /* null terminated compiler description */ } __packed; +/* extended data: Probe setup */ +struct sof_ipc_probe_support { + struct sof_ipc_ext_data_hdr ext_hdr; + + uint32_t probe_points_max; + uint32_t injection_dmas_max; + + /* reserved for future use */ + uint32_t reserved[2]; +} __packed; + +/* extended data: user abi version(s) */ +struct sof_ipc_user_abi_version { + struct sof_ipc_ext_data_hdr ext_hdr; + + uint32_t abi_dbg_version; +} __packed; + #endif diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h index 3cf2e0f39d94..366aa6ec442b 100644 --- a/include/sound/sof/pm.h +++ b/include/sound/sof/pm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h index 7facefb541b3..58a0d49977d6 100644 --- a/include/sound/sof/stream.h +++ b/include/sound/sof/stream.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h index 402e0250c508..f56e80d09b32 100644 --- a/include/sound/sof/topology.h +++ b/include/sound/sof/topology.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -37,6 +37,8 @@ enum sof_comp_type { SOF_COMP_SELECTOR, /**< channel selector component */ SOF_COMP_DEMUX, SOF_COMP_ASRC, /**< Asynchronous sample rate converter */ + SOF_COMP_DCBLOCK, + SOF_COMP_SMART_AMP, /**< smart amplifier component */ /* keep FILEREAD/FILEWRITE as the last ones */ SOF_COMP_FILEREAD = 10000, /**< host test based file IO */ SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */ @@ -75,11 +77,23 @@ struct sof_ipc_comp { #define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */ #define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */ +/* + * overrun will cause ring buffer overwrite, instead of XRUN. + */ +#define SOF_BUF_OVERRUN_PERMITTED BIT(0) + +/* + * underrun will cause readback of 0s, instead of XRUN. 
+ */ +#define SOF_BUF_UNDERRUN_PERMITTED BIT(1) + /* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */ struct sof_ipc_buffer { struct sof_ipc_comp comp; uint32_t size; /**< buffer size in bytes */ uint32_t caps; /**< SOF_MEM_CAPS_ */ + uint32_t flags; /**< SOF_BUF_ flags defined above */ + uint32_t reserved; /**< reserved for future use */ } __packed; /* generic component config data - must always be after struct sof_ipc_comp */ @@ -206,6 +220,8 @@ enum sof_ipc_process_type { SOF_PROCESS_CHAN_SELECTOR, /**< Channel Selector */ SOF_PROCESS_MUX, SOF_PROCESS_DEMUX, + SOF_PROCESS_DCBLOCK, + SOF_PROCESS_SMART_AMP, /**< Smart Amplifier */ }; /* generic "effect", "codec" or proprietary processing component */ @@ -218,7 +234,7 @@ struct sof_ipc_comp_process { /* reserved for future use */ uint32_t reserved[7]; - unsigned char data[0]; + uint8_t data[0]; } __packed; /* frees components, buffers and pipelines diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h index fda6e8f6ead4..c31a94a13ce0 100644 --- a/include/sound/sof/trace.h +++ b/include/sound/sof/trace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -72,7 +72,7 @@ struct sof_ipc_dma_trace_posn { struct sof_ipc_panic_info { struct sof_ipc_hdr hdr; uint32_t code; /* SOF_IPC_PANIC_ */ - char filename[SOF_TRACE_FILENAME_SIZE]; + uint8_t filename[SOF_TRACE_FILENAME_SIZE]; uint32_t linenum; } __packed; diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h index dd53d36b34e1..87a07e520415 100644 --- a/include/sound/sof/xtensa.h +++ b/include/sound/sof/xtensa.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. 
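The new flags word in struct sof_ipc_buffer above lets topology opt individual buffers out of XRUN reporting: SOF_BUF_OVERRUN_PERMITTED turns an overrun into a ring-buffer overwrite, and SOF_BUF_UNDERRUN_PERMITTED turns an underrun into zero-fill reads. A hedged sketch of filling the extended IPC structure follows; the size and caps values are illustrative, and the component header setup is elided because this patch does not change it:

struct sof_ipc_buffer buffer = {
        /* .comp left out: component id/pipeline setup is unchanged */
        .size  = 384 * 4,                    /* buffer size in bytes */
        .caps  = SOF_MEM_CAPS_CACHE,         /* SOF_MEM_CAPS_ bits */
        .flags = SOF_BUF_OVERRUN_PERMITTED,  /* overwrite instead of XRUN */
        .reserved = 0,                       /* reserved for future use */
};

Leaving flags at zero preserves the old behavior, where both overrun and underrun raise an XRUN.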
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 591cd9e4692c..4fda324f4b35 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -301,16 +301,6 @@ struct iscsi_queue_req { struct list_head qr_list; }; -struct iscsi_data_count { - int data_length; - int sync_and_steering; - enum data_count_type type; - u32 iov_count; - u32 ss_iov_count; - u32 ss_marker_count; - struct kvec *iov; -}; - struct iscsi_param_list { bool iser; struct list_head param_list; diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 1b752d8ea529..f51452e3b984 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -23,7 +23,8 @@ struct target_backend_ops { char inquiry_rev[4]; struct module *owner; - u8 transport_flags; + u8 transport_flags_default; + u8 transport_flags_changeable; int (*attach_hba)(struct se_hba *, u32); void (*detach_hba)(struct se_hba *); @@ -94,6 +95,7 @@ int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); extern struct configfs_attribute *sbc_attrib_attrs[]; extern struct configfs_attribute *passthrough_attrib_attrs[]; +extern struct configfs_attribute *passthrough_pr_attrib_attrs[]; /* core helpers also used by command snooping in pscsi */ void *transport_kmap_data_sg(struct se_cmd *); diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 6d4a694f6ea7..18c3f277b770 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -772,6 +772,7 @@ struct se_device { #define DF_USING_UDEV_PATH 0x00000008 #define DF_USING_ALIAS 0x00000010 #define DF_READ_ONLY 0x00000020 + u8 transport_flags; /* Physical device queue depth */ u32 queue_depth; /* Used for SPC-2 reservations enforce of ISIDs */ diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index 063f133e47c2..6adf4d71acf6 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h @@ -150,9 +150,10 @@ void transport_deregister_session(struct se_session *); void transport_init_se_cmd(struct se_cmd *, const struct target_core_fabric_ops *, - struct se_session *, u32, int, int, unsigned char *); -sense_reason_t transport_lookup_cmd_lun(struct se_cmd *, u64); -sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *); + struct se_session *, u32, int, int, unsigned char *, u64); +sense_reason_t transport_lookup_cmd_lun(struct se_cmd *); +sense_reason_t target_cmd_init_cdb(struct se_cmd *, unsigned char *); +sense_reason_t target_cmd_parse_cdb(struct se_cmd *); int target_submit_cmd_map_sgls(struct se_cmd *, struct se_session *, unsigned char *, unsigned char *, u64, u32, int, int, int, struct scatterlist *, u32, struct scatterlist *, u32, @@ -187,7 +188,7 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); void core_tmr_release_req(struct se_tmr_req *); int transport_generic_handle_tmr(struct se_cmd *); void transport_generic_request_failure(struct se_cmd *, sense_reason_t); -int transport_lookup_tmr_lun(struct se_cmd *, u64); +int transport_lookup_tmr_lun(struct se_cmd *); void core_allocate_nexus_loss_ua(struct se_node_acl *acl); struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg, diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index c612cabbc378..5f0c1cf1ea13 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -33,20 +33,40 @@ enum 
afs_server_trace { afs_server_trace_destroy, afs_server_trace_free, afs_server_trace_gc, + afs_server_trace_get_by_addr, afs_server_trace_get_by_uuid, afs_server_trace_get_caps, afs_server_trace_get_install, afs_server_trace_get_new_cbi, + afs_server_trace_get_probe, afs_server_trace_give_up_cb, afs_server_trace_put_call, afs_server_trace_put_cbi, afs_server_trace_put_find_rsq, + afs_server_trace_put_probe, afs_server_trace_put_slist, afs_server_trace_put_slist_isort, afs_server_trace_put_uuid_rsq, afs_server_trace_update, }; +enum afs_volume_trace { + afs_volume_trace_alloc, + afs_volume_trace_free, + afs_volume_trace_get_alloc_sbi, + afs_volume_trace_get_cell_insert, + afs_volume_trace_get_new_op, + afs_volume_trace_get_query_alias, + afs_volume_trace_put_cell_dup, + afs_volume_trace_put_cell_root, + afs_volume_trace_put_destroy_sbi, + afs_volume_trace_put_free_fc, + afs_volume_trace_put_put_op, + afs_volume_trace_put_query_alias, + afs_volume_trace_put_validate_fc, + afs_volume_trace_remove, +}; + enum afs_fs_operation { afs_FS_FetchData = 130, /* AFS Fetch file data */ afs_FS_FetchACL = 131, /* AFS Fetch file ACL */ @@ -108,6 +128,7 @@ enum afs_vl_operation { afs_VL_GetEntryByNameU = 527, /* AFS Get Vol Entry By Name operation ID */ afs_VL_GetAddrsU = 533, /* AFS Get FS server addresses */ afs_YFSVL_GetEndpoints = 64002, /* YFS Get FS & Vol server addresses */ + afs_YFSVL_GetCellName = 64014, /* YFS Get actual cell name */ afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */ }; @@ -140,6 +161,7 @@ enum afs_eproto_cause { afs_eproto_bad_status, afs_eproto_cb_count, afs_eproto_cb_fid_count, + afs_eproto_cellname_len, afs_eproto_file_type, afs_eproto_ibulkst_cb_count, afs_eproto_ibulkst_count, @@ -241,19 +263,38 @@ enum afs_cb_break_reason { EM(afs_server_trace_destroy, "DESTROY ") \ EM(afs_server_trace_free, "FREE ") \ EM(afs_server_trace_gc, "GC ") \ + EM(afs_server_trace_get_by_addr, "GET addr ") \ EM(afs_server_trace_get_by_uuid, "GET uuid ") \ EM(afs_server_trace_get_caps, "GET caps ") \ EM(afs_server_trace_get_install, "GET inst ") \ EM(afs_server_trace_get_new_cbi, "GET cbi ") \ + EM(afs_server_trace_get_probe, "GET probe") \ EM(afs_server_trace_give_up_cb, "giveup-cb") \ EM(afs_server_trace_put_call, "PUT call ") \ EM(afs_server_trace_put_cbi, "PUT cbi ") \ EM(afs_server_trace_put_find_rsq, "PUT f-rsq") \ + EM(afs_server_trace_put_probe, "PUT probe") \ EM(afs_server_trace_put_slist, "PUT slist") \ EM(afs_server_trace_put_slist_isort, "PUT isort") \ EM(afs_server_trace_put_uuid_rsq, "PUT u-req") \ E_(afs_server_trace_update, "UPDATE") +#define afs_volume_traces \ + EM(afs_volume_trace_alloc, "ALLOC ") \ + EM(afs_volume_trace_free, "FREE ") \ + EM(afs_volume_trace_get_alloc_sbi, "GET sbi-alloc ") \ + EM(afs_volume_trace_get_cell_insert, "GET cell-insrt") \ + EM(afs_volume_trace_get_new_op, "GET op-new ") \ + EM(afs_volume_trace_get_query_alias, "GET cell-alias") \ + EM(afs_volume_trace_put_cell_dup, "PUT cell-dup ") \ + EM(afs_volume_trace_put_cell_root, "PUT cell-root ") \ + EM(afs_volume_trace_put_destroy_sbi, "PUT sbi-destry") \ + EM(afs_volume_trace_put_free_fc, "PUT fc-free ") \ + EM(afs_volume_trace_put_put_op, "PUT op-put ") \ + EM(afs_volume_trace_put_query_alias, "PUT cell-alias") \ + EM(afs_volume_trace_put_validate_fc, "PUT fc-validat") \ + E_(afs_volume_trace_remove, "REMOVE ") + #define afs_fs_operations \ EM(afs_FS_FetchData, "FS.FetchData") \ EM(afs_FS_FetchStatus, "FS.FetchStatus") \ @@ -310,6 +351,7 @@ enum afs_cb_break_reason { 
EM(afs_VL_GetEntryByNameU, "VL.GetEntryByNameU") \ EM(afs_VL_GetAddrsU, "VL.GetAddrsU") \ EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \ + EM(afs_YFSVL_GetCellName, "YFSVL.GetCellName") \ E_(afs_VL_GetCapabilities, "VL.GetCapabilities") #define afs_edit_dir_ops \ @@ -339,6 +381,7 @@ enum afs_cb_break_reason { EM(afs_eproto_bad_status, "BadStatus") \ EM(afs_eproto_cb_count, "CbCount") \ EM(afs_eproto_cb_fid_count, "CbFidCount") \ + EM(afs_eproto_cellname_len, "CellNameLen") \ EM(afs_eproto_file_type, "FileTYpe") \ EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \ EM(afs_eproto_ibulkst_count, "IBS.FidCount") \ @@ -636,7 +679,7 @@ TRACE_EVENT(afs_make_fs_calli, TRACE_EVENT(afs_make_fs_call1, TP_PROTO(struct afs_call *call, const struct afs_fid *fid, - const char *name), + const struct qstr *name), TP_ARGS(call, fid, name), @@ -648,8 +691,7 @@ TRACE_EVENT(afs_make_fs_call1, ), TP_fast_assign( - int __len = strlen(name); - __len = min(__len, 23); + unsigned int __len = min_t(unsigned int, name->len, 23); __entry->call = call->debug_id; __entry->op = call->operation_ID; if (fid) { @@ -659,7 +701,7 @@ TRACE_EVENT(afs_make_fs_call1, __entry->fid.vnode = 0; __entry->fid.unique = 0; } - memcpy(__entry->name, name, __len); + memcpy(__entry->name, name->name, __len); __entry->name[__len] = 0; ), @@ -674,7 +716,7 @@ TRACE_EVENT(afs_make_fs_call1, TRACE_EVENT(afs_make_fs_call2, TP_PROTO(struct afs_call *call, const struct afs_fid *fid, - const char *name, const char *name2), + const struct qstr *name, const struct qstr *name2), TP_ARGS(call, fid, name, name2), @@ -687,10 +729,8 @@ TRACE_EVENT(afs_make_fs_call2, ), TP_fast_assign( - int __len = strlen(name); - int __len2 = strlen(name2); - __len = min(__len, 23); - __len2 = min(__len2, 23); + unsigned int __len = min_t(unsigned int, name->len, 23); + unsigned int __len2 = min_t(unsigned int, name2->len, 23); __entry->call = call->debug_id; __entry->op = call->operation_ID; if (fid) { @@ -700,9 +740,9 @@ TRACE_EVENT(afs_make_fs_call2, __entry->fid.vnode = 0; __entry->fid.unique = 0; } - memcpy(__entry->name, name, __len); + memcpy(__entry->name, name->name, __len); __entry->name[__len] = 0; - memcpy(__entry->name2, name2, __len2); + memcpy(__entry->name2, name2->name, __len2); __entry->name2[__len2] = 0; ), @@ -988,24 +1028,22 @@ TRACE_EVENT(afs_edit_dir, ); TRACE_EVENT(afs_protocol_error, - TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause), + TP_PROTO(struct afs_call *call, enum afs_eproto_cause cause), - TP_ARGS(call, error, cause), + TP_ARGS(call, cause), TP_STRUCT__entry( __field(unsigned int, call ) - __field(int, error ) __field(enum afs_eproto_cause, cause ) ), TP_fast_assign( __entry->call = call ? 
call->debug_id : 0; - __entry->error = error; __entry->cause = cause; ), - TP_printk("c=%08x r=%d %s", - __entry->call, __entry->error, + TP_printk("c=%08x %s", + __entry->call, __print_symbolic(__entry->cause, afs_eproto_causes)) ); @@ -1271,26 +1309,53 @@ TRACE_EVENT(afs_cb_miss, ); TRACE_EVENT(afs_server, - TP_PROTO(struct afs_server *server, int usage, enum afs_server_trace reason), + TP_PROTO(struct afs_server *server, int ref, int active, + enum afs_server_trace reason), - TP_ARGS(server, usage, reason), + TP_ARGS(server, ref, active, reason), TP_STRUCT__entry( __field(unsigned int, server ) - __field(int, usage ) + __field(int, ref ) + __field(int, active ) __field(int, reason ) ), TP_fast_assign( __entry->server = server->debug_id; - __entry->usage = usage; + __entry->ref = ref; + __entry->active = active; __entry->reason = reason; ), - TP_printk("s=%08x %s u=%d", + TP_printk("s=%08x %s u=%d a=%d", __entry->server, __print_symbolic(__entry->reason, afs_server_traces), - __entry->usage) + __entry->ref, + __entry->active) + ); + +TRACE_EVENT(afs_volume, + TP_PROTO(afs_volid_t vid, int ref, enum afs_volume_trace reason), + + TP_ARGS(vid, ref, reason), + + TP_STRUCT__entry( + __field(afs_volid_t, vid ) + __field(int, ref ) + __field(enum afs_volume_trace, reason ) + ), + + TP_fast_assign( + __entry->vid = vid; + __entry->ref = ref; + __entry->reason = reason; + ), + + TP_printk("V=%llx %s u=%d", + __entry->vid, + __print_symbolic(__entry->reason, afs_volume_traces), + __entry->ref) ); #endif /* _TRACE_AFS_H */ diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 81b43f5bdf23..93b114226af8 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -254,16 +254,15 @@ TRACE_EVENT(block_bio_bounce, * block_bio_complete - completed all work on the block operation * @q: queue holding the block operation * @bio: block operation completed - * @error: io error value * * This tracepoint indicates there is no further work to do on this * block IO operation @bio. 
*/ TRACE_EVENT(block_bio_complete, - TP_PROTO(struct request_queue *q, struct bio *bio, int error), + TP_PROTO(struct request_queue *q, struct bio *bio), - TP_ARGS(q, bio, error), + TP_ARGS(q, bio), TP_STRUCT__entry( __field( dev_t, dev ) @@ -277,7 +276,7 @@ TRACE_EVENT(block_bio_complete, __entry->dev = bio_dev(bio); __entry->sector = bio->bi_iter.bi_sector; __entry->nr_sector = bio_sectors(bio); - __entry->error = error; + __entry->error = blk_status_to_errno(bio->bi_status); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); ), diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index bcbc763b8814..360b0f9d2220 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -89,6 +89,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS); { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \ { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \ { IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT" }, \ + { IO_TREE_LOG_CSUM_RANGE, "LOG_CSUM_RANGE" }, \ { IO_TREE_SELFTEST, "SELFTEST" }) #define BTRFS_GROUP_FLAGS \ diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index e5bf6ee4e814..54e5bf081171 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -314,40 +314,44 @@ TRACE_EVENT(mm_compaction_kcompactd_sleep, DECLARE_EVENT_CLASS(kcompactd_wake_template, - TP_PROTO(int nid, int order, enum zone_type classzone_idx), + TP_PROTO(int nid, int order, enum zone_type highest_zoneidx), - TP_ARGS(nid, order, classzone_idx), + TP_ARGS(nid, order, highest_zoneidx), TP_STRUCT__entry( __field(int, nid) __field(int, order) - __field(enum zone_type, classzone_idx) + __field(enum zone_type, highest_zoneidx) ), TP_fast_assign( __entry->nid = nid; __entry->order = order; - __entry->classzone_idx = classzone_idx; + __entry->highest_zoneidx = highest_zoneidx; ), + /* + * classzone_idx is previous name of the highest_zoneidx. + * Reason not to change it is the ABI requirement of the tracepoint. 
+ */ TP_printk("nid=%d order=%d classzone_idx=%-8s", __entry->nid, __entry->order, - __print_symbolic(__entry->classzone_idx, ZONE_TYPE)) + __print_symbolic(__entry->highest_zoneidx, ZONE_TYPE)) ); DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd, - TP_PROTO(int nid, int order, enum zone_type classzone_idx), + TP_PROTO(int nid, int order, enum zone_type highest_zoneidx), - TP_ARGS(nid, order, classzone_idx) + TP_ARGS(nid, order, highest_zoneidx) ); DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake, - TP_PROTO(int nid, int order, enum zone_type classzone_idx), + TP_PROTO(int nid, int order, enum zone_type highest_zoneidx), - TP_ARGS(nid, order, classzone_idx) + TP_ARGS(nid, order, highest_zoneidx) ); #endif diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h index 27f5caa6299a..bf9806fd1306 100644 --- a/include/trace/events/erofs.h +++ b/include/trace/events/erofs.h @@ -113,10 +113,10 @@ TRACE_EVENT(erofs_readpage, TRACE_EVENT(erofs_readpages, - TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, + TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage, bool raw), - TP_ARGS(inode, page, nrpage, raw), + TP_ARGS(inode, start, nrpage, raw), TP_STRUCT__entry( __field(dev_t, dev ) @@ -129,7 +129,7 @@ TRACE_EVENT(erofs_readpages, TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->nid = EROFS_I(inode)->nid; - __entry->start = page->index; + __entry->start = start; __entry->nrpage = nrpage; __entry->raw = raw; ), diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 19c87661eeec..cc41d692ae8e 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -35,7 +35,8 @@ struct partial_cluster; { EXT4_MB_DELALLOC_RESERVED, "DELALLOC_RESV" }, \ { EXT4_MB_STREAM_ALLOC, "STREAM_ALLOC" }, \ { EXT4_MB_USE_ROOT_BLOCKS, "USE_ROOT_BLKS" }, \ - { EXT4_MB_USE_RESERVED, "USE_RESV" }) + { EXT4_MB_USE_RESERVED, "USE_RESV" }, \ + { EXT4_MB_STRICT_CHECK, "STRICT_CHECK" }) #define show_map_flags(flags) __print_flags(flags, "|", \ { EXT4_GET_BLOCKS_CREATE, "CREATE" }, \ @@ -45,8 +46,10 @@ struct partial_cluster; { EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \ { EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \ { EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \ - { EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \ - { EXT4_GET_BLOCKS_ZERO, "ZERO" }) + { EXT4_GET_BLOCKS_CONVERT_UNWRITTEN, "CONVERT_UNWRITTEN" }, \ + { EXT4_GET_BLOCKS_ZERO, "ZERO" }, \ + { EXT4_GET_BLOCKS_IO_SUBMIT, "IO_SUBMIT" }, \ + { EXT4_EX_NOCACHE, "EX_NOCACHE" }) /* * __print_flags() requires that all enum values be wrapped in the diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index d97adfc327f0..8639ab962a71 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -50,6 +50,7 @@ TRACE_DEFINE_ENUM(CP_RECOVERY); TRACE_DEFINE_ENUM(CP_DISCARD); TRACE_DEFINE_ENUM(CP_TRIMMED); TRACE_DEFINE_ENUM(CP_PAUSE); +TRACE_DEFINE_ENUM(CP_RESIZE); #define show_block_type(type) \ __print_symbolic(type, \ @@ -126,7 +127,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE); { CP_RECOVERY, "Recovery" }, \ { CP_DISCARD, "Discard" }, \ { CP_PAUSE, "Pause" }, \ - { CP_TRIMMED, "Trimmed" }) + { CP_TRIMMED, "Trimmed" }, \ + { CP_RESIZE, "Resize" }) #define show_fsync_cpreason(type) \ __print_symbolic(type, \ @@ -154,7 +156,8 @@ TRACE_DEFINE_ENUM(CP_PAUSE); __print_symbolic(type, \ { COMPRESS_LZO, "LZO" }, \ { COMPRESS_LZ4, "LZ4" }, \ - { COMPRESS_ZSTD, "ZSTD" }) + { COMPRESS_ZSTD, "ZSTD" }, \ + { COMPRESS_LZORLE, "LZO-RLE" 
}) struct f2fs_sb_info; struct f2fs_io_info; @@ -1376,9 +1379,9 @@ TRACE_EVENT(f2fs_writepages, TRACE_EVENT(f2fs_readpages, - TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage), + TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage), - TP_ARGS(inode, page, nrpage), + TP_ARGS(inode, start, nrpage), TP_STRUCT__entry( __field(dev_t, dev) @@ -1390,7 +1393,7 @@ TRACE_EVENT(f2fs_readpages, TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->start = page->index; + __entry->start = start; __entry->nrpage = nrpage; ), @@ -1812,6 +1815,82 @@ DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end, TP_ARGS(inode, cluster_idx, compressed_size, ret) ); +TRACE_EVENT(f2fs_iostat, + + TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat), + + TP_ARGS(sbi, iostat), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long long, app_dio) + __field(unsigned long long, app_bio) + __field(unsigned long long, app_wio) + __field(unsigned long long, app_mio) + __field(unsigned long long, fs_dio) + __field(unsigned long long, fs_nio) + __field(unsigned long long, fs_mio) + __field(unsigned long long, fs_gc_dio) + __field(unsigned long long, fs_gc_nio) + __field(unsigned long long, fs_cp_dio) + __field(unsigned long long, fs_cp_nio) + __field(unsigned long long, fs_cp_mio) + __field(unsigned long long, app_drio) + __field(unsigned long long, app_brio) + __field(unsigned long long, app_rio) + __field(unsigned long long, app_mrio) + __field(unsigned long long, fs_drio) + __field(unsigned long long, fs_gdrio) + __field(unsigned long long, fs_cdrio) + __field(unsigned long long, fs_nrio) + __field(unsigned long long, fs_mrio) + __field(unsigned long long, fs_discard) + ), + + TP_fast_assign( + __entry->dev = sbi->sb->s_dev; + __entry->app_dio = iostat[APP_DIRECT_IO]; + __entry->app_bio = iostat[APP_BUFFERED_IO]; + __entry->app_wio = iostat[APP_WRITE_IO]; + __entry->app_mio = iostat[APP_MAPPED_IO]; + __entry->fs_dio = iostat[FS_DATA_IO]; + __entry->fs_nio = iostat[FS_NODE_IO]; + __entry->fs_mio = iostat[FS_META_IO]; + __entry->fs_gc_dio = iostat[FS_GC_DATA_IO]; + __entry->fs_gc_nio = iostat[FS_GC_NODE_IO]; + __entry->fs_cp_dio = iostat[FS_CP_DATA_IO]; + __entry->fs_cp_nio = iostat[FS_CP_NODE_IO]; + __entry->fs_cp_mio = iostat[FS_CP_META_IO]; + __entry->app_drio = iostat[APP_DIRECT_READ_IO]; + __entry->app_brio = iostat[APP_BUFFERED_READ_IO]; + __entry->app_rio = iostat[APP_READ_IO]; + __entry->app_mrio = iostat[APP_MAPPED_READ_IO]; + __entry->fs_drio = iostat[FS_DATA_READ_IO]; + __entry->fs_gdrio = iostat[FS_GDATA_READ_IO]; + __entry->fs_cdrio = iostat[FS_CDATA_READ_IO]; + __entry->fs_nrio = iostat[FS_NODE_READ_IO]; + __entry->fs_mrio = iostat[FS_META_READ_IO]; + __entry->fs_discard = iostat[FS_DISCARD]; + ), + + TP_printk("dev = (%d,%d), " + "app [write=%llu (direct=%llu, buffered=%llu), mapped=%llu], " + "fs [data=%llu, node=%llu, meta=%llu, discard=%llu], " + "gc [data=%llu, node=%llu], " + "cp [data=%llu, node=%llu, meta=%llu], " + "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], " + "fs [data=%llu, (gc_data=%llu, compr_data=%llu), " + "node=%llu, meta=%llu]", + show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, + __entry->app_bio, __entry->app_mio, __entry->fs_dio, + __entry->fs_nio, __entry->fs_mio, __entry->fs_discard, + __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, + __entry->fs_cp_nio, __entry->fs_cp_mio, + __entry->app_rio, __entry->app_drio, __entry->app_brio, + __entry->app_mrio, 
__entry->fs_drio, __entry->fs_gdrio, + __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) +); + #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ diff --git a/include/trace/events/gpu_mem.h b/include/trace/events/gpu_mem.h index 1897822a9150..26d871f96e94 100644 --- a/include/trace/events/gpu_mem.h +++ b/include/trace/events/gpu_mem.h @@ -24,7 +24,7 @@ * * @pid: Put 0 for global total, while positive pid for process total. * - * @size: Virtual size of the allocation in bytes. + * @size: Size of the allocation in bytes. * */ TRACE_EVENT(gpu_mem_total, diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h index 70e32ff096ec..4fdb14a81108 100644 --- a/include/trace/events/huge_memory.h +++ b/include/trace/events/huge_memory.h @@ -12,6 +12,8 @@ EM( SCAN_SUCCEED, "succeeded") \ EM( SCAN_PMD_NULL, "pmd_null") \ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ + EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ + EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \ EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \ EM( SCAN_PAGE_RO, "no_writable_page") \ @@ -31,7 +33,6 @@ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \ - EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ EM( SCAN_TRUNCATED, "truncated") \ EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \ diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h index 7ecaa65b7106..c2f580fd371b 100644 --- a/include/trace/events/iocost.h +++ b/include/trace/events/iocost.h @@ -130,7 +130,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset, TRACE_EVENT(iocost_ioc_vrate_adj, - TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2], + TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm, u32 rq_wait_pct, int nr_lagging, int nr_shortages, int nr_surpluses), @@ -155,8 +155,8 @@ TRACE_EVENT(iocost_ioc_vrate_adj, __entry->old_vrate = atomic64_read(&ioc->vtime_rate);; __entry->new_vrate = new_vrate; __entry->busy_level = ioc->busy_level; - __entry->read_missed_ppm = (*missed_ppm)[READ]; - __entry->write_missed_ppm = (*missed_ppm)[WRITE]; + __entry->read_missed_ppm = missed_ppm[READ]; + __entry->write_missed_ppm = missed_ppm[WRITE]; __entry->rq_wait_pct = rq_wait_pct; __entry->nr_lagging = nr_lagging; __entry->nr_shortages = nr_shortages; diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h index 0d1a9ebf55ba..330d32d84485 100644 --- a/include/trace/events/qdisc.h +++ b/include/trace/events/qdisc.h @@ -8,6 +8,8 @@ #include <linux/netdevice.h> #include <linux/tracepoint.h> #include <linux/ftrace.h> +#include <linux/pkt_sched.h> +#include <net/sch_generic.h> TRACE_EVENT(qdisc_dequeue, @@ -44,6 +46,79 @@ TRACE_EVENT(qdisc_dequeue, __entry->txq_state, __entry->packets, __entry->skbaddr ) ); +TRACE_EVENT(qdisc_reset, + + TP_PROTO(struct Qdisc *q), + + TP_ARGS(q), + + TP_STRUCT__entry( + __string( dev, qdisc_dev(q) ) + __string( kind, q->ops->id ) + __field( u32, parent ) + __field( u32, handle ) + ), + + TP_fast_assign( + __assign_str(dev, qdisc_dev(q)); + __assign_str(kind, q->ops->id); + __entry->parent = q->parent; + __entry->handle = q->handle; + ), + + TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev), + __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent), + TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle)) +); + +TRACE_EVENT(qdisc_destroy, + + 
TP_PROTO(struct Qdisc *q), + + TP_ARGS(q), + + TP_STRUCT__entry( + __string( dev, qdisc_dev(q) ) + __string( kind, q->ops->id ) + __field( u32, parent ) + __field( u32, handle ) + ), + + TP_fast_assign( + __assign_str(dev, qdisc_dev(q)); + __assign_str(kind, q->ops->id); + __entry->parent = q->parent; + __entry->handle = q->handle; + ), + + TP_printk("dev=%s kind=%s parent=%x:%x handle=%x:%x", __get_str(dev), + __get_str(kind), TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent), + TC_H_MAJ(__entry->handle) >> 16, TC_H_MIN(__entry->handle)) +); + +TRACE_EVENT(qdisc_create, + + TP_PROTO(const struct Qdisc_ops *ops, struct net_device *dev, u32 parent), + + TP_ARGS(ops, dev, parent), + + TP_STRUCT__entry( + __string( dev, dev->name ) + __string( kind, ops->id ) + __field( u32, parent ) + ), + + TP_fast_assign( + __assign_str(dev, dev->name); + __assign_str(kind, ops->id); + __entry->parent = parent; + ), + + TP_printk("dev=%s kind=%s parent=%x:%x", + __get_str(dev), __get_str(kind), + TC_H_MAJ(__entry->parent) >> 16, TC_H_MIN(__entry->parent)) +); + #endif /* _TRACE_QDISC_H */ /* This part must be outside protection */ diff --git a/include/trace/events/qla.h b/include/trace/events/qla.h index b71f680968eb..5857cf682ee7 100644 --- a/include/trace/events/qla.h +++ b/include/trace/events/qla.h @@ -9,6 +9,11 @@ #define QLA_MSG_MAX 256 +#pragma GCC diagnostic push +#ifndef __clang__ +#pragma GCC diagnostic ignored "-Wsuggest-attribute=format" +#endif + DECLARE_EVENT_CLASS(qla_log_event, TP_PROTO(const char *buf, struct va_format *vaf), @@ -27,6 +32,8 @@ DECLARE_EVENT_CLASS(qla_log_event, TP_printk("%s %s", __get_str(buf), __get_str(msg)) ); +#pragma GCC diagnostic pop + DEFINE_EVENT(qla_log_event, ql_dbg_log, TP_PROTO(const char *buf, struct va_format *vaf), TP_ARGS(buf, vaf) diff --git a/include/trace/events/qrtr.h b/include/trace/events/qrtr.h new file mode 100644 index 000000000000..b1de14c3bb93 --- /dev/null +++ b/include/trace/events/qrtr.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM qrtr + +#if !defined(_TRACE_QRTR_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_QRTR_H + +#include <linux/qrtr.h> +#include <linux/tracepoint.h> + +TRACE_EVENT(qrtr_ns_service_announce_new, + + TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port), + + TP_ARGS(service, instance, node, port), + + TP_STRUCT__entry( + __field(__le32, service) + __field(__le32, instance) + __field(__le32, node) + __field(__le32, port) + ), + + TP_fast_assign( + __entry->service = service; + __entry->instance = instance; + __entry->node = node; + __entry->port = port; + ), + + TP_printk("advertising new server [%d:%x]@[%d:%d]", + __entry->service, __entry->instance, __entry->node, + __entry->port + ) +); + +TRACE_EVENT(qrtr_ns_service_announce_del, + + TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port), + + TP_ARGS(service, instance, node, port), + + TP_STRUCT__entry( + __field(__le32, service) + __field(__le32, instance) + __field(__le32, node) + __field(__le32, port) + ), + + TP_fast_assign( + __entry->service = service; + __entry->instance = instance; + __entry->node = node; + __entry->port = port; + ), + + TP_printk("advertising removal of server [%d:%x]@[%d:%d]", + __entry->service, __entry->instance, __entry->node, + __entry->port + ) +); + +TRACE_EVENT(qrtr_ns_server_add, + + TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port), + + TP_ARGS(service, instance, node, port), + + TP_STRUCT__entry( + __field(__le32, 
service) + __field(__le32, instance) + __field(__le32, node) + __field(__le32, port) + ), + + TP_fast_assign( + __entry->service = service; + __entry->instance = instance; + __entry->node = node; + __entry->port = port; + ), + + TP_printk("add server [%d:%x]@[%d:%d]", + __entry->service, __entry->instance, __entry->node, + __entry->port + ) +); + +TRACE_EVENT(qrtr_ns_message, + + TP_PROTO(const char * const ctrl_pkt_str, __u32 sq_node, __u32 sq_port), + + TP_ARGS(ctrl_pkt_str, sq_node, sq_port), + + TP_STRUCT__entry( + __string(ctrl_pkt_str, ctrl_pkt_str) + __field(__u32, sq_node) + __field(__u32, sq_port) + ), + + TP_fast_assign( + __assign_str(ctrl_pkt_str, ctrl_pkt_str); + __entry->sq_node = sq_node; + __entry->sq_port = sq_port; + ), + + TP_printk("%s from %d:%d", + __get_str(ctrl_pkt_str), __entry->sq_node, __entry->sq_port + ) +); + +#endif /* _TRACE_QRTR_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h index b70583c32c08..72b3ba93b0a5 100644 --- a/include/trace/events/regulator.h +++ b/include/trace/events/regulator.h @@ -70,6 +70,38 @@ DEFINE_EVENT(regulator_basic, regulator_disable_complete, ); +DEFINE_EVENT(regulator_basic, regulator_bypass_enable, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_bypass_enable_complete, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_bypass_disable, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + +DEFINE_EVENT(regulator_basic, regulator_bypass_disable_complete, + + TP_PROTO(const char *name), + + TP_ARGS(name) + +); + /* * Events that take a range of numerical values, mostly for voltages * and so on. diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h index 32d88c4fb063..b9b51a4b1db1 100644 --- a/include/trace/events/rpcgss.h +++ b/include/trace/events/rpcgss.h @@ -17,6 +17,16 @@ ** GSS-API related trace events **/ +TRACE_DEFINE_ENUM(RPC_GSS_SVC_NONE); +TRACE_DEFINE_ENUM(RPC_GSS_SVC_INTEGRITY); +TRACE_DEFINE_ENUM(RPC_GSS_SVC_PRIVACY); + +#define show_gss_service(x) \ + __print_symbolic(x, \ + { RPC_GSS_SVC_NONE, "none" }, \ + { RPC_GSS_SVC_INTEGRITY, "integrity" }, \ + { RPC_GSS_SVC_PRIVACY, "privacy" }) + TRACE_DEFINE_ENUM(GSS_S_BAD_MECH); TRACE_DEFINE_ENUM(GSS_S_BAD_NAME); TRACE_DEFINE_ENUM(GSS_S_BAD_NAMETYPE); @@ -126,6 +136,40 @@ DEFINE_GSSAPI_EVENT(verify_mic); DEFINE_GSSAPI_EVENT(wrap); DEFINE_GSSAPI_EVENT(unwrap); +DECLARE_EVENT_CLASS(rpcgss_ctx_class, + TP_PROTO( + const struct gss_cred *gc + ), + + TP_ARGS(gc), + + TP_STRUCT__entry( + __field(const void *, cred) + __field(unsigned long, service) + __string(principal, gc->gc_principal) + ), + + TP_fast_assign( + __entry->cred = gc; + __entry->service = gc->gc_service; + __assign_str(principal, gc->gc_principal) + ), + + TP_printk("cred=%p service=%s principal='%s'", + __entry->cred, show_gss_service(__entry->service), + __get_str(principal)) +); + +#define DEFINE_CTX_EVENT(name) \ + DEFINE_EVENT(rpcgss_ctx_class, rpcgss_ctx_##name, \ + TP_PROTO( \ + const struct gss_cred *gc \ + ), \ + TP_ARGS(gc)) + +DEFINE_CTX_EVENT(init); +DEFINE_CTX_EVENT(destroy); + TRACE_EVENT(rpcgss_svc_accept_upcall, TP_PROTO( __be32 xid, @@ -291,6 +335,40 @@ TRACE_EVENT(rpcgss_need_reencode, __entry->ret ? 
"" : "un") ); +TRACE_EVENT(rpcgss_update_slack, + TP_PROTO( + const struct rpc_task *task, + const struct rpc_auth *auth + ), + + TP_ARGS(task, auth), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(u32, xid) + __field(const void *, auth) + __field(unsigned int, rslack) + __field(unsigned int, ralign) + __field(unsigned int, verfsize) + ), + + TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; + __entry->xid = be32_to_cpu(task->tk_rqstp->rq_xid); + __entry->auth = auth; + __entry->rslack = auth->au_rslack; + __entry->ralign = auth->au_ralign; + __entry->verfsize = auth->au_verfsize; + ), + + TP_printk("task:%u@%u xid=0x%08x auth=%p rslack=%u ralign=%u verfsize=%u\n", + __entry->task_id, __entry->client_id, __entry->xid, + __entry->auth, __entry->rslack, __entry->ralign, + __entry->verfsize) +); + DECLARE_EVENT_CLASS(rpcgss_svc_seqno_class, TP_PROTO( __be32 xid, @@ -371,6 +449,7 @@ TRACE_EVENT(rpcgss_upcall_result, TRACE_EVENT(rpcgss_context, TP_PROTO( + u32 window_size, unsigned long expiry, unsigned long now, unsigned int timeout, @@ -378,12 +457,13 @@ TRACE_EVENT(rpcgss_context, const u8 *data ), - TP_ARGS(expiry, now, timeout, len, data), + TP_ARGS(window_size, expiry, now, timeout, len, data), TP_STRUCT__entry( __field(unsigned long, expiry) __field(unsigned long, now) __field(unsigned int, timeout) + __field(u32, window_size) __field(int, len) __string(acceptor, data) ), @@ -392,13 +472,14 @@ TRACE_EVENT(rpcgss_context, __entry->expiry = expiry; __entry->now = now; __entry->timeout = timeout; + __entry->window_size = window_size; __entry->len = len; strncpy(__get_str(acceptor), data, len); ), - TP_printk("gc_expiry=%lu now=%lu timeout=%u acceptor=%.*s", - __entry->expiry, __entry->now, __entry->timeout, - __entry->len, __get_str(acceptor)) + TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s", + __entry->window_size, __entry->expiry, __entry->now, + __entry->timeout, __entry->len, __get_str(acceptor)) ); diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h index 051f26fedc4d..0f05a6e2b9cb 100644 --- a/include/trace/events/rpcrdma.h +++ b/include/trace/events/rpcrdma.h @@ -380,12 +380,8 @@ TRACE_EVENT(xprtrdma_inline_thresh, DEFINE_CONN_EVENT(connect); DEFINE_CONN_EVENT(disconnect); -DEFINE_CONN_EVENT(flush_dct); -DEFINE_RXPRT_EVENT(xprtrdma_create); -DEFINE_RXPRT_EVENT(xprtrdma_op_destroy); DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc); -DEFINE_RXPRT_EVENT(xprtrdma_op_close); DEFINE_RXPRT_EVENT(xprtrdma_op_setport); TRACE_EVENT(xprtrdma_op_connect, @@ -692,11 +688,10 @@ TRACE_EVENT(xprtrdma_prepsend_failed, TRACE_EVENT(xprtrdma_post_send, TP_PROTO( - const struct rpcrdma_req *req, - int status + const struct rpcrdma_req *req ), - TP_ARGS(req, status), + TP_ARGS(req), TP_STRUCT__entry( __field(const void *, req) @@ -705,7 +700,6 @@ TRACE_EVENT(xprtrdma_post_send, __field(unsigned int, client_id) __field(int, num_sge) __field(int, signaled) - __field(int, status) ), TP_fast_assign( @@ -718,15 +712,13 @@ TRACE_EVENT(xprtrdma_post_send, __entry->sc = req->rl_sendctx; __entry->num_sge = req->rl_wr.num_sge; __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED; - __entry->status = status; ), - TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d", + TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s", __entry->task_id, __entry->client_id, __entry->req, __entry->sc, __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"), - (__entry->signaled ? 
"signaled " : ""), - __entry->status + (__entry->signaled ? "signaled" : "") ) ); @@ -1283,38 +1275,42 @@ TRACE_EVENT(xprtrdma_leaked_rep, ** Server-side RPC/RDMA events **/ -DECLARE_EVENT_CLASS(svcrdma_xprt_event, +DECLARE_EVENT_CLASS(svcrdma_accept_class, TP_PROTO( - const struct svc_xprt *xprt + const struct svcxprt_rdma *rdma, + long status ), - TP_ARGS(xprt), + TP_ARGS(rdma, status), TP_STRUCT__entry( - __field(const void *, xprt) - __string(addr, xprt->xpt_remotebuf) + __field(long, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; - __assign_str(addr, xprt->xpt_remotebuf); + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s", - __entry->xprt, __get_str(addr) + TP_printk("addr=%s status=%ld", + __get_str(addr), __entry->status ) ); -#define DEFINE_XPRT_EVENT(name) \ - DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \ - TP_PROTO( \ - const struct svc_xprt *xprt \ - ), \ - TP_ARGS(xprt)) +#define DEFINE_ACCEPT_EVENT(name) \ + DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \ + TP_PROTO( \ + const struct svcxprt_rdma *rdma, \ + long status \ + ), \ + TP_ARGS(rdma, status)) -DEFINE_XPRT_EVENT(accept); -DEFINE_XPRT_EVENT(fail); -DEFINE_XPRT_EVENT(free); +DEFINE_ACCEPT_EVENT(pd); +DEFINE_ACCEPT_EVENT(qp); +DEFINE_ACCEPT_EVENT(fabric); +DEFINE_ACCEPT_EVENT(initdepth); +DEFINE_ACCEPT_EVENT(accept); TRACE_DEFINE_ENUM(RDMA_MSG); TRACE_DEFINE_ENUM(RDMA_NOMSG); @@ -1359,7 +1355,7 @@ TRACE_EVENT(svcrdma_decode_rqst, show_rpcrdma_proc(__entry->proc), __entry->hdrlen) ); -TRACE_EVENT(svcrdma_decode_short, +TRACE_EVENT(svcrdma_decode_short_err, TP_PROTO( unsigned int hdrlen ), @@ -1403,7 +1399,8 @@ DECLARE_EVENT_CLASS(svcrdma_badreq_event, ); #define DEFINE_BADREQ_EVENT(name) \ - DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\ + DEFINE_EVENT(svcrdma_badreq_event, \ + svcrdma_decode_##name##_err, \ TP_PROTO( \ __be32 *p \ ), \ @@ -1587,28 +1584,117 @@ DECLARE_EVENT_CLASS(svcrdma_dma_map_class, DEFINE_SVC_DMA_EVENT(dma_map_page); DEFINE_SVC_DMA_EVENT(dma_unmap_page); -TRACE_EVENT(svcrdma_dma_map_rwctx, +TRACE_EVENT(svcrdma_dma_map_rw_err, TP_PROTO( const struct svcxprt_rdma *rdma, + unsigned int nents, int status ), - TP_ARGS(rdma, status), + TP_ARGS(rdma, nents, status), TP_STRUCT__entry( __field(int, status) + __field(unsigned int, nents) __string(device, rdma->sc_cm_id->device->name) __string(addr, rdma->sc_xprt.xpt_remotebuf) ), TP_fast_assign( __entry->status = status; + __entry->nents = nents; __assign_str(device, rdma->sc_cm_id->device->name); __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); ), - TP_printk("addr=%s device=%s status=%d", - __get_str(addr), __get_str(device), __entry->status + TP_printk("addr=%s device=%s nents=%u status=%d", + __get_str(addr), __get_str(device), __entry->nents, + __entry->status + ) +); + +TRACE_EVENT(svcrdma_no_rwctx_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + unsigned int num_sges + ), + + TP_ARGS(rdma, num_sges), + + TP_STRUCT__entry( + __field(unsigned int, num_sges) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->num_sges = num_sges; + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s num_sges=%d", + __get_str(addr), __get_str(device), __entry->num_sges + ) +); + +TRACE_EVENT(svcrdma_page_overrun_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, 
+ const struct svc_rqst *rqst, + unsigned int pageno + ), + + TP_ARGS(rdma, rqst, pageno), + + TP_STRUCT__entry( + __field(unsigned int, pageno) + __field(u32, xid) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->pageno = pageno; + __entry->xid = __be32_to_cpu(rqst->rq_xid); + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr), + __get_str(device), __entry->xid, __entry->pageno + ) +); + +TRACE_EVENT(svcrdma_small_wrch_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + unsigned int remaining, + unsigned int seg_no, + unsigned int num_segs + ), + + TP_ARGS(rdma, remaining, seg_no, num_segs), + + TP_STRUCT__entry( + __field(unsigned int, remaining) + __field(unsigned int, seg_no) + __field(unsigned int, num_segs) + __string(device, rdma->sc_cm_id->device->name) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->remaining = remaining; + __entry->seg_no = seg_no; + __entry->num_segs = num_segs; + __assign_str(device, rdma->sc_cm_id->device->name); + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u", + __get_str(addr), __get_str(device), __entry->remaining, + __entry->seg_no, __entry->num_segs ) ); @@ -1695,17 +1781,15 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event, TRACE_EVENT(svcrdma_post_send, TP_PROTO( - const struct ib_send_wr *wr, - int status + const struct ib_send_wr *wr ), - TP_ARGS(wr, status), + TP_ARGS(wr), TP_STRUCT__entry( __field(const void *, cqe) __field(unsigned int, num_sge) __field(u32, inv_rkey) - __field(int, status) ), TP_fast_assign( @@ -1713,12 +1797,11 @@ TRACE_EVENT(svcrdma_post_send, __entry->num_sge = wr->num_sge; __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ? 
wr->ex.invalidate_rkey : 0; - __entry->status = status; ), - TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d", + TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x", __entry->cqe, __entry->num_sge, - __entry->inv_rkey, __entry->status + __entry->inv_rkey ) ); @@ -1783,26 +1866,23 @@ TRACE_EVENT(svcrdma_wc_receive, TRACE_EVENT(svcrdma_post_rw, TP_PROTO( const void *cqe, - int sqecount, - int status + int sqecount ), - TP_ARGS(cqe, sqecount, status), + TP_ARGS(cqe, sqecount), TP_STRUCT__entry( __field(const void *, cqe) __field(int, sqecount) - __field(int, status) ), TP_fast_assign( __entry->cqe = cqe; __entry->sqecount = sqecount; - __entry->status = status; ), - TP_printk("cqe=%p sqecount=%d status=%d", - __entry->cqe, __entry->sqecount, __entry->status + TP_printk("cqe=%p sqecount=%d", + __entry->cqe, __entry->sqecount ) ); @@ -1870,6 +1950,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event, DEFINE_SQ_EVENT(full); DEFINE_SQ_EVENT(retry); +TRACE_EVENT(svcrdma_sq_post_err, + TP_PROTO( + const struct svcxprt_rdma *rdma, + int status + ), + + TP_ARGS(rdma, status), + + TP_STRUCT__entry( + __field(int, avail) + __field(int, depth) + __field(int, status) + __string(addr, rdma->sc_xprt.xpt_remotebuf) + ), + + TP_fast_assign( + __entry->avail = atomic_read(&rdma->sc_sq_avail); + __entry->depth = rdma->sc_sq_depth; + __entry->status = status; + __assign_str(addr, rdma->sc_xprt.xpt_remotebuf); + ), + + TP_printk("addr=%s sc_sq_avail=%d/%d status=%d", + __get_str(addr), __entry->avail, __entry->depth, + __entry->status + ) +); + #endif /* _TRACE_RPCRDMA_H */ #include <trace/define_trace.h> diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 191fe447f990..ba9efdc848f9 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -1112,18 +1112,17 @@ TRACE_EVENT(rxrpc_rtt_tx, TRACE_EVENT(rxrpc_rtt_rx, TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, - s64 rtt, u8 nr, s64 avg), + u32 rtt, u32 rto), - TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg), + TP_ARGS(call, why, send_serial, resp_serial, rtt, rto), TP_STRUCT__entry( __field(unsigned int, call ) __field(enum rxrpc_rtt_rx_trace, why ) - __field(u8, nr ) __field(rxrpc_serial_t, send_serial ) __field(rxrpc_serial_t, resp_serial ) - __field(s64, rtt ) - __field(u64, avg ) + __field(u32, rtt ) + __field(u32, rto ) ), TP_fast_assign( @@ -1132,18 +1131,16 @@ TRACE_EVENT(rxrpc_rtt_rx, __entry->send_serial = send_serial; __entry->resp_serial = resp_serial; __entry->rtt = rtt; - __entry->nr = nr; - __entry->avg = avg; + __entry->rto = rto; ), - TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", + TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u", __entry->call, __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), __entry->send_serial, __entry->resp_serial, __entry->rtt, - __entry->nr, - __entry->avg) + __entry->rto) ); TRACE_EVENT(rxrpc_timer, @@ -1544,6 +1541,41 @@ TRACE_EVENT(rxrpc_notify_socket, __entry->serial) ); +TRACE_EVENT(rxrpc_rx_discard_ack, + TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, + rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, + rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), + + TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, + prev_pkt, call_ackr_prev), + + TP_STRUCT__entry( + __field(unsigned int, debug_id ) + __field(rxrpc_serial_t, serial ) + __field(rxrpc_seq_t, first_soft_ack) + __field(rxrpc_seq_t, call_ackr_first) + __field(rxrpc_seq_t, prev_pkt) + 
__field(rxrpc_seq_t, call_ackr_prev) + ), + + TP_fast_assign( + __entry->debug_id = debug_id; + __entry->serial = serial; + __entry->first_soft_ack = first_soft_ack; + __entry->call_ackr_first = call_ackr_first; + __entry->prev_pkt = prev_pkt; + __entry->call_ackr_prev = call_ackr_prev; + ), + + TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", + __entry->debug_id, + __entry->serial, + __entry->first_soft_ack, + __entry->call_ackr_first, + __entry->prev_pkt, + __entry->call_ackr_prev) + ); + #endif /* _TRACE_RXRPC_H */ /* This part must be outside protection */ diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ffd2215950dc..6a12935b8b14 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -14,14 +14,50 @@ #include <linux/net.h> #include <linux/tracepoint.h> -DECLARE_EVENT_CLASS(xdr_buf_class, +TRACE_DEFINE_ENUM(SOCK_STREAM); +TRACE_DEFINE_ENUM(SOCK_DGRAM); +TRACE_DEFINE_ENUM(SOCK_RAW); +TRACE_DEFINE_ENUM(SOCK_RDM); +TRACE_DEFINE_ENUM(SOCK_SEQPACKET); +TRACE_DEFINE_ENUM(SOCK_DCCP); +TRACE_DEFINE_ENUM(SOCK_PACKET); + +#define show_socket_type(type) \ + __print_symbolic(type, \ + { SOCK_STREAM, "STREAM" }, \ + { SOCK_DGRAM, "DGRAM" }, \ + { SOCK_RAW, "RAW" }, \ + { SOCK_RDM, "RDM" }, \ + { SOCK_SEQPACKET, "SEQPACKET" }, \ + { SOCK_DCCP, "DCCP" }, \ + { SOCK_PACKET, "PACKET" }) + +/* This list is known to be incomplete, add new enums as needed. */ +TRACE_DEFINE_ENUM(AF_UNSPEC); +TRACE_DEFINE_ENUM(AF_UNIX); +TRACE_DEFINE_ENUM(AF_LOCAL); +TRACE_DEFINE_ENUM(AF_INET); +TRACE_DEFINE_ENUM(AF_INET6); + +#define rpc_show_address_family(family) \ + __print_symbolic(family, \ + { AF_UNSPEC, "AF_UNSPEC" }, \ + { AF_UNIX, "AF_UNIX" }, \ + { AF_LOCAL, "AF_LOCAL" }, \ + { AF_INET, "AF_INET" }, \ + { AF_INET6, "AF_INET6" }) + +DECLARE_EVENT_CLASS(rpc_xdr_buf_class, TP_PROTO( + const struct rpc_task *task, const struct xdr_buf *xdr ), - TP_ARGS(xdr), + TP_ARGS(task, xdr), TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) __field(const void *, head_base) __field(size_t, head_len) __field(const void *, tail_base) @@ -31,6 +67,8 @@ DECLARE_EVENT_CLASS(xdr_buf_class, ), TP_fast_assign( + __entry->task_id = task->tk_pid; + __entry->client_id = task->tk_client->cl_clid; __entry->head_base = xdr->head[0].iov_base; __entry->head_len = xdr->head[0].iov_len; __entry->tail_base = xdr->tail[0].iov_base; @@ -39,23 +77,137 @@ DECLARE_EVENT_CLASS(xdr_buf_class, __entry->msg_len = xdr->len; ), - TP_printk("head=[%p,%zu] page=%u tail=[%p,%zu] len=%u", + TP_printk("task:%u@%u head=[%p,%zu] page=%u tail=[%p,%zu] len=%u", + __entry->task_id, __entry->client_id, __entry->head_base, __entry->head_len, __entry->page_len, __entry->tail_base, __entry->tail_len, __entry->msg_len ) ); -#define DEFINE_XDRBUF_EVENT(name) \ - DEFINE_EVENT(xdr_buf_class, name, \ +#define DEFINE_RPCXDRBUF_EVENT(name) \ + DEFINE_EVENT(rpc_xdr_buf_class, \ + rpc_xdr_##name, \ TP_PROTO( \ + const struct rpc_task *task, \ const struct xdr_buf *xdr \ ), \ - TP_ARGS(xdr)) + TP_ARGS(task, xdr)) + +DEFINE_RPCXDRBUF_EVENT(sendto); +DEFINE_RPCXDRBUF_EVENT(recvfrom); +DEFINE_RPCXDRBUF_EVENT(reply_pages); + + +DECLARE_EVENT_CLASS(rpc_clnt_class, + TP_PROTO( + const struct rpc_clnt *clnt + ), + + TP_ARGS(clnt), + + TP_STRUCT__entry( + __field(unsigned int, client_id) + ), + + TP_fast_assign( + __entry->client_id = clnt->cl_clid; + ), + + TP_printk("clid=%u", __entry->client_id) +); + +#define DEFINE_RPC_CLNT_EVENT(name) \ + DEFINE_EVENT(rpc_clnt_class, \ + rpc_clnt_##name, \ + 
TP_PROTO( \ + const struct rpc_clnt *clnt \ + ), \ + TP_ARGS(clnt)) + +DEFINE_RPC_CLNT_EVENT(free); +DEFINE_RPC_CLNT_EVENT(killall); +DEFINE_RPC_CLNT_EVENT(shutdown); +DEFINE_RPC_CLNT_EVENT(release); +DEFINE_RPC_CLNT_EVENT(replace_xprt); +DEFINE_RPC_CLNT_EVENT(replace_xprt_err); + +TRACE_EVENT(rpc_clnt_new, + TP_PROTO( + const struct rpc_clnt *clnt, + const struct rpc_xprt *xprt, + const char *program, + const char *server + ), + + TP_ARGS(clnt, xprt, program, server), + + TP_STRUCT__entry( + __field(unsigned int, client_id) + __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) + __string(program, program) + __string(server, server) + ), + + TP_fast_assign( + __entry->client_id = clnt->cl_clid; + __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + __assign_str(program, program) + __assign_str(server, server) + ), + + TP_printk("client=%u peer=[%s]:%s program=%s server=%s", + __entry->client_id, __get_str(addr), __get_str(port), + __get_str(program), __get_str(server)) +); + +TRACE_EVENT(rpc_clnt_new_err, + TP_PROTO( + const char *program, + const char *server, + int error + ), + + TP_ARGS(program, server, error), + + TP_STRUCT__entry( + __field(int, error) + __string(program, program) + __string(server, server) + ), + + TP_fast_assign( + __entry->error = error; + __assign_str(program, program) + __assign_str(server, server) + ), + + TP_printk("program=%s server=%s error=%d", + __get_str(program), __get_str(server), __entry->error) +); + +TRACE_EVENT(rpc_clnt_clone_err, + TP_PROTO( + const struct rpc_clnt *clnt, + int error + ), + + TP_ARGS(clnt, error), + + TP_STRUCT__entry( + __field(unsigned int, client_id) + __field(int, error) + ), + + TP_fast_assign( + __entry->client_id = clnt->cl_clid; + __entry->error = error; + ), + + TP_printk("client=%u error=%d", __entry->client_id, __entry->error) +); -DEFINE_XDRBUF_EVENT(xprt_sendto); -DEFINE_XDRBUF_EVENT(xprt_recvfrom); -DEFINE_XDRBUF_EVENT(svc_recvfrom); -DEFINE_XDRBUF_EVENT(svc_sendto); TRACE_DEFINE_ENUM(RPC_AUTH_OK); TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED); @@ -142,29 +294,35 @@ TRACE_EVENT(rpc_request, TRACE_DEFINE_ENUM(RPC_TASK_ASYNC); TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER); +TRACE_DEFINE_ENUM(RPC_TASK_NULLCREDS); TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN); TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS); TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC); +TRACE_DEFINE_ENUM(RPC_TASK_NO_ROUND_ROBIN); TRACE_DEFINE_ENUM(RPC_TASK_SOFT); TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN); TRACE_DEFINE_ENUM(RPC_TASK_SENT); TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT); TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT); TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT); +TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF); #define rpc_show_task_flags(flags) \ __print_flags(flags, "|", \ { RPC_TASK_ASYNC, "ASYNC" }, \ { RPC_TASK_SWAPPER, "SWAPPER" }, \ + { RPC_TASK_NULLCREDS, "NULLCREDS" }, \ { RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \ { RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \ { RPC_TASK_DYNAMIC, "DYNAMIC" }, \ + { RPC_TASK_NO_ROUND_ROBIN, "NO_ROUND_ROBIN" }, \ { RPC_TASK_SOFT, "SOFT" }, \ { RPC_TASK_SOFTCONN, "SOFTCONN" }, \ { RPC_TASK_SENT, "SENT" }, \ { RPC_TASK_TIMEOUT, "TIMEOUT" }, \ { RPC_TASK_NOCONNECT, "NOCONNECT" }, \ - { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }) + { RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \ + { RPC_TASK_CRED_NOREF, "CRED_NOREF" }) TRACE_DEFINE_ENUM(RPC_TASK_RUNNING); TRACE_DEFINE_ENUM(RPC_TASK_QUEUED); @@ -359,6 +517,34 @@ DEFINE_RPC_REPLY_EVENT(stale_creds); 
DEFINE_RPC_REPLY_EVENT(bad_creds); DEFINE_RPC_REPLY_EVENT(auth_tooweak); +TRACE_EVENT(rpc_call_rpcerror, + TP_PROTO( + const struct rpc_task *task, + int tk_status, + int rpc_status + ), + + TP_ARGS(task, tk_status, rpc_status), + + TP_STRUCT__entry( + __field(unsigned int, task_id) + __field(unsigned int, client_id) + __field(int, tk_status) + __field(int, rpc_status) + ), + + TP_fast_assign( + __entry->client_id = task->tk_client->cl_clid; + __entry->task_id = task->tk_pid; + __entry->tk_status = tk_status; + __entry->rpc_status = rpc_status; + ), + + TP_printk("task:%u@%u tk_status=%d rpc_status=%d", + __entry->task_id, __entry->client_id, + __entry->tk_status, __entry->rpc_status) +); + TRACE_EVENT(rpc_stats_latency, TP_PROTO( @@ -526,43 +712,6 @@ TRACE_EVENT(rpc_xdr_alignment, ) ); -TRACE_EVENT(rpc_reply_pages, - TP_PROTO( - const struct rpc_rqst *req - ), - - TP_ARGS(req), - - TP_STRUCT__entry( - __field(unsigned int, task_id) - __field(unsigned int, client_id) - __field(const void *, head_base) - __field(size_t, head_len) - __field(const void *, tail_base) - __field(size_t, tail_len) - __field(unsigned int, page_len) - ), - - TP_fast_assign( - __entry->task_id = req->rq_task->tk_pid; - __entry->client_id = req->rq_task->tk_client->cl_clid; - - __entry->head_base = req->rq_rcv_buf.head[0].iov_base; - __entry->head_len = req->rq_rcv_buf.head[0].iov_len; - __entry->page_len = req->rq_rcv_buf.page_len; - __entry->tail_base = req->rq_rcv_buf.tail[0].iov_base; - __entry->tail_len = req->rq_rcv_buf.tail[0].iov_len; - ), - - TP_printk( - "task:%u@%u xdr=[%p,%zu]/%u/[%p,%zu]\n", - __entry->task_id, __entry->client_id, - __entry->head_base, __entry->head_len, - __entry->page_len, - __entry->tail_base, __entry->tail_len - ) -); - /* * First define the enums in the below macros to be exported to userspace * via TRACE_DEFINE_ENUM(). 
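The comment above refers to an idiom used throughout these trace headers (huge_memory.h in this same patch uses it, and afs.h uses the same idea with an E_ terminator): one list macro is expanded twice, first as TRACE_DEFINE_ENUM() entries so user-space tools can resolve the symbolic values, then as the value/string table consumed by __print_symbolic(). A generic sketch of the pattern; EXAMPLE_STATES and its members are illustrative names, not taken from sunrpc.h:

#define EXAMPLE_STATES \
        EM( EX_STATE_IDLE,      "IDLE" )        \
        EMe(EX_STATE_BUSY,      "BUSY" )

/* First expansion: export the enum values to user space */
#undef EM
#undef EMe
#define EM(a, b)        TRACE_DEFINE_ENUM(a);
#define EMe(a, b)       TRACE_DEFINE_ENUM(a);

EXAMPLE_STATES

/* Second expansion: build the __print_symbolic() lookup table */
#undef EM
#undef EMe
#define EM(a, b)        { a, b },
#define EMe(a, b)       { a, b }

#define show_example_state(x) \
        __print_symbolic(x, EXAMPLE_STATES)

EMe() is the terminator variant: it emits the final table entry without a trailing comma so the __print_symbolic() argument list stays well formed.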
@@ -575,9 +724,9 @@ TRACE_EVENT(rpc_reply_pages, #define RPC_SHOW_SOCKET \ EM( SS_FREE, "FREE" ) \ EM( SS_UNCONNECTED, "UNCONNECTED" ) \ - EM( SS_CONNECTING, "CONNECTING," ) \ - EM( SS_CONNECTED, "CONNECTED," ) \ - EMe(SS_DISCONNECTING, "DISCONNECTING" ) + EM( SS_CONNECTING, "CONNECTING" ) \ + EM( SS_CONNECTED, "CONNECTED" ) \ + EMe( SS_DISCONNECTING, "DISCONNECTING" ) #define rpc_show_socket_state(state) \ __print_symbolic(state, RPC_SHOW_SOCKET) @@ -719,6 +868,69 @@ DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection); DEFINE_RPC_SOCKET_EVENT(rpc_socket_close); DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown); +TRACE_DEFINE_ENUM(XPRT_LOCKED); +TRACE_DEFINE_ENUM(XPRT_CONNECTED); +TRACE_DEFINE_ENUM(XPRT_CONNECTING); +TRACE_DEFINE_ENUM(XPRT_CLOSE_WAIT); +TRACE_DEFINE_ENUM(XPRT_BOUND); +TRACE_DEFINE_ENUM(XPRT_BINDING); +TRACE_DEFINE_ENUM(XPRT_CLOSING); +TRACE_DEFINE_ENUM(XPRT_CONGESTED); +TRACE_DEFINE_ENUM(XPRT_CWND_WAIT); +TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE); + +#define rpc_show_xprt_state(x) \ + __print_flags(x, "|", \ + { (1UL << XPRT_LOCKED), "LOCKED"}, \ + { (1UL << XPRT_CONNECTED), "CONNECTED"}, \ + { (1UL << XPRT_CONNECTING), "CONNECTING"}, \ + { (1UL << XPRT_CLOSE_WAIT), "CLOSE_WAIT"}, \ + { (1UL << XPRT_BOUND), "BOUND"}, \ + { (1UL << XPRT_BINDING), "BINDING"}, \ + { (1UL << XPRT_CLOSING), "CLOSING"}, \ + { (1UL << XPRT_CONGESTED), "CONGESTED"}, \ + { (1UL << XPRT_CWND_WAIT), "CWND_WAIT"}, \ + { (1UL << XPRT_WRITE_SPACE), "WRITE_SPACE"}) + +DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class, + TP_PROTO( + const struct rpc_xprt *xprt + ), + + TP_ARGS(xprt), + + TP_STRUCT__entry( + __field(unsigned long, state) + __string(addr, xprt->address_strings[RPC_DISPLAY_ADDR]) + __string(port, xprt->address_strings[RPC_DISPLAY_PORT]) + ), + + TP_fast_assign( + __entry->state = xprt->state; + __assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]); + __assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]); + ), + + TP_printk("peer=[%s]:%s state=%s", + __get_str(addr), __get_str(port), + rpc_show_xprt_state(__entry->state)) +); + +#define DEFINE_RPC_XPRT_LIFETIME_EVENT(name) \ + DEFINE_EVENT(rpc_xprt_lifetime_class, \ + xprt_##name, \ + TP_PROTO( \ + const struct rpc_xprt *xprt \ + ), \ + TP_ARGS(xprt)) + +DEFINE_RPC_XPRT_LIFETIME_EVENT(create); +DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto); +DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done); +DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force); +DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup); +DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy); + DECLARE_EVENT_CLASS(rpc_xprt_event, TP_PROTO( const struct rpc_xprt *xprt, @@ -990,6 +1202,54 @@ TRACE_EVENT(xs_stream_read_request, __entry->copied, __entry->reclen, __entry->offset) ); + +DECLARE_EVENT_CLASS(svc_xdr_buf_class, + TP_PROTO( + const struct svc_rqst *rqst, + const struct xdr_buf *xdr + ), + + TP_ARGS(rqst, xdr), + + TP_STRUCT__entry( + __field(u32, xid) + __field(const void *, head_base) + __field(size_t, head_len) + __field(const void *, tail_base) + __field(size_t, tail_len) + __field(unsigned int, page_len) + __field(unsigned int, msg_len) + ), + + TP_fast_assign( + __entry->xid = be32_to_cpu(rqst->rq_xid); + __entry->head_base = xdr->head[0].iov_base; + __entry->head_len = xdr->head[0].iov_len; + __entry->tail_base = xdr->tail[0].iov_base; + __entry->tail_len = xdr->tail[0].iov_len; + __entry->page_len = xdr->page_len; + __entry->msg_len = xdr->len; + ), + + TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u", + __entry->xid, + __entry->head_base, __entry->head_len, 
__entry->page_len, + __entry->tail_base, __entry->tail_len, __entry->msg_len + ) +); + +#define DEFINE_SVCXDRBUF_EVENT(name) \ + DEFINE_EVENT(svc_xdr_buf_class, \ + svc_xdr_##name, \ + TP_PROTO( \ + const struct svc_rqst *rqst, \ + const struct xdr_buf *xdr \ + ), \ + TP_ARGS(rqst, xdr)) + +DEFINE_SVCXDRBUF_EVENT(recvfrom); +DEFINE_SVCXDRBUF_EVENT(sendto); + #define show_rqstp_flags(flags) \ __print_flags(flags, "|", \ { (1UL << RQ_SECURE), "RQ_SECURE"}, \ @@ -1024,6 +1284,17 @@ TRACE_EVENT(svc_recv, show_rqstp_flags(__entry->flags)) ); +TRACE_DEFINE_ENUM(SVC_GARBAGE); +TRACE_DEFINE_ENUM(SVC_SYSERR); +TRACE_DEFINE_ENUM(SVC_VALID); +TRACE_DEFINE_ENUM(SVC_NEGATIVE); +TRACE_DEFINE_ENUM(SVC_OK); +TRACE_DEFINE_ENUM(SVC_DROP); +TRACE_DEFINE_ENUM(SVC_CLOSE); +TRACE_DEFINE_ENUM(SVC_DENIED); +TRACE_DEFINE_ENUM(SVC_PENDING); +TRACE_DEFINE_ENUM(SVC_COMPLETE); + #define svc_show_status(status) \ __print_symbolic(status, \ { SVC_GARBAGE, "SVC_GARBAGE" }, \ @@ -1167,28 +1438,54 @@ DEFINE_EVENT(svc_rqst_status, svc_send, { (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \ { (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"}) +TRACE_EVENT(svc_xprt_create_err, + TP_PROTO( + const char *program, + const char *protocol, + struct sockaddr *sap, + const struct svc_xprt *xprt + ), + + TP_ARGS(program, protocol, sap, xprt), + + TP_STRUCT__entry( + __field(long, error) + __string(program, program) + __string(protocol, protocol) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + __entry->error = PTR_ERR(xprt); + __assign_str(program, program); + __assign_str(protocol, protocol); + memcpy(__entry->addr, sap, sizeof(__entry->addr)); + ), + + TP_printk("addr=%pISpc program=%s protocol=%s error=%ld", + __entry->addr, __get_str(program), __get_str(protocol), + __entry->error) +); + TRACE_EVENT(svc_xprt_do_enqueue, TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst), TP_ARGS(xprt, rqst), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(int, pid) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; __entry->pid = rqst? 
rqst->rq_task->pid : 0; __entry->flags = xprt->xpt_flags; __assign_str(addr, xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s pid=%d flags=%s", - __entry->xprt, __get_str(addr), - __entry->pid, show_svc_xprt_flags(__entry->flags)) + TP_printk("addr=%s pid=%d flags=%s", __get_str(addr), + __entry->pid, show_svc_xprt_flags(__entry->flags)) ); DECLARE_EVENT_CLASS(svc_xprt_event, @@ -1197,25 +1494,55 @@ DECLARE_EVENT_CLASS(svc_xprt_event, TP_ARGS(xprt), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; __entry->flags = xprt->xpt_flags; __assign_str(addr, xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s flags=%s", - __entry->xprt, __get_str(addr), - show_svc_xprt_flags(__entry->flags)) + TP_printk("addr=%s flags=%s", __get_str(addr), + show_svc_xprt_flags(__entry->flags)) ); -DEFINE_EVENT(svc_xprt_event, svc_xprt_no_write_space, - TP_PROTO(struct svc_xprt *xprt), - TP_ARGS(xprt)); +#define DEFINE_SVC_XPRT_EVENT(name) \ + DEFINE_EVENT(svc_xprt_event, svc_xprt_##name, \ + TP_PROTO( \ + struct svc_xprt *xprt \ + ), \ + TP_ARGS(xprt)) + +DEFINE_SVC_XPRT_EVENT(no_write_space); +DEFINE_SVC_XPRT_EVENT(close); +DEFINE_SVC_XPRT_EVENT(detach); +DEFINE_SVC_XPRT_EVENT(free); + +TRACE_EVENT(svc_xprt_accept, + TP_PROTO( + const struct svc_xprt *xprt, + const char *service + ), + + TP_ARGS(xprt, service), + + TP_STRUCT__entry( + __string(addr, xprt->xpt_remotebuf) + __string(protocol, xprt->xpt_class->xcl_name) + __string(service, service) + ), + + TP_fast_assign( + __assign_str(addr, xprt->xpt_remotebuf); + __assign_str(protocol, xprt->xpt_class->xcl_name) + __assign_str(service, service); + ), + + TP_printk("addr=%s protocol=%s service=%s", + __get_str(addr), __get_str(protocol), __get_str(service) + ) +); TRACE_EVENT(svc_xprt_dequeue, TP_PROTO(struct svc_rqst *rqst), @@ -1223,24 +1550,20 @@ TRACE_EVENT(svc_xprt_dequeue, TP_ARGS(rqst), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(unsigned long, flags) __field(unsigned long, wakeup) __string(addr, rqst->rq_xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = rqst->rq_xprt; __entry->flags = rqst->rq_xprt->xpt_flags; __entry->wakeup = ktime_to_us(ktime_sub(ktime_get(), rqst->rq_qtime)); __assign_str(addr, rqst->rq_xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s flags=%s wakeup-us=%lu", - __entry->xprt, __get_str(addr), - show_svc_xprt_flags(__entry->flags), - __entry->wakeup) + TP_printk("addr=%s flags=%s wakeup-us=%lu", __get_str(addr), + show_svc_xprt_flags(__entry->flags), __entry->wakeup) ); TRACE_EVENT(svc_wake_up, @@ -1265,21 +1588,18 @@ TRACE_EVENT(svc_handle_xprt, TP_ARGS(xprt, len), TP_STRUCT__entry( - __field(struct svc_xprt *, xprt) __field(int, len) __field(unsigned long, flags) __string(addr, xprt->xpt_remotebuf) ), TP_fast_assign( - __entry->xprt = xprt; __entry->len = len; __entry->flags = xprt->xpt_flags; __assign_str(addr, xprt->xpt_remotebuf); ), - TP_printk("xprt=%p addr=%s len=%d flags=%s", - __entry->xprt, __get_str(addr), + TP_printk("addr=%s len=%d flags=%s", __get_str(addr), __entry->len, show_svc_xprt_flags(__entry->flags)) ); @@ -1313,27 +1633,221 @@ DECLARE_EVENT_CLASS(svc_deferred_event, TP_ARGS(dr), TP_STRUCT__entry( + __field(const void *, dr) __field(u32, xid) __string(addr, dr->xprt->xpt_remotebuf) ), TP_fast_assign( + __entry->dr = dr; __entry->xid = be32_to_cpu(*(__be32 *)(dr->args + (dr->xprt_hlen>>2))); __assign_str(addr, dr->xprt->xpt_remotebuf); ), - TP_printk("addr=%s 
xid=0x%08x", __get_str(addr), __entry->xid) + TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr, + __entry->xid) ); + #define DEFINE_SVC_DEFERRED_EVENT(name) \ - DEFINE_EVENT(svc_deferred_event, svc_##name##_deferred, \ + DEFINE_EVENT(svc_deferred_event, svc_defer_##name, \ TP_PROTO( \ const struct svc_deferred_req *dr \ ), \ TP_ARGS(dr)) DEFINE_SVC_DEFERRED_EVENT(drop); -DEFINE_SVC_DEFERRED_EVENT(revisit); +DEFINE_SVC_DEFERRED_EVENT(queue); +DEFINE_SVC_DEFERRED_EVENT(recv); + +TRACE_EVENT(svcsock_new_socket, + TP_PROTO( + const struct socket *socket + ), + + TP_ARGS(socket), + + TP_STRUCT__entry( + __field(unsigned long, type) + __field(unsigned long, family) + __field(bool, listener) + ), + + TP_fast_assign( + __entry->type = socket->type; + __entry->family = socket->sk->sk_family; + __entry->listener = (socket->sk->sk_state == TCP_LISTEN); + ), + + TP_printk("type=%s family=%s%s", + show_socket_type(__entry->type), + rpc_show_address_family(__entry->family), + __entry->listener ? " (listener)" : "" + ) +); + +TRACE_EVENT(svcsock_marker, + TP_PROTO( + const struct svc_xprt *xprt, + __be32 marker + ), + + TP_ARGS(xprt, marker), + + TP_STRUCT__entry( + __field(unsigned int, length) + __field(bool, last) + __string(addr, xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->length = be32_to_cpu(marker) & RPC_FRAGMENT_SIZE_MASK; + __entry->last = be32_to_cpu(marker) & RPC_LAST_STREAM_FRAGMENT; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s length=%u%s", __get_str(addr), + __entry->length, __entry->last ? " (last)" : "") +); + +DECLARE_EVENT_CLASS(svcsock_class, + TP_PROTO( + const struct svc_xprt *xprt, + ssize_t result + ), + + TP_ARGS(xprt, result), + + TP_STRUCT__entry( + __field(ssize_t, result) + __field(unsigned long, flags) + __string(addr, xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->result = result; + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s result=%zd flags=%s", __get_str(addr), + __entry->result, show_svc_xprt_flags(__entry->flags) + ) +); + +#define DEFINE_SVCSOCK_EVENT(name) \ + DEFINE_EVENT(svcsock_class, svcsock_##name, \ + TP_PROTO( \ + const struct svc_xprt *xprt, \ + ssize_t result \ + ), \ + TP_ARGS(xprt, result)) + +DEFINE_SVCSOCK_EVENT(udp_send); +DEFINE_SVCSOCK_EVENT(udp_recv); +DEFINE_SVCSOCK_EVENT(udp_recv_err); +DEFINE_SVCSOCK_EVENT(tcp_send); +DEFINE_SVCSOCK_EVENT(tcp_recv); +DEFINE_SVCSOCK_EVENT(tcp_recv_eagain); +DEFINE_SVCSOCK_EVENT(tcp_recv_err); +DEFINE_SVCSOCK_EVENT(data_ready); +DEFINE_SVCSOCK_EVENT(write_space); + +TRACE_EVENT(svcsock_tcp_recv_short, + TP_PROTO( + const struct svc_xprt *xprt, + u32 expected, + u32 received + ), + + TP_ARGS(xprt, expected, received), + + TP_STRUCT__entry( + __field(u32, expected) + __field(u32, received) + __field(unsigned long, flags) + __string(addr, xprt->xpt_remotebuf) + ), + + TP_fast_assign( + __entry->expected = expected; + __entry->received = received; + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s flags=%s expected=%u received=%u", + __get_str(addr), show_svc_xprt_flags(__entry->flags), + __entry->expected, __entry->received + ) +); + +TRACE_EVENT(svcsock_tcp_state, + TP_PROTO( + const struct svc_xprt *xprt, + const struct socket *socket + ), + + TP_ARGS(xprt, socket), + + TP_STRUCT__entry( + __field(unsigned long, socket_state) + __field(unsigned long, sock_state) + __field(unsigned long, flags) + __string(addr, xprt->xpt_remotebuf) + ), + 
+ TP_fast_assign( + __entry->socket_state = socket->state; + __entry->sock_state = socket->sk->sk_state; + __entry->flags = xprt->xpt_flags; + __assign_str(addr, xprt->xpt_remotebuf); + ), + + TP_printk("addr=%s state=%s sk_state=%s flags=%s", __get_str(addr), + rpc_show_socket_state(__entry->socket_state), + rpc_show_sock_state(__entry->sock_state), + show_svc_xprt_flags(__entry->flags) + ) +); + +DECLARE_EVENT_CLASS(svcsock_accept_class, + TP_PROTO( + const struct svc_xprt *xprt, + const char *service, + long status + ), + + TP_ARGS(xprt, service, status), + + TP_STRUCT__entry( + __field(long, status) + __string(service, service) + __array(unsigned char, addr, sizeof(struct sockaddr_in6)) + ), + + TP_fast_assign( + __entry->status = status; + __assign_str(service, service); + memcpy(__entry->addr, &xprt->xpt_local, sizeof(__entry->addr)); + ), + + TP_printk("listener=%pISpc service=%s status=%ld", + __entry->addr, __get_str(service), __entry->status + ) +); + +#define DEFINE_ACCEPT_EVENT(name) \ + DEFINE_EVENT(svcsock_accept_class, svcsock_##name##_err, \ + TP_PROTO( \ + const struct svc_xprt *xprt, \ + const char *service, \ + long status \ + ), \ + TP_ARGS(xprt, service, status)) + +DEFINE_ACCEPT_EVENT(accept); +DEFINE_ACCEPT_EVENT(getpeername); DECLARE_EVENT_CLASS(cache_event, TP_PROTO( @@ -1368,6 +1882,86 @@ DEFINE_CACHE_EVENT(cache_entry_update); DEFINE_CACHE_EVENT(cache_entry_make_negative); DEFINE_CACHE_EVENT(cache_entry_no_listener); +DECLARE_EVENT_CLASS(register_class, + TP_PROTO( + const char *program, + const u32 version, + const int family, + const unsigned short protocol, + const unsigned short port, + int error + ), + + TP_ARGS(program, version, family, protocol, port, error), + + TP_STRUCT__entry( + __field(u32, version) + __field(unsigned long, family) + __field(unsigned short, protocol) + __field(unsigned short, port) + __field(int, error) + __string(program, program) + ), + + TP_fast_assign( + __entry->version = version; + __entry->family = family; + __entry->protocol = protocol; + __entry->port = port; + __entry->error = error; + __assign_str(program, program); + ), + + TP_printk("program=%sv%u proto=%s port=%u family=%s error=%d", + __get_str(program), __entry->version, + __entry->protocol == IPPROTO_UDP ? 
"udp" : "tcp", + __entry->port, rpc_show_address_family(__entry->family), + __entry->error + ) +); + +#define DEFINE_REGISTER_EVENT(name) \ + DEFINE_EVENT(register_class, svc_##name, \ + TP_PROTO( \ + const char *program, \ + const u32 version, \ + const int family, \ + const unsigned short protocol, \ + const unsigned short port, \ + int error \ + ), \ + TP_ARGS(program, version, family, protocol, \ + port, error)) + +DEFINE_REGISTER_EVENT(register); +DEFINE_REGISTER_EVENT(noregister); + +TRACE_EVENT(svc_unregister, + TP_PROTO( + const char *program, + const u32 version, + int error + ), + + TP_ARGS(program, version, error), + + TP_STRUCT__entry( + __field(u32, version) + __field(int, error) + __string(program, program) + ), + + TP_fast_assign( + __entry->version = version; + __entry->error = error; + __assign_str(program, program); + ), + + TP_printk("program=%sv%u error=%d", + __get_str(program), __entry->version, __entry->error + ) +); + #endif /* _TRACE_SUNRPC_H */ #include <trace/define_trace.h> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index 74bb594ccb25..2070df64958e 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -265,7 +265,7 @@ TRACE_EVENT(mm_shrink_slab_end, ); TRACE_EVENT(mm_vmscan_lru_isolate, - TP_PROTO(int classzone_idx, + TP_PROTO(int highest_zoneidx, int order, unsigned long nr_requested, unsigned long nr_scanned, @@ -274,10 +274,10 @@ TRACE_EVENT(mm_vmscan_lru_isolate, isolate_mode_t isolate_mode, int lru), - TP_ARGS(classzone_idx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), + TP_ARGS(highest_zoneidx, order, nr_requested, nr_scanned, nr_skipped, nr_taken, isolate_mode, lru), TP_STRUCT__entry( - __field(int, classzone_idx) + __field(int, highest_zoneidx) __field(int, order) __field(unsigned long, nr_requested) __field(unsigned long, nr_scanned) @@ -288,7 +288,7 @@ TRACE_EVENT(mm_vmscan_lru_isolate, ), TP_fast_assign( - __entry->classzone_idx = classzone_idx; + __entry->highest_zoneidx = highest_zoneidx; __entry->order = order; __entry->nr_requested = nr_requested; __entry->nr_scanned = nr_scanned; @@ -298,9 +298,13 @@ TRACE_EVENT(mm_vmscan_lru_isolate, __entry->lru = lru; ), + /* + * classzone is previous name of the highest_zoneidx. + * Reason not to change it is the ABI requirement of the tracepoint. 
+ */ TP_printk("isolate_mode=%d classzone=%d order=%d nr_requested=%lu nr_scanned=%lu nr_skipped=%lu nr_taken=%lu lru=%s", __entry->isolate_mode, - __entry->classzone_idx, + __entry->highest_zoneidx, __entry->order, __entry->nr_requested, __entry->nr_scanned, diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h index 37342a13c9cb..9c66e59d859c 100644 --- a/include/trace/events/wbt.h +++ b/include/trace/events/wbt.h @@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat, ), TP_fast_assign( - strlcpy(__entry->name, dev_name(bdi->dev), + strlcpy(__entry->name, bdi_dev_name(bdi), ARRAY_SIZE(__entry->name)); __entry->rmean = stat[0].mean; __entry->rmin = stat[0].min; @@ -46,7 +46,7 @@ TRACE_EVENT(wbt_stat, ), TP_printk("%s: rmean=%llu, rmin=%llu, rmax=%llu, rsamples=%llu, " - "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu\n", + "wmean=%llu, wmin=%llu, wmax=%llu, wsamples=%llu", __entry->name, __entry->rmean, __entry->rmin, __entry->rmax, __entry->rnr_samples, __entry->wmean, __entry->wmin, __entry->wmax, __entry->wnr_samples) @@ -68,12 +68,12 @@ TRACE_EVENT(wbt_lat, ), TP_fast_assign( - strlcpy(__entry->name, dev_name(bdi->dev), + strlcpy(__entry->name, bdi_dev_name(bdi), ARRAY_SIZE(__entry->name)); __entry->lat = div_u64(lat, 1000); ), - TP_printk("%s: latency %lluus\n", __entry->name, + TP_printk("%s: latency %lluus", __entry->name, (unsigned long long) __entry->lat) ); @@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step, ), TP_fast_assign( - strlcpy(__entry->name, dev_name(bdi->dev), + strlcpy(__entry->name, bdi_dev_name(bdi), ARRAY_SIZE(__entry->name)); __entry->msg = msg; __entry->step = step; @@ -115,7 +115,7 @@ TRACE_EVENT(wbt_step, __entry->max = max; ), - TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u\n", + TP_printk("%s: %s: step=%d, window=%luus, background=%u, normal=%u, max=%u", __entry->name, __entry->msg, __entry->step, __entry->window, __entry->bg, __entry->normal, __entry->max) ); @@ -141,14 +141,14 @@ TRACE_EVENT(wbt_timer, ), TP_fast_assign( - strlcpy(__entry->name, dev_name(bdi->dev), + strlcpy(__entry->name, bdi_dev_name(bdi), ARRAY_SIZE(__entry->name)); __entry->status = status; __entry->step = step; __entry->inflight = inflight; ), - TP_printk("%s: status=%u, step=%d, inflight=%u\n", __entry->name, + TP_printk("%s: status=%u, step=%d, inflight=%u", __entry->name, __entry->status, __entry->step, __entry->inflight) ); diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index d94def25e4dc..10f5d1fa7347 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -36,7 +36,6 @@ EM( WB_REASON_SYNC, "sync") \ EM( WB_REASON_PERIODIC, "periodic") \ EM( WB_REASON_LAPTOP_TIMER, "laptop_timer") \ - EM( WB_REASON_FREE_MORE_MEM, "free_more_memory") \ EM( WB_REASON_FS_FREE_SPACE, "fs_free_space") \ EMe(WB_REASON_FORKER_THREAD, "forker_thread") @@ -542,7 +541,6 @@ TRACE_EVENT(global_dirty_state, TP_STRUCT__entry( __field(unsigned long, nr_dirty) __field(unsigned long, nr_writeback) - __field(unsigned long, nr_unstable) __field(unsigned long, background_thresh) __field(unsigned long, dirty_thresh) __field(unsigned long, dirty_limit) @@ -553,7 +551,6 @@ TRACE_EVENT(global_dirty_state, TP_fast_assign( __entry->nr_dirty = global_node_page_state(NR_FILE_DIRTY); __entry->nr_writeback = global_node_page_state(NR_WRITEBACK); - __entry->nr_unstable = global_node_page_state(NR_UNSTABLE_NFS); __entry->nr_dirtied = global_node_page_state(NR_DIRTIED); __entry->nr_written = global_node_page_state(NR_WRITTEN); 
__entry->background_thresh = background_thresh; @@ -561,12 +558,11 @@ TRACE_EVENT(global_dirty_state, __entry->dirty_limit = global_wb_domain.dirty_limit; ), - TP_printk("dirty=%lu writeback=%lu unstable=%lu " + TP_printk("dirty=%lu writeback=%lu " "bg_thresh=%lu thresh=%lu limit=%lu " "dirtied=%lu written=%lu", __entry->nr_dirty, __entry->nr_writeback, - __entry->nr_unstable, __entry->background_thresh, __entry->dirty_thresh, __entry->dirty_limit, diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index b95d65e8c628..b73d3e141323 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -287,7 +287,7 @@ TRACE_EVENT(xdp_devmap_xmit, FN(PAGE_SHARED) \ FN(PAGE_ORDER0) \ FN(PAGE_POOL) \ - FN(ZERO_COPY) + FN(XSK_BUFF_POOL) #define __MEM_TYPE_TP_FN(x) \ TRACE_DEFINE_ENUM(MEM_TYPE_##x); diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 3a3201e4618e..f4a01305d9a6 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -855,9 +855,11 @@ __SYSCALL(__NR_clone3, sys_clone3) __SYSCALL(__NR_openat2, sys_openat2) #define __NR_pidfd_getfd 438 __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) +#define __NR_faccessat2 439 +__SYSCALL(__NR_faccessat2, sys_faccessat2) #undef __NR_syscalls -#define __NR_syscalls 439 +#define __NR_syscalls 440 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 65f69723cbdc..4e873dcbe68f 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -133,6 +133,11 @@ extern "C" { * releasing the memory */ #define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9) +/* Flag that BO will be encrypted and that the TMZ bit should be + * set in the PTEs when mapping this buffer via GPUVM or + * accessing it with various hw blocks + */ +#define AMDGPU_GEM_CREATE_ENCRYPTED (1 << 10) struct drm_amdgpu_gem_create_in { /** the requested memory size */ @@ -346,6 +351,10 @@ struct drm_amdgpu_gem_userptr { #define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1 +#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44 +#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1 +#define AMDGPU_TILING_SCANOUT_SHIFT 63 +#define AMDGPU_TILING_SCANOUT_MASK 0x1 /* Set/Get helpers for tiling flags. */ #define AMDGPU_TILING_SET(field, value) \ @@ -555,7 +564,7 @@ struct drm_amdgpu_cs_in { /** Handle of resource list associated with CS */ __u32 bo_list_handle; __u32 num_chunks; - __u32 _pad; + __u32 flags; /** this points to __u64 * which point to cs chunks */ __u64 chunks; }; @@ -589,6 +598,14 @@ union drm_amdgpu_cs { */ #define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4) +/* Flag the IB as secure (TMZ) + */ +#define AMDGPU_IB_FLAGS_SECURE (1 << 5) + +/* Tell KMD to flush and invalidate caches + */ +#define AMDGPU_IB_FLAG_EMIT_MEM_SYNC (1 << 6) + struct drm_amdgpu_cs_chunk_ib { __u32 _pad; /** AMDGPU_IB_FLAG_* */ diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 299f9ac4840b..993c1b342315 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -549,7 +549,113 @@ extern "C" { #define DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED fourcc_mod_code(NVIDIA, 1) /* - * 16Bx2 Block Linear layout, used by desktop GPUs, and Tegra K1 and later + * Generalized Block Linear layout, used by desktop GPUs starting with NV50/G80, + * and Tegra GPUs starting with Tegra K1. 
+ * + * Pixels are arranged in Groups of Bytes (GOBs). GOB size and layout varies + * based on the architecture generation. GOBs themselves are then arranged in + * 3D blocks, with the block dimensions (in terms of GOBs) always being a power + * of two, and hence expressible as their log2 equivalent (E.g., "2" represents + * a block depth or height of "4"). + * + * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format + * in full detail. + * + * Macro + * Bits Param Description + * ---- ----- ----------------------------------------------------------------- + * + * 3:0 h log2(height) of each block, in GOBs. Placed here for + * compatibility with the existing + * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers. + * + * 4:4 - Must be 1, to indicate block-linear layout. Necessary for + * compatibility with the existing + * DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK()-based modifiers. + * + * 8:5 - Reserved (To support 3D-surfaces with variable log2(depth) block + * size). Must be zero. + * + * Note there is no log2(width) parameter. Some portions of the + * hardware support a block width of two gobs, but it is impractical + * to use due to lack of support elsewhere, and has no known + * benefits. + * + * 11:9 - Reserved (To support 2D-array textures with variable array stride + * in blocks, specified via log2(tile width in blocks)). Must be + * zero. + * + * 19:12 k Page Kind. This value directly maps to a field in the page + * tables of all GPUs >= NV50. It affects the exact layout of bits + * in memory and can be derived from the tuple + * + * (format, GPU model, compression type, samples per pixel) + * + * Where compression type is defined below. If GPU model were + * implied by the format modifier, format, or memory buffer, page + * kind would not need to be included in the modifier itself, but + * since the modifier should define the layout of the associated + * memory buffer independent from any device or other context, it + * must be included here. + * + * 21:20 g GOB Height and Page Kind Generation. The height of a GOB changed + * starting with Fermi GPUs. Additionally, the mapping between page + * kind and bit layout has changed at various points. + * + * 0 = Gob Height 8, Fermi - Volta, Tegra K1+ Page Kind mapping + * 1 = Gob Height 4, G80 - GT2XX Page Kind mapping + * 2 = Gob Height 8, Turing+ Page Kind mapping + * 3 = Reserved for future use. + * + * 22:22 s Sector layout. On Tegra GPUs prior to Xavier, there is a further + * bit remapping step that occurs at an even lower level than the + * page kind and block linear swizzles. This causes the layout of + * surfaces mapped in those SOC's GPUs to be incompatible with the + * equivalent mapping on other GPUs in the same system. + * + * 0 = Tegra K1 - Tegra Parker/TX2 Layout. + * 1 = Desktop GPU and Tegra Xavier+ Layout + * + * 25:23 c Lossless Framebuffer Compression type. + * + * 0 = none + * 1 = ROP/3D, layout 1, exact compression format implied by Page + * Kind field + * 2 = ROP/3D, layout 2, exact compression format implied by Page + * Kind field + * 3 = CDE horizontal + * 4 = CDE vertical + * 5 = Reserved for future use + * 6 = Reserved for future use + * 7 = Reserved for future use + * + * 55:25 - Reserved for future use. Must be zero. 
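+ *
+ * As a worked example (the parameter choices are illustrative only),
+ * a block-linear layout with 16-GOB-high blocks (log2 height 4) on a
+ * Turing desktop GPU (g = 2, s = 1), using the generic uncompressed
+ * page kind 0xfe with no compression (c = 0), is expressed with the
+ * macro defined below as:
+ *
+ *   DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0xfe, 4)
+ *
+ * which packs to the 64-bit modifier 0x03000000006fe014 (NVIDIA
+ * vendor code 0x03 in the top byte).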
+ */ +#define DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) \ + fourcc_mod_code(NVIDIA, (0x10 | \ + ((h) & 0xf) | \ + (((k) & 0xff) << 12) | \ + (((g) & 0x3) << 20) | \ + (((s) & 0x1) << 22) | \ + (((c) & 0x7) << 23))) + +/* To grandfather in prior block linear format modifiers to the above layout, + * the page kind "0", which corresponds to "pitch/linear" and hence is unusable + * with block-linear layouts, is remapped within drivers to the value 0xfe, + * which corresponds to the "generic" kind used for simple single-sample + * uncompressed color formats on Fermi - Volta GPUs. + */ +static inline __u64 +drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) +{ + if (!(modifier & 0x10) || (modifier & (0xff << 12))) + return modifier; + else + return modifier | (0xfe << 12); +} + +/* + * 16Bx2 Block Linear layout, used by Tegra K1 and later * * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked * vertically by a power of 2 (1 to 32 GOBs) to form a block. @@ -570,20 +676,20 @@ extern "C" { * in full detail. */ #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(v) \ - fourcc_mod_code(NVIDIA, 0x10 | ((v) & 0xf)) + DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 0, 0, 0, (v)) #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_ONE_GOB \ - fourcc_mod_code(NVIDIA, 0x10) + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_TWO_GOB \ - fourcc_mod_code(NVIDIA, 0x11) + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_FOUR_GOB \ - fourcc_mod_code(NVIDIA, 0x12) + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_EIGHT_GOB \ - fourcc_mod_code(NVIDIA, 0x13) + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_SIXTEEN_GOB \ - fourcc_mod_code(NVIDIA, 0x14) + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) #define DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK_THIRTYTWO_GOB \ - fourcc_mod_code(NVIDIA, 0x15) + DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) /* * Some Broadcom modifiers take parameters, for example the number of diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 2813e579b480..14b67cd6b54b 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1969,6 +1969,30 @@ enum drm_i915_perf_property_id { */ DRM_I915_PERF_PROP_HOLD_PREEMPTION, + /** + * Specifying this pins all contexts to the specified SSEU power + * configuration for the duration of the recording. + * + * This parameter's value is a pointer to a struct + * drm_i915_gem_context_param_sseu. + * + * This property is available in perf revision 4. + */ + DRM_I915_PERF_PROP_GLOBAL_SSEU, + + /** + * This optional parameter specifies the timer interval in nanoseconds + * at which the i915 driver will check the OA buffer for available data. + * Minimum allowed value is 100 microseconds. A default value is used by + * the driver if this parameter is not specified. Note that larger timer + * values will reduce cpu consumption during OA perf captures. However, + * excessively large values would potentially result in OA buffer + * overwrites as captures reach end of the OA buffer. + * + * This property is available in perf revision 5. 
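+	 *
+	 * As an illustrative sketch: perf-open properties are passed
+	 * as flattened (key, value) u64 pairs, so requesting a 5 ms
+	 * poll period alongside OA sampling might look like:
+	 *
+	 *	__u64 props[] = {
+	 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
+	 *		DRM_I915_PERF_PROP_POLL_OA_PERIOD, 5000000,
+	 *	};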
+ */ + DRM_I915_PERF_PROP_POLL_OA_PERIOD, + DRM_I915_PERF_PROP_MAX /* non-ABI */ }; diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 0b85ed6a3710..19806eb3a8e8 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -217,13 +217,28 @@ struct drm_msm_gem_submit_bo { #define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */ #define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */ #define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */ +#define MSM_SUBMIT_SYNCOBJ_IN 0x08000000 /* enable input syncobj */ +#define MSM_SUBMIT_SYNCOBJ_OUT 0x04000000 /* enable output syncobj */ #define MSM_SUBMIT_FLAGS ( \ MSM_SUBMIT_NO_IMPLICIT | \ MSM_SUBMIT_FENCE_FD_IN | \ MSM_SUBMIT_FENCE_FD_OUT | \ MSM_SUBMIT_SUDO | \ + MSM_SUBMIT_SYNCOBJ_IN | \ + MSM_SUBMIT_SYNCOBJ_OUT | \ 0) +#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */ +#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \ + MSM_SUBMIT_SYNCOBJ_RESET | \ + 0) + +struct drm_msm_gem_submit_syncobj { + __u32 handle; /* in, syncobj handle. */ + __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */ + __u64 point; /* in, timepoint for timeline syncobjs. */ +}; + /* Each cmdstream submit consists of a table of buffers involved, and * one or more cmdstream buffers. This allows for conditional execution * (context-restore), and IB buffers needed for per tile/bin draw cmds. @@ -236,7 +251,14 @@ struct drm_msm_gem_submit { __u64 bos; /* in, ptr to array of submit_bo's */ __u64 cmds; /* in, ptr to array of submit_cmd's */ __s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */ - __u32 queueid; /* in, submitqueue id */ + __u32 queueid; /* in, submitqueue id */ + __u64 in_syncobjs; /* in, ptr to to array of drm_msm_gem_submit_syncobj */ + __u64 out_syncobjs; /* in, ptr to to array of drm_msm_gem_submit_syncobj */ + __u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */ + __u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */ + __u32 syncobj_stride; /* in, stride of syncobj arrays. */ + __u32 pad; /*in, reserved for future use, always 0. 
*/ + }; /* The normal way to synchronize with the GPU is just to CPU_PREP on diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index a534d71e689a..9b6a973f4cc3 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -117,6 +117,7 @@ #define AUDIT_TIME_INJOFFSET 1332 /* Timekeeping offset injected */ #define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */ #define AUDIT_BPF 1334 /* BPF subsystem */ +#define AUDIT_EVENT_LISTENER 1335 /* Task joined multicast read socket */ #define AUDIT_AVC 1400 /* SE Linux avc denial or grant */ #define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 2e29a671d67e..19684813faae 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -73,7 +73,7 @@ struct bpf_insn { /* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ struct bpf_lpm_trie_key { __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ - __u8 data[]; /* Arbitrary size */ + __u8 data[0]; /* Arbitrary size */ }; struct bpf_cgroup_storage_key { @@ -113,6 +113,10 @@ enum bpf_cmd { BPF_MAP_DELETE_BATCH, BPF_LINK_CREATE, BPF_LINK_UPDATE, + BPF_LINK_GET_FD_BY_ID, + BPF_LINK_GET_NEXT_ID, + BPF_ENABLE_STATS, + BPF_ITER_CREATE, }; enum bpf_map_type { @@ -143,6 +147,7 @@ enum bpf_map_type { BPF_MAP_TYPE_SK_STORAGE, BPF_MAP_TYPE_DEVMAP_HASH, BPF_MAP_TYPE_STRUCT_OPS, + BPF_MAP_TYPE_RINGBUF, }; /* Note that tracing related programs such as @@ -215,11 +220,28 @@ enum bpf_attach_type { BPF_TRACE_FEXIT, BPF_MODIFY_RETURN, BPF_LSM_MAC, + BPF_TRACE_ITER, + BPF_CGROUP_INET4_GETPEERNAME, + BPF_CGROUP_INET6_GETPEERNAME, + BPF_CGROUP_INET4_GETSOCKNAME, + BPF_CGROUP_INET6_GETSOCKNAME, + BPF_XDP_DEVMAP, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE +enum bpf_link_type { + BPF_LINK_TYPE_UNSPEC = 0, + BPF_LINK_TYPE_RAW_TRACEPOINT = 1, + BPF_LINK_TYPE_TRACING = 2, + BPF_LINK_TYPE_CGROUP = 3, + BPF_LINK_TYPE_ITER = 4, + BPF_LINK_TYPE_NETNS = 5, + + MAX_BPF_LINK_TYPE, +}; + /* cgroup-bpf attach flags used in BPF_PROG_ATTACH command * * NONE(default): No further bpf programs allowed in the subtree. @@ -379,6 +401,12 @@ enum { */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) +/* type for BPF_ENABLE_STATS */ +enum bpf_stats_type { + /* enabled run_time_ns and run_cnt */ + BPF_STATS_RUN_TIME = 0, +}; + enum bpf_stack_build_id_status { /* user space need an empty entry to identify end of a trace */ BPF_STACK_BUILD_ID_EMPTY = 0, @@ -523,6 +551,7 @@ union bpf_attr { __u32 prog_id; __u32 map_id; __u32 btf_id; + __u32 link_id; }; __u32 next_id; __u32 open_flags; @@ -589,6 +618,15 @@ union bpf_attr { __u32 old_prog_fd; } link_update; + struct { /* struct used by BPF_ENABLE_STATS command */ + __u32 type; + } enable_stats; + + struct { /* struct used by BPF_ITER_CREATE command */ + __u32 link_fd; + __u32 flags; + } iter_create; + } __attribute__((aligned(8))); /* The description below is an attempt at providing documentation to eBPF @@ -644,14 +682,16 @@ union bpf_attr { * For tracing programs, safely attempt to read *size* bytes from * kernel space address *unsafe_ptr* and store the data in *dst*. * - * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel() - * instead. + * Generally, use **bpf_probe_read_user**\ () or + * **bpf_probe_read_kernel**\ () instead. * Return * 0 on success, or a negative error in case of failure. * * u64 bpf_ktime_get_ns(void) * Description * Return the time elapsed since system boot, in nanoseconds. 
+ * Does not include time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_MONOTONIC**) * Return * Current *ktime*. * @@ -1510,11 +1550,11 @@ union bpf_attr { * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address - * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for + * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for * more details. * - * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str() - * instead. + * Generally, use **bpf_probe_read_user_str**\ () or + * **bpf_probe_read_kernel_str**\ () instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative @@ -1542,7 +1582,7 @@ union bpf_attr { * * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) * Description - * Equivalent to bpf_get_socket_cookie() helper that accepts + * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts * *skb*, but gets socket from **struct bpf_sock_ops** context. * Return * A 8-byte long non-decreasing number. @@ -1562,7 +1602,7 @@ union bpf_attr { * Return * 0 * - * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen) + * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **setsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1570,6 +1610,12 @@ union bpf_attr { * must be specified, see **setsockopt(2)** for more information. * The option value of length *optlen* is pointed by *optval*. * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * * This helper actually implements a subset of **setsockopt()**. * It supports the following *level*\ s: * @@ -1589,6 +1635,13 @@ union bpf_attr { * Grow or shrink the room for data in the packet associated to * *skb* by *len_diff*, and according to the selected *mode*. * + * By default, the helper will reset any offloaded checksum + * indicator of the skb to CHECKSUM_NONE. This can be avoided + * by the following flag: + * + * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded + * checksum data of the skb to CHECKSUM_NONE. + * * There are two supported modes at this time: * * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer @@ -1634,15 +1687,15 @@ union bpf_attr { * * The lower two bits of *flags* are used as the return code if * the map lookup fails. This is so that the return value can be - * one of the XDP program return codes up to XDP_TX, as chosen by - * the caller. Any higher bits in the *flags* argument must be + * one of the XDP program return codes up to **XDP_TX**, as chosen + * by the caller. Any higher bits in the *flags* argument must be * unset. * - * See also bpf_redirect(), which only supports redirecting to an - * ifindex, but doesn't require a map to do so. + * See also **bpf_redirect**\ (), which only supports redirecting + * to an ifindex, but doesn't require a map to do so. * Return * **XDP_REDIRECT** on success, or the value of the two lower bits - * of the **flags* argument on error. + * of the *flags* argument on error. 
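 *
 *		As an illustrative sketch (*tx_port* is assumed to be a
 *		**BPF_MAP_TYPE_DEVMAP** defined elsewhere in the program):
 *
 *		::
 *
 *			SEC("xdp")
 *			int xdp_redirector(struct xdp_md *ctx)
 *			{
 *				// Low two bits of flags (XDP_PASS here)
 *				// are the fallback return code if the
 *				// map lookup fails
 *				return bpf_redirect_map(&tx_port,
 *							ctx->rx_queue_index,
 *							XDP_PASS);
 *			}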
* * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags) * Description @@ -1747,7 +1800,7 @@ union bpf_attr { * the time running for event since last normalization. The * enabled and running times are accumulated since the perf event * open. To achieve scaling factor between two invocations of an - * eBPF program, users can can use CPU id as the key (which is + * eBPF program, users can use CPU id as the key (which is * typical for perf array usage model) to remember the previous * value and do the calculation inside the eBPF program. * Return @@ -1764,7 +1817,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen) + * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen) * Description * Emulate a call to **getsockopt()** on the socket associated to * *bpf_socket*, which must be a full socket. The *level* at @@ -1773,6 +1826,12 @@ union bpf_attr { * The retrieved value is stored in the structure pointed by * *opval* and of length *optlen*. * + * *bpf_socket* should be one of the following: + * + * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**. + * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT** + * and **BPF_CGROUP_INET6_CONNECT**. + * * This helper actually implements a subset of **getsockopt()**. * It supports the following *level*\ s: * @@ -1790,7 +1849,7 @@ union bpf_attr { * The first argument is the context *regs* on which the kprobe * works. * - * This helper works by setting setting the PC (program counter) + * This helper works by setting the PC (program counter) * to an override function which is run in place of the original * probed function. This means the probed function is not run at * all. The replacement function just returns with the required @@ -1959,18 +2018,19 @@ union bpf_attr { * * This helper works for IPv4 and IPv6, TCP and UDP sockets. The * domain (*addr*\ **->sa_family**) must be **AF_INET** (or - * **AF_INET6**). Looking for a free port to bind to can be - * expensive, therefore binding to port is not permitted by the - * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively) - * must be set to zero. + * **AF_INET6**). It's advised to pass zero port (**sin_port** + * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like + * behavior and lets the kernel efficiently pick up an unused + * port as long as 4-tuple is unique. Passing non-zero port might + * lead to degraded performance. * Return * 0 on success, or a negative error in case of failure. * * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) * Description * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is - * only possible to shrink the packet as of this writing, - * therefore *delta* must be a negative integer. + * possible to both shrink and grow the packet tail. + * Shrink done via *delta* being a negative integer. * * A call to this helper is susceptible to change the underlying * packet buffer. Therefore, at load time, all checks on pointers @@ -2256,7 +2316,7 @@ union bpf_attr { * **bpf_rc_keydown**\ () again with the same values, or calling * **bpf_rc_repeat**\ (). * - * Some protocols include a toggle bit, in case the button was + * Some protocols include a toggle bit, in case the button was * released and pressed again between consecutive scancodes. 
* * The *ctx* should point to the lirc sample as passed into @@ -2602,7 +2662,6 @@ union bpf_attr { * * *th* points to the start of the TCP header, while *th_len* * contains **sizeof**\ (**struct tcphdr**). - * * Return * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative * error otherwise. @@ -2785,7 +2844,6 @@ union bpf_attr { * * *th* points to the start of the TCP header, while *th_len* * contains the length of the TCP header. - * * Return * On success, lower 32 bits hold the generated SYN cookie in * followed by 16 bits which hold the MSS value for that cookie, @@ -2868,7 +2926,7 @@ union bpf_attr { * // size, after checking its boundaries. * } * - * In comparison, using **bpf_probe_read_user()** helper here + * In comparison, using **bpf_probe_read_user**\ () helper here * instead to read the string would require to estimate the length * at compile time, and would often result in copying more memory * than necessary. @@ -2886,14 +2944,14 @@ union bpf_attr { * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) * Description * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* - * to *dst*. Same semantics as with bpf_probe_read_user_str() apply. + * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply. * Return - * On success, the strictly positive length of the string, including + * On success, the strictly positive length of the string, including * the trailing NUL character. On error, a negative value. * * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt) * Description - * Send out a tcp-ack. *tp* is the in-kernel struct tcp_sock. + * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**. * *rcv_nxt* is the ack_seq to be sent out. * Return * 0 on success, or a negative error in case of failure. @@ -2921,19 +2979,19 @@ union bpf_attr { * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags) * Description * For an eBPF program attached to a perf event, retrieve the - * branch records (struct perf_branch_entry) associated to *ctx* - * and store it in the buffer pointed by *buf* up to size + * branch records (**struct perf_branch_entry**) associated to *ctx* + * and store it in the buffer pointed by *buf* up to size * *size* bytes. * Return * On success, number of bytes written to *buf*. On error, a * negative value. * * The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to - * instead return the number of bytes required to store all the + * instead return the number of bytes required to store all the * branch entries. If this flag is set, *buf* may be NULL. * * **-EINVAL** if arguments invalid or **size** not a multiple - * of sizeof(struct perf_branch_entry). + * of **sizeof**\ (**struct perf_branch_entry**\ ). * * **-ENOENT** if architecture does not support branch records. * @@ -2941,8 +2999,8 @@ union bpf_attr { * Description * Returns 0 on success, values for *pid* and *tgid* as seen from the current * *namespace* will be returned in *nsdata*. - * - * On failure, the returned value is one of the following: + * Return + * 0 on success, or one of the following in case of failure: * * **-EINVAL** if dev and inum supplied don't match dev_t and inode number * with nsfs of current task, or if dev conversion to dev_t lost high bits. @@ -2981,8 +3039,8 @@ union bpf_attr { * a global identifier that can be assumed unique. If *ctx* is * NULL, then the helper returns the cookie for the initial * network namespace. 
The cookie itself is very similar to that - * of bpf_get_socket_cookie() helper, but for network namespaces - * instead of sockets. + * of **bpf_get_socket_cookie**\ () helper, but for network + * namespaces instead of sockets. * Return * A 8-byte long opaque number. * @@ -3017,14 +3075,183 @@ union bpf_attr { * * The *flags* argument must be zero. * Return - * 0 on success, or a negative errno in case of failure. - * - * * **-EINVAL** Unsupported flags specified. - * * **-ENOENT** Socket is unavailable for assignment. - * * **-ENETUNREACH** Socket is unreachable (wrong netns). - * * **-EOPNOTSUPP** Unsupported operation, for example a - * call from outside of TC ingress. - * * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport). + * 0 on success, or a negative error in case of failure: + * + * **-EINVAL** if specified *flags* are not supported. + * + * **-ENOENT** if the socket is unavailable for assignment. + * + * **-ENETUNREACH** if the socket is unreachable (wrong netns). + * + * **-EOPNOTSUPP** if the operation is not supported, for example + * a call from outside of TC ingress. + * + * **-ESOCKTNOSUPPORT** if the socket type is not supported + * (reuseport). + * + * u64 bpf_ktime_get_boot_ns(void) + * Description + * Return the time elapsed since system boot, in nanoseconds. + * Does include the time the system was suspended. + * See: **clock_gettime**\ (**CLOCK_BOOTTIME**) + * Return + * Current *ktime*. + * + * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len) + * Description + * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print + * out the format string. + * The *m* represents the seq_file. The *fmt* and *fmt_size* are for + * the format string itself. The *data* and *data_len* are format string + * arguments. The *data* are a **u64** array and corresponding format string + * values are stored in the array. For strings and pointers where pointees + * are accessed, only the pointer values are stored in the *data* array. + * The *data_len* is the size of *data* in bytes. + * + * Formats **%s**, **%p{i,I}{4,6}** requires to read kernel memory. + * Reading kernel memory may fail due to either invalid address or + * valid address but requiring a major memory fault. If reading kernel memory + * fails, the string for **%s** will be an empty string, and the ip + * address for **%p{i,I}{4,6}** will be 0. Not returning error to + * bpf program is consistent with what **bpf_trace_printk**\ () does for now. + * Return + * 0 on success, or a negative error in case of failure: + * + * **-EBUSY** if per-CPU memory copy buffer is busy, can try again + * by returning 1 from bpf program. + * + * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported. + * + * **-E2BIG** if *fmt* contains too many format specifiers. + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + * + * int bpf_seq_write(struct seq_file *m, const void *data, u32 len) + * Description + * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data. + * The *m* represents the seq_file. The *data* and *len* represent the + * data to write in bytes. + * Return + * 0 on success, or a negative error in case of failure: + * + * **-EOVERFLOW** if an overflow happened: The same object will be tried again. + * + * u64 bpf_sk_cgroup_id(struct bpf_sock *sk) + * Description + * Return the cgroup v2 id of the socket *sk*. + * + * *sk* must be a non-**NULL** pointer to a full socket, e.g. 
one + * returned from **bpf_sk_lookup_xxx**\ (), + * **bpf_sk_fullsock**\ (), etc. The format of returned id is + * same as in **bpf_skb_cgroup_id**\ (). + * + * This helper is available only if the kernel was compiled with + * the **CONFIG_SOCK_CGROUP_DATA** configuration option. + * Return + * The id is returned or 0 in case the id could not be retrieved. + * + * u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level) + * Description + * Return id of cgroup v2 that is ancestor of cgroup associated + * with the *sk* at the *ancestor_level*. The root cgroup is at + * *ancestor_level* zero and each step down the hierarchy + * increments the level. If *ancestor_level* == level of cgroup + * associated with *sk*, then return value will be same as that + * of **bpf_sk_cgroup_id**\ (). + * + * The helper is useful to implement policies based on cgroups + * that are upper in hierarchy than immediate cgroup associated + * with *sk*. + * + * The format of returned id and helper limitations are same as in + * **bpf_sk_cgroup_id**\ (). + * Return + * The id is returned or 0 in case the id could not be retrieved. + * + * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags) + * Description + * Copy *size* bytes from *data* into a ring buffer *ringbuf*. + * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of + * new data availability is sent. + * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of + * new data availability is sent unconditionally. + * Return + * 0, on success; + * < 0, on error. + * + * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags) + * Description + * Reserve *size* bytes of payload in a ring buffer *ringbuf*. + * Return + * Valid pointer with *size* bytes of memory available; NULL, + * otherwise. + * + * void bpf_ringbuf_submit(void *data, u64 flags) + * Description + * Submit reserved ring buffer sample, pointed to by *data*. + * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of + * new data availability is sent. + * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of + * new data availability is sent unconditionally. + * Return + * Nothing. Always succeeds. + * + * void bpf_ringbuf_discard(void *data, u64 flags) + * Description + * Discard reserved ring buffer sample, pointed to by *data*. + * If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of + * new data availability is sent. + * IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of + * new data availability is sent unconditionally. + * Return + * Nothing. Always succeeds. + * + * u64 bpf_ringbuf_query(void *ringbuf, u64 flags) + * Description + * Query various characteristics of provided ring buffer. What + * exactly is queries is determined by *flags*: + * - BPF_RB_AVAIL_DATA - amount of data not yet consumed; + * - BPF_RB_RING_SIZE - the size of ring buffer; + * - BPF_RB_CONS_POS - consumer position (can wrap around); + * - BPF_RB_PROD_POS - producer(s) position (can wrap around); + * Data returned is just a momentary snapshots of actual values + * and could be inaccurate, so this facility should be used to + * power heuristics and for reporting, not to make 100% correct + * calculation. + * Return + * Requested value, or 0, if flags are not recognized. + * + * int bpf_csum_level(struct sk_buff *skb, u64 level) + * Description + * Change the skbs checksum level by one layer up or down, or + * reset it entirely to none in order to have the stack perform + * checksum validation. 
The level is applicable to the following + * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of + * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP | + * through **bpf_skb_adjust_room**\ () helper with passing in + * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call + * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since + * the UDP header is removed. Similarly, an encap of the latter + * into the former could be accompanied by a helper call to + * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the + * skb is still intended to be processed in higher layers of the + * stack instead of just egressing at tc. + * + * There are three supported level settings at this time: + * + * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs + * with CHECKSUM_UNNECESSARY. + * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs + * with CHECKSUM_UNNECESSARY. + * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and + * sets CHECKSUM_NONE to force checksum validation by the stack. + * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current + * skb->csum_level. + * Return + * 0 on success, or a negative error in case of failure. In the + * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level + * is returned or the error code -EACCES in case the skb is not + * subject to CHECKSUM_UNNECESSARY. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -3151,7 +3378,18 @@ union bpf_attr { FN(xdp_output), \ FN(get_netns_cookie), \ FN(get_current_ancestor_cgroup_id), \ - FN(sk_assign), + FN(sk_assign), \ + FN(ktime_get_boot_ns), \ + FN(seq_printf), \ + FN(seq_write), \ + FN(sk_cgroup_id), \ + FN(sk_ancestor_cgroup_id), \ + FN(ringbuf_output), \ + FN(ringbuf_reserve), \ + FN(ringbuf_submit), \ + FN(ringbuf_discard), \ + FN(ringbuf_query), \ + FN(csum_level), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -3228,6 +3466,14 @@ enum { BPF_F_CURRENT_NETNS = (-1L), }; +/* BPF_FUNC_csum_level level values. */ +enum { + BPF_CSUM_LEVEL_QUERY, + BPF_CSUM_LEVEL_INC, + BPF_CSUM_LEVEL_DEC, + BPF_CSUM_LEVEL_RESET, +}; + /* BPF_FUNC_skb_adjust_room flags. */ enum { BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0), @@ -3235,6 +3481,7 @@ enum { BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2), BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3), BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4), + BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5), }; enum { @@ -3261,6 +3508,29 @@ enum { BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0), }; +/* BPF_FUNC_bpf_ringbuf_commit, BPF_FUNC_bpf_ringbuf_discard, and + * BPF_FUNC_bpf_ringbuf_output flags. + */ +enum { + BPF_RB_NO_WAKEUP = (1ULL << 0), + BPF_RB_FORCE_WAKEUP = (1ULL << 1), +}; + +/* BPF_FUNC_bpf_ringbuf_query flags */ +enum { + BPF_RB_AVAIL_DATA = 0, + BPF_RB_RING_SIZE = 1, + BPF_RB_CONS_POS = 2, + BPF_RB_PROD_POS = 3, +}; + +/* BPF ring buffer constants */ +enum { + BPF_RINGBUF_BUSY_BIT = (1U << 31), + BPF_RINGBUF_DISCARD_BIT = (1U << 30), + BPF_RINGBUF_HDR_SZ = 8, +}; + /* Mode for BPF_FUNC_skb_adjust_room helper. 
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
 BPF_ADJ_ROOM_NET,
@@ -3393,6 +3663,7 @@ struct bpf_sock {
 __u32 dst_ip4;
 __u32 dst_ip6[4];
 __u32 state;
+ __s32 rx_queue_mapping;
};
struct bpf_tcp_sock {
@@ -3486,6 +3757,21 @@ struct xdp_md {
 /* Accesses below go through struct xdp_rxq_info */
 __u32 ingress_ifindex; /* rxq->dev->ifindex */
 __u32 rx_queue_index; /* rxq->queue_index */
+
+ __u32 egress_ifindex; /* txq->dev->ifindex */
+};
+
+/* DEVMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_devmap_val {
+ __u32 ifindex; /* device index */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
};
enum sk_action {
@@ -3508,6 +3794,8 @@ struct sk_msg_md {
 __u32 remote_port; /* Stored in network byte order */
 __u32 local_port; /* stored in host byte order */
 __u32 size; /* Total size of sk_msg */
+
+ __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
};
struct sk_reuseport_md {
@@ -3598,6 +3886,29 @@ struct bpf_btf_info {
 __u32 id;
} __attribute__((aligned(8)));
+struct bpf_link_info {
+ __u32 type;
+ __u32 id;
+ __u32 prog_id;
+ union {
+ struct {
+ __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
+ __u32 tp_name_len; /* in/out: tp_name buffer len */
+ } raw_tracepoint;
+ struct {
+ __u32 attach_type;
+ } tracing;
+ struct {
+ __u64 cgroup_id;
+ __u32 attach_type;
+ } cgroup;
+ struct {
+ __u32 netns_ino;
+ __u32 attach_type;
+ } netns;
+ };
+} __attribute__((aligned(8)));
+
 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
 * by user and intended to be used by socket (e.g. to bind to, depends on
 * attach type).
@@ -3610,7 +3921,7 @@ struct bpf_sock_addr {
 __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
 * Stored in network byte order.
 */
- __u32 user_port; /* Allows 4-byte read and write.
+ __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write.
 * Stored in network byte order
 */
 __u32 family; /* Allows 4-byte read, but no write */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 8134924cfc17..e6b6cb0f8bc6 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -36,12 +36,10 @@ struct btrfs_ioctl_vol_args {
 #define BTRFS_DEVICE_PATH_NAME_MAX 1024
 #define BTRFS_SUBVOL_NAME_MAX 4039
-/*
- * Deprecated since 5.7:
- *
- * BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
- */
-
+#ifndef __KERNEL__
+/* Deprecated since 5.7 */
+# define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+#endif
 #define BTRFS_SUBVOL_RDONLY (1ULL << 1)
 #define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index 8e322e2c7e78..a3f3975df0de 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -519,15 +519,6 @@ struct btrfs_extent_inline_ref {
 __le64 offset;
} __attribute__ ((__packed__));
-/* old style backrefs item */
-struct btrfs_extent_ref_v0 {
- __le64 root;
- __le64 generation;
- __le64 objectid;
- __le32 count;
-} __attribute__ ((__packed__));
-
-
 /* dev extents record free space on individual devices. The owner
 * field points back to the chunk allocation mapping tree that allocated
 * the extent.
The chunk tree uuid field is a way to double check the owner
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 272dc69fa080..48ff0757ae5e 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -274,6 +274,7 @@ struct vfs_ns_cap_data {
 arbitrary SCSI commands */
 /* Allow setting encryption key on loopback filesystem */
 /* Allow setting zone reclaim policy */
+/* Allow everything under CAP_BPF and CAP_PERFMON for backward compatibility */
 #define CAP_SYS_ADMIN 21
@@ -332,6 +333,8 @@ struct vfs_ns_cap_data {
 #define CAP_AUDIT_CONTROL 30
+/* Set or remove capabilities on files */
+
 #define CAP_SETFCAP 31
 /* Override MAC access.
@@ -367,8 +370,45 @@ struct vfs_ns_cap_data {
 #define CAP_AUDIT_READ 37
+/*
+ * Allow system performance and observability privileged operations
+ * using perf_events, i915_perf and other kernel subsystems
+ */
+
+#define CAP_PERFMON 38
+
+/*
+ * CAP_BPF allows the following BPF operations:
+ * - Creating all types of BPF maps
+ * - Advanced verifier features
+ * - Indirect variable access
+ * - Bounded loops
+ * - BPF to BPF function calls
+ * - Scalar precision tracking
+ * - Larger complexity limits
+ * - Dead code elimination
+ * - And potentially other features
+ * - Loading BPF Type Format (BTF) data
+ * - Retrieve xlated and JITed code of BPF programs
+ * - Use bpf_spin_lock() helper
+ *
+ * CAP_PERFMON relaxes the verifier checks further:
+ * - BPF progs can use pointer-to-integer conversions
+ * - speculation attack hardening measures are bypassed
+ * - bpf_probe_read to read arbitrary kernel memory is allowed
+ * - bpf_trace_printk to print kernel memory is allowed
+ *
+ * CAP_SYS_ADMIN is required to use bpf_probe_write_user.
+ *
+ * CAP_SYS_ADMIN is required to iterate system wide loaded
+ * programs, maps, links, BTFs and convert their IDs to file descriptors.
+ *
+ * CAP_PERFMON and CAP_BPF are required to load tracing programs.
+ * CAP_NET_ADMIN and CAP_BPF are required to load networking programs.
+ */
+#define CAP_BPF 39
-#define CAP_LAST_CAP CAP_AUDIT_READ
+#define CAP_LAST_CAP CAP_BPF
 #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 1ae90e06c06d..08563e6a424d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -233,10 +233,13 @@ enum {
 * @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not
 * sent to the CPU.
 * @DEVLINK_TRAP_ACTION_TRAP: The sole copy of the packet is sent to the CPU.
+ * @DEVLINK_TRAP_ACTION_MIRROR: Packet is forwarded by the device and a copy is
+ * sent to the CPU.
 */
enum devlink_trap_action {
 DEVLINK_TRAP_ACTION_DROP,
 DEVLINK_TRAP_ACTION_TRAP,
+ DEVLINK_TRAP_ACTION_MIRROR,
};
/**
@@ -250,10 +253,16 @@ enum devlink_trap_action {
 * control plane for resolution. Trapped packets
 * are processed by devlink and injected to
 * the kernel's Rx path.
+ * @DEVLINK_TRAP_TYPE_CONTROL: Packet was trapped because it is required for
+ * the correct functioning of the control plane.
+ * For example, an ARP request packet. Trapped
+ * packets are injected to the kernel's Rx path,
+ * but not reported to drop monitor.
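A small userspace sketch of reacting to the CAP_SYS_ADMIN to CAP_PERFMON/CAP_BPF split introduced in the capability.h hunk above, probing the effective set with the raw capget() syscall; a production tool would more likely use libcap, and the hard-coded 39 simply mirrors the CAP_BPF value added here.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

static int cap_effective(int cap)
{
	struct __user_cap_header_struct hdr = {
		.version = _LINUX_CAPABILITY_VERSION_3,
		.pid = 0,				/* calling thread */
	};
	struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3] = {};

	if (syscall(SYS_capget, &hdr, data))
		return -1;
	return !!(data[cap / 32].effective & (1U << (cap % 32)));
}

int main(void)
{
	/* CAP_BPF == 39 with this patch; older kernels know nothing of it
	 * and tools there fall back to checking CAP_SYS_ADMIN */
	printf("CAP_BPF in effective set: %d\n", cap_effective(39));
	return 0;
}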
*/ enum devlink_trap_type { DEVLINK_TRAP_TYPE_DROP, DEVLINK_TRAP_TYPE_EXCEPTION, + DEVLINK_TRAP_TYPE_CONTROL, }; enum { diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h index dbc7092e04b5..7f30393b92c3 100644 --- a/include/uapi/linux/dma-buf.h +++ b/include/uapi/linux/dma-buf.h @@ -39,6 +39,12 @@ struct dma_buf_sync { #define DMA_BUF_BASE 'b' #define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) + +/* 32/64bitness of this uapi was botched in android, there's no difference + * between them in actual uapi, they're just different numbers. + */ #define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, u32) +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, u64) #endif diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index 34c02e4290fe..c6dd0215482e 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -36,6 +36,7 @@ typedef __s64 Elf64_Sxword; #define PT_LOPROC 0x70000000 #define PT_HIPROC 0x7fffffff #define PT_GNU_EH_FRAME 0x6474e550 +#define PT_GNU_PROPERTY 0x6474e553 #define PT_GNU_STACK (PT_LOOS + 0x474e551) @@ -367,6 +368,7 @@ typedef struct elf64_shdr { * Notes used in ET_CORE. Architectures export some of the arch register sets * using the corresponding note types via the PTRACE_GETREGSET and * PTRACE_SETREGSET requests. + * The note name for all these is "LINUX". */ #define NT_PRSTATUS 1 #define NT_PRFPREG 2 @@ -429,6 +431,9 @@ typedef struct elf64_shdr { #define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */ #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ +/* Note types with note name "GNU" */ +#define NT_GNU_PROPERTY_TYPE_0 5 + /* Note header in a PT_NOTE section */ typedef struct elf32_note { Elf32_Word n_namesz; /* Name size */ @@ -443,4 +448,10 @@ typedef struct elf64_note { Elf64_Word n_type; /* Content type */ } Elf64_Nhdr; +/* .note.gnu.property types for EM_AARCH64: */ +#define GNU_PROPERTY_AARCH64_FEATURE_1_AND 0xc0000000 + +/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */ +#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI (1U << 0) + #endif /* _UAPI_LINUX_ELF_H */ diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h index 0cca19670fd2..ca5cb3e3c6df 100644 --- a/include/uapi/linux/errqueue.h +++ b/include/uapi/linux/errqueue.h @@ -36,7 +36,7 @@ struct sock_extended_err { * * The timestamping interfaces SO_TIMESTAMPING, MSG_TSTAMP_* * communicate network timestamps by passing this struct in a cmsg with - * recvmsg(). See Documentation/networking/timestamping.txt for details. + * recvmsg(). See Documentation/networking/timestamping.rst for details. * User space sees a timespec definition that matches either * __kernel_timespec or __kernel_old_timespec, in the kernel we * require two structure definitions to provide both. 
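For the dma-buf naming fix in the hunk above, a hedged userspace sketch that always uses the explicitly 64-bit-sized request; the fallback #define exists only for building against pre-patch headers, and dmabuf_fd is assumed to be an already-exported dma-buf file descriptor.

#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/dma-buf.h>

#ifndef DMA_BUF_SET_NAME_B
#define DMA_BUF_SET_NAME_B	_IOW('b', 1, __u64)	/* 'b' == DMA_BUF_BASE */
#endif

static int dmabuf_set_name(int dmabuf_fd, const char *name)
{
	/* the _A/_B variants differ only in the size baked into the
	 * request number; the argument is still the string pointer */
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME_B, name);
}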
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 92f737f10117..f4662b3a9e1e 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1666,6 +1666,18 @@ static inline int ethtool_validate_duplex(__u8 duplex) return 0; } +#define MASTER_SLAVE_CFG_UNSUPPORTED 0 +#define MASTER_SLAVE_CFG_UNKNOWN 1 +#define MASTER_SLAVE_CFG_MASTER_PREFERRED 2 +#define MASTER_SLAVE_CFG_SLAVE_PREFERRED 3 +#define MASTER_SLAVE_CFG_MASTER_FORCE 4 +#define MASTER_SLAVE_CFG_SLAVE_FORCE 5 +#define MASTER_SLAVE_STATE_UNSUPPORTED 0 +#define MASTER_SLAVE_STATE_UNKNOWN 1 +#define MASTER_SLAVE_STATE_MASTER 2 +#define MASTER_SLAVE_STATE_SLAVE 3 +#define MASTER_SLAVE_STATE_ERR 4 + /* Which connector port. */ #define PORT_TP 0x00 #define PORT_AUI 0x01 @@ -1904,7 +1916,9 @@ struct ethtool_link_settings { __u8 eth_tp_mdix_ctrl; __s8 link_mode_masks_nwords; __u8 transceiver; - __u8 reserved1[3]; + __u8 master_slave_cfg; + __u8 master_slave_state; + __u8 reserved1[1]; __u32 reserved[7]; __u32 link_mode_masks[0]; /* layout of link_mode_masks fields: diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 7fde76366ba4..4dda5e4244a7 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -2,7 +2,7 @@ /* * include/uapi/linux/ethtool_netlink.h - netlink interface for ethtool * - * See Documentation/networking/ethtool-netlink.txt in kernel source tree for + * See Documentation/networking/ethtool-netlink.rst in kernel source tree for * doucumentation of the interface. */ @@ -39,6 +39,8 @@ enum { ETHTOOL_MSG_EEE_GET, ETHTOOL_MSG_EEE_SET, ETHTOOL_MSG_TSINFO_GET, + ETHTOOL_MSG_CABLE_TEST_ACT, + ETHTOOL_MSG_CABLE_TEST_TDR_ACT, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -74,6 +76,8 @@ enum { ETHTOOL_MSG_EEE_GET_REPLY, ETHTOOL_MSG_EEE_NTF, ETHTOOL_MSG_TSINFO_GET_REPLY, + ETHTOOL_MSG_CABLE_TEST_NTF, + ETHTOOL_MSG_CABLE_TEST_TDR_NTF, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -216,6 +220,8 @@ enum { ETHTOOL_A_LINKMODES_PEER, /* bitset */ ETHTOOL_A_LINKMODES_SPEED, /* u32 */ ETHTOOL_A_LINKMODES_DUPLEX, /* u8 */ + ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG, /* u8 */ + ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE, /* u8 */ /* add new constants above here */ __ETHTOOL_A_LINKMODES_CNT, @@ -228,6 +234,8 @@ enum { ETHTOOL_A_LINKSTATE_UNSPEC, ETHTOOL_A_LINKSTATE_HEADER, /* nest - _A_HEADER_* */ ETHTOOL_A_LINKSTATE_LINK, /* u8 */ + ETHTOOL_A_LINKSTATE_SQI, /* u32 */ + ETHTOOL_A_LINKSTATE_SQI_MAX, /* u32 */ /* add new constants above here */ __ETHTOOL_A_LINKSTATE_CNT, @@ -403,6 +411,149 @@ enum { ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) }; +/* CABLE TEST */ + +enum { + ETHTOOL_A_CABLE_TEST_UNSPEC, + ETHTOOL_A_CABLE_TEST_HEADER, /* nest - _A_HEADER_* */ + + /* add new constants above here */ + __ETHTOOL_A_CABLE_TEST_CNT, + ETHTOOL_A_CABLE_TEST_MAX = __ETHTOOL_A_CABLE_TEST_CNT - 1 +}; + +/* CABLE TEST NOTIFY */ +enum { + ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC, + ETHTOOL_A_CABLE_RESULT_CODE_OK, + ETHTOOL_A_CABLE_RESULT_CODE_OPEN, + ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT, + ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT, +}; + +enum { + ETHTOOL_A_CABLE_PAIR_A, + ETHTOOL_A_CABLE_PAIR_B, + ETHTOOL_A_CABLE_PAIR_C, + ETHTOOL_A_CABLE_PAIR_D, +}; + +enum { + ETHTOOL_A_CABLE_RESULT_UNSPEC, + ETHTOOL_A_CABLE_RESULT_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */ + ETHTOOL_A_CABLE_RESULT_CODE, /* u8 ETHTOOL_A_CABLE_RESULT_CODE_ */ + + __ETHTOOL_A_CABLE_RESULT_CNT, + ETHTOOL_A_CABLE_RESULT_MAX = 
(__ETHTOOL_A_CABLE_RESULT_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_FAULT_LENGTH_UNSPEC, + ETHTOOL_A_CABLE_FAULT_LENGTH_PAIR, /* u8 ETHTOOL_A_CABLE_PAIR_ */ + ETHTOOL_A_CABLE_FAULT_LENGTH_CM, /* u32 */ + + __ETHTOOL_A_CABLE_FAULT_LENGTH_CNT, + ETHTOOL_A_CABLE_FAULT_LENGTH_MAX = (__ETHTOOL_A_CABLE_FAULT_LENGTH_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_STATUS_UNSPEC, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_STARTED, + ETHTOOL_A_CABLE_TEST_NTF_STATUS_COMPLETED +}; + +enum { + ETHTOOL_A_CABLE_NEST_UNSPEC, + ETHTOOL_A_CABLE_NEST_RESULT, /* nest - ETHTOOL_A_CABLE_RESULT_ */ + ETHTOOL_A_CABLE_NEST_FAULT_LENGTH, /* nest - ETHTOOL_A_CABLE_FAULT_LENGTH_ */ + __ETHTOOL_A_CABLE_NEST_CNT, + ETHTOOL_A_CABLE_NEST_MAX = (__ETHTOOL_A_CABLE_NEST_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_NTF_UNSPEC, + ETHTOOL_A_CABLE_TEST_NTF_HEADER, /* nest - ETHTOOL_A_HEADER_* */ + ETHTOOL_A_CABLE_TEST_NTF_STATUS, /* u8 - _STARTED/_COMPLETE */ + ETHTOOL_A_CABLE_TEST_NTF_NEST, /* nest - of results: */ + + __ETHTOOL_A_CABLE_TEST_NTF_CNT, + ETHTOOL_A_CABLE_TEST_NTF_MAX = (__ETHTOOL_A_CABLE_TEST_NTF_CNT - 1) +}; + +/* CABLE TEST TDR */ + +enum { + ETHTOOL_A_CABLE_TEST_TDR_CFG_UNSPEC, + ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST, /* u32 */ + ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST, /* u32 */ + ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP, /* u32 */ + ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR, /* u8 */ + + /* add new constants above here */ + __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT, + ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX = __ETHTOOL_A_CABLE_TEST_TDR_CFG_CNT - 1 +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_UNSPEC, + ETHTOOL_A_CABLE_TEST_TDR_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_CABLE_TEST_TDR_CFG, /* nest - *_TDR_CFG_* */ + + /* add new constants above here */ + __ETHTOOL_A_CABLE_TEST_TDR_CNT, + ETHTOOL_A_CABLE_TEST_TDR_MAX = __ETHTOOL_A_CABLE_TEST_TDR_CNT - 1 +}; + +/* CABLE TEST TDR NOTIFY */ + +enum { + ETHTOOL_A_CABLE_AMPLITUDE_UNSPEC, + ETHTOOL_A_CABLE_AMPLITUDE_PAIR, /* u8 */ + ETHTOOL_A_CABLE_AMPLITUDE_mV, /* s16 */ + + __ETHTOOL_A_CABLE_AMPLITUDE_CNT, + ETHTOOL_A_CABLE_AMPLITUDE_MAX = (__ETHTOOL_A_CABLE_AMPLITUDE_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_PULSE_UNSPEC, + ETHTOOL_A_CABLE_PULSE_mV, /* s16 */ + + __ETHTOOL_A_CABLE_PULSE_CNT, + ETHTOOL_A_CABLE_PULSE_MAX = (__ETHTOOL_A_CABLE_PULSE_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_STEP_UNSPEC, + ETHTOOL_A_CABLE_STEP_FIRST_DISTANCE, /* u32 */ + ETHTOOL_A_CABLE_STEP_LAST_DISTANCE, /* u32 */ + ETHTOOL_A_CABLE_STEP_STEP_DISTANCE, /* u32 */ + + __ETHTOOL_A_CABLE_STEP_CNT, + ETHTOOL_A_CABLE_STEP_MAX = (__ETHTOOL_A_CABLE_STEP_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TDR_NEST_UNSPEC, + ETHTOOL_A_CABLE_TDR_NEST_STEP, /* nest - ETHTTOOL_A_CABLE_STEP */ + ETHTOOL_A_CABLE_TDR_NEST_AMPLITUDE, /* nest - ETHTOOL_A_CABLE_AMPLITUDE */ + ETHTOOL_A_CABLE_TDR_NEST_PULSE, /* nest - ETHTOOL_A_CABLE_PULSE */ + + __ETHTOOL_A_CABLE_TDR_NEST_CNT, + ETHTOOL_A_CABLE_TDR_NEST_MAX = (__ETHTOOL_A_CABLE_TDR_NEST_CNT - 1) +}; + +enum { + ETHTOOL_A_CABLE_TEST_TDR_NTF_UNSPEC, + ETHTOOL_A_CABLE_TEST_TDR_NTF_HEADER, /* nest - ETHTOOL_A_HEADER_* */ + ETHTOOL_A_CABLE_TEST_TDR_NTF_STATUS, /* u8 - _STARTED/_COMPLETE */ + ETHTOOL_A_CABLE_TEST_TDR_NTF_NEST, /* nest - of results: */ + + /* add new constants above here */ + __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT, + ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1 +}; + /* generic netlink info */ #define ETHTOOL_GENL_NAME "ethtool" #define ETHTOOL_GENL_VERSION 1 diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index ca88b7bce553..2f86b2ad6d7e 100644 --- 
a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -84,10 +84,20 @@ #define DN_ATTRIB 0x00000020 /* File changed attibutes */ #define DN_MULTISHOT 0x80000000 /* Don't remove notifier */ +/* + * The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is + * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to + * unlinkat. The two functions do completely different things and therefore, + * the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to + * faccessat would be undefined behavior and thus treating it equivalent to + * AT_EACCESS is valid undefined behavior. + */ #define AT_FDCWD -100 /* Special value used to indicate openat should use the current working directory. */ #define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */ +#define AT_EACCESS 0x200 /* Test access permitted for + effective IDs, not real IDs. */ #define AT_REMOVEDIR 0x200 /* Remove directory instead of unlinking file. */ #define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */ diff --git a/include/uapi/linux/fd.h b/include/uapi/linux/fd.h index 90fb94712c41..8b80c63b971c 100644 --- a/include/uapi/linux/fd.h +++ b/include/uapi/linux/fd.h @@ -172,7 +172,10 @@ struct floppy_drive_params { * used in succession to try to read the disk. If the FDC cannot lock onto * the disk, the next format is tried. This uses the variable 'probing'. */ - short autodetect[8]; /* autodetected formats */ + +#define FD_AUTODETECT_SIZE 8 + + short autodetect[FD_AUTODETECT_SIZE]; /* autodetected formats */ int checkfreq; /* how often should the drive be checked for disk * changes */ @@ -357,10 +360,25 @@ struct floppy_raw_cmd { int buffer_length; /* length of allocated buffer */ unsigned char rate; + +#define FD_RAW_CMD_SIZE 16 +#define FD_RAW_REPLY_SIZE 16 +#define FD_RAW_CMD_FULLSIZE (FD_RAW_CMD_SIZE + 1 + FD_RAW_REPLY_SIZE) + + /* The command may take up the space initially intended for the reply + * and the reply count. Needed for long 82078 commands such as RESTORE, + * which takes 17 command bytes. + */ + unsigned char cmd_count; - unsigned char cmd[16]; - unsigned char reply_count; - unsigned char reply[16]; + union { + struct { + unsigned char cmd[FD_RAW_CMD_SIZE]; + unsigned char reply_count; + unsigned char reply[FD_RAW_REPLY_SIZE]; + }; + unsigned char fullcmd[FD_RAW_CMD_FULLSIZE]; + }; int track; int resultcode; diff --git a/include/uapi/linux/fdreg.h b/include/uapi/linux/fdreg.h index 1318881954e1..10d33632939d 100644 --- a/include/uapi/linux/fdreg.h +++ b/include/uapi/linux/fdreg.h @@ -7,13 +7,23 @@ * Handbook", Sanches and Canton. */ -/* Fd controller regs. S&C, about page 340 */ -#define FD_STATUS 4 -#define FD_DATA 5 +/* 82077's auxiliary status registers A & B (R) */ +#define FD_SRA 0 +#define FD_SRB 1 /* Digital Output Register */ #define FD_DOR 2 +/* 82077's tape drive register (R/W) */ +#define FD_TDR 3 + +/* 82077's data rate select register (W) */ +#define FD_DSR 4 + +/* Fd controller regs. 
S&C, about page 340 */ +#define FD_STATUS 4 +#define FD_DATA 5 + /* Digital Input Register (read) */ #define FD_DIR 7 diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h index 8c0bc24d5d95..07c1cdcb715e 100644 --- a/include/uapi/linux/fiemap.h +++ b/include/uapi/linux/fiemap.h @@ -9,8 +9,8 @@ * Andreas Dilger <adilger@sun.com> */ -#ifndef _LINUX_FIEMAP_H -#define _LINUX_FIEMAP_H +#ifndef _UAPI_LINUX_FIEMAP_H +#define _UAPI_LINUX_FIEMAP_H #include <linux/types.h> @@ -67,4 +67,4 @@ struct fiemap { #define FIEMAP_EXTENT_SHARED 0x00002000 /* Space shared with other * files. */ -#endif /* _LINUX_FIEMAP_H */ +#endif /* _UAPI_LINUX_FIEMAP_H */ diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h index 1acd2b179aef..7e5b5c10a49c 100644 --- a/include/uapi/linux/firewire-cdev.h +++ b/include/uapi/linux/firewire-cdev.h @@ -308,7 +308,7 @@ struct fw_cdev_event_iso_interrupt_mc { /** * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed * @closure: See &fw_cdev_event_common; - * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl + * set by``FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE)`` ioctl * @type: %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED * @handle: Reference by which an allocated resource can be deallocated diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 379a612f8f1d..f44eb0a04afd 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -262,6 +262,7 @@ struct fsxattr { #define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ +#define FS_DAX_FL 0x02000000 /* Inode is DAX */ #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index a10e3cdc2839..7875709ccfeb 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -19,7 +19,8 @@ #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 #define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08 -#define FSCRYPT_POLICY_FLAGS_VALID 0x0F +#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 0x10 +#define FSCRYPT_POLICY_FLAGS_VALID 0x1F /* Encryption algorithms */ #define FSCRYPT_MODE_AES_256_XTS 1 diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h index 877f7fa95466..9c0636ec2286 100644 --- a/include/uapi/linux/genetlink.h +++ b/include/uapi/linux/genetlink.h @@ -48,6 +48,7 @@ enum { CTRL_CMD_NEWMCAST_GRP, CTRL_CMD_DELMCAST_GRP, CTRL_CMD_GETMCAST_GRP, /* unused */ + CTRL_CMD_GETPOLICY, __CTRL_CMD_MAX, }; @@ -62,6 +63,7 @@ enum { CTRL_ATTR_MAXATTR, CTRL_ATTR_OPS, CTRL_ATTR_MCAST_GROUPS, + CTRL_ATTR_POLICY, __CTRL_ATTR_MAX, }; diff --git a/include/uapi/linux/gfs2_ondisk.h b/include/uapi/linux/gfs2_ondisk.h index 2dc10a034de1..07e508e6691b 100644 --- a/include/uapi/linux/gfs2_ondisk.h +++ b/include/uapi/linux/gfs2_ondisk.h @@ -171,6 +171,12 @@ struct gfs2_rindex { #define GFS2_RGF_NOALLOC 0x00000008 #define GFS2_RGF_TRIMMED 0x00000010 +struct gfs2_inode_lvb { + __be32 ri_magic; + __be32 __pad; + __be64 ri_generation_deleted; +}; + struct gfs2_rgrp_lvb { __be32 rl_magic; __be32 rl_flags; diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h index 991b2b7ada7a..8f24404ad04f 100644 --- 
a/include/uapi/linux/hyperv.h +++ b/include/uapi/linux/hyperv.h @@ -119,8 +119,8 @@ enum hv_fcopy_op { struct hv_fcopy_hdr { __u32 operation; - uuid_le service_id0; /* currently unused */ - uuid_le service_id1; /* currently unused */ + __u8 service_id0[16]; /* currently unused */ + __u8 service_id1[16]; /* currently unused */ } __attribute__((packed)); #define OVER_WRITE 0x1 diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h index be714cd8c826..797ba2c1562a 100644 --- a/include/uapi/linux/if.h +++ b/include/uapi/linux/if.h @@ -178,6 +178,7 @@ enum { enum { IF_LINK_MODE_DEFAULT, IF_LINK_MODE_DORMANT, /* limit upward transition to dormant */ + IF_LINK_MODE_TESTING, /* limit upward transition to testing */ }; /* diff --git a/include/uapi/linux/if_arcnet.h b/include/uapi/linux/if_arcnet.h index b122cfac7128..683878036d76 100644 --- a/include/uapi/linux/if_arcnet.h +++ b/include/uapi/linux/if_arcnet.h @@ -60,7 +60,7 @@ struct arc_rfc1201 { __u8 proto; /* protocol ID field - varies */ __u8 split_flag; /* for use with split packets */ __be16 sequence; /* sequence number */ - __u8 payload[]; /* space remaining in packet (504 bytes)*/ + __u8 payload[0]; /* space remaining in packet (504 bytes)*/ }; #define RFC1201_HDR_SIZE 4 @@ -69,7 +69,7 @@ struct arc_rfc1201 { */ struct arc_rfc1051 { __u8 proto; /* ARC_P_RFC1051_ARP/RFC1051_IP */ - __u8 payload[]; /* 507 bytes */ + __u8 payload[0]; /* 507 bytes */ }; #define RFC1051_HDR_SIZE 1 @@ -80,7 +80,7 @@ struct arc_rfc1051 { struct arc_eth_encap { __u8 proto; /* Always ARC_P_ETHER */ struct ethhdr eth; /* standard ethernet header (yuck!) */ - __u8 payload[]; /* 493 bytes */ + __u8 payload[0]; /* 493 bytes */ }; #define ETH_ENCAP_HDR_SIZE 14 diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index bfe621ea51b3..caa6914a3e53 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -120,6 +120,7 @@ enum { IFLA_BRIDGE_MODE, IFLA_BRIDGE_VLAN_INFO, IFLA_BRIDGE_VLAN_TUNNEL_INFO, + IFLA_BRIDGE_MRP, __IFLA_BRIDGE_MAX, }; #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1) @@ -157,6 +158,101 @@ struct bridge_vlan_xstats { __u32 pad2; }; +enum { + IFLA_BRIDGE_MRP_UNSPEC, + IFLA_BRIDGE_MRP_INSTANCE, + IFLA_BRIDGE_MRP_PORT_STATE, + IFLA_BRIDGE_MRP_PORT_ROLE, + IFLA_BRIDGE_MRP_RING_STATE, + IFLA_BRIDGE_MRP_RING_ROLE, + IFLA_BRIDGE_MRP_START_TEST, + __IFLA_BRIDGE_MRP_MAX, +}; + +#define IFLA_BRIDGE_MRP_MAX (__IFLA_BRIDGE_MRP_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_INSTANCE_UNSPEC, + IFLA_BRIDGE_MRP_INSTANCE_RING_ID, + IFLA_BRIDGE_MRP_INSTANCE_P_IFINDEX, + IFLA_BRIDGE_MRP_INSTANCE_S_IFINDEX, + IFLA_BRIDGE_MRP_INSTANCE_PRIO, + __IFLA_BRIDGE_MRP_INSTANCE_MAX, +}; + +#define IFLA_BRIDGE_MRP_INSTANCE_MAX (__IFLA_BRIDGE_MRP_INSTANCE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_PORT_STATE_UNSPEC, + IFLA_BRIDGE_MRP_PORT_STATE_STATE, + __IFLA_BRIDGE_MRP_PORT_STATE_MAX, +}; + +#define IFLA_BRIDGE_MRP_PORT_STATE_MAX (__IFLA_BRIDGE_MRP_PORT_STATE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_PORT_ROLE_UNSPEC, + IFLA_BRIDGE_MRP_PORT_ROLE_ROLE, + __IFLA_BRIDGE_MRP_PORT_ROLE_MAX, +}; + +#define IFLA_BRIDGE_MRP_PORT_ROLE_MAX (__IFLA_BRIDGE_MRP_PORT_ROLE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_RING_STATE_UNSPEC, + IFLA_BRIDGE_MRP_RING_STATE_RING_ID, + IFLA_BRIDGE_MRP_RING_STATE_STATE, + __IFLA_BRIDGE_MRP_RING_STATE_MAX, +}; + +#define IFLA_BRIDGE_MRP_RING_STATE_MAX (__IFLA_BRIDGE_MRP_RING_STATE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_RING_ROLE_UNSPEC, + IFLA_BRIDGE_MRP_RING_ROLE_RING_ID, + IFLA_BRIDGE_MRP_RING_ROLE_ROLE, + 
__IFLA_BRIDGE_MRP_RING_ROLE_MAX, +}; + +#define IFLA_BRIDGE_MRP_RING_ROLE_MAX (__IFLA_BRIDGE_MRP_RING_ROLE_MAX - 1) + +enum { + IFLA_BRIDGE_MRP_START_TEST_UNSPEC, + IFLA_BRIDGE_MRP_START_TEST_RING_ID, + IFLA_BRIDGE_MRP_START_TEST_INTERVAL, + IFLA_BRIDGE_MRP_START_TEST_MAX_MISS, + IFLA_BRIDGE_MRP_START_TEST_PERIOD, + IFLA_BRIDGE_MRP_START_TEST_MONITOR, + __IFLA_BRIDGE_MRP_START_TEST_MAX, +}; + +#define IFLA_BRIDGE_MRP_START_TEST_MAX (__IFLA_BRIDGE_MRP_START_TEST_MAX - 1) + +struct br_mrp_instance { + __u32 ring_id; + __u32 p_ifindex; + __u32 s_ifindex; + __u16 prio; +}; + +struct br_mrp_ring_state { + __u32 ring_id; + __u32 ring_state; +}; + +struct br_mrp_ring_role { + __u32 ring_id; + __u32 ring_role; +}; + +struct br_mrp_start_test { + __u32 ring_id; + __u32 interval; + __u32 max_miss; + __u32 period; + __u32 monitor; +}; + struct bridge_stp_xstats { __u64 transition_blk; __u64 transition_fwd; diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index f6ceb2e63d1e..d6de2b167448 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -92,6 +92,7 @@ #define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */ #define ETH_P_TIPC 0x88CA /* TIPC */ #define ETH_P_LLDP 0x88CC /* Link Layer Discovery Protocol */ +#define ETH_P_MRP 0x88E3 /* Media Redundancy Protocol */ #define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */ #define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ #define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 127c704eeba9..a009365ad67b 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -343,6 +343,7 @@ enum { IFLA_BRPORT_NEIGH_SUPPRESS, IFLA_BRPORT_ISOLATED, IFLA_BRPORT_BACKUP_PORT, + IFLA_BRPORT_MRP_RING_OPEN, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) diff --git a/include/uapi/linux/if_x25.h b/include/uapi/linux/if_x25.h index 5d962448345f..3a5938e38370 100644 --- a/include/uapi/linux/if_x25.h +++ b/include/uapi/linux/if_x25.h @@ -18,7 +18,7 @@ #include <linux/types.h> -/* Documentation/networking/x25-iface.txt */ +/* Documentation/networking/x25-iface.rst */ #define X25_IFACE_DATA 0x00 #define X25_IFACE_CONNECT 0x01 #define X25_IFACE_DISCONNECT 0x02 diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 57cc429a9177..e6f183ee8417 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -96,6 +96,7 @@ enum { INET_DIAG_BC_MARK_COND, INET_DIAG_BC_S_EQ, INET_DIAG_BC_D_EQ, + INET_DIAG_BC_CGROUP_COND, /* u64 cgroup v2 ID */ }; struct inet_diag_hostcond { @@ -157,6 +158,7 @@ enum { INET_DIAG_MD5SIG, INET_DIAG_ULP_INFO, INET_DIAG_SK_BPF_STORAGES, + INET_DIAG_CGROUP_ID, __INET_DIAG_MAX, }; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index e48d746b8e2a..92c22699a5a7 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -129,6 +129,7 @@ enum { IORING_OP_SPLICE, IORING_OP_PROVIDE_BUFFERS, IORING_OP_REMOVE_BUFFERS, + IORING_OP_TEE, /* this goes last, obviously */ IORING_OP_LAST, @@ -204,10 +205,19 @@ struct io_cqring_offsets { __u32 ring_entries; __u32 overflow; __u32 cqes; - __u64 resv[2]; + __u32 flags; + __u32 resv1; + __u64 resv2; }; /* + * cq_ring->flags + */ + +/* disable eventfd notifications */ +#define IORING_CQ_EVENTFD_DISABLED (1U << 0) + +/* * io_uring_enter(2) flags */ #define IORING_ENTER_GETEVENTS (1U << 0) diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h index 
4ad3496e5c43..e907b7091a46 100644 --- a/include/uapi/linux/iommu.h +++ b/include/uapi/linux/iommu.h @@ -285,6 +285,11 @@ struct iommu_gpasid_bind_data_vtd { __u32 emt; }; +#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \ + IOMMU_SVA_VTD_GPASID_EMTE | \ + IOMMU_SVA_VTD_GPASID_PCD | \ + IOMMU_SVA_VTD_GPASID_PWT) + /** * struct iommu_gpasid_bind_data - Information about device and guest PASID binding * @version: Version of this data structure diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h index ed3d5893830d..4c8884eea808 100644 --- a/include/uapi/linux/keyctl.h +++ b/include/uapi/linux/keyctl.h @@ -69,6 +69,7 @@ #define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */ #define KEYCTL_MOVE 30 /* Move keys between keyrings */ #define KEYCTL_CAPABILITIES 31 /* Find capabilities of keyrings subsystem */ +#define KEYCTL_WATCH_KEY 32 /* Watch a key or ring of keys for changes */ /* keyctl structures */ struct keyctl_dh_params { @@ -130,5 +131,6 @@ struct keyctl_pkey_params { #define KEYCTL_CAPS0_MOVE 0x80 /* KEYCTL_MOVE supported */ #define KEYCTL_CAPS1_NS_KEYRING_NAME 0x01 /* Keyring names are per-user_namespace */ #define KEYCTL_CAPS1_NS_KEY_TAG 0x02 /* Key indexing can include a namespace tag */ +#define KEYCTL_CAPS1_NOTIFICATIONS 0x04 /* Keys generate watchable notifications */ #endif /* _LINUX_KEYCTL_H */ diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 20917c59f39c..b6be62356d34 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -251,7 +251,7 @@ struct kfd_memory_exception_failure { __u32 imprecise; /* Can't determine the exact fault address */ }; -/* memory exception data*/ +/* memory exception data */ struct kfd_hsa_memory_exception_data { struct kfd_memory_exception_failure failure; __u64 va; @@ -410,6 +410,20 @@ struct kfd_ioctl_unmap_memory_from_gpu_args { __u32 n_success; /* to/from KFD */ }; +/* Allocate GWS for specific queue + * + * @queue_id: queue's id that GWS is allocated for + * @num_gws: how many GWS to allocate + * @first_gws: index of the first GWS allocated. + * only support contiguous GWS allocation + */ +struct kfd_ioctl_alloc_queue_gws_args { + __u32 queue_id; /* to KFD */ + __u32 num_gws; /* to KFD */ + __u32 first_gws; /* from KFD */ + __u32 pad; +}; + struct kfd_ioctl_get_dmabuf_info_args { __u64 size; /* from KFD */ __u64 metadata_ptr; /* to KFD */ @@ -529,7 +543,10 @@ enum kfd_mmio_remap { #define AMDKFD_IOC_IMPORT_DMABUF \ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args) +#define AMDKFD_IOC_ALLOC_QUEUE_GWS \ + AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args) + #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x1E +#define AMDKFD_COMMAND_END 0x1F #endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 428c7dde6b4b..4fdf30316582 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -116,7 +116,7 @@ struct kvm_irq_level { * ACPI gsi notion of irq. * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. 
- * For ARM: See Documentation/virt/kvm/api.txt + * For ARM: See Documentation/virt/kvm/api.rst */ union { __u32 irq; @@ -188,10 +188,13 @@ struct kvm_s390_cmma_log { struct kvm_hyperv_exit { #define KVM_EXIT_HYPERV_SYNIC 1 #define KVM_EXIT_HYPERV_HCALL 2 +#define KVM_EXIT_HYPERV_SYNDBG 3 __u32 type; + __u32 pad1; union { struct { __u32 msr; + __u32 pad2; __u64 control; __u64 evt_page; __u64 msg_page; @@ -201,6 +204,15 @@ struct kvm_hyperv_exit { __u64 result; __u64 params[2]; } hcall; + struct { + __u32 msr; + __u32 pad2; + __u64 control; + __u64 status; + __u64 send_page; + __u64 recv_page; + __u64 pending_page; + } syndbg; } u; }; @@ -1017,6 +1029,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_VCPU_RESETS 179 #define KVM_CAP_S390_PROTECTED 180 #define KVM_CAP_PPC_SECURE_GUEST 181 +#define KVM_CAP_HALT_POLL 182 +#define KVM_CAP_ASYNC_PF_INT 183 #ifdef KVM_CAP_IRQ_ROUTING @@ -1107,7 +1121,7 @@ struct kvm_xen_hvm_config { * * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies * the irqfd to operate in resampling mode for level triggered interrupt - * emulation. See Documentation/virt/kvm/api.txt. + * emulation. See Documentation/virt/kvm/api.rst. */ #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h index 080a8df134ef..24a1c45bd1ae 100644 --- a/include/uapi/linux/loop.h +++ b/include/uapi/linux/loop.h @@ -25,6 +25,16 @@ enum { LO_FLAGS_DIRECT_IO = 16, }; +/* LO_FLAGS that can be set using LOOP_SET_STATUS(64) */ +#define LOOP_SET_STATUS_SETTABLE_FLAGS (LO_FLAGS_AUTOCLEAR | LO_FLAGS_PARTSCAN) + +/* LO_FLAGS that can be cleared using LOOP_SET_STATUS(64) */ +#define LOOP_SET_STATUS_CLEARABLE_FLAGS (LO_FLAGS_AUTOCLEAR) + +/* LO_FLAGS that can be set using LOOP_CONFIGURE */ +#define LOOP_CONFIGURE_SETTABLE_FLAGS (LO_FLAGS_READ_ONLY | LO_FLAGS_AUTOCLEAR \ + | LO_FLAGS_PARTSCAN | LO_FLAGS_DIRECT_IO) + #include <asm/posix_types.h> /* for __kernel_old_dev_t */ #include <linux/types.h> /* for __u64 */ @@ -37,7 +47,7 @@ struct loop_info { int lo_offset; int lo_encrypt_type; int lo_encrypt_key_size; /* ioctl w/o */ - int lo_flags; /* ioctl r/o */ + int lo_flags; char lo_name[LO_NAME_SIZE]; unsigned char lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ unsigned long lo_init[2]; @@ -53,13 +63,29 @@ struct loop_info64 { __u32 lo_number; /* ioctl r/o */ __u32 lo_encrypt_type; __u32 lo_encrypt_key_size; /* ioctl w/o */ - __u32 lo_flags; /* ioctl r/o */ + __u32 lo_flags; __u8 lo_file_name[LO_NAME_SIZE]; __u8 lo_crypt_name[LO_NAME_SIZE]; __u8 lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */ __u64 lo_init[2]; }; +/** + * struct loop_config - Complete configuration for a loop device. + * @fd: fd of the file to be used as a backing file for the loop device. + * @block_size: block size to use; ignored if 0. + * @info: struct loop_info64 to configure the loop device with. + * + * This structure is used with the LOOP_CONFIGURE ioctl, and can be used to + * atomically setup and configure all loop device parameters at once. 
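A hedged userspace sketch of the one-shot setup this ioctl enables (struct loop_config itself follows right after this example); the block size and flags here are illustrative, and error handling is trimmed.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int loop_attach(int loop_fd, int backing_fd)
{
	struct loop_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = backing_fd;
	cfg.block_size = 4096;			/* 0 keeps the default */
	cfg.info.lo_flags = LO_FLAGS_READ_ONLY | LO_FLAGS_AUTOCLEAR;

	/* one atomic call instead of the LOOP_SET_FD + LOOP_SET_STATUS64 two-step */
	return ioctl(loop_fd, LOOP_CONFIGURE, &cfg);
}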
+ */ +struct loop_config { + __u32 fd; + __u32 block_size; + struct loop_info64 info; + __u64 __reserved[8]; +}; + /* * Loop filter types */ @@ -90,6 +116,7 @@ struct loop_info64 { #define LOOP_SET_CAPACITY 0x4C07 #define LOOP_SET_DIRECT_IO 0x4C08 #define LOOP_SET_BLOCK_SIZE 0x4C09 +#define LOOP_CONFIGURE 0x4C0A /* /dev/loop-control interface */ #define LOOP_CTL_ADD 0x4C80 diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index d78064007b17..f3956fc11de6 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -94,6 +94,7 @@ #define BALLOON_KVM_MAGIC 0x13661366 #define ZSMALLOC_MAGIC 0x58295829 #define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */ +#define DEVMEM_MAGIC 0x454d444d /* "DMEM" */ #define Z3FOLD_MAGIC 0x33 #define PPC_CMM_MAGIC 0xc7571590 diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h index 90f9b4e1ba27..39f7c44baf53 100644 --- a/include/uapi/linux/mii.h +++ b/include/uapi/linux/mii.h @@ -151,11 +151,13 @@ /* 1000BASE-T Control register */ #define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ #define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +#define CTL1000_PREFER_MASTER 0x0400 /* prefer to operate as master */ #define CTL1000_AS_MASTER 0x0800 #define CTL1000_ENABLE_MASTER 0x1000 /* 1000BASE-T Status register */ #define LPA_1000MSFAIL 0x8000 /* Master/Slave resolution failure */ +#define LPA_1000MSRES 0x4000 /* Master/Slave resolution status */ #define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ #define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ #define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */ diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h index 98e29e7f54ac..27a39847d55c 100644 --- a/include/uapi/linux/mmc/ioctl.h +++ b/include/uapi/linux/mmc/ioctl.h @@ -3,6 +3,7 @@ #define LINUX_MMC_IOCTL_H #include <linux/types.h> +#include <linux/major.h> struct mmc_ioc_cmd { /* @@ -57,7 +58,7 @@ struct mmc_ioc_cmd { */ struct mmc_ioc_multi_cmd { __u64 num_of_cmds; - struct mmc_ioc_cmd cmds[]; + struct mmc_ioc_cmd cmds[0]; }; #define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd) diff --git a/include/uapi/linux/mrp_bridge.h b/include/uapi/linux/mrp_bridge.h new file mode 100644 index 000000000000..84f15f48a7cb --- /dev/null +++ b/include/uapi/linux/mrp_bridge.h @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ + +#ifndef _UAPI_LINUX_MRP_BRIDGE_H_ +#define _UAPI_LINUX_MRP_BRIDGE_H_ + +#include <linux/types.h> +#include <linux/if_ether.h> + +#define MRP_MAX_FRAME_LENGTH 200 +#define MRP_DEFAULT_PRIO 0x8000 +#define MRP_DOMAIN_UUID_LENGTH 16 +#define MRP_VERSION 1 +#define MRP_FRAME_PRIO 7 +#define MRP_OUI_LENGTH 3 +#define MRP_MANUFACTURE_DATA_LENGTH 2 + +enum br_mrp_ring_role_type { + BR_MRP_RING_ROLE_DISABLED, + BR_MRP_RING_ROLE_MRC, + BR_MRP_RING_ROLE_MRM, + BR_MRP_RING_ROLE_MRA, +}; + +enum br_mrp_ring_state_type { + BR_MRP_RING_STATE_OPEN, + BR_MRP_RING_STATE_CLOSED, +}; + +enum br_mrp_port_state_type { + BR_MRP_PORT_STATE_DISABLED, + BR_MRP_PORT_STATE_BLOCKED, + BR_MRP_PORT_STATE_FORWARDING, + BR_MRP_PORT_STATE_NOT_CONNECTED, +}; + +enum br_mrp_port_role_type { + BR_MRP_PORT_ROLE_PRIMARY, + BR_MRP_PORT_ROLE_SECONDARY, + BR_MRP_PORT_ROLE_NONE, +}; + +enum br_mrp_tlv_header_type { + BR_MRP_TLV_HEADER_END = 0x0, + BR_MRP_TLV_HEADER_COMMON = 0x1, + BR_MRP_TLV_HEADER_RING_TEST = 0x2, + BR_MRP_TLV_HEADER_RING_TOPO = 0x3, + BR_MRP_TLV_HEADER_RING_LINK_DOWN = 0x4, + 
BR_MRP_TLV_HEADER_RING_LINK_UP = 0x5, + BR_MRP_TLV_HEADER_OPTION = 0x7f, +}; + +enum br_mrp_sub_tlv_header_type { + BR_MRP_SUB_TLV_HEADER_TEST_MGR_NACK = 0x1, + BR_MRP_SUB_TLV_HEADER_TEST_PROPAGATE = 0x2, + BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3, +}; + +struct br_mrp_tlv_hdr { + __u8 type; + __u8 length; +}; + +struct br_mrp_sub_tlv_hdr { + __u8 type; + __u8 length; +}; + +struct br_mrp_end_hdr { + struct br_mrp_tlv_hdr hdr; +}; + +struct br_mrp_common_hdr { + __be16 seq_id; + __u8 domain[MRP_DOMAIN_UUID_LENGTH]; +}; + +struct br_mrp_ring_test_hdr { + __be16 prio; + __u8 sa[ETH_ALEN]; + __be16 port_role; + __be16 state; + __be16 transitions; + __be32 timestamp; +}; + +struct br_mrp_ring_topo_hdr { + __be16 prio; + __u8 sa[ETH_ALEN]; + __be16 interval; +}; + +struct br_mrp_ring_link_hdr { + __u8 sa[ETH_ALEN]; + __be16 port_role; + __be16 interval; + __be16 blocked; +}; + +struct br_mrp_sub_opt_hdr { + __u8 type; + __u8 manufacture_data[MRP_MANUFACTURE_DATA_LENGTH]; +}; + +struct br_mrp_test_mgr_nack_hdr { + __be16 prio; + __u8 sa[ETH_ALEN]; + __be16 other_prio; + __u8 other_sa[ETH_ALEN]; +}; + +struct br_mrp_test_prop_hdr { + __be16 prio; + __u8 sa[ETH_ALEN]; + __be16 other_prio; + __u8 other_sa[ETH_ALEN]; +}; + +struct br_mrp_oui_hdr { + __u8 oui[MRP_OUI_LENGTH]; +}; + +#endif diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h index de5d90212409..0e09dc5cec19 100644 --- a/include/uapi/linux/ndctl.h +++ b/include/uapi/linux/ndctl.h @@ -244,6 +244,7 @@ struct nd_cmd_pkg { #define NVDIMM_FAMILY_HPE2 2 #define NVDIMM_FAMILY_MSFT 3 #define NVDIMM_FAMILY_HYPERV 4 +#define NVDIMM_FAMILY_PAPR 5 #define ND_IOCTL_CALL _IOWR(ND_IOCTL, ND_CMD_CALL,\ struct nd_cmd_pkg) diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h index cd144e3099a3..eefcda8ca44e 100644 --- a/include/uapi/linux/neighbour.h +++ b/include/uapi/linux/neighbour.h @@ -29,6 +29,7 @@ enum { NDA_LINK_NETNSID, NDA_SRC_VNI, NDA_PROTOCOL, /* Originator of entry */ + NDA_NH_ID, __NDA_MAX }; diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 67e31f329190..66048cc5d7b3 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -29,12 +29,12 @@ struct net_dm_config_entry { struct net_dm_config_msg { __u32 entries; - struct net_dm_config_entry options[]; + struct net_dm_config_entry options[0]; }; struct net_dm_alert_msg { __u32 entries; - struct net_dm_drop_point points[]; + struct net_dm_drop_point points[0]; }; struct net_dm_user_msg { diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h index b6f0bb1dc799..4b3395082d15 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_common.h +++ b/include/uapi/linux/netfilter/nf_conntrack_common.h @@ -114,15 +114,19 @@ enum ip_conntrack_status { IPS_OFFLOAD_BIT = 14, IPS_OFFLOAD = (1 << IPS_OFFLOAD_BIT), + /* Conntrack has been offloaded to hardware. */ + IPS_HW_OFFLOAD_BIT = 15, + IPS_HW_OFFLOAD = (1 << IPS_HW_OFFLOAD_BIT), + /* Be careful here, modifying these bits can make things messy, * so don't let users modify them directly. 
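Stepping back to the new mrp_bridge.h header added above, a hedged sketch of walking the TLV chain of a captured MRP PDU using those structures; the bounds checks assume the caller supplies a valid end pointer for the buffer.

#include <stddef.h>
#include <linux/mrp_bridge.h>

/* advance *pos past one TLV and return its header, or NULL at the end */
static const struct br_mrp_tlv_hdr *mrp_next_tlv(const unsigned char **pos,
						 const unsigned char *end)
{
	const struct br_mrp_tlv_hdr *hdr;

	if ((size_t)(end - *pos) < sizeof(*hdr))
		return NULL;
	hdr = (const struct br_mrp_tlv_hdr *)*pos;
	if (hdr->type == BR_MRP_TLV_HEADER_END)
		return NULL;
	/* length counts only the value bytes after the 2-byte header */
	if ((size_t)(end - *pos) < sizeof(*hdr) + hdr->length)
		return NULL;
	*pos += sizeof(*hdr) + hdr->length;
	return hdr;
}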
*/ IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK | IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_UNTRACKED | - IPS_OFFLOAD), + IPS_OFFLOAD | IPS_HW_OFFLOAD), - __IPS_MAX_BIT = 15, + __IPS_MAX_BIT = 16, }; /* Connection tracking event types */ diff --git a/include/uapi/linux/netfilter/nf_nat.h b/include/uapi/linux/netfilter/nf_nat.h index 4a95c0db14d4..a64586e77b24 100644 --- a/include/uapi/linux/netfilter/nf_nat.h +++ b/include/uapi/linux/netfilter/nf_nat.h @@ -11,6 +11,7 @@ #define NF_NAT_RANGE_PERSISTENT (1 << 3) #define NF_NAT_RANGE_PROTO_RANDOM_FULLY (1 << 4) #define NF_NAT_RANGE_PROTO_OFFSET (1 << 5) +#define NF_NAT_RANGE_NETMAP (1 << 6) #define NF_NAT_RANGE_PROTO_RANDOM_ALL \ (NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY) @@ -18,7 +19,8 @@ #define NF_NAT_RANGE_MASK \ (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED | \ NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT | \ - NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET) + NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \ + NF_NAT_RANGE_NETMAP) struct nf_nat_ipv4_range { unsigned int flags; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 30f2a87270dc..4565456c0ef4 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -276,6 +276,7 @@ enum nft_rule_compat_attributes { * @NFT_SET_TIMEOUT: set uses timeouts * @NFT_SET_EVAL: set can be updated from the evaluation path * @NFT_SET_OBJECT: set contains stateful objects + * @NFT_SET_CONCAT: set contains a concatenation */ enum nft_set_flags { NFT_SET_ANONYMOUS = 0x1, @@ -285,6 +286,7 @@ enum nft_set_flags { NFT_SET_TIMEOUT = 0x10, NFT_SET_EVAL = 0x20, NFT_SET_OBJECT = 0x40, + NFT_SET_CONCAT = 0x80, }; /** diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h index 1d41810d17e2..262881792671 100644 --- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h +++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h @@ -55,6 +55,7 @@ enum ctattr_type { CTA_LABELS, CTA_LABELS_MASK, CTA_SYNPROXY, + CTA_FILTER, __CTA_MAX }; #define CTA_MAX (__CTA_MAX - 1) @@ -276,4 +277,12 @@ enum ctattr_expect_stats { }; #define CTA_STATS_EXP_MAX (__CTA_STATS_EXP_MAX - 1) +enum ctattr_filter { + CTA_FILTER_UNSPEC, + CTA_FILTER_ORIG_FLAGS, + CTA_FILTER_REPLY_FLAGS, + __CTA_FILTER_MAX +}; +#define CTA_FILTER_MAX (__CTA_FILTER_MAX - 1) + #endif /* _IPCONNTRACK_NETLINK_H */ diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h index 434e6506abaa..49ddcdc61c09 100644 --- a/include/uapi/linux/netfilter/xt_IDLETIMER.h +++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h @@ -48,6 +48,7 @@ struct idletimer_tg_info_v1 { char label[MAX_IDLETIMER_LABEL_SIZE]; + __u8 send_nl_msg; /* unused: for compatibility with Android */ __u8 timer_type; /* for kernel module internal use only */ diff --git a/include/uapi/linux/netfilter_bridge/ebt_among.h b/include/uapi/linux/netfilter_bridge/ebt_among.h index 73b26a280c4f..9acf757bc1f7 100644 --- a/include/uapi/linux/netfilter_bridge/ebt_among.h +++ b/include/uapi/linux/netfilter_bridge/ebt_among.h @@ -40,7 +40,7 @@ struct ebt_mac_wormhash_tuple { struct ebt_mac_wormhash { int table[257]; int poolsize; - struct ebt_mac_wormhash_tuple pool[]; + struct ebt_mac_wormhash_tuple pool[0]; }; #define ebt_mac_wormhash_size(x) ((x) ? 
sizeof(struct ebt_mac_wormhash) \ diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index 0a4d73317759..eac8a6a648ea 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -249,4 +249,107 @@ struct nla_bitfield32 { __u32 selector; }; +/* + * policy descriptions - it's specific to each family how this is used + * Normally, it should be retrieved via a dump inside another attribute + * specifying where it applies. + */ + +/** + * enum netlink_attribute_type - type of an attribute + * @NL_ATTR_TYPE_INVALID: unused + * @NL_ATTR_TYPE_FLAG: flag attribute (present/not present) + * @NL_ATTR_TYPE_U8: 8-bit unsigned attribute + * @NL_ATTR_TYPE_U16: 16-bit unsigned attribute + * @NL_ATTR_TYPE_U32: 32-bit unsigned attribute + * @NL_ATTR_TYPE_U64: 64-bit unsigned attribute + * @NL_ATTR_TYPE_S8: 8-bit signed attribute + * @NL_ATTR_TYPE_S16: 16-bit signed attribute + * @NL_ATTR_TYPE_S32: 32-bit signed attribute + * @NL_ATTR_TYPE_S64: 64-bit signed attribute + * @NL_ATTR_TYPE_BINARY: binary data, min/max length may be specified + * @NL_ATTR_TYPE_STRING: string, min/max length may be specified + * @NL_ATTR_TYPE_NUL_STRING: NUL-terminated string, + * min/max length may be specified + * @NL_ATTR_TYPE_NESTED: nested, i.e. the content of this attribute + * consists of sub-attributes. The nested policy and maxtype + * inside may be specified. + * @NL_ATTR_TYPE_NESTED_ARRAY: nested array, i.e. the content of this + * attribute contains sub-attributes whose type is irrelevant + * (just used to separate the array entries) and each such array + * entry has attributes again, the policy for those inner ones + * and the corresponding maxtype may be specified. + * @NL_ATTR_TYPE_BITFIELD32: &struct nla_bitfield32 attribute + */ +enum netlink_attribute_type { + NL_ATTR_TYPE_INVALID, + + NL_ATTR_TYPE_FLAG, + + NL_ATTR_TYPE_U8, + NL_ATTR_TYPE_U16, + NL_ATTR_TYPE_U32, + NL_ATTR_TYPE_U64, + + NL_ATTR_TYPE_S8, + NL_ATTR_TYPE_S16, + NL_ATTR_TYPE_S32, + NL_ATTR_TYPE_S64, + + NL_ATTR_TYPE_BINARY, + NL_ATTR_TYPE_STRING, + NL_ATTR_TYPE_NUL_STRING, + + NL_ATTR_TYPE_NESTED, + NL_ATTR_TYPE_NESTED_ARRAY, + + NL_ATTR_TYPE_BITFIELD32, +}; + +/** + * enum netlink_policy_type_attr - policy type attributes + * @NL_POLICY_TYPE_ATTR_UNSPEC: unused + * @NL_POLICY_TYPE_ATTR_TYPE: type of the attribute, + * &enum netlink_attribute_type (U32) + * @NL_POLICY_TYPE_ATTR_MIN_VALUE_S: minimum value for signed + * integers (S64) + * @NL_POLICY_TYPE_ATTR_MAX_VALUE_S: maximum value for signed + * integers (S64) + * @NL_POLICY_TYPE_ATTR_MIN_VALUE_U: minimum value for unsigned + * integers (U64) + * @NL_POLICY_TYPE_ATTR_MAX_VALUE_U: maximum value for unsigned + * integers (U64) + * @NL_POLICY_TYPE_ATTR_MIN_LENGTH: minimum length for binary + * attributes, no minimum if not given (U32) + * @NL_POLICY_TYPE_ATTR_MAX_LENGTH: maximum length for binary + * attributes, no maximum if not given (U32) + * @NL_POLICY_TYPE_ATTR_POLICY_IDX: sub policy for nested and + * nested array types (U32) + * @NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE: maximum sub policy + * attribute for nested and nested array types, this can + * in theory be < the size of the policy pointed to by + * the index, if limited inside the nesting (U32) + * @NL_POLICY_TYPE_ATTR_BITFIELD32_MASK: valid mask for the + * bitfield32 type (U32) + * @NL_POLICY_TYPE_ATTR_PAD: pad attribute for 64-bit alignment + */ +enum netlink_policy_type_attr { + NL_POLICY_TYPE_ATTR_UNSPEC, + NL_POLICY_TYPE_ATTR_TYPE, + NL_POLICY_TYPE_ATTR_MIN_VALUE_S, + 
NL_POLICY_TYPE_ATTR_MAX_VALUE_S, + NL_POLICY_TYPE_ATTR_MIN_VALUE_U, + NL_POLICY_TYPE_ATTR_MAX_VALUE_U, + NL_POLICY_TYPE_ATTR_MIN_LENGTH, + NL_POLICY_TYPE_ATTR_MAX_LENGTH, + NL_POLICY_TYPE_ATTR_POLICY_IDX, + NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE, + NL_POLICY_TYPE_ATTR_BITFIELD32_MASK, + NL_POLICY_TYPE_ATTR_PAD, + + /* keep last */ + __NL_POLICY_TYPE_ATTR_MAX, + NL_POLICY_TYPE_ATTR_MAX = __NL_POLICY_TYPE_ATTR_MAX - 1 +}; + #endif /* _UAPI__LINUX_NETLINK_H */ diff --git a/include/uapi/linux/nexthop.h b/include/uapi/linux/nexthop.h index 7b61867e9848..2d4a1e784cf0 100644 --- a/include/uapi/linux/nexthop.h +++ b/include/uapi/linux/nexthop.h @@ -49,6 +49,9 @@ enum { NHA_GROUPS, /* flag; only return nexthop groups in dump */ NHA_MASTER, /* u32; only return nexthops with given master dev */ + NHA_FDB, /* flag; nexthop belongs to a bridge fdb */ + /* if NHA_FDB is added, OIF, BLACKHOLE, ENCAP cannot be set */ + __NHA_MAX, }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 2b691161830f..4e6339ab1fce 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -296,13 +296,14 @@ * to get a list of all present wiphys. * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME, - * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ (and the - * attributes determining the channel width; this is used for setting - * monitor mode channel), %NL80211_ATTR_WIPHY_RETRY_SHORT, - * %NL80211_ATTR_WIPHY_RETRY_LONG, %NL80211_ATTR_WIPHY_FRAG_THRESHOLD, - * and/or %NL80211_ATTR_WIPHY_RTS_THRESHOLD. - * However, for setting the channel, see %NL80211_CMD_SET_CHANNEL - * instead, the support here is for backward compatibility only. + * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, + * %NL80211_ATTR_WIPHY_FREQ_OFFSET (and the attributes determining the + * channel width; this is used for setting monitor mode channel), + * %NL80211_ATTR_WIPHY_RETRY_SHORT, %NL80211_ATTR_WIPHY_RETRY_LONG, + * %NL80211_ATTR_WIPHY_FRAG_THRESHOLD, and/or + * %NL80211_ATTR_WIPHY_RTS_THRESHOLD. However, for setting the channel, + * see %NL80211_CMD_SET_CHANNEL instead, the support here is for backward + * compatibility only. * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request * or rename notification. Has attributes %NL80211_ATTR_WIPHY and * %NL80211_ATTR_WIPHY_NAME. @@ -351,7 +352,8 @@ * %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_INACTIVITY_TIMEOUT, * %NL80211_ATTR_ACL_POLICY and %NL80211_ATTR_MAC_ADDRS. * The channel to use can be set on the interface or be given using the - * %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel width. + * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_WIPHY_FREQ_OFFSET, and the + * attributes determining channel width. * @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP * @NL80211_CMD_STOP_AP: Stop AP operation on the given interface * @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP @@ -536,11 +538,12 @@ * interface. %NL80211_ATTR_MAC is used to specify PeerSTAAddress (and * BSSID in case of station mode). %NL80211_ATTR_SSID is used to specify * the SSID (mainly for association, but is included in authentication - * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ is used - * to specify the frequence of the channel in MHz. %NL80211_ATTR_AUTH_TYPE - * is used to specify the authentication type. 
%NL80211_ATTR_IE is used to
- * define IEs (VendorSpecificInfo, but also including RSN IE and FT IEs)
- * to be added to the frame.
+ * request, too, to help BSS selection. %NL80211_ATTR_WIPHY_FREQ +
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET is used to specify the frequency of the
+ * channel in MHz. %NL80211_ATTR_AUTH_TYPE is used to specify the
+ * authentication type. %NL80211_ATTR_IE is used to define IEs
+ * (VendorSpecificInfo, but also including RSN IE and FT IEs) to be added
+ * to the frame.
 * When used as an event, this reports reception of an Authentication
 * frame in station and IBSS modes when the local MLME processed the
 * frame, i.e., it was for the local STA and was received in correct
@@ -595,8 +598,9 @@
 * requests to connect to a specified network but without separating
 * auth and assoc steps. For this, you need to specify the SSID in a
 * %NL80211_ATTR_SSID attribute, and can optionally specify the association
- * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
- * %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
+ * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE,
+ * %NL80211_ATTR_USE_MFP, %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ,
+ * %NL80211_ATTR_WIPHY_FREQ_OFFSET, %NL80211_ATTR_CONTROL_PORT,
 * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
 * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT,
 * %NL80211_ATTR_CONTROL_PORT_OVER_NL80211, %NL80211_ATTR_MAC_HINT, and
@@ -687,6 +691,10 @@
 * four bytes for vendor frames including the OUI. The registration
 * cannot be dropped, but is removed automatically when the netlink
 * socket is closed. Multiple registrations can be made.
+ * The %NL80211_ATTR_RECEIVE_MULTICAST flag attribute can be given if
+ * %NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS is available, in which
+ * case the registration can also be modified to include/exclude the
+ * flag, rather than requiring unregistration to change it.
 * @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for
 * backward compatibility
 * @NL80211_CMD_FRAME: Management frame TX request and RX notification. This
@@ -786,7 +794,7 @@
 * various triggers. These triggers can be configured through this
 * command with the %NL80211_ATTR_WOWLAN_TRIGGERS attribute. For
 * more background information, see
- * http://wireless.kernel.org/en/users/Documentation/WoWLAN.
+ * https://wireless.wiki.kernel.org/en/users/Documentation/WoWLAN.
 * The @NL80211_CMD_SET_WOWLAN command can also be used as a notification
 * from the driver reporting the wakeup reason. In this case, the
 * @NL80211_ATTR_WOWLAN_TRIGGERS attribute will contain the reason
@@ -1151,6 +1159,17 @@
 * @NL80211_CMD_SET_TID_CONFIG: Data frame TID specific configuration
 * is passed using %NL80211_ATTR_TID_CONFIG attribute.
 *
+ * @NL80211_CMD_UNPROT_BEACON: Unprotected or incorrectly protected Beacon
+ * frame. This event is used to indicate that a received Beacon frame was
+ * dropped because it did not include a valid MME MIC while beacon
+ * protection was enabled (BIGTK configured in station mode).
+ *
+ * @NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS: Report TX status of a control
+ * port frame transmitted with %NL80211_CMD_CONTROL_PORT_FRAME.
+ * %NL80211_ATTR_COOKIE identifies the TX command and %NL80211_ATTR_FRAME
+ * includes the contents of the frame. %NL80211_ATTR_ACK flag is included
+ * if the recipient acknowledged the frame.
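A hedged libnl-genl sketch of the modified registration path described above: the usual NL80211_CMD_REGISTER_FRAME request with the new NL80211_ATTR_RECEIVE_MULTICAST flag attached. Here sock is assumed to be a connected generic netlink socket, family the resolved "nl80211" id, and attribute-put return codes are ignored for brevity.

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <linux/nl80211.h>

static int register_mcast_frames(struct nl_sock *sock, int family,
				 int ifindex, __u16 frame_type)
{
	struct nl_msg *msg = nlmsg_alloc();
	int err;

	if (!msg)
		return -1;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_REGISTER_FRAME, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, frame_type);
	nla_put(msg, NL80211_ATTR_FRAME_MATCH, 0, NULL);	/* match any payload */
	/* only honoured if NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS is set */
	nla_put_flag(msg, NL80211_ATTR_RECEIVE_MULTICAST);

	err = nl_send_auto(sock, msg);
	nlmsg_free(msg);
	return err < 0 ? -1 : 0;
}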
+ * * @NL80211_CMD_MAX: highest used command number * @__NL80211_CMD_AFTER_LAST: internal use */ @@ -1377,6 +1396,10 @@ enum nl80211_commands { NL80211_CMD_SET_TID_CONFIG, + NL80211_CMD_UNPROT_BEACON, + + NL80211_CMD_CONTROL_PORT_FRAME_TX_STATUS, + /* add new commands above here */ /* used to define NL80211_CMD_MAX below */ @@ -1422,7 +1445,8 @@ enum nl80211_commands { * of &enum nl80211_chan_width, describing the channel width. See the * documentation of the enum for more information. * @NL80211_ATTR_CENTER_FREQ1: Center frequency of the first part of the - * channel, used for anything but 20 MHz bandwidth + * channel, used for anything but 20 MHz bandwidth. In S1G this is the + * operating channel center frequency. * @NL80211_ATTR_CENTER_FREQ2: Center frequency of the second part of the * channel, used only for 80+80 MHz bandwidth * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ @@ -2469,6 +2493,17 @@ enum nl80211_commands { * entry without having to force a disconnection after the PMK timeout. If * no roaming occurs between the reauth threshold and PMK expiration, * disassociation is still forced. + * @NL80211_ATTR_RECEIVE_MULTICAST: multicast flag for the + * %NL80211_CMD_REGISTER_FRAME command, see the description there. + * @NL80211_ATTR_WIPHY_FREQ_OFFSET: offset of the associated + * %NL80211_ATTR_WIPHY_FREQ in positive KHz. Only valid when supplied with + * an %NL80211_ATTR_WIPHY_FREQ_OFFSET. + * @NL80211_ATTR_CENTER_FREQ1_OFFSET: Center frequency offset in KHz for the + * first channel segment specified in %NL80211_ATTR_CENTER_FREQ1. + * @NL80211_ATTR_SCAN_FREQ_KHZ: nested attribute with KHz frequencies + * + * @NL80211_ATTR_HE_6GHZ_CAPABILITY: HE 6 GHz Band Capability element (from + * association request when used with NL80211_CMD_NEW_STATION). * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2945,6 +2980,13 @@ enum nl80211_attrs { NL80211_ATTR_PMK_LIFETIME, NL80211_ATTR_PMK_REAUTH_THRESHOLD, + NL80211_ATTR_RECEIVE_MULTICAST, + NL80211_ATTR_WIPHY_FREQ_OFFSET, + NL80211_ATTR_CENTER_FREQ1_OFFSET, + NL80211_ATTR_SCAN_FREQ_KHZ, + + NL80211_ATTR_HE_6GHZ_CAPABILITY, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3523,6 +3565,8 @@ enum nl80211_mpath_info { * defined in HE capabilities IE * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently * defined + * @NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA: HE 6GHz band capabilities (__le16), + * given for all 6 GHz band channels * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use */ enum nl80211_band_iftype_attr { @@ -3533,6 +3577,7 @@ enum nl80211_band_iftype_attr { NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY, NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET, NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE, + NL80211_BAND_IFTYPE_ATTR_HE_6GHZ_CAPA, /* keep last */ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST, @@ -3666,6 +3711,7 @@ enum nl80211_wmm_rule { * (see &enum nl80211_wmm_rule) * @NL80211_FREQUENCY_ATTR_NO_HE: HE operation is not allowed on this channel * in current regulatory domain. 
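Since the new *_FREQ_OFFSET attributes described above split a channel's frequency into an MHz part and a sub-MHz KHz remainder, a consumer would combine them roughly like this (a trivial sketch; netlink attribute extraction is elided):

/* NL80211_ATTR_WIPHY_FREQ carries MHz; the companion
 * NL80211_ATTR_WIPHY_FREQ_OFFSET carries the positive KHz remainder */
static unsigned int wiphy_freq_khz(unsigned int freq_mhz,
				   unsigned int offset_khz)
{
	return freq_mhz * 1000 + offset_khz;
}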
+ * @NL80211_FREQUENCY_ATTR_OFFSET: frequency offset in KHz * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -3696,6 +3742,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_NO_10MHZ, NL80211_FREQUENCY_ATTR_WMM, NL80211_FREQUENCY_ATTR_NO_HE, + NL80211_FREQUENCY_ATTR_OFFSET, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4466,6 +4513,7 @@ enum nl80211_bss_scan_width { * @NL80211_BSS_CHAIN_SIGNAL: per-chain signal strength of last BSS update. * Contains a nested array of signal strength attributes (u8, dBm), * using the nesting index as the antenna number. + * @NL80211_BSS_FREQUENCY_OFFSET: frequency offset in KHz * @__NL80211_BSS_AFTER_LAST: internal * @NL80211_BSS_MAX: highest BSS attribute */ @@ -4490,6 +4538,7 @@ enum nl80211_bss { NL80211_BSS_PARENT_TSF, NL80211_BSS_PARENT_BSSID, NL80211_BSS_CHAIN_SIGNAL, + NL80211_BSS_FREQUENCY_OFFSET, /* keep last */ __NL80211_BSS_AFTER_LAST, @@ -4800,6 +4849,17 @@ enum nl80211_tid_config { NL80211_TID_CONFIG_DISABLE, }; +/* enum nl80211_tx_rate_setting - TX rate configuration type + * @NL80211_TX_RATE_AUTOMATIC: automatically determine TX rate + * @NL80211_TX_RATE_LIMITED: limit the TX rate by the TX rate parameter + * @NL80211_TX_RATE_FIXED: fix TX rate to the TX rate parameter + */ +enum nl80211_tx_rate_setting { + NL80211_TX_RATE_AUTOMATIC, + NL80211_TX_RATE_LIMITED, + NL80211_TX_RATE_FIXED, +}; + /* enum nl80211_tid_config_attr - TID specific configuration. * @NL80211_TID_CONFIG_ATTR_PAD: pad attribute for 64-bit values * @NL80211_TID_CONFIG_ATTR_VIF_SUPP: a bitmap (u64) of attributes supported @@ -4807,12 +4867,10 @@ enum nl80211_tid_config { * (%NL80211_TID_CONFIG_ATTR_TIDS, %NL80211_TID_CONFIG_ATTR_OVERRIDE). * @NL80211_TID_CONFIG_ATTR_PEER_SUPP: same as the previous per-vif one, but * per peer instead. - * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribue, if no peer - * is selected, if set indicates that the new configuration overrides - * all previous peer configurations, otherwise previous peer specific - * configurations should be left untouched. If peer is selected then - * it will reset particular TID configuration of that peer and it will - * not accept other TID config attributes along with peer. + * @NL80211_TID_CONFIG_ATTR_OVERRIDE: flag attribute, if set indicates + * that the new configuration overrides all previous peer + * configurations, otherwise previous peer specific configurations + * should be left untouched. * @NL80211_TID_CONFIG_ATTR_TIDS: a bitmask value of TIDs (bit 0 to 7) * Its type is u16. * @NL80211_TID_CONFIG_ATTR_NOACK: Configure ack policy for the TID. @@ -4828,12 +4886,23 @@ enum nl80211_tid_config { * &NL80211_CMD_SET_TID_CONFIG. Its type is u8, min value is 1 and * the max value is advertised by the driver in this attribute on * output in wiphy capabilities. - * @NL80211_TID_CONFIG_ATTR_AMPDU_CTRL: Enable/Disable aggregation for the TIDs - * specified in %NL80211_TID_CONFIG_ATTR_TIDS. Its type is u8, using - * the values from &nl80211_tid_config. + * @NL80211_TID_CONFIG_ATTR_AMPDU_CTRL: Enable/Disable MPDU aggregation + * for the TIDs specified in %NL80211_TID_CONFIG_ATTR_TIDS. + * Its type is u8, using the values from &nl80211_tid_config. * @NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL: Enable/Disable RTS_CTS for the TIDs * specified in %NL80211_TID_CONFIG_ATTR_TIDS. It is u8 type, using * the values from &nl80211_tid_config.
+ * @NL80211_TID_CONFIG_ATTR_AMSDU_CTRL: Enable/Disable MSDU aggregation + * for the TIDs specified in %NL80211_TID_CONFIG_ATTR_TIDS. + * Its type is u8, using the values from &nl80211_tid_config. + * @NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE: This attribute is used + * to notify the driver what type of TX rate should be used + * for the TIDs specified in %NL80211_TID_CONFIG_ATTR_TIDS, using + * the values from &nl80211_tx_rate_setting. + * @NL80211_TID_CONFIG_ATTR_TX_RATE: Data frame TX rate mask to be applied + * with the parameters passed through %NL80211_ATTR_TX_RATES. The + * configuration is applied to the data frames of the given TID for the + * connected station. */ enum nl80211_tid_config_attr { __NL80211_TID_CONFIG_ATTR_INVALID, @@ -4847,6 +4916,9 @@ enum nl80211_tid_config_attr { NL80211_TID_CONFIG_ATTR_RETRY_LONG, NL80211_TID_CONFIG_ATTR_AMPDU_CTRL, NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL, + NL80211_TID_CONFIG_ATTR_AMSDU_CTRL, + NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE, + NL80211_TID_CONFIG_ATTR_TX_RATE, /* keep last */ __NL80211_TID_CONFIG_ATTR_AFTER_LAST, @@ -5324,6 +5396,8 @@ enum plink_actions { #define NL80211_KCK_LEN 16 #define NL80211_KEK_LEN 16 +#define NL80211_KCK_EXT_LEN 24 +#define NL80211_KEK_EXT_LEN 32 #define NL80211_REPLAY_CTR_LEN 8 /** @@ -5332,6 +5406,7 @@ enum plink_actions { * @NL80211_REKEY_DATA_KEK: key encryption key (binary) * @NL80211_REKEY_DATA_KCK: key confirmation key (binary) * @NL80211_REKEY_DATA_REPLAY_CTR: replay counter (binary) + * @NL80211_REKEY_DATA_AKM: AKM data (OUI, suite type) * @NUM_NL80211_REKEY_DATA: number of rekey attributes (internal) * @MAX_NL80211_REKEY_DATA: highest rekey attribute (internal) */ @@ -5340,6 +5415,7 @@ enum nl80211_rekey_data { NL80211_REKEY_DATA_KEK, NL80211_REKEY_DATA_KCK, NL80211_REKEY_DATA_REPLAY_CTR, + NL80211_REKEY_DATA_AKM, /* keep last */ NUM_NL80211_REKEY_DATA, @@ -5674,6 +5750,8 @@ enum nl80211_feature_flags { * * @NL80211_EXT_FEATURE_BEACON_PROTECTION: The driver supports Beacon protection * and can receive key configuration for BIGTK using key indexes 6 and 7. + * @NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT: The driver supports Beacon + * protection as a client only and cannot transmit protected beacons. * * @NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH: The driver can disable the * forwarding of preauth frames over the control port. They are then @@ -5684,6 +5762,17 @@ enum nl80211_feature_flags { * @NL80211_EXT_FEATURE_DEL_IBSS_STA: The driver supports removing stations * in IBSS mode, essentially by dropping their state. * + * @NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS: management frame registrations + * are possible for multicast frames and those will be reported properly. + * + * @NL80211_EXT_FEATURE_SCAN_FREQ_KHZ: This driver supports receiving and + * reporting scan request with %NL80211_ATTR_SCAN_FREQ_KHZ. In order to + * report %NL80211_ATTR_SCAN_FREQ_KHZ, %NL80211_SCAN_FLAG_FREQ_KHZ must be + * included in the scan request. + * + * @NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS: The driver + * can report tx status for control port over nl80211 tx operations. + * + * @NUM_NL80211_EXT_FEATURES: number of extended features. * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/ @@ -5735,6 +5824,10 @@ enum nl80211_ext_feature_index { NL80211_EXT_FEATURE_CONTROL_PORT_NO_PREAUTH, NL80211_EXT_FEATURE_PROTECTED_TWT, NL80211_EXT_FEATURE_DEL_IBSS_STA, + NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS, + NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT, + NL80211_EXT_FEATURE_SCAN_FREQ_KHZ, + NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS, /* add new features before the definition below */ NUM_NL80211_EXT_FEATURES, @@ -5846,6 +5939,9 @@ enum nl80211_timeout_reason { * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to * only have supported rates and no additional capabilities (unless * added by userspace explicitly.) + * @NL80211_SCAN_FLAG_FREQ_KHZ: report scan results with + * %NL80211_ATTR_SCAN_FREQ_KHZ. This also means + * %NL80211_ATTR_SCAN_FREQUENCIES will not be included. */ enum nl80211_scan_flags { NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0, @@ -5861,6 +5957,7 @@ enum nl80211_scan_flags { NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10, NL80211_SCAN_FLAG_RANDOM_SN = 1<<11, NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12, + NL80211_SCAN_FLAG_FREQ_KHZ = 1<<13, }; /** diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 9f06d29cab70..7576209d96f9 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -134,6 +134,7 @@ enum tca_id { TCA_ID_CTINFO, TCA_ID_MPLS, TCA_ID_CT, + TCA_ID_GATE, /* other actions go here */ __TCA_ID_MAX = 255 }; @@ -575,6 +576,8 @@ enum { TCA_FLOWER_KEY_CT_LABELS, /* u128 */ TCA_FLOWER_KEY_CT_LABELS_MASK, /* u128 */ + TCA_FLOWER_KEY_MPLS_OPTS, + __TCA_FLOWER_MAX, }; @@ -640,6 +643,27 @@ enum { (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1) enum { + TCA_FLOWER_KEY_MPLS_OPTS_UNSPEC, + TCA_FLOWER_KEY_MPLS_OPTS_LSE, + __TCA_FLOWER_KEY_MPLS_OPTS_MAX, +}; + +#define TCA_FLOWER_KEY_MPLS_OPTS_MAX (__TCA_FLOWER_KEY_MPLS_OPTS_MAX - 1) + +enum { + TCA_FLOWER_KEY_MPLS_OPT_LSE_UNSPEC, + TCA_FLOWER_KEY_MPLS_OPT_LSE_DEPTH, + TCA_FLOWER_KEY_MPLS_OPT_LSE_TTL, + TCA_FLOWER_KEY_MPLS_OPT_LSE_BOS, + TCA_FLOWER_KEY_MPLS_OPT_LSE_TC, + TCA_FLOWER_KEY_MPLS_OPT_LSE_LABEL, + __TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX, +}; + +#define TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX \ + (__TCA_FLOWER_KEY_MPLS_OPT_LSE_MAX - 1) + +enum { TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0), TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), }; diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 0c02737c8f47..a95f3ae7ab37 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -913,6 +913,10 @@ enum { TCA_FQ_TIMER_SLACK, /* timer slack */ + TCA_FQ_HORIZON, /* time horizon in us */ + + TCA_FQ_HORIZON_DROP, /* drop packets beyond horizon, or cap their EDT */ + __TCA_FQ_MAX }; @@ -932,6 +936,8 @@ struct tc_fq_qd_stats { __u32 throttled_flows; __u32 unthrottle_latency_ns; __u64 ce_mark; /* packets above ce_threshold */ + __u64 horizon_drops; + __u64 horizon_caps; }; /* Heavy-Hitter Filter */ diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h index ce1116cff53d..aea26ab1431c 100644 --- a/include/uapi/linux/psample.h +++ b/include/uapi/linux/psample.h @@ -11,6 +11,7 @@ enum { PSAMPLE_ATTR_GROUP_SEQ, PSAMPLE_ATTR_SAMPLE_RATE, PSAMPLE_ATTR_DATA, + PSAMPLE_ATTR_TUNNEL, /* commands attributes */ PSAMPLE_ATTR_GROUP_REFCOUNT, @@ -25,6 +26,27 @@ enum psample_command { PSAMPLE_CMD_DEL_GROUP, }; +enum psample_tunnel_key_attr { + PSAMPLE_TUNNEL_KEY_ATTR_ID, /* be64 Tunnel ID */ + PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC, /* be32 src IP address. */ + PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST, /* be32 dst IP address. 
*/ + PSAMPLE_TUNNEL_KEY_ATTR_TOS, /* u8 Tunnel IP ToS. */ + PSAMPLE_TUNNEL_KEY_ATTR_TTL, /* u8 Tunnel IP TTL. */ + PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT, /* No argument, set DF. */ + PSAMPLE_TUNNEL_KEY_ATTR_CSUM, /* No argument. CSUM packet. */ + PSAMPLE_TUNNEL_KEY_ATTR_OAM, /* No argument. OAM frame. */ + PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS, /* Array of Geneve options. */ + PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, /* be16 src Transport Port. */ + PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, /* be16 dst Transport Port. */ + PSAMPLE_TUNNEL_KEY_ATTR_VXLAN_OPTS, /* Nested VXLAN opts* */ + PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC, /* struct in6_addr src IPv6 address. */ + PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */ + PSAMPLE_TUNNEL_KEY_ATTR_PAD, + PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* struct erspan_metadata */ + PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE, /* No argument. IPV4_INFO_BRIDGE mode.*/ + __PSAMPLE_TUNNEL_KEY_ATTR_MAX +}; + /* Can be overridden at runtime by module option */ #define PSAMPLE_ATTR_MAX (__PSAMPLE_ATTR_MAX - 1) diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 0549a5c622bf..91b4c63d5cbf 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -83,6 +83,8 @@ struct sev_user_data_status { __u32 guest_count; /* Out */ } __packed; +#define SEV_STATUS_FLAGS_CONFIG_ES 0x0100 + /** * struct sev_user_data_pek_csr - PEK_CSR command parameters * diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 9dc9d0079e98..ff070aa64278 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -89,7 +89,9 @@ struct ptp_clock_caps { int n_pins; /* Number of input/output pins. */ /* Whether the clock supports precise system-device cross timestamps */ int cross_timestamping; - int rsv[13]; /* Reserved for future use. */ + /* Whether the clock supports adjust phase */ + int adjust_phase; + int rsv[12]; /* Reserved for future use. */ }; struct ptp_extts_request { diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h index 83bba58d47f4..fa9aff91cbf2 100644 --- a/include/uapi/linux/rtc.h +++ b/include/uapi/linux/rtc.h @@ -99,6 +99,7 @@ struct rtc_pll_info { #define RTC_VL_BACKUP_LOW _BITUL(1) /* Backup voltage is low */ #define RTC_VL_BACKUP_EMPTY _BITUL(2) /* Backup empty or not present */ #define RTC_VL_ACCURACY_LOW _BITUL(3) /* Voltage is low, RTC accuracy is reduced */ +#define RTC_VL_BACKUP_SWITCH _BITUL(4) /* Backup switchover happened */ #define RTC_VL_READ _IOR('p', 0x13, unsigned int) /* Voltage low detection */ #define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */ diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 4a8c5b745157..073e71ef6bdd 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -609,11 +609,17 @@ enum { TCA_HW_OFFLOAD, TCA_INGRESS_BLOCK, TCA_EGRESS_BLOCK, + TCA_DUMP_FLAGS, __TCA_MAX }; #define TCA_MAX (__TCA_MAX - 1) +#define TCA_DUMP_FLAGS_TERSE (1 << 0) /* Means that in dump user gets only basic + * data necessary to identify the objects + * (handle, cookie, etc.) and stats. 
+ */ + #define TCA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcmsg)))) #define TCA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcmsg)) diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index ad80a5c885d5..82cc58fe9368 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -123,7 +123,10 @@ struct statx { __u32 stx_dev_major; /* ID of device containing file [uncond] */ __u32 stx_dev_minor; /* 0x90 */ - __u64 __spare2[14]; /* Spare space for future expansion */ + __u64 stx_mnt_id; + __u64 __spare2; + /* 0xa0 */ + __u64 __spare3[12]; /* Spare space for future expansion */ /* 0x100 */ }; @@ -148,9 +151,19 @@ struct statx { #define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */ #define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */ #define STATX_BTIME 0x00000800U /* Want/got stx_btime */ -#define STATX_ALL 0x00000fffU /* All currently supported flags */ +#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ + #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ +#ifndef __KERNEL__ +/* + * This is deprecated, and shall remain the same value in the future. To avoid + * confusion please use the equivalent (STATX_BASIC_STATS | STATX_BTIME) + * instead. + */ +#define STATX_ALL 0x00000fffU +#endif + /* * Attributes to be found in stx_attributes and masked in stx_attributes_mask. * @@ -168,7 +181,9 @@ struct statx { #define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */ #define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */ #define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */ +#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */ #define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */ +#define STATX_ATTR_DAX 0x00200000 /* [I] File is DAX */ #endif /* _UAPI_LINUX_STAT_H */ diff --git a/include/uapi/linux/tc_act/tc_gate.h b/include/uapi/linux/tc_act/tc_gate.h new file mode 100644 index 000000000000..f214b3a6d44f --- /dev/null +++ b/include/uapi/linux/tc_act/tc_gate.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* Copyright 2020 NXP */ + +#ifndef __LINUX_TC_GATE_H +#define __LINUX_TC_GATE_H + +#include <linux/pkt_cls.h> + +struct tc_gate { + tc_gen; +}; + +enum { + TCA_GATE_ENTRY_UNSPEC, + TCA_GATE_ENTRY_INDEX, + TCA_GATE_ENTRY_GATE, + TCA_GATE_ENTRY_INTERVAL, + TCA_GATE_ENTRY_IPV, + TCA_GATE_ENTRY_MAX_OCTETS, + __TCA_GATE_ENTRY_MAX, +}; +#define TCA_GATE_ENTRY_MAX (__TCA_GATE_ENTRY_MAX - 1) + +enum { + TCA_GATE_ONE_ENTRY_UNSPEC, + TCA_GATE_ONE_ENTRY, + __TCA_GATE_ONE_ENTRY_MAX, +}; +#define TCA_GATE_ONE_ENTRY_MAX (__TCA_GATE_ONE_ENTRY_MAX - 1) + +enum { + TCA_GATE_UNSPEC, + TCA_GATE_TM, + TCA_GATE_PARMS, + TCA_GATE_PAD, + TCA_GATE_PRIORITY, + TCA_GATE_ENTRY_LIST, + TCA_GATE_BASE_TIME, + TCA_GATE_CYCLE_TIME, + TCA_GATE_CYCLE_TIME_EXT, + TCA_GATE_FLAGS, + TCA_GATE_CLOCKID, + __TCA_GATE_MAX, +}; +#define TCA_GATE_MAX (__TCA_GATE_MAX - 1) + +#endif diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h index 6596f3a09e54..b619f37ee03e 100644 --- a/include/uapi/linux/tee.h +++ b/include/uapi/linux/tee.h @@ -173,6 +173,15 @@ struct tee_ioctl_buf_data { #define TEE_IOCTL_LOGIN_APPLICATION 4 #define TEE_IOCTL_LOGIN_USER_APPLICATION 5 #define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6 +/* + * Disallow user-space to use GP implementation specific login + * method range (0x80000000 - 0xBFFFFFFF).
This range is rather + * being reserved for REE kernel clients or TEE implementation. + */ +#define TEE_IOCTL_LOGIN_REE_KERNEL_MIN 0x80000000 +#define TEE_IOCTL_LOGIN_REE_KERNEL_MAX 0xBFFFFFFF +/* Private login method for REE kernel clients */ +#define TEE_IOCTL_LOGIN_REE_KERNEL 0x80000000 /** * struct tee_ioctl_param - parameter diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h index ea375082b3ac..0be685272eb1 100644 --- a/include/uapi/linux/usb/raw_gadget.h +++ b/include/uapi/linux/usb/raw_gadget.h @@ -93,6 +93,64 @@ struct usb_raw_ep_io { __u8 data[0]; }; +/* Maximum number of non-control endpoints in struct usb_raw_eps_info. */ +#define USB_RAW_EPS_NUM_MAX 30 + +/* Maximum length of UDC endpoint name in struct usb_raw_ep_info. */ +#define USB_RAW_EP_NAME_MAX 16 + +/* Used as addr in struct usb_raw_ep_info if endpoint accepts any address. */ +#define USB_RAW_EP_ADDR_ANY 0xff + +/* + * struct usb_raw_ep_caps - exposes endpoint capabilities from struct usb_ep + * (technically from its member struct usb_ep_caps). + */ +struct usb_raw_ep_caps { + __u32 type_control : 1; + __u32 type_iso : 1; + __u32 type_bulk : 1; + __u32 type_int : 1; + __u32 dir_in : 1; + __u32 dir_out : 1; +}; + +/* + * struct usb_raw_ep_limits - exposes endpoint limits from struct usb_ep. + * @maxpacket_limit: Maximum packet size value supported by this endpoint. + * @max_streams: maximum number of streams supported by this endpoint + * (actual number is 2^n). + * @reserved: Empty, reserved for potential future extensions. + */ +struct usb_raw_ep_limits { + __u16 maxpacket_limit; + __u16 max_streams; + __u32 reserved; +}; + +/* + * struct usb_raw_ep_info - stores information about a gadget endpoint. + * @name: Name of the endpoint as it is defined in the UDC driver. + * @addr: Address of the endpoint that must be specified in the endpoint + * descriptor passed to USB_RAW_IOCTL_EP_ENABLE ioctl. + * @caps: Endpoint capabilities. + * @limits: Endpoint limits. + */ +struct usb_raw_ep_info { + __u8 name[USB_RAW_EP_NAME_MAX]; + __u32 addr; + struct usb_raw_ep_caps caps; + struct usb_raw_ep_limits limits; +}; + +/* + * struct usb_raw_eps_info - argument for USB_RAW_IOCTL_EPS_INFO ioctl. + * eps: Structures that store information about non-control endpoints. + */ +struct usb_raw_eps_info { + struct usb_raw_ep_info eps[USB_RAW_EPS_NUM_MAX]; +}; + /* * Initializes a Raw Gadget instance. * Accepts a pointer to the usb_raw_init struct as an argument. @@ -115,37 +173,38 @@ struct usb_raw_ep_io { #define USB_RAW_IOCTL_EVENT_FETCH _IOR('U', 2, struct usb_raw_event) /* - * Queues an IN (OUT for READ) urb as a response to the last control request - * received on endpoint 0, provided that was an IN (OUT for READ) request and - * waits until the urb is completed. Copies received data to user for READ. + * Queues an IN (OUT for READ) request as a response to the last setup request + * received on endpoint 0 (provided that was an IN (OUT for READ) request), and + * waits until the request is completed. Copies received data to user for READ. * Accepts a pointer to the usb_raw_ep_io struct as an argument. - * Returns length of trasferred data on success or negative error code on + * Returns length of transferred data on success or negative error code on * failure. */ #define USB_RAW_IOCTL_EP0_WRITE _IOW('U', 3, struct usb_raw_ep_io) #define USB_RAW_IOCTL_EP0_READ _IOWR('U', 4, struct usb_raw_ep_io) /* - * Finds an endpoint that supports the transfer type specified in the - * descriptor and enables it. 
- * Accepts a pointer to the usb_endpoint_descriptor struct as an argument. + * Finds an endpoint that satisfies the parameters specified in the provided + * descriptors (address, transfer type, etc.) and enables it. + * Accepts a pointer to the usb_raw_ep_descs struct as an argument. * Returns enabled endpoint handle on success or negative error code on failure. */ #define USB_RAW_IOCTL_EP_ENABLE _IOW('U', 5, struct usb_endpoint_descriptor) -/* Disables specified endpoint. +/* + * Disables specified endpoint. * Accepts endpoint handle as an argument. * Returns 0 on success or negative error code on failure. */ #define USB_RAW_IOCTL_EP_DISABLE _IOW('U', 6, __u32) /* - * Queues an IN (OUT for READ) urb as a response to the last control request - * received on endpoint usb_raw_ep_io.ep, provided that was an IN (OUT for READ) - * request and waits until the urb is completed. Copies received data to user - * for READ. + * Queues an IN (OUT for READ) request as a response to the last setup request + * received on endpoint usb_raw_ep_io.ep (provided that was an IN (OUT for READ) + * request), and waits until the request is completed. Copies received data to + * user for READ. * Accepts a pointer to the usb_raw_ep_io struct as an argument. - * Returns length of trasferred data on success or negative error code on + * Returns length of transferred data on success or negative error code on * failure. */ #define USB_RAW_IOCTL_EP_WRITE _IOW('U', 7, struct usb_raw_ep_io) @@ -164,4 +223,27 @@ struct usb_raw_ep_io { */ #define USB_RAW_IOCTL_VBUS_DRAW _IOW('U', 10, __u32) +/* + * Fills in the usb_raw_eps_info structure with information about non-control + * endpoints available for the currently connected UDC. + * Returns the number of available endpoints on success or negative error code + * on failure. + */ +#define USB_RAW_IOCTL_EPS_INFO _IOR('U', 11, struct usb_raw_eps_info) + +/* + * Stalls a pending control request on endpoint 0. + * Returns 0 on success or negative error code on failure. + */ +#define USB_RAW_IOCTL_EP0_STALL _IO('U', 12) + +/* + * Sets or clears halt or wedge status of the endpoint. + * Accepts endpoint handle as an argument. + * Returns 0 on success or negative error code on failure. 
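A hedged userspace sketch of the new USB_RAW_IOCTL_EPS_INFO flow documented above (fd is assumed to be an already-initialized Raw Gadget file descriptor; error handling elided):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/usb/raw_gadget.h>

static void dump_eps(int fd)
{
	struct usb_raw_eps_info info;
	int i, num;

	memset(&info, 0, sizeof(info));
	num = ioctl(fd, USB_RAW_IOCTL_EPS_INFO, &info);	/* returns ep count */
	for (i = 0; i < num; i++)
		printf("%.*s: addr=%u bulk=%u in=%u out=%u\n",
		       USB_RAW_EP_NAME_MAX, (const char *)info.eps[i].name,
		       info.eps[i].addr,
		       (unsigned int)info.eps[i].caps.type_bulk,
		       (unsigned int)info.eps[i].caps.dir_in,
		       (unsigned int)info.eps[i].caps.dir_out);
}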
+ */ +#define USB_RAW_IOCTL_EP_SET_HALT _IOW('U', 13, __u32) +#define USB_RAW_IOCTL_EP_CLEAR_HALT _IOW('U', 14, __u32) +#define USB_RAW_IOCTL_EP_SET_WEDGE _IOW('U', 15, __u32) + #endif /* _UAPI__LINUX_USB_RAW_GADGET_H */ diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 1a58d7cc4ccc..62271418c1be 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -473,6 +473,10 @@ enum v4l2_mpeg_video_h264_level { V4L2_MPEG_VIDEO_H264_LEVEL_4_2 = 13, V4L2_MPEG_VIDEO_H264_LEVEL_5_0 = 14, V4L2_MPEG_VIDEO_H264_LEVEL_5_1 = 15, + V4L2_MPEG_VIDEO_H264_LEVEL_5_2 = 16, + V4L2_MPEG_VIDEO_H264_LEVEL_6_0 = 17, + V4L2_MPEG_VIDEO_H264_LEVEL_6_1 = 18, + V4L2_MPEG_VIDEO_H264_LEVEL_6_2 = 19, }; #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (V4L2_CID_MPEG_BASE+360) #define V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (V4L2_CID_MPEG_BASE+361) @@ -501,6 +505,7 @@ enum v4l2_mpeg_video_h264_profile { V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA = 14, V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH = 15, V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16, + V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH = 17, }; #define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_HEIGHT (V4L2_CID_MPEG_BASE+364) #define V4L2_CID_MPEG_VIDEO_H264_VUI_EXT_SAR_WIDTH (V4L2_CID_MPEG_BASE+365) @@ -918,6 +923,13 @@ enum v4l2_auto_focus_range { #define V4L2_CID_PAN_SPEED (V4L2_CID_CAMERA_CLASS_BASE+32) #define V4L2_CID_TILT_SPEED (V4L2_CID_CAMERA_CLASS_BASE+33) +#define V4L2_CID_CAMERA_ORIENTATION (V4L2_CID_CAMERA_CLASS_BASE+34) +#define V4L2_CAMERA_ORIENTATION_FRONT 0 +#define V4L2_CAMERA_ORIENTATION_BACK 1 +#define V4L2_CAMERA_ORIENTATION_EXTERNAL 2 + +#define V4L2_CID_CAMERA_SENSOR_ROTATION (V4L2_CID_CAMERA_CLASS_BASE+35) + /* FM Modulator class control IDs */ #define V4L2_CID_FM_TX_CLASS_BASE (V4L2_CTRL_CLASS_FM_TX | 0x900) diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h index 03970ce30741..5d2a1dab7911 100644 --- a/include/uapi/linux/v4l2-subdev.h +++ b/include/uapi/linux/v4l2-subdev.h @@ -155,9 +155,25 @@ struct v4l2_subdev_selection { __u32 reserved[8]; }; +/** + * struct v4l2_subdev_capability - subdev capabilities + * @version: the driver versioning number + * @capabilities: the subdev capabilities, see V4L2_SUBDEV_CAP_* + * @reserved: for future use, set to zero for now + */ +struct v4l2_subdev_capability { + __u32 version; + __u32 capabilities; + __u32 reserved[14]; +}; + +/* The v4l2 sub-device video device node is registered in read-only mode. 
*/ +#define V4L2_SUBDEV_CAP_RO_SUBDEV BIT(0) + /* Backwards compatibility define --- to be removed */ #define v4l2_subdev_edid v4l2_edid +#define VIDIOC_SUBDEV_QUERYCAP _IOR('V', 0, struct v4l2_subdev_capability) #define VIDIOC_SUBDEV_G_FMT _IOWR('V', 4, struct v4l2_subdev_format) #define VIDIOC_SUBDEV_S_FMT _IOWR('V', 5, struct v4l2_subdev_format) #define VIDIOC_SUBDEV_G_FRAME_INTERVAL _IOWR('V', 21, struct v4l2_subdev_frame_interval) diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 015516bcfaa3..eca6692667a3 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -305,6 +305,7 @@ struct vfio_region_info_cap_type { #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) #define VFIO_REGION_TYPE_GFX (1) #define VFIO_REGION_TYPE_CCW (2) +#define VFIO_REGION_TYPE_MIGRATION (3) /* sub-types for VFIO_REGION_TYPE_PCI_* */ @@ -378,6 +379,235 @@ struct vfio_region_gfx_edid { /* sub-types for VFIO_REGION_TYPE_CCW */ #define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1) +#define VFIO_REGION_SUBTYPE_CCW_SCHIB (2) +#define VFIO_REGION_SUBTYPE_CCW_CRW (3) + +/* sub-types for VFIO_REGION_TYPE_MIGRATION */ +#define VFIO_REGION_SUBTYPE_MIGRATION (1) + +/* + * The structure vfio_device_migration_info is placed at the 0th offset of + * the VFIO_REGION_SUBTYPE_MIGRATION region to get and set VFIO device related + * migration information. Field accesses from this structure are only supported + * at their native width and alignment. Otherwise, the result is undefined and + * vendor drivers should return an error. + * + * device_state: (read/write) + * - The user application writes to this field to inform the vendor driver + * about the device state to be transitioned to. + * - The vendor driver should take the necessary actions to change the + * device state. After successful transition to a given state, the + * vendor driver should return success on write(device_state, state) + * system call. If the device state transition fails, the vendor driver + * should return an appropriate -errno for the fault condition. + * - On the user application side, if the device state transition fails, + * that is, if write(device_state, state) returns an error, read + * device_state again to determine the current state of the device from + * the vendor driver. + * - The vendor driver should return previous state of the device unless + * the vendor driver has encountered an internal error, in which case + * the vendor driver may report the device_state VFIO_DEVICE_STATE_ERROR. + * - The user application must use the device reset ioctl to recover the + * device from VFIO_DEVICE_STATE_ERROR state. If the device is + * indicated to be in a valid device state by reading device_state, the + * user application may attempt to transition the device to any valid + * state reachable from the current state or terminate itself. + * + * device_state consists of 3 bits: + * - If bit 0 is set, it indicates the _RUNNING state. If bit 0 is clear, + * it indicates the _STOP state. When the device state is changed to + * _STOP, driver should stop the device before write() returns. + * - If bit 1 is set, it indicates the _SAVING state, which means that the + * driver should start gathering device state information that will be + * provided to the VFIO user application to save the device's state. + * - If bit 2 is set, it indicates the _RESUMING state, which means that + * the driver should prepare to resume the device. Data provided through + * the migration region should be used to resume the device. 
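As a concrete illustration of driving device_state as described here, a userspace sketch of entering the pre-copy state (region_fd and info_off are assumed to have been discovered through the region-info ioctls; this is a sketch, not a definitive implementation):

#include <stddef.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/vfio.h>

static int enter_precopy(int region_fd, off_t info_off)
{
	__u32 state;
	off_t off = info_off + offsetof(struct vfio_device_migration_info,
					device_state);

	if (pread(region_fd, &state, sizeof(state), off) != sizeof(state))
		return -1;
	/* read-modify-write preserves the reserved bits (see note below) */
	state = (state & ~VFIO_DEVICE_STATE_MASK) |
		VFIO_DEVICE_STATE_SAVING | VFIO_DEVICE_STATE_RUNNING;
	if (pwrite(region_fd, &state, sizeof(state), off) != sizeof(state))
		return -1;
	return 0;
}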
+ * Bits 3 - 31 are reserved for future use. To preserve them, the user + * application should perform a read-modify-write operation on this + * field when modifying the specified bits. + * + * +------- _RESUMING + * |+------ _SAVING + * ||+----- _RUNNING + * ||| + * 000b => Device Stopped, not saving or resuming + * 001b => Device running, which is the default state + * 010b => Stop the device & save the device state, stop-and-copy state + * 011b => Device running and save the device state, pre-copy state + * 100b => Device stopped and the device state is resuming + * 101b => Invalid state + * 110b => Error state + * 111b => Invalid state + * + * State transitions: + * + * _RESUMING _RUNNING Pre-copy Stop-and-copy _STOP + * (100b) (001b) (011b) (010b) (000b) + * 0. Running or default state + * | + * + * 1. Normal Shutdown (optional) + * |------------------------------------->| + * + * 2. Save the state or suspend + * |------------------------->|---------->| + * + * 3. Save the state during live migration + * |----------->|------------>|---------->| + * + * 4. Resuming + * |<---------| + * + * 5. Resumed + * |--------->| + * + * 0. Default state of VFIO device is _RUNNING when the user application starts. + * 1. During normal shutdown of the user application, the user application may + * optionally change the VFIO device state from _RUNNING to _STOP. This + * transition is optional. The vendor driver must support this transition but + * must not require it. + * 2. When the user application saves state or suspends the application, the + * device state transitions from _RUNNING to stop-and-copy and then to _STOP. + * On state transition from _RUNNING to stop-and-copy, driver must stop the + * device, save the device state and send it to the application through the + * migration region. The sequence to be followed for such transition is given + * below. + * 3. In live migration of user application, the state transitions from _RUNNING + * to pre-copy, to stop-and-copy, and to _STOP. + * On state transition from _RUNNING to pre-copy, the driver should start + * gathering the device state while the application is still running and send + * the device state data to application through the migration region. + * On state transition from pre-copy to stop-and-copy, the driver must stop + * the device, save the device state and send it to the user application + * through the migration region. + * Vendor drivers must support the pre-copy state even for implementations + * where no data is provided to the user before the stop-and-copy state. The + * user must not be required to consume all migration data before the device + * transitions to a new state, including the stop-and-copy state. + * The sequence to be followed for above two transitions is given below. + * 4. To start the resuming phase, the device state should be transitioned from + * the _RUNNING to the _RESUMING state. + * In the _RESUMING state, the driver should use the device state data + * received through the migration region to resume the device. + * 5. After providing saved device data to the driver, the application should + * change the state from _RESUMING to _RUNNING. + * + * reserved: + * Reads on this field return zero and writes are ignored. + * + * pending_bytes: (read only) + * The number of pending bytes still to be migrated from the vendor driver. + * + * data_offset: (read only) + * The user application should read data_offset field from the migration + * region.
The user application should read the device data from this + * offset within the migration region during the _SAVING state or write + * the device data during the _RESUMING state. See below for details of + * the sequence to be followed. + * + * data_size: (read/write) + * The user application should read data_size to get the size in bytes of + * the data copied in the migration region during the _SAVING state and + * write the size in bytes of the data copied in the migration region + * during the _RESUMING state. + * + * The format of the migration region is as follows: + * ------------------------------------------------------------------ + * |vfio_device_migration_info| data section | + * | | /////////////////////////////// | + * ------------------------------------------------------------------ + * ^ ^ + * offset 0-trapped part data_offset + * + * The structure vfio_device_migration_info is always followed by the data + * section in the region, so data_offset will always be nonzero. The offset + * from where the data is copied is decided by the kernel driver. The data + * section can be trapped, mmapped, or partitioned, depending on how the kernel + * driver defines the data section. The data section partition can be defined + * as mapped by the sparse mmap capability. If mmapped, data_offset must be + * page aligned, whereas the initial section, which contains the + * vfio_device_migration_info structure, might not end at a page-aligned + * offset. The user is not required to access the data through mmap, + * regardless of the region's mmap capabilities. + * The vendor driver should determine whether and how to partition the data + * section. The vendor driver should return data_offset accordingly. + * + * The sequence to be followed while in pre-copy state and stop-and-copy state + * is as follows: + * a. Read pending_bytes, indicating the start of a new iteration to get device + * data. Repeated read on pending_bytes at this stage should have no side + * effects. + * If pending_bytes == 0, the user application should not iterate to get data + * for that device. + * If pending_bytes > 0, perform the following steps. + * b. Read data_offset, indicating that the vendor driver should make data + * available through the data section. The vendor driver should return this + * read operation only after data is available from (region + data_offset) + * to (region + data_offset + data_size). + * c. Read data_size, which is the amount of data in bytes available through + * the migration region. + * Read on data_offset and data_size should return the offset and size of + * the current buffer if the user application reads data_offset and + * data_size more than once here. + * d. Read data_size bytes of data from (region + data_offset) from the + * migration region. + * e. Process the data. + * f. Read pending_bytes, which indicates that the data from the previous + * iteration has been read. If pending_bytes > 0, go to step b. + * + * The user application can transition from the _SAVING|_RUNNING + * (pre-copy state) to the _SAVING (stop-and-copy) state regardless of the + * number of pending bytes. The user application should iterate in _SAVING + * (stop-and-copy) until pending_bytes is 0. + * + * The sequence to be followed while _RESUMING device state is as follows: + * While data for this device is available, repeat the following steps: + * a. Read data_offset from where the user application should write data. + * b.
Write migration data starting at the migration region + data_offset for + * the length determined by data_size from the migration source. + * c. Write data_size, which indicates to the vendor driver that data is + * written in the migration region. The vendor driver must return from + * this write operation only after consuming the data. The vendor driver + * should apply the user-provided migration region data to the device + * resume state. + * + * If an error occurs during the above sequences, the vendor driver can return + * an error code for next read() or write() operation, which will terminate the + * loop. The user application should then take the next necessary action, for + * example, failing migration or terminating the user application. + * + * For the user application, data is opaque. The user application should write + * data in the same order as the data is received and the data should be of + * same transaction size at the source. + */ + +struct vfio_device_migration_info { + __u32 device_state; /* VFIO device state */ +#define VFIO_DEVICE_STATE_STOP (0) +#define VFIO_DEVICE_STATE_RUNNING (1 << 0) +#define VFIO_DEVICE_STATE_SAVING (1 << 1) +#define VFIO_DEVICE_STATE_RESUMING (1 << 2) +#define VFIO_DEVICE_STATE_MASK (VFIO_DEVICE_STATE_RUNNING | \ + VFIO_DEVICE_STATE_SAVING | \ + VFIO_DEVICE_STATE_RESUMING) + +#define VFIO_DEVICE_STATE_VALID(state) \ + (state & VFIO_DEVICE_STATE_RESUMING ? \ + (state & VFIO_DEVICE_STATE_MASK) == VFIO_DEVICE_STATE_RESUMING : 1) + +#define VFIO_DEVICE_STATE_IS_ERROR(state) \ + ((state & VFIO_DEVICE_STATE_MASK) == (VFIO_DEVICE_STATE_SAVING | \ + VFIO_DEVICE_STATE_RESUMING)) + +#define VFIO_DEVICE_STATE_SET_ERROR(state) \ + ((state & ~VFIO_DEVICE_STATE_MASK) | VFIO_DEVICE_STATE_SAVING | \ + VFIO_DEVICE_STATE_RESUMING) + + __u32 reserved; + __u64 pending_bytes; + __u64 data_offset; + __u64 data_size; +}; /* * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped @@ -577,6 +807,7 @@ enum { enum { VFIO_CCW_IO_IRQ_INDEX, + VFIO_CCW_CRW_IRQ_INDEX, VFIO_CCW_NUM_IRQS }; @@ -785,6 +1016,29 @@ struct vfio_iommu_type1_info_cap_iova_range { struct vfio_iova_range iova_ranges[]; }; +/* + * The migration capability allows reporting of supported features for + * migration. + * + * The structures below define version 1 of this capability. + * + * The existence of this capability indicates that IOMMU kernel driver supports + * dirty page logging. + * + * pgsize_bitmap: Kernel driver returns bitmap of supported page sizes for dirty + * page logging. + * max_dirty_bitmap_size: Kernel driver returns maximum supported dirty bitmap + * size in bytes that can be used by user applications when getting the dirty + * bitmap. + */ +#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION 1 + +struct vfio_iommu_type1_info_cap_migration { + struct vfio_info_cap_header header; + __u32 flags; + __u64 pgsize_bitmap; + __u64 max_dirty_bitmap_size; /* in bytes */ +}; + #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) /** @@ -805,6 +1059,12 @@ struct vfio_iommu_type1_dma_map { #define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13) +struct vfio_bitmap { + __u64 pgsize; /* page size for bitmap in bytes */ + __u64 size; /* in bytes */ + __u64 __user *data; /* one bit per page */ +}; + /** * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14, * struct vfio_dma_unmap) @@ -814,12 +1074,23 @@ struct vfio_iommu_type1_dma_map { * field. No guarantee is made to the user that arbitrary unmaps of iova * or size different from those used in the original mapping call will * succeed.
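Returning to the device_state helper macros defined above, their encodings can be sanity-checked directly; an illustrative snippet (not part of the diff):

#include <assert.h>
#include <linux/vfio.h>

static void check_state_macros(void)
{
	/* 011b (pre-copy) is a valid state ... */
	assert(VFIO_DEVICE_STATE_VALID(VFIO_DEVICE_STATE_RUNNING |
				       VFIO_DEVICE_STATE_SAVING));
	/* ... while 110b (_SAVING | _RESUMING) is the error encoding. */
	assert(VFIO_DEVICE_STATE_IS_ERROR(VFIO_DEVICE_STATE_SAVING |
					  VFIO_DEVICE_STATE_RESUMING));
}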
+ * VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP should be set to get the dirty bitmap + * before unmapping IO virtual addresses. When this flag is set, the user must + * provide a struct vfio_bitmap in data[]. User must provide zero-allocated + * memory via vfio_bitmap.data and its size in the vfio_bitmap.size field. + * Each bit in the bitmap represents one page of the user-provided page size + * in the vfio_bitmap.pgsize field, consecutively starting from the iova + * offset. A set bit indicates that the page at that offset from iova is + * dirty. A bitmap of the pages in the unmapped range is returned in the + * user-provided vfio_bitmap.data. */ struct vfio_iommu_type1_dma_unmap { __u32 argsz; __u32 flags; +#define VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP (1 << 0) __u64 iova; /* IO virtual address */ __u64 size; /* Size of mapping (bytes) */ + __u8 data[]; }; #define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14) @@ -831,6 +1102,57 @@ struct vfio_iommu_type1_dma_unmap { #define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15) #define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16) +/** + * VFIO_IOMMU_DIRTY_PAGES - _IOWR(VFIO_TYPE, VFIO_BASE + 17, + * struct vfio_iommu_type1_dirty_bitmap) + * IOCTL is used for dirty pages logging. + * Caller should set flag depending on which operation to perform, details as + * below: + * + * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_START flag set, instructs + * the IOMMU driver to log pages that are dirtied or potentially dirtied by + * the device; designed to be used when a migration is in progress. Dirty pages + * are logged until logging is disabled by user application by calling the IOCTL + * with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag. + * + * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP flag set, instructs + * the IOMMU driver to stop logging dirtied pages. + * + * Calling the IOCTL with VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP flag set + * returns the dirty pages bitmap for IOMMU container for a given IOVA range. + * The user must specify the IOVA range and the pgsize through the structure + * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface + * supports getting a bitmap of the smallest supported pgsize only and can be + * modified in future to get a bitmap of any specified supported pgsize. The + * user must provide a zeroed memory area for the bitmap memory and specify its + * size in bitmap.size. One bit is used to represent one page consecutively + * starting from iova offset. The user should provide page size in bitmap.pgsize + * field. A bit set in the bitmap indicates that the page at that offset from + * iova is dirty. The caller must set argsz to a value including the size of + * structure vfio_iommu_type1_dirty_bitmap_get, but excluding the size of the + * actual bitmap. If dirty pages logging is not enabled, an error will be + * returned. + * + * Only one of the flags _START, _STOP and _GET may be specified at a time.
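A hedged sketch of the _GET_BITMAP operation just described, against the structures defined below (container_fd, iova, size and a caller-zeroed bits buffer are assumed; one bit per 4 KiB page, error handling elided):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int get_dirty_bitmap(int container_fd, __u64 iova, __u64 size,
			    __u64 *bits)
{
	size_t argsz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
		       sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
	struct vfio_iommu_type1_dirty_bitmap *db = calloc(1, argsz);
	struct vfio_iommu_type1_dirty_bitmap_get *get = (void *)db->data;
	int ret;

	db->argsz = argsz;	/* excludes the bitmap memory itself */
	db->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
	get->iova = iova;
	get->size = size;
	get->bitmap.pgsize = 4096;
	get->bitmap.size = (size / 4096 + 7) / 8;	/* one bit per page */
	get->bitmap.data = bits;			/* zeroed by the caller */
	ret = ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, db);
	free(db);
	return ret;
}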
+ * + */ +struct vfio_iommu_type1_dirty_bitmap { + __u32 argsz; + __u32 flags; +#define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0) +#define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1) +#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2) + __u8 data[]; +}; + +struct vfio_iommu_type1_dirty_bitmap_get { + __u64 iova; /* IO virtual address */ + __u64 size; /* Size of iova range */ + struct vfio_bitmap bitmap; +}; + +#define VFIO_IOMMU_DIRTY_PAGES _IO(VFIO_TYPE, VFIO_BASE + 17) + /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */ /* diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h index cbecbf0cd54f..aa04f3aa6db0 100644 --- a/include/uapi/linux/vfio_ccw.h +++ b/include/uapi/linux/vfio_ccw.h @@ -34,4 +34,23 @@ struct ccw_cmd_region { __u32 ret_code; } __packed; +/* + * Used for processing commands that read the subchannel-information block + * Reading this region triggers a stsch() to hardware + * Note: this is controlled by a capability + */ +struct ccw_schib_region { +#define SCHIB_AREA_SIZE 52 + __u8 schib_area[SCHIB_AREA_SIZE]; +} __packed; + +/* + * Used for returning a Channel Report Word to userspace. + * Note: this is controlled by a capability + */ +struct ccw_crw_region { + __u32 crw; + __u32 pad; +} __packed; + #endif diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index 9fe72e4b1373..0c2349612e77 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -15,6 +15,8 @@ #include <linux/types.h> #include <linux/ioctl.h> +#define VHOST_FILE_UNBIND -1 + /* ioctls */ #define VHOST_VIRTIO 0xAF @@ -140,4 +142,6 @@ /* Get the max ring size. */ #define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16) +/* Set event fd for config interrupt*/ +#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int) #endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 9817b7e2c968..c3a1cf1c507f 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -487,6 +487,8 @@ struct v4l2_capability { #define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */ +#define V4L2_CAP_IO_MC 0x20000000 /* Is input/output controlled by the media controller */ + #define V4L2_CAP_DEVICE_CAPS 0x80000000 /* sets device capabilities field */ /* @@ -782,7 +784,8 @@ struct v4l2_fmtdesc { __u32 flags; __u8 description[32]; /* Description string */ __u32 pixelformat; /* Format fourcc */ - __u32 reserved[4]; + __u32 mbus_code; /* Media bus code */ + __u32 reserved[3]; }; #define V4L2_FMT_FLAG_COMPRESSED 0x0001 diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index 19974392d324..dc3e656470dd 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -48,8 +48,15 @@ struct virtio_balloon_config { __u32 num_pages; /* Number of pages we've actually got in balloon. */ __u32 actual; - /* Free page report command id, readonly by guest */ - __u32 free_page_report_cmd_id; + /* + * Free page hint command id, readonly by guest. + * Was previously named free_page_report_cmd_id so we + * need to carry that name for legacy support. 
+ */ + union { + __u32 free_page_hint_cmd_id; + __u32 free_page_report_cmd_id; /* deprecated */ + }; /* Stores PAGE_POISON if page poisoning is in use */ __u32 poison_val; }; diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index ecc27a17401a..b052355ac7a3 100644 --- a/include/uapi/linux/virtio_ids.h +++ b/include/uapi/linux/virtio_ids.h @@ -44,6 +44,7 @@ #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ #define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */ +#define VIRTIO_ID_MEM 24 /* virtio mem */ #define VIRTIO_ID_FS 26 /* virtio filesystem */ #define VIRTIO_ID_PMEM 27 /* virtio pmem */ #define VIRTIO_ID_MAC80211_HWSIM 29 /* virtio mac80211-hwsim */ diff --git a/include/uapi/linux/virtio_mem.h b/include/uapi/linux/virtio_mem.h new file mode 100644 index 000000000000..a9ffe041843c --- /dev/null +++ b/include/uapi/linux/virtio_mem.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: BSD-3-Clause */ +/* + * Virtio Mem Device + * + * Copyright Red Hat, Inc. 2020 + * + * Authors: + * David Hildenbrand <david@redhat.com> + * + * This header is BSD licensed so anyone can use the definitions + * to implement compatible drivers/servers: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _LINUX_VIRTIO_MEM_H +#define _LINUX_VIRTIO_MEM_H + +#include <linux/types.h> +#include <linux/virtio_types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> + +/* + * Each virtio-mem device manages a dedicated region in physical address + * space. Each device can belong to a single NUMA node, multiple devices + * for a single NUMA node are possible. A virtio-mem device is like a + * "resizable DIMM" consisting of small memory blocks that can be plugged + * or unplugged. The device driver is responsible for (un)plugging memory + * blocks on demand. + * + * Virtio-mem devices can only operate on their assigned memory region in + * order to (un)plug memory. A device cannot (un)plug memory belonging to + * other devices. 
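To make the block-granular request contract concrete, a hedged sketch of building a guest-side plug request using the structures and constants defined later in this header (the address value and the elided endian conversion are illustrative only):

#include <linux/virtio_mem.h>

static struct virtio_mem_req example_plug_req(void)
{
	struct virtio_mem_req req = {
		.type = VIRTIO_MEM_REQ_PLUG,	/* endian conversion elided */
		.u.plug = {
			.addr = 0x100000000ULL,	/* must lie inside the device's region */
			.nb_blocks = 4,		/* plugs 4 * block_size bytes */
		},
	};
	return req;
}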
+ * + * The "region_size" corresponds to the maximum amount of memory that can + * be provided by a device. The "size" corresponds to the amount of memory + * that is currently plugged. "requested_size" corresponds to a request + * from the device to the device driver to (un)plug blocks. The + * device driver should try to (un)plug blocks in order to reach the + * "requested_size". It is impossible to plug more memory than requested. + * + * The "usable_region_size" represents the memory region that can actually + * be used to (un)plug memory. It is always at least as big as the + * "requested_size" and will grow dynamically. It will only shrink when + * explicitly triggered (VIRTIO_MEM_REQ_UNPLUG). + * + * There are no guarantees what will happen if unplugged memory is + * read/written. Such memory should, in general, not be touched. E.g., + * even writing might succeed, but the values will simply be discarded at + * random points in time. + * + * It can happen that the device cannot process a request, because it is + * busy. The device driver has to retry later. + * + * Usually, during system resets all memory will get unplugged, so the + * device driver can start with a clean state. However, in specific + * scenarios (if the device is busy) it can happen that the device still + * has memory plugged. The device driver can request to unplug all memory + * (VIRTIO_MEM_REQ_UNPLUG) - which might take a while to succeed if the + * device is busy. + */ + +/* --- virtio-mem: feature bits --- */ + +/* node_id is an ACPI PXM and is valid */ +#define VIRTIO_MEM_F_ACPI_PXM 0 + + +/* --- virtio-mem: guest -> host requests --- */ + +/* request to plug memory blocks */ +#define VIRTIO_MEM_REQ_PLUG 0 +/* request to unplug memory blocks */ +#define VIRTIO_MEM_REQ_UNPLUG 1 +/* request to unplug all blocks and shrink the usable size */ +#define VIRTIO_MEM_REQ_UNPLUG_ALL 2 +/* request information about the plugged state of memory blocks */ +#define VIRTIO_MEM_REQ_STATE 3 + +struct virtio_mem_req_plug { + __virtio64 addr; + __virtio16 nb_blocks; + __virtio16 padding[3]; +}; + +struct virtio_mem_req_unplug { + __virtio64 addr; + __virtio16 nb_blocks; + __virtio16 padding[3]; +}; + +struct virtio_mem_req_state { + __virtio64 addr; + __virtio16 nb_blocks; + __virtio16 padding[3]; +}; + +struct virtio_mem_req { + __virtio16 type; + __virtio16 padding[3]; + + union { + struct virtio_mem_req_plug plug; + struct virtio_mem_req_unplug unplug; + struct virtio_mem_req_state state; + } u; +}; + + +/* --- virtio-mem: host -> guest response --- */ + +/* + * Request processed successfully, applicable for + * - VIRTIO_MEM_REQ_PLUG + * - VIRTIO_MEM_REQ_UNPLUG + * - VIRTIO_MEM_REQ_UNPLUG_ALL + * - VIRTIO_MEM_REQ_STATE + */ +#define VIRTIO_MEM_RESP_ACK 0 +/* + * Request denied - e.g. trying to plug more than requested, applicable for + * - VIRTIO_MEM_REQ_PLUG + */ +#define VIRTIO_MEM_RESP_NACK 1 +/* + * Request cannot be processed right now, try again later, applicable for + * - VIRTIO_MEM_REQ_PLUG + * - VIRTIO_MEM_REQ_UNPLUG + * - VIRTIO_MEM_REQ_UNPLUG_ALL + */ +#define VIRTIO_MEM_RESP_BUSY 2 +/* + * Error in request (e.g. 
addresses/alignment), applicable for + * - VIRTIO_MEM_REQ_PLUG + * - VIRTIO_MEM_REQ_UNPLUG + * - VIRTIO_MEM_REQ_STATE + */ +#define VIRTIO_MEM_RESP_ERROR 3 + + +/* State of memory blocks is "plugged" */ +#define VIRTIO_MEM_STATE_PLUGGED 0 +/* State of memory blocks is "unplugged" */ +#define VIRTIO_MEM_STATE_UNPLUGGED 1 +/* State of memory blocks is "mixed" */ +#define VIRTIO_MEM_STATE_MIXED 2 + +struct virtio_mem_resp_state { + __virtio16 state; +}; + +struct virtio_mem_resp { + __virtio16 type; + __virtio16 padding[3]; + + union { + struct virtio_mem_resp_state state; + } u; +}; + +/* --- virtio-mem: configuration --- */ + +struct virtio_mem_config { + /* Block size and alignment. Cannot change. */ + __u64 block_size; + /* Valid with VIRTIO_MEM_F_ACPI_PXM. Cannot change. */ + __u16 node_id; + __u8 padding[6]; + /* Start address of the memory region. Cannot change. */ + __u64 addr; + /* Region size (maximum). Cannot change. */ + __u64 region_size; + /* + * Currently usable region size. Can grow up to region_size. Can + * shrink due to VIRTIO_MEM_REQ_UNPLUG_ALL (in which case no config + * update will be sent). + */ + __u64 usable_region_size; + /* + * Currently used size. Changes due to plug/unplug requests, but no + * config updates will be sent. + */ + __u64 plugged_size; + /* Requested size. New plug requests cannot exceed it. Can change. */ + __u64 requested_size; +}; + +#endif /* _LINUX_VIRTIO_MEM_H */ diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 559f42e73315..476d3e5c0fe7 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -86,6 +86,13 @@ * at the end of the used ring. Guest should ignore the used->flags field. */ #define VIRTIO_RING_F_EVENT_IDX 29 +/* Alignment requirements for vring elements. + * When using pre-virtio 1.0 layout, these fall out naturally. + */ +#define VRING_AVAIL_ALIGN_SIZE 2 +#define VRING_USED_ALIGN_SIZE 4 +#define VRING_DESC_ALIGN_SIZE 16 + /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ struct vring_desc { /* Address (guest-physical). */ @@ -112,28 +119,47 @@ struct vring_used_elem { __virtio32 len; }; +typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE))) + vring_used_elem_t; + struct vring_used { __virtio16 flags; __virtio16 idx; - struct vring_used_elem ring[]; + vring_used_elem_t ring[]; }; +/* + * The ring element addresses are passed between components with different + * alignment assumptions. Thus, we might need to decrease the compiler-selected + * alignment, and so must use a typedef to make sure the aligned attribute + * actually takes hold: + * + * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes + * + * When used on a struct, or struct member, the aligned attribute can only + * increase the alignment; in order to decrease it, the packed attribute must + * be specified as well. When used as part of a typedef, the aligned attribute + * can both increase and decrease alignment, and specifying the packed + * attribute generates a warning.
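The effect of the typedef trick explained above can be verified at compile time against the typedefs that follow; an illustrative C11 check (not part of the diff):

#include <linux/virtio_ring.h>

/* The typedef raises struct vring_desc's natural 8-byte alignment to the
 * 16 bytes required by VRING_DESC_ALIGN_SIZE. */
_Static_assert(_Alignof(vring_desc_t) == VRING_DESC_ALIGN_SIZE,
	       "vring_desc_t must be 16-byte aligned");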
+ */ +typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE))) + vring_desc_t; +typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE))) + vring_avail_t; +typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE))) + vring_used_t; + struct vring { unsigned int num; - struct vring_desc *desc; + vring_desc_t *desc; - struct vring_avail *avail; + vring_avail_t *avail; - struct vring_used *used; + vring_used_t *used; }; -/* Alignment requirements for vring elements. - * When using pre-virtio 1.0 layout, these fall out naturally. - */ -#define VRING_AVAIL_ALIGN_SIZE 2 -#define VRING_USED_ALIGN_SIZE 4 -#define VRING_DESC_ALIGN_SIZE 16 +#ifndef VIRTIO_RING_NO_LEGACY /* The standard layout for the ring is a continuous chunk of memory which looks * like this. We assume num is a power of 2. @@ -181,6 +207,8 @@ static inline unsigned vring_size(unsigned int num, unsigned long align) + sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num; } +#endif /* VIRTIO_RING_NO_LEGACY */ + /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ /* Assuming a given event_idx value from the other side, if * we have just incremented index from old to new_idx, diff --git a/include/uapi/linux/watch_queue.h b/include/uapi/linux/watch_queue.h new file mode 100644 index 000000000000..c3d8320b5d3a --- /dev/null +++ b/include/uapi/linux/watch_queue.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_LINUX_WATCH_QUEUE_H +#define _UAPI_LINUX_WATCH_QUEUE_H + +#include <linux/types.h> +#include <linux/fcntl.h> +#include <linux/ioctl.h> + +#define O_NOTIFICATION_PIPE O_EXCL /* Parameter to pipe2() selecting notification pipe */ + +#define IOC_WATCH_QUEUE_SET_SIZE _IO('W', 0x60) /* Set the size in pages */ +#define IOC_WATCH_QUEUE_SET_FILTER _IO('W', 0x61) /* Set the filter */ + +enum watch_notification_type { + WATCH_TYPE_META = 0, /* Special record */ + WATCH_TYPE_KEY_NOTIFY = 1, /* Key change event notification */ + WATCH_TYPE__NR = 2 +}; + +enum watch_meta_notification_subtype { + WATCH_META_REMOVAL_NOTIFICATION = 0, /* Watched object was removed */ + WATCH_META_LOSS_NOTIFICATION = 1, /* Data loss occurred */ +}; + +/* + * Notification record header. This is aligned to 64-bits so that subclasses + * can contain __u64 fields. + */ +struct watch_notification { + __u32 type:24; /* enum watch_notification_type */ + __u32 subtype:8; /* Type-specific subtype (filterable) */ + __u32 info; +#define WATCH_INFO_LENGTH 0x0000007f /* Length of record */ +#define WATCH_INFO_LENGTH__SHIFT 0 +#define WATCH_INFO_ID 0x0000ff00 /* ID of watchpoint */ +#define WATCH_INFO_ID__SHIFT 8 +#define WATCH_INFO_TYPE_INFO 0xffff0000 /* Type-specific info */ +#define WATCH_INFO_TYPE_INFO__SHIFT 16 +#define WATCH_INFO_FLAG_0 0x00010000 /* Type-specific info, flag bit 0 */ +#define WATCH_INFO_FLAG_1 0x00020000 /* ... */ +#define WATCH_INFO_FLAG_2 0x00040000 +#define WATCH_INFO_FLAG_3 0x00080000 +#define WATCH_INFO_FLAG_4 0x00100000 +#define WATCH_INFO_FLAG_5 0x00200000 +#define WATCH_INFO_FLAG_6 0x00400000 +#define WATCH_INFO_FLAG_7 0x00800000 +}; + +/* + * Notification filtering rules (IOC_WATCH_QUEUE_SET_FILTER). 
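Consumers decode the packed info word with the masks defined above; a small hedged helper sketch (function names are illustrative):

#include <linux/watch_queue.h>

static inline unsigned int watch_len(const struct watch_notification *n)
{
	return (n->info & WATCH_INFO_LENGTH) >> WATCH_INFO_LENGTH__SHIFT;
}

static inline unsigned int watch_id(const struct watch_notification *n)
{
	return (n->info & WATCH_INFO_ID) >> WATCH_INFO_ID__SHIFT;
}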
+ */ +struct watch_notification_type_filter { + __u32 type; /* Type to apply filter to */ + __u32 info_filter; /* Filter on watch_notification::info */ + __u32 info_mask; /* Mask of relevant bits in info_filter */ + __u32 subtype_filter[8]; /* Bitmask of subtypes to filter on */ +}; + +struct watch_notification_filter { + __u32 nr_filters; /* Number of filters */ + __u32 __reserved; /* Must be 0 */ + struct watch_notification_type_filter filters[]; +}; + + +/* + * Extended watch removal notification. This is used optionally if the type + * wants to indicate an identifier for the object being watched, if there is + * one. This can be distinguished by the length. + * + * type -> WATCH_TYPE_META + * subtype -> WATCH_META_REMOVAL_NOTIFICATION + */ +struct watch_notification_removal { + struct watch_notification watch; + __u64 id; /* Type-dependent identifier */ +}; + +/* + * Type of key/keyring change notification. + */ +enum key_notification_subtype { + NOTIFY_KEY_INSTANTIATED = 0, /* Key was instantiated (aux is error code) */ + NOTIFY_KEY_UPDATED = 1, /* Key was updated */ + NOTIFY_KEY_LINKED = 2, /* Key (aux) was added to watched keyring */ + NOTIFY_KEY_UNLINKED = 3, /* Key (aux) was removed from watched keyring */ + NOTIFY_KEY_CLEARED = 4, /* Keyring was cleared */ + NOTIFY_KEY_REVOKED = 5, /* Key was revoked */ + NOTIFY_KEY_INVALIDATED = 6, /* Key was invalidated */ + NOTIFY_KEY_SETATTR = 7, /* Key's attributes were changed */ +}; + +/* + * Key/keyring notification record. + * - watch.type = WATCH_TYPE_KEY_NOTIFY + * - watch.subtype = enum key_notification_subtype + */ +struct key_notification { + struct watch_notification watch; + __u32 key_id; /* The key/keyring affected */ + __u32 aux; /* Per-type auxiliary data */ +}; + +#endif /* _UAPI_LINUX_WATCH_QUEUE_H */ diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h index a2c006a364e0..24f3371ad826 100644 --- a/include/uapi/linux/wireless.h +++ b/include/uapi/linux/wireless.h @@ -74,7 +74,11 @@ #include <linux/socket.h> /* for "struct sockaddr" et al */ #include <linux/if.h> /* for IFNAMSIZ and co... */ -#include <stddef.h> /* for offsetof */ +#ifdef __KERNEL__ +# include <linux/stddef.h> /* for offsetof */ +#else +# include <stddef.h> /* for offsetof */ +#endif /***************************** VERSION *****************************/ /* diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h index c1395b5bd432..9463db2dfa9d 100644 --- a/include/uapi/linux/xattr.h +++ b/include/uapi/linux/xattr.h @@ -7,6 +7,7 @@ Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org> Copyright (c) 2001-2002 Silicon Graphics, Inc. All Rights Reserved. Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> + Copyright (c) 2020 Jan (janneke) Nieuwenhuizen <janneke@gnu.org> */ #include <linux/libc-compat.h> @@ -31,6 +32,9 @@ #define XATTR_BTRFS_PREFIX "btrfs." #define XATTR_BTRFS_PREFIX_LEN (sizeof(XATTR_BTRFS_PREFIX) - 1) +#define XATTR_HURD_PREFIX "gnu." +#define XATTR_HURD_PREFIX_LEN (sizeof(XATTR_HURD_PREFIX) - 1) + #define XATTR_SECURITY_PREFIX "security."
#define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1) diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 5f3b9fec7b5f..ff7cfdc6cb44 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -304,7 +304,7 @@ enum xfrm_attr_type_t { XFRMA_PROTO, /* __u8 */ XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ XFRMA_PAD, - XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ + XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */ XFRMA_SET_MARK, /* __u32 */ XFRMA_SET_MARK_MASK, /* __u32 */ XFRMA_IF_ID, /* __u32 */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 4faa2c9767e5..f6267a8d7416 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note * - * Copyright 2016-2019 HabanaLabs, Ltd. + * Copyright 2016-2020 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -15,10 +15,13 @@ * Defines that are ASIC-specific but constitute the ABI between kernel driver * and userspace */ -#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */ +#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */ +#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START 0x80 /* 128 bytes */ +#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT 48 +#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR 24 /* - * Queue Numbering + * Goya Queue Numbering * * The external queues (PCI DMA channels) MUST be before the internal queues * and each group (PCI DMA channels and internal) must be contiguous inside @@ -46,6 +49,129 @@ enum goya_queue_id { }; /* + * Gaudi Queue Numbering + * External queues (PCI DMA channels) are DMA_0_*, DMA_1_* and DMA_5_*. + * Except for one CPU queue, all the rest are internal queues. + */ + +enum gaudi_queue_id { + GAUDI_QUEUE_ID_DMA_0_0 = 0, /* external */ + GAUDI_QUEUE_ID_DMA_0_1 = 1, /* external */ + GAUDI_QUEUE_ID_DMA_0_2 = 2, /* external */ + GAUDI_QUEUE_ID_DMA_0_3 = 3, /* external */ + GAUDI_QUEUE_ID_DMA_1_0 = 4, /* external */ + GAUDI_QUEUE_ID_DMA_1_1 = 5, /* external */ + GAUDI_QUEUE_ID_DMA_1_2 = 6, /* external */ + GAUDI_QUEUE_ID_DMA_1_3 = 7, /* external */ + GAUDI_QUEUE_ID_CPU_PQ = 8, /* CPU */ + GAUDI_QUEUE_ID_DMA_2_0 = 9, /* internal */ + GAUDI_QUEUE_ID_DMA_2_1 = 10, /* internal */ + GAUDI_QUEUE_ID_DMA_2_2 = 11, /* internal */ + GAUDI_QUEUE_ID_DMA_2_3 = 12, /* internal */ + GAUDI_QUEUE_ID_DMA_3_0 = 13, /* internal */ + GAUDI_QUEUE_ID_DMA_3_1 = 14, /* internal */ + GAUDI_QUEUE_ID_DMA_3_2 = 15, /* internal */ + GAUDI_QUEUE_ID_DMA_3_3 = 16, /* internal */ + GAUDI_QUEUE_ID_DMA_4_0 = 17, /* internal */ + GAUDI_QUEUE_ID_DMA_4_1 = 18, /* internal */ + GAUDI_QUEUE_ID_DMA_4_2 = 19, /* internal */ + GAUDI_QUEUE_ID_DMA_4_3 = 20, /* internal */ + GAUDI_QUEUE_ID_DMA_5_0 = 21, /* external */ + GAUDI_QUEUE_ID_DMA_5_1 = 22, /* external */ + GAUDI_QUEUE_ID_DMA_5_2 = 23, /* external */ + GAUDI_QUEUE_ID_DMA_5_3 = 24, /* external */ + GAUDI_QUEUE_ID_DMA_6_0 = 25, /* internal */ + GAUDI_QUEUE_ID_DMA_6_1 = 26, /* internal */ + GAUDI_QUEUE_ID_DMA_6_2 = 27, /* internal */ + GAUDI_QUEUE_ID_DMA_6_3 = 28, /* internal */ + GAUDI_QUEUE_ID_DMA_7_0 = 29, /* internal */ + GAUDI_QUEUE_ID_DMA_7_1 = 30, /* internal */ + GAUDI_QUEUE_ID_DMA_7_2 = 31, /* internal */ + GAUDI_QUEUE_ID_DMA_7_3 = 32, /* internal */ + GAUDI_QUEUE_ID_MME_0_0 = 33, /* internal */ + GAUDI_QUEUE_ID_MME_0_1 = 34, /* internal */ + GAUDI_QUEUE_ID_MME_0_2 = 35, /* internal */ + GAUDI_QUEUE_ID_MME_0_3 = 36, /* internal */ + GAUDI_QUEUE_ID_MME_1_0 = 37, /* internal */ +
GAUDI_QUEUE_ID_MME_1_1 = 38, /* internal */ + GAUDI_QUEUE_ID_MME_1_2 = 39, /* internal */ + GAUDI_QUEUE_ID_MME_1_3 = 40, /* internal */ + GAUDI_QUEUE_ID_TPC_0_0 = 41, /* internal */ + GAUDI_QUEUE_ID_TPC_0_1 = 42, /* internal */ + GAUDI_QUEUE_ID_TPC_0_2 = 43, /* internal */ + GAUDI_QUEUE_ID_TPC_0_3 = 44, /* internal */ + GAUDI_QUEUE_ID_TPC_1_0 = 45, /* internal */ + GAUDI_QUEUE_ID_TPC_1_1 = 46, /* internal */ + GAUDI_QUEUE_ID_TPC_1_2 = 47, /* internal */ + GAUDI_QUEUE_ID_TPC_1_3 = 48, /* internal */ + GAUDI_QUEUE_ID_TPC_2_0 = 49, /* internal */ + GAUDI_QUEUE_ID_TPC_2_1 = 50, /* internal */ + GAUDI_QUEUE_ID_TPC_2_2 = 51, /* internal */ + GAUDI_QUEUE_ID_TPC_2_3 = 52, /* internal */ + GAUDI_QUEUE_ID_TPC_3_0 = 53, /* internal */ + GAUDI_QUEUE_ID_TPC_3_1 = 54, /* internal */ + GAUDI_QUEUE_ID_TPC_3_2 = 55, /* internal */ + GAUDI_QUEUE_ID_TPC_3_3 = 56, /* internal */ + GAUDI_QUEUE_ID_TPC_4_0 = 57, /* internal */ + GAUDI_QUEUE_ID_TPC_4_1 = 58, /* internal */ + GAUDI_QUEUE_ID_TPC_4_2 = 59, /* internal */ + GAUDI_QUEUE_ID_TPC_4_3 = 60, /* internal */ + GAUDI_QUEUE_ID_TPC_5_0 = 61, /* internal */ + GAUDI_QUEUE_ID_TPC_5_1 = 62, /* internal */ + GAUDI_QUEUE_ID_TPC_5_2 = 63, /* internal */ + GAUDI_QUEUE_ID_TPC_5_3 = 64, /* internal */ + GAUDI_QUEUE_ID_TPC_6_0 = 65, /* internal */ + GAUDI_QUEUE_ID_TPC_6_1 = 66, /* internal */ + GAUDI_QUEUE_ID_TPC_6_2 = 67, /* internal */ + GAUDI_QUEUE_ID_TPC_6_3 = 68, /* internal */ + GAUDI_QUEUE_ID_TPC_7_0 = 69, /* internal */ + GAUDI_QUEUE_ID_TPC_7_1 = 70, /* internal */ + GAUDI_QUEUE_ID_TPC_7_2 = 71, /* internal */ + GAUDI_QUEUE_ID_TPC_7_3 = 72, /* internal */ + GAUDI_QUEUE_ID_NIC_0_0 = 73, /* internal */ + GAUDI_QUEUE_ID_NIC_0_1 = 74, /* internal */ + GAUDI_QUEUE_ID_NIC_0_2 = 75, /* internal */ + GAUDI_QUEUE_ID_NIC_0_3 = 76, /* internal */ + GAUDI_QUEUE_ID_NIC_1_0 = 77, /* internal */ + GAUDI_QUEUE_ID_NIC_1_1 = 78, /* internal */ + GAUDI_QUEUE_ID_NIC_1_2 = 79, /* internal */ + GAUDI_QUEUE_ID_NIC_1_3 = 80, /* internal */ + GAUDI_QUEUE_ID_NIC_2_0 = 81, /* internal */ + GAUDI_QUEUE_ID_NIC_2_1 = 82, /* internal */ + GAUDI_QUEUE_ID_NIC_2_2 = 83, /* internal */ + GAUDI_QUEUE_ID_NIC_2_3 = 84, /* internal */ + GAUDI_QUEUE_ID_NIC_3_0 = 85, /* internal */ + GAUDI_QUEUE_ID_NIC_3_1 = 86, /* internal */ + GAUDI_QUEUE_ID_NIC_3_2 = 87, /* internal */ + GAUDI_QUEUE_ID_NIC_3_3 = 88, /* internal */ + GAUDI_QUEUE_ID_NIC_4_0 = 89, /* internal */ + GAUDI_QUEUE_ID_NIC_4_1 = 90, /* internal */ + GAUDI_QUEUE_ID_NIC_4_2 = 91, /* internal */ + GAUDI_QUEUE_ID_NIC_4_3 = 92, /* internal */ + GAUDI_QUEUE_ID_NIC_5_0 = 93, /* internal */ + GAUDI_QUEUE_ID_NIC_5_1 = 94, /* internal */ + GAUDI_QUEUE_ID_NIC_5_2 = 95, /* internal */ + GAUDI_QUEUE_ID_NIC_5_3 = 96, /* internal */ + GAUDI_QUEUE_ID_NIC_6_0 = 97, /* internal */ + GAUDI_QUEUE_ID_NIC_6_1 = 98, /* internal */ + GAUDI_QUEUE_ID_NIC_6_2 = 99, /* internal */ + GAUDI_QUEUE_ID_NIC_6_3 = 100, /* internal */ + GAUDI_QUEUE_ID_NIC_7_0 = 101, /* internal */ + GAUDI_QUEUE_ID_NIC_7_1 = 102, /* internal */ + GAUDI_QUEUE_ID_NIC_7_2 = 103, /* internal */ + GAUDI_QUEUE_ID_NIC_7_3 = 104, /* internal */ + GAUDI_QUEUE_ID_NIC_8_0 = 105, /* internal */ + GAUDI_QUEUE_ID_NIC_8_1 = 106, /* internal */ + GAUDI_QUEUE_ID_NIC_8_2 = 107, /* internal */ + GAUDI_QUEUE_ID_NIC_8_3 = 108, /* internal */ + GAUDI_QUEUE_ID_NIC_9_0 = 109, /* internal */ + GAUDI_QUEUE_ID_NIC_9_1 = 110, /* internal */ + GAUDI_QUEUE_ID_NIC_9_2 = 111, /* internal */ + GAUDI_QUEUE_ID_NIC_9_3 = 112, /* internal */ + GAUDI_QUEUE_ID_SIZE +}; + +/* * Engine Numbering * * Used in the "busy_engines_mask" field 
in `struct hl_info_hw_idle' @@ -69,6 +195,40 @@ enum goya_engine_id { GOYA_ENGINE_ID_SIZE }; +enum gaudi_engine_id { + GAUDI_ENGINE_ID_DMA_0 = 0, + GAUDI_ENGINE_ID_DMA_1, + GAUDI_ENGINE_ID_DMA_2, + GAUDI_ENGINE_ID_DMA_3, + GAUDI_ENGINE_ID_DMA_4, + GAUDI_ENGINE_ID_DMA_5, + GAUDI_ENGINE_ID_DMA_6, + GAUDI_ENGINE_ID_DMA_7, + GAUDI_ENGINE_ID_MME_0, + GAUDI_ENGINE_ID_MME_1, + GAUDI_ENGINE_ID_MME_2, + GAUDI_ENGINE_ID_MME_3, + GAUDI_ENGINE_ID_TPC_0, + GAUDI_ENGINE_ID_TPC_1, + GAUDI_ENGINE_ID_TPC_2, + GAUDI_ENGINE_ID_TPC_3, + GAUDI_ENGINE_ID_TPC_4, + GAUDI_ENGINE_ID_TPC_5, + GAUDI_ENGINE_ID_TPC_6, + GAUDI_ENGINE_ID_TPC_7, + GAUDI_ENGINE_ID_NIC_0, + GAUDI_ENGINE_ID_NIC_1, + GAUDI_ENGINE_ID_NIC_2, + GAUDI_ENGINE_ID_NIC_3, + GAUDI_ENGINE_ID_NIC_4, + GAUDI_ENGINE_ID_NIC_5, + GAUDI_ENGINE_ID_NIC_6, + GAUDI_ENGINE_ID_NIC_7, + GAUDI_ENGINE_ID_NIC_8, + GAUDI_ENGINE_ID_NIC_9, + GAUDI_ENGINE_ID_SIZE +}; + enum hl_device_status { HL_DEVICE_STATUS_OPERATIONAL, HL_DEVICE_STATUS_IN_RESET, @@ -101,6 +261,8 @@ enum hl_device_status { * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset * operations performed on the device since the last * time the driver was loaded. + * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time + * for synchronization. */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -111,6 +273,7 @@ enum hl_device_status { #define HL_INFO_HW_EVENTS_AGGREGATE 7 #define HL_INFO_CLK_RATE 8 #define HL_INFO_RESET_COUNT 9 +#define HL_INFO_TIME_SYNC 10 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -122,7 +285,8 @@ struct hl_info_hw_ip_info { __u32 sram_size; __u32 num_of_events; __u32 device_id; /* PCI Device ID */ - __u32 reserved[3]; + __u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */ + __u32 reserved[2]; __u32 armcp_cpld_version; __u32 psoc_pci_pll_nr; __u32 psoc_pci_pll_nf; @@ -169,6 +333,11 @@ struct hl_info_reset_count { __u32 soft_reset_cnt; }; +struct hl_info_time_sync { + __u64 device_time; + __u64 host_time; +}; + struct hl_info_args { /* Location of relevant struct in userspace */ __u64 return_pointer; @@ -201,7 +370,8 @@ struct hl_info_args { /* Opcode to destroy previously created command buffer */ #define HL_CB_OP_DESTROY 1 -#define HL_MAX_CB_SIZE 0x200000 /* 2MB */ +/* 2MB minus 32 bytes for 2xMSG_PROT */ +#define HL_MAX_CB_SIZE (0x200000 - 32) struct hl_cb_in { /* Handle of CB or 0 if we want to create one */ @@ -232,52 +402,87 @@ union hl_cb_args { * compatibility */ struct hl_cs_chunk { - /* - * For external queue, this represents a Handle of CB on the Host - * For internal queue, this represents an SRAM or DRAM address of the - * internal CB - */ - __u64 cb_handle; + union { + /* For external queue, this represents a Handle of CB on the + * Host. + * For internal queue in Goya, this represents an SRAM or + * a DRAM address of the internal CB. In Gaudi, this might also + * represent a mapped host address of the CB. + * + * A mapped host address is in the device address space, after + * a host address was mapped by the device MMU. + */ + __u64 cb_handle; + + /* Relevant only when HL_CS_FLAGS_WAIT is set. + * This holds address of array of u64 values that contain + * signal CS sequence numbers. 
The wait described by this job + * will listen on all those signals (wait event per signal) + */ + __u64 signal_seq_arr; + }; + + /* Index of queue to put the CB on */ + __u32 queue_index; - /* - * Size of command buffer with valid packets - * Can be smaller than actual CB size - */ - __u32 cb_size; + + union { + /* + * Size of command buffer with valid packets + * Can be smaller than actual CB size + */ + __u32 cb_size; + + /* Relevant only when HL_CS_FLAGS_WAIT is set. + * Number of entries in signal_seq_arr + */ + __u32 num_signal_seq_arr; + }; + + /* HL_CS_CHUNK_FLAGS_* */ + __u32 cs_chunk_flags; + + /* Align structure to 64 bytes */ + __u32 pad[11]; }; +/* SIGNAL and WAIT flags are mutually exclusive */ #define HL_CS_FLAGS_FORCE_RESTORE 0x1 +#define HL_CS_FLAGS_SIGNAL 0x2 +#define HL_CS_FLAGS_WAIT 0x4 #define HL_CS_STATUS_SUCCESS 0 #define HL_MAX_JOBS_PER_CS 512 struct hl_cs_in { + /* this holds address of array of hl_cs_chunk for restore phase */ __u64 chunks_restore; - /* this holds address of array of hl_cs_chunk for execution phase */ + + /* holds address of array of hl_cs_chunk for execution phase */ __u64 chunks_execute; + /* this holds address of array of hl_cs_chunk for store phase - * Currently not in use */ __u64 chunks_store; + /* Number of chunks in restore phase array. Maximum number is * HL_MAX_JOBS_PER_CS */ __u32 num_chunks_restore; + /* Number of chunks in execution array. Maximum number is * HL_MAX_JOBS_PER_CS */ __u32 num_chunks_execute; + /* Number of chunks in store phase array - Currently not in use */ __u32 num_chunks_store; + /* HL_CS_FLAGS_* */ __u32 cs_flags; + /* Context ID - Currently not in use */ __u32 ctx_id; }; @@ -588,8 +793,8 @@ struct hl_debug_args { * For jobs on external queues, the user needs to create command buffers * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on * internal queues, the user needs to prepare a "command buffer" with packets - * on either the SRAM or DRAM, and give the device address of that buffer to - * the CS ioctl. + * on either the device SRAM/DRAM or the host, and give the device address of + * that buffer to the CS ioctl. * * This IOCTL is asynchronous in regard to the actual execution of the CS. This * means it returns immediately after ALL the JOBS were enqueued on their @@ -601,7 +806,7 @@ struct hl_debug_args { * external JOBS have been completed. Note that if the CS has internal JOBS * which can execute AFTER the external JOBS have finished, the driver might * report that the CS has finished executing BEFORE the internal JOBS have - * actually finish executing. + * actually finished executing.
* * Even though the sequence number increments per CS, the user can NOT * automatically assume that if CS with sequence number N finished, then CS diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h index 47ffe3208c27..4b48fbf7d343 100644 --- a/include/uapi/mtd/mtd-abi.h +++ b/include/uapi/mtd/mtd-abi.h @@ -104,6 +104,7 @@ struct mtd_write_req { #define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */ #define MTD_NO_ERASE 0x1000 /* No erase necessary */ #define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */ +#define MTD_SLC_ON_MLC_EMULATION 0x4000 /* Emulate SLC behavior on MLC NANDs */ /* Some common devices / combinations of capabilities */ #define MTD_CAP_ROM 0 diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h index 01ac5853d9ac..d95ef9a2b032 100644 --- a/include/uapi/rdma/hfi/hfi1_user.h +++ b/include/uapi/rdma/hfi/hfi1_user.h @@ -6,7 +6,7 @@ * * GPL LICENSE SUMMARY * - * Copyright(c) 2015 - 2018 Intel Corporation. + * Copyright(c) 2015 - 2020 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -109,6 +109,7 @@ #define HFI1_CAP_OPFN (1UL << 16) /* Enable the OPFN protocol */ #define HFI1_CAP_SDMA_HEAD_CHECK (1UL << 17) /* SDMA head checking */ #define HFI1_CAP_EARLY_CREDIT_RETURN (1UL << 18) /* early credit return */ +#define HFI1_CAP_AIP (1UL << 19) /* Enable accelerated IP */ #define HFI1_RCVHDR_ENTSIZE_2 (1UL << 0) #define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1) diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index d4ddbe4e696c..4961d5e858eb 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -95,6 +95,7 @@ enum uverbs_attrs_create_cq_cmd_attr_ids { UVERBS_ATTR_CREATE_CQ_COMP_VECTOR, UVERBS_ATTR_CREATE_CQ_FLAGS, UVERBS_ATTR_CREATE_CQ_RESP_CQE, + UVERBS_ATTR_CREATE_CQ_EVENT_FD, }; enum uverbs_attrs_destroy_cq_cmd_attr_ids { @@ -120,11 +121,91 @@ enum uverbs_attrs_destroy_flow_action_esp { UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE, }; +enum uverbs_attrs_create_qp_cmd_attr_ids { + UVERBS_ATTR_CREATE_QP_HANDLE, + UVERBS_ATTR_CREATE_QP_XRCD_HANDLE, + UVERBS_ATTR_CREATE_QP_PD_HANDLE, + UVERBS_ATTR_CREATE_QP_SRQ_HANDLE, + UVERBS_ATTR_CREATE_QP_SEND_CQ_HANDLE, + UVERBS_ATTR_CREATE_QP_RECV_CQ_HANDLE, + UVERBS_ATTR_CREATE_QP_IND_TABLE_HANDLE, + UVERBS_ATTR_CREATE_QP_USER_HANDLE, + UVERBS_ATTR_CREATE_QP_CAP, + UVERBS_ATTR_CREATE_QP_TYPE, + UVERBS_ATTR_CREATE_QP_FLAGS, + UVERBS_ATTR_CREATE_QP_SOURCE_QPN, + UVERBS_ATTR_CREATE_QP_EVENT_FD, + UVERBS_ATTR_CREATE_QP_RESP_CAP, + UVERBS_ATTR_CREATE_QP_RESP_QP_NUM, +}; + +enum uverbs_attrs_destroy_qp_cmd_attr_ids { + UVERBS_ATTR_DESTROY_QP_HANDLE, + UVERBS_ATTR_DESTROY_QP_RESP, +}; + +enum uverbs_methods_qp { + UVERBS_METHOD_QP_CREATE, + UVERBS_METHOD_QP_DESTROY, +}; + +enum uverbs_attrs_create_srq_cmd_attr_ids { + UVERBS_ATTR_CREATE_SRQ_HANDLE, + UVERBS_ATTR_CREATE_SRQ_PD_HANDLE, + UVERBS_ATTR_CREATE_SRQ_XRCD_HANDLE, + UVERBS_ATTR_CREATE_SRQ_CQ_HANDLE, + UVERBS_ATTR_CREATE_SRQ_USER_HANDLE, + UVERBS_ATTR_CREATE_SRQ_MAX_WR, + UVERBS_ATTR_CREATE_SRQ_MAX_SGE, + UVERBS_ATTR_CREATE_SRQ_LIMIT, + UVERBS_ATTR_CREATE_SRQ_MAX_NUM_TAGS, + UVERBS_ATTR_CREATE_SRQ_TYPE, + UVERBS_ATTR_CREATE_SRQ_EVENT_FD, + UVERBS_ATTR_CREATE_SRQ_RESP_MAX_WR, + UVERBS_ATTR_CREATE_SRQ_RESP_MAX_SGE, + UVERBS_ATTR_CREATE_SRQ_RESP_SRQ_NUM, +}; + +enum uverbs_attrs_destroy_srq_cmd_attr_ids { + UVERBS_ATTR_DESTROY_SRQ_HANDLE, + 
UVERBS_ATTR_DESTROY_SRQ_RESP, +}; + +enum uverbs_methods_srq { + UVERBS_METHOD_SRQ_CREATE, + UVERBS_METHOD_SRQ_DESTROY, +}; + enum uverbs_methods_cq { UVERBS_METHOD_CQ_CREATE, UVERBS_METHOD_CQ_DESTROY, }; +enum uverbs_attrs_create_wq_cmd_attr_ids { + UVERBS_ATTR_CREATE_WQ_HANDLE, + UVERBS_ATTR_CREATE_WQ_PD_HANDLE, + UVERBS_ATTR_CREATE_WQ_CQ_HANDLE, + UVERBS_ATTR_CREATE_WQ_USER_HANDLE, + UVERBS_ATTR_CREATE_WQ_TYPE, + UVERBS_ATTR_CREATE_WQ_EVENT_FD, + UVERBS_ATTR_CREATE_WQ_MAX_WR, + UVERBS_ATTR_CREATE_WQ_MAX_SGE, + UVERBS_ATTR_CREATE_WQ_FLAGS, + UVERBS_ATTR_CREATE_WQ_RESP_MAX_WR, + UVERBS_ATTR_CREATE_WQ_RESP_MAX_SGE, + UVERBS_ATTR_CREATE_WQ_RESP_WQ_NUM, +}; + +enum uverbs_attrs_destroy_wq_cmd_attr_ids { + UVERBS_ATTR_DESTROY_WQ_HANDLE, + UVERBS_ATTR_DESTROY_WQ_RESP, +}; + +enum uverbs_methods_wq { + UVERBS_METHOD_WQ_CREATE, + UVERBS_METHOD_WQ_DESTROY, +}; + enum uverbs_methods_actions_flow_action_ops { UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, UVERBS_METHOD_FLOW_ACTION_DESTROY, diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index a640bb814be0..5debab45ebcb 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -64,6 +64,41 @@ enum ib_uverbs_access_flags { ~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1) }; +enum ib_uverbs_srq_type { + IB_UVERBS_SRQT_BASIC, + IB_UVERBS_SRQT_XRC, + IB_UVERBS_SRQT_TM, +}; + +enum ib_uverbs_wq_type { + IB_UVERBS_WQT_RQ, +}; + +enum ib_uverbs_wq_flags { + IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0, + IB_UVERBS_WQ_FLAGS_SCATTER_FCS = 1 << 1, + IB_UVERBS_WQ_FLAGS_DELAY_DROP = 1 << 2, + IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3, +}; + +enum ib_uverbs_qp_type { + IB_UVERBS_QPT_RC = 2, + IB_UVERBS_QPT_UC, + IB_UVERBS_QPT_UD, + IB_UVERBS_QPT_RAW_PACKET = 8, + IB_UVERBS_QPT_XRC_INI, + IB_UVERBS_QPT_XRC_TGT, + IB_UVERBS_QPT_DRIVER = 0xFF, +}; + +enum ib_uverbs_qp_create_flags { + IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1, + IB_UVERBS_QP_CREATE_SCATTER_FCS = 1 << 8, + IB_UVERBS_QP_CREATE_CVLAN_STRIPPING = 1 << 9, + IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11, + IB_UVERBS_QP_CREATE_SQ_SIG_ALL = 1 << 12, +}; + enum ib_uverbs_query_port_cap_flags { IB_UVERBS_PCF_SM = 1 << 1, IB_UVERBS_PCF_NOTICE_SUP = 1 << 2, @@ -185,6 +220,14 @@ struct ib_uverbs_query_port_resp_ex { __u8 reserved[6]; }; +struct ib_uverbs_qp_cap { + __u32 max_send_wr; + __u32 max_recv_wr; + __u32 max_send_sge; + __u32 max_recv_sge; + __u32 max_inline_data; +}; + enum rdma_driver_id { RDMA_DRIVER_UNKNOWN, RDMA_DRIVER_MLX5, diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index df1cc3641bda..27905a0268c9 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h @@ -100,6 +100,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 { enum mlx5_ib_alloc_ucontext_resp_mask { MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0, MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1, + MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE = 1UL << 2, }; enum mlx5_user_cmds_supp_uhw { @@ -322,6 +323,8 @@ struct mlx5_ib_create_qp { __aligned_u64 sq_buf_addr; __aligned_u64 access_key; }; + __u32 ece_options; + __u32 reserved; }; /* RX Hash function flags */ @@ -371,7 +374,7 @@ enum mlx5_ib_create_qp_resp_mask { struct mlx5_ib_create_qp_resp { __u32 bfreg_index; - __u32 reserved; + __u32 ece_options; __u32 comp_mask; __u32 tirn; __u32 tisn; @@ -420,12 +423,14 @@ struct mlx5_ib_burst_info { struct mlx5_ib_modify_qp { __u32 comp_mask; struct mlx5_ib_burst_info burst_info; - __u32 
reserved; + __u32 ece_options; }; struct mlx5_ib_modify_qp_resp { __u32 response_length; __u32 dctn; + __u32 ece_options; + __u32 reserved; }; struct mlx5_ib_create_wq_resp { diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h index 24f3388c3182..8e316ef896b5 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h +++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h @@ -241,6 +241,11 @@ enum mlx5_ib_flow_type { MLX5_IB_FLOW_TYPE_MC_DEFAULT, }; +enum mlx5_ib_create_flow_flags { + MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS = 1 << 0, + MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP = 1 << 1, +}; + enum mlx5_ib_create_flow_attrs { MLX5_IB_ATTR_CREATE_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT), MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE, @@ -251,6 +256,7 @@ enum mlx5_ib_create_flow_attrs { MLX5_IB_ATTR_CREATE_FLOW_TAG, MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET, + MLX5_IB_ATTR_CREATE_FLOW_FLAGS, }; enum mlx5_ib_destoy_flow_attrs { diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h index e42940a215a3..ed5a514305c1 100644 --- a/include/uapi/rdma/rdma_user_cm.h +++ b/include/uapi/rdma/rdma_user_cm.h @@ -164,6 +164,8 @@ struct rdma_ucm_query_route_resp { __u32 num_paths; __u8 port_num; __u8 reserved[3]; + __u32 ibdev_index; + __u32 reserved1; }; struct rdma_ucm_query_addr_resp { @@ -175,6 +177,8 @@ struct rdma_ucm_query_addr_resp { __u16 dst_size; struct __kernel_sockaddr_storage src_addr; struct __kernel_sockaddr_storage dst_addr; + __u32 ibdev_index; + __u32 reserved1; }; struct rdma_ucm_query_path_resp { @@ -206,10 +210,16 @@ struct rdma_ucm_ud_param { __u8 reserved[7]; }; +struct rdma_ucm_ece { + __u32 vendor_id; + __u32 attr_mod; +}; + struct rdma_ucm_connect { struct rdma_ucm_conn_param conn_param; __u32 id; __u32 reserved; + struct rdma_ucm_ece ece; }; struct rdma_ucm_listen { @@ -222,12 +232,14 @@ struct rdma_ucm_accept { struct rdma_ucm_conn_param conn_param; __u32 id; __u32 reserved; + struct rdma_ucm_ece ece; }; struct rdma_ucm_reject { __u32 id; __u8 private_data_len; - __u8 reserved[3]; + __u8 reason; + __u8 reserved[2]; __u8 private_data[RDMA_MAX_PRIVATE_DATA]; }; @@ -287,6 +299,7 @@ struct rdma_ucm_event_resp { struct rdma_ucm_ud_param ud; } param; __u32 reserved; + struct rdma_ucm_ece ece; }; /* Option levels */ diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h index 7b1ec806f8f9..38ab7accb7be 100644 --- a/include/uapi/rdma/rdma_user_ioctl_cmds.h +++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h @@ -36,7 +36,7 @@ #include <linux/types.h> #include <linux/ioctl.h> -/* Documentation/ioctl/ioctl-number.rst */ +/* Documentation/userspace-api/ioctl/ioctl-number.rst */ #define RDMA_IOCTL_MAGIC 0x1b #define RDMA_VERBS_IOCTL \ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr) diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h index 7f5930801f72..3ae65e93235c 100644 --- a/include/uapi/scsi/scsi_bsg_fc.h +++ b/include/uapi/scsi/scsi_bsg_fc.h @@ -209,7 +209,7 @@ struct fc_bsg_host_vendor { __u64 vendor_id; /* start of vendor command area */ - __u32 vendor_cmd[]; + __u32 vendor_cmd[0]; }; /* Response: diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index 9eee32f5e407..a93c0decfdd5 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -18,6 +18,8 @@ */ #define SKL_CONTROL_TYPE_BYTE_TLV 0x100 #define SKL_CONTROL_TYPE_MIC_SELECT 
0x102 +#define SKL_CONTROL_TYPE_MULTI_IO_SELECT 0x103 +#define SKL_CONTROL_TYPE_MULTI_IO_SELECT_DMIC 0x104 #define HDA_SST_CFG_MAX 900 /* size of copier cfg */ #define MAX_IN_QUEUE 8 diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h index 5995b79d6df1..d54be303090f 100644 --- a/include/uapi/sound/sof/abi.h +++ b/include/uapi/sound/sof/abi.h @@ -26,7 +26,7 @@ /* SOF ABI version major, minor and patch numbers */ #define SOF_ABI_MAJOR 3 -#define SOF_ABI_MINOR 13 +#define SOF_ABI_MINOR 16 #define SOF_ABI_PATCH 0 /* SOF ABI version number. Format within 32-bit word is MMmmmppp */ diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index 2a25cd8da503..5941e2eb1588 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -126,4 +126,12 @@ #define SOF_TKN_MUTE_LED_USE 1300 #define SOF_TKN_MUTE_LED_DIRECTION 1301 +/* ALH */ +#define SOF_TKN_INTEL_ALH_RATE 1400 +#define SOF_TKN_INTEL_ALH_CH 1401 + +/* HDA */ +#define SOF_TKN_INTEL_HDA_RATE 1500 +#define SOF_TKN_INTEL_HDA_CH 1501 + #endif diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h index 5cbc9fcbfd45..7955c56d6b3c 100644 --- a/include/vdso/datapage.h +++ b/include/vdso/datapage.h @@ -73,8 +73,8 @@ struct vdso_timestamp { * * @offset is used by the special time namespace VVAR pages which are * installed instead of the real VVAR page. These namespace pages must set - * @seq to 1 and @clock_mode to VLOCK_TIMENS to force the code into the - * time namespace slow path. The namespace aware functions retrieve the + * @seq to 1 and @clock_mode to VDSO_CLOCKMODE_TIMENS to force the code into + * the time namespace slow path. The namespace-aware functions retrieve the * real system-wide VVAR page, read host time and add the per clock offset. * For clocks which are not affected by time namespace adjustment the * offset must be zero.
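The datapage.h hunk above only renames VLOCK_TIMENS to VDSO_CLOCKMODE_TIMENS in the comment; the mechanism it describes can be illustrated with a short sketch. This is not the kernel's generic vDSO read loop, just a hedged approximation of the check the comment documents, assuming the seq and clock_mode fields of struct vdso_data as declared in include/vdso/datapage.h:

static __always_inline bool vdso_page_is_timens(const struct vdso_data *vd)
{
	/*
	 * A time-namespace VVAR page pins @seq to 1 (which an ordinary
	 * seqcount-style reader would treat as "update in progress") and
	 * tags the clock mode, diverting callers to the slow path that
	 * applies the per-clock namespace offset.
	 */
	return READ_ONCE(vd->seq) == 1 &&
	       READ_ONCE(vd->clock_mode) == VDSO_CLOCKMODE_TIMENS;
}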
diff --git a/include/xen/arm/page.h b/include/xen/arm/page.h index f77dcbcba5a6..d7f6af50e200 100644 --- a/include/xen/arm/page.h +++ b/include/xen/arm/page.h @@ -3,11 +3,11 @@ #define _ASM_ARM_XEN_PAGE_H #include <asm/page.h> -#include <asm/pgtable.h> #include <linux/pfn.h> #include <linux/types.h> #include <linux/dma-mapping.h> +#include <linux/pgtable.h> #include <xen/xen.h> #include <xen/interface/grant_table.h> diff --git a/include/xen/events.h b/include/xen/events.h index 12b0dcb6a120..df1e6391f63f 100644 --- a/include/xen/events.h +++ b/include/xen/events.h @@ -90,13 +90,6 @@ unsigned int irq_from_evtchn(evtchn_port_t evtchn); int irq_from_virq(unsigned int cpu, unsigned int virq); evtchn_port_t evtchn_from_irq(unsigned irq); -#ifdef CONFIG_XEN_PVHVM -/* Xen HVM evtchn vector callback */ -void xen_hvm_callback_vector(void); -#ifdef CONFIG_TRACING -#define trace_xen_hvm_callback_vector xen_hvm_callback_vector -#endif -#endif int xen_set_callback_via(uint64_t via); void xen_evtchn_do_upcall(struct pt_regs *regs); void xen_hvm_evtchn_do_upcall(void); diff --git a/include/xen/hvm.h b/include/xen/hvm.h index 0b15f8cb17fc..b7fd7fc9ad41 100644 --- a/include/xen/hvm.h +++ b/include/xen/hvm.h @@ -58,4 +58,6 @@ static inline int hvm_get_parameter(int idx, uint64_t *value) #define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\ HVM_CALLBACK_VIA_TYPE_SHIFT | (x)) +void xen_setup_callback_vector(void); + #endif /* XEN_HVM_H__ */ diff --git a/include/xen/interface/hvm/hvm_op.h b/include/xen/interface/hvm/hvm_op.h index 956a04682865..25d945ef17de 100644 --- a/include/xen/interface/hvm/hvm_op.h +++ b/include/xen/interface/hvm/hvm_op.h @@ -21,6 +21,8 @@ #ifndef __XEN_PUBLIC_HVM_HVM_OP_H__ #define __XEN_PUBLIC_HVM_HVM_OP_H__ +#include <xen/interface/xen.h> + /* Get/set subcommands: the second argument of the hypercall is a * pointer to a xen_hvm_param struct. */ #define HVMOP_set_param 0 diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 095be1d66f31..39a5580f8feb 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -215,17 +215,7 @@ bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); void xen_efi_runtime_setup(void); -#ifdef CONFIG_PREEMPTION - -static inline void xen_preemptible_hcall_begin(void) -{ -} - -static inline void xen_preemptible_hcall_end(void) -{ -} - -#else +#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION) DECLARE_PER_CPU(bool, xen_in_preemptible_hcall); @@ -239,6 +229,11 @@ static inline void xen_preemptible_hcall_end(void) __this_cpu_write(xen_in_preemptible_hcall, false); } -#endif /* CONFIG_PREEMPTION */ +#else + +static inline void xen_preemptible_hcall_begin(void) { } +static inline void xen_preemptible_hcall_end(void) { } + +#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */ #endif /* INCLUDE_XEN_OPS_H */
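The closing xen-ops.h hunk limits the preemptible-hypercall helpers to CONFIG_XEN_PV kernels built without CONFIG_PREEMPTION and turns them into empty stubs everywhere else. As a usage sketch, not taken from this patch: a caller brackets a potentially long-running hypercall with the begin/end pair so the critical section stays visible to the scheduler on non-preemptible kernels. The function below is hypothetical, and privcmd_call() merely stands in for the arch-specific hypercall wrapper.

/* Hypothetical caller, for illustration only. */
static long xen_issue_slow_hcall(unsigned int op, void *arg)
{
	long ret;

	xen_preemptible_hcall_begin();
	/* The per-CPU flag is set while the hypercall runs. */
	ret = privcmd_call(op, (unsigned long)arg, 0, 0, 0, 0);
	xen_preemptible_hcall_end();

	return ret;
}

On configurations where the stubs apply, both helpers compile to nothing, so the bracketing adds no overhead there.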