Diffstat (limited to 'include/linux')
223 files changed, 6978 insertions, 2514 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 72e4f7fd268c..974d497a897d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -249,7 +249,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
 /* the following numa functions are architecture-dependent */
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
 
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
 #else
 static inline void
@@ -1380,13 +1380,11 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
 #endif
 
 #ifdef CONFIG_ACPI
-extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
 #else
-static inline int
-acpi_platform_notify(struct device *dev, enum kobject_action action)
-{
-	return 0;
-}
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
 #endif
 
 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index ed1d3ffd5b9d..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -77,9 +77,8 @@
 	__ret; \
 })
 
-#include <linux/atomic-arch-fallback.h>
-#include <asm-generic/atomic-instrumented.h>
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
 
 #endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index a3dba31df01e..a3dba31df01e 100644
--- a/include/linux/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644
index 000000000000..a0f654370da3
--- /dev/null
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -0,0 +1,1915 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality an arch's atomic.h file needs to define all
+ * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */ +#ifndef _LINUX_ATOMIC_INSTRUMENTED_H +#define _LINUX_ATOMIC_INSTRUMENTED_H + +#include <linux/build_bug.h> +#include <linux/compiler.h> +#include <linux/instrumented.h> + +static __always_inline int +atomic_read(const atomic_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_read(v); +} + +static __always_inline int +atomic_read_acquire(const atomic_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_read_acquire(v); +} + +static __always_inline void +atomic_set(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_set(v, i); +} + +static __always_inline void +atomic_set_release(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_set_release(v, i); +} + +static __always_inline void +atomic_add(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_add(i, v); +} + +static __always_inline int +atomic_add_return(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} + +static __always_inline int +atomic_add_return_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return_acquire(i, v); +} + +static __always_inline int +atomic_add_return_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return_release(i, v); +} + +static __always_inline int +atomic_add_return_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return_relaxed(i, v); +} + +static __always_inline int +atomic_fetch_add(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} + +static __always_inline int +atomic_fetch_add_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_acquire(i, v); +} + +static __always_inline int +atomic_fetch_add_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_release(i, v); +} + +static __always_inline int +atomic_fetch_add_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_sub(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_sub(i, v); +} + +static __always_inline int +atomic_sub_return(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} + +static __always_inline int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return_acquire(i, v); +} + +static __always_inline int +atomic_sub_return_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return_release(i, v); +} + +static __always_inline int +atomic_sub_return_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return_relaxed(i, v); +} + +static __always_inline int +atomic_fetch_sub(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} + +static __always_inline int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_acquire(i, v); +} + +static __always_inline int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, 
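The header comment above describes the whole scheme: every atomic_*() entry point is a thin wrapper that first reports the access to KASAN/KCSAN through the instrument_*() helpers and then defers to the arch_*() implementation, and arch code must stay inside the arch_ namespace so the same access is never reported twice. A minimal sketch of both halves of that contract (the fallback name below is illustrative, not part of this file):

/* The wrapper shape used throughout this file: report, then defer. */
static __always_inline int
atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));	/* report the access to KASAN/KCSAN */
	return arch_atomic_read(v);		/* the real, uninstrumented arch op */
}

/*
 * A hypothetical arch fallback built from other atomics: it must use
 * arch_atomic_read()/arch_atomic_try_cmpxchg(), because the plain
 * atomic_*() names would instrument the same access a second time.
 */
static __always_inline int
arch_atomic_fetch_add_fallback(int i, atomic_t *v)	/* illustrative name */
{
	int old = arch_atomic_read(v);

	while (!arch_atomic_try_cmpxchg(v, &old, old + i))
		;
	return old;
}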
sizeof(*v)); + return arch_atomic_fetch_sub_release(i, v); +} + +static __always_inline int +atomic_fetch_sub_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_inc(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_inc(v); +} + +static __always_inline int +atomic_inc_return(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return(v); +} + +static __always_inline int +atomic_inc_return_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_acquire(v); +} + +static __always_inline int +atomic_inc_return_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_release(v); +} + +static __always_inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_relaxed(v); +} + +static __always_inline int +atomic_fetch_inc(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc(v); +} + +static __always_inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_acquire(v); +} + +static __always_inline int +atomic_fetch_inc_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_release(v); +} + +static __always_inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_dec(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_dec(v); +} + +static __always_inline int +atomic_dec_return(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return(v); +} + +static __always_inline int +atomic_dec_return_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_acquire(v); +} + +static __always_inline int +atomic_dec_return_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_release(v); +} + +static __always_inline int +atomic_dec_return_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_relaxed(v); +} + +static __always_inline int +atomic_fetch_dec(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec(v); +} + +static __always_inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_acquire(v); +} + +static __always_inline int +atomic_fetch_dec_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_release(v); +} + +static __always_inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_and(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_and(i, v); +} + +static __always_inline int +atomic_fetch_and(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + +static __always_inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_fetch_and_acquire(i, v); +} + +static __always_inline int +atomic_fetch_and_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and_release(i, v); +} + +static __always_inline int +atomic_fetch_and_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_andnot(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_andnot(i, v); +} + +static __always_inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot(i, v); +} + +static __always_inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_release(i, v); +} + +static __always_inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_or(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_or(i, v); +} + +static __always_inline int +atomic_fetch_or(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + +static __always_inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_acquire(i, v); +} + +static __always_inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_release(i, v); +} + +static __always_inline int +atomic_fetch_or_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_xor(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_xor(i, v); +} + +static __always_inline int +atomic_fetch_xor(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_acquire(i, v); +} + +static __always_inline int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_release(i, v); +} + +static __always_inline int +atomic_fetch_xor_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline int +atomic_xchg(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + +static __always_inline int +atomic_xchg_acquire(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_acquire(v, i); +} + +static __always_inline int +atomic_xchg_release(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_release(v, i); +} + +static __always_inline int +atomic_xchg_relaxed(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_relaxed(v, i); +} + +static __always_inline 
int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_release(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_and_test(i, v); +} + +static __always_inline bool +atomic_dec_and_test(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_and_test(v); +} + +static __always_inline bool +atomic_inc_and_test(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_and_test(v); +} + +static __always_inline bool +atomic_add_negative(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_negative(i, v); +} + +static __always_inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_unless(v, a, u); +} + +static __always_inline bool +atomic_inc_not_zero(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_not_zero(v); +} + +static __always_inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_unless_negative(v); +} + +static __always_inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_unless_positive(v); +} + +static __always_inline int +atomic_dec_if_positive(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_if_positive(v); +} + +static __always_inline s64 +atomic64_read(const atomic64_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic64_read(v); +} + +static __always_inline s64 
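The atomic_try_cmpxchg() wrappers above instrument two locations, *v and *old, because the operation both updates the atomic and, on failure, writes the observed value back through the old pointer. That write-back is what makes the canonical retry loop cheap: the loop never has to re-read v. A small usage sketch (inc_below() is a made-up helper, not from this file):

/* Increment v only while it stays below a caller-chosen cap. */
static bool inc_below(atomic_t *v, int cap)
{
	int old = atomic_read(v);

	do {
		if (old >= cap)
			return false;
		/* on failure, 'old' has been refreshed with the current value */
	} while (!atomic_try_cmpxchg(v, &old, old + 1));

	return true;
}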
+atomic64_read_acquire(const atomic64_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic64_read_acquire(v); +} + +static __always_inline void +atomic64_set(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set(v, i); +} + +static __always_inline void +atomic64_set_release(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set_release(v, i); +} + +static __always_inline void +atomic64_add(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_add(i, v); +} + +static __always_inline s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} + +static __always_inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_acquire(i, v); +} + +static __always_inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_release(i, v); +} + +static __always_inline s64 +atomic64_add_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_relaxed(i, v); +} + +static __always_inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic64_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_sub(i, v); +} + +static __always_inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + +static __always_inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_acquire(i, v); +} + +static __always_inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_release(i, v); +} + +static __always_inline s64 +atomic64_sub_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_relaxed(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic64_inc(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_inc(v); +} + +static __always_inline s64 +atomic64_inc_return(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return(v); +} + +static __always_inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_acquire(v); +} + +static __always_inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_release(v); +} + +static __always_inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_relaxed(v); +} + +static __always_inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc(v); +} + +static __always_inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_acquire(v); +} + +static __always_inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_release(v); +} + +static __always_inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic64_dec(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_dec(v); +} + +static __always_inline s64 +atomic64_dec_return(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return(v); +} + +static __always_inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_acquire(v); +} + +static __always_inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_release(v); +} + +static __always_inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_relaxed(v); +} + +static __always_inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec(v); +} + +static __always_inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_acquire(v); +} + +static __always_inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_release(v); +} + +static __always_inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic64_and(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_and(i, v); +} + +static __always_inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_acquire(i, v); +} + +static __always_inline s64 
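Each operation in this file comes in up to four orderings: fully ordered (no suffix), _acquire, _release, and _relaxed. The instrumentation call is identical across them; only the arch_*() target differs. A sketch of the classic release/acquire pairing, with made-up producer/consumer variables:

/* 'payload' and 'ready' are illustrative globals for the sketch. */
static int payload;
static atomic_t ready;

static void publish(void)
{
	payload = 42;			/* ordinary store... */
	atomic_set_release(&ready, 1);	/* ...ordered before the flag write */
}

static int consume(void)
{
	if (atomic_read_acquire(&ready))	/* pairs with the release above */
		return payload;			/* guaranteed to observe 42 */
	return 0;
}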
+atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_andnot(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic64_or(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_or(i, v); +} + +static __always_inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic64_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_xor(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + +static __always_inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg_acquire(v, i); +} + +static __always_inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg_release(v, i); +} + +static __always_inline s64 +atomic64_xchg_relaxed(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic64_xchg_relaxed(v, i); +} + +static __always_inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_and_test(i, v); +} + +static __always_inline bool +atomic64_dec_and_test(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_and_test(v); +} + +static __always_inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_and_test(v); +} + +static __always_inline bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_negative(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_unless(v, a, u); +} + +static __always_inline bool +atomic64_inc_not_zero(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_not_zero(v); +} + +static __always_inline bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_unless_negative(v); +} + +static __always_inline bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_unless_positive(v); +} + +static __always_inline s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_if_positive(v); +} + +static __always_inline 
long +atomic_long_read(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read(v); +} + +static __always_inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read_acquire(v); +} + +static __always_inline void +atomic_long_set(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set(v, i); +} + +static __always_inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set_release(v, i); +} + +static __always_inline void +atomic_long_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_add(i, v); +} + +static __always_inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return(i, v); +} + +static __always_inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_acquire(i, v); +} + +static __always_inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_release(i, v); +} + +static __always_inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add(i, v); +} + +static __always_inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_release(i, v); +} + +static __always_inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_sub(i, v); +} + +static __always_inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return(i, v); +} + +static __always_inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_acquire(i, v); +} + +static __always_inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_release(i, v); +} + +static __always_inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_fetch_sub_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_release(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_long_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_inc(v); +} + +static __always_inline long +atomic_long_inc_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return(v); +} + +static __always_inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_acquire(v); +} + +static __always_inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_release(v); +} + +static __always_inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc(v); +} + +static __always_inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_acquire(v); +} + +static __always_inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_release(v); +} + +static __always_inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_long_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_dec(v); +} + +static __always_inline long +atomic_long_dec_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return(v); +} + +static __always_inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_acquire(v); +} + +static __always_inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_release(v); +} + +static __always_inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec(v); +} + +static __always_inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_acquire(v); +} + +static __always_inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_release(v); +} + +static __always_inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_long_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_release(i, v); +} + +static __always_inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_release(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_long_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_release(i, v); +} + +static __always_inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_release(i, v); +} + +static __always_inline long 
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_relaxed(i, v); +} + +static __always_inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg(v, i); +} + +static __always_inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_acquire(v, i); +} + +static __always_inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_release(v, i); +} + +static __always_inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_relaxed(v, i); +} + +static __always_inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_release(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_and_test(i, v); +} + +static __always_inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_and_test(v); +} + +static __always_inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_and_test(v); +} + +static __always_inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative(i, v); +} + +static __always_inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + 
instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_not_zero(v); +} + +static __always_inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_unless_negative(v); +} + +static __always_inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_unless_positive(v); +} + +static __always_inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_if_positive(v); +} + +#define xchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define try_cmpxchg(ptr, oldp, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_acquire(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_release(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_relaxed(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define cmpxchg_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ +}) + +#define sync_cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_double(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ +}) + + +#define cmpxchg_double_local(ptr, ...) 
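Unlike the typed functions earlier in the file, xchg()/cmpxchg() and friends accept arbitrary pointer types, so they are macros. Each one copies its pointer argument into a local __ai_ptr so that an argument with side effects is evaluated exactly once, for both the instrumentation call and the arch_*() call; cmpxchg_double() instruments 2 * sizeof(*__ai_ptr) because it operates on a pair of adjacent words. Roughly, assuming a hypothetical side-effecting caller expression:

int *next_slot(void);	/* illustrative: imagine it advances a cursor */

void example(void)
{
	/*
	 * xchg(next_slot(), 1) expands, in effect, to:
	 *
	 *	int *__ai_ptr = next_slot();	// evaluated exactly once
	 *	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));
	 *	arch_xchg(__ai_ptr, 1);
	 */
	xchg(next_slot(), 1);
}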
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ +}) + +#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ +// 2a9553f0a9d5619f19151092df5cabbbf16ce835 diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h new file mode 100644 index 000000000000..800b8c35992d --- /dev/null +++ b/include/linux/atomic/atomic-long.h @@ -0,0 +1,1014 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-long.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_LONG_H +#define _LINUX_ATOMIC_LONG_H + +#include <linux/compiler.h> +#include <asm/types.h> + +#ifdef CONFIG_64BIT +typedef atomic64_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#else +typedef atomic_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#endif + +#ifdef CONFIG_64BIT + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic64_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const atomic_long_t *v) +{ + return arch_atomic64_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic64_set(v, i); +} + +static __always_inline void +arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic64_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic64_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic64_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_release(i, v); +} + +static __always_inline long 
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic64_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic64_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic64_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic64_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic64_dec_return(v); +} + +static __always_inline long +arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return arch_atomic64_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic64_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_release(i, v); 
+} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic64_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic64_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic64_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg(v, (s64 *)old, new); +} + 
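/*
 * Illustrative only, not part of the patch: a typical retry loop built on
 * the arch_atomic_long_try_cmpxchg() wrapper defined above. The (s64 *)
 * cast in the wrapper is sound because this is the CONFIG_64BIT branch,
 * where atomic_long_t is atomic64_t and long is 64 bits wide. Kernel code
 * would normally call the instrumented atomic_long_*() wrappers rather
 * than the arch_ variants; foo_add_capped() is a hypothetical name.
 */
static inline bool foo_add_capped(atomic_long_t *v, long delta, long cap)
{
	long old = arch_atomic_long_read(v);

	do {
		if (old + delta > cap)
			return false;	/* would exceed the cap: give up */
		/* on failure, 'old' is refreshed with the current value */
	} while (!arch_atomic_long_try_cmpxchg(v, &old, old + delta));

	return true;
}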
+static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic64_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic64_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic64_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic64_inc_not_zero(v); +} + +static __always_inline bool +arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic64_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_unless_positive(v); +} + +static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_if_positive(v); +} + +#else /* CONFIG_64BIT */ + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const atomic_long_t *v) +{ + return arch_atomic_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic_set(v, i); +} + +static __always_inline void +arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_release(i, v); +} + +static __always_inline long 
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic_fetch_inc(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic_dec_return(v); +} + +static __always_inline long +arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return arch_atomic_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_release(v); +} + +static __always_inline long 
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return arch_atomic_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline long 
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_release(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic_inc_not_zero(v); +} + +static __always_inline bool +arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic_dec_unless_positive(v); +} + +static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic_dec_if_positive(v); +} + +#endif /* CONFIG_64BIT */ +#endif /* _LINUX_ATOMIC_LONG_H */ +// e8f0e08ff072b74d180eabe2ad001282b38c2c88 diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 44df4fcef65c..29530859d9ff 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -143,7 +143,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) - return I_BDEV(inode)->bd_bdi; + return I_BDEV(inode)->bd_disk->bdi; #endif return sb->s_bdi; } diff --git a/include/linux/bio.h b/include/linux/bio.h index 2203b686e1f0..00952e92eae1 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -5,7 +5,6 @@ #ifndef __LINUX_BIO_H #define __LINUX_BIO_H -#include <linux/highmem.h> #include <linux/mempool.h> #include <linux/ioprio.h> /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ @@ -375,7 +374,7 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip, #endif /* CONFIG_BLK_DEV_INTEGRITY */ -extern void bio_trim(struct bio *bio, int offset, 
int size); +void bio_trim(struct bio *bio, sector_t offset, sector_t size); extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs); @@ -401,6 +400,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, enum { BIOSET_NEED_BVECS = BIT(0), BIOSET_NEED_RESCUER = BIT(1), + BIOSET_PERCPU_CACHE = BIT(2), }; extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); @@ -409,6 +409,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, struct bio_set *bs); +struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, + struct bio_set *bs); struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); @@ -519,47 +521,6 @@ static inline void bio_clone_blkg_association(struct bio *dst, struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ -#ifdef CONFIG_HIGHMEM -/* - * remember never ever reenable interrupts between a bvec_kmap_irq and - * bvec_kunmap_irq! - */ -static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) -{ - unsigned long addr; - - /* - * might not be a highmem page, but the preempt/irq count - * balancing is a lot nicer this way - */ - local_irq_save(*flags); - addr = (unsigned long) kmap_atomic(bvec->bv_page); - - BUG_ON(addr & ~PAGE_MASK); - - return (char *) addr + bvec->bv_offset; -} - -static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) -{ - unsigned long ptr = (unsigned long) buffer & PAGE_MASK; - - kunmap_atomic((void *) ptr); - local_irq_restore(*flags); -} - -#else -static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) -{ - return page_address(bvec->bv_page) + bvec->bv_offset; -} - -static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) -{ - *flags = 0; -} -#endif - /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 
 *
@@ -699,6 +660,11 @@ struct bio_set {
 	struct kmem_cache *bio_slab;
 	unsigned int front_pad;
 
+	/*
+	 * per-cpu bio alloc cache
+	 */
+	struct bio_alloc_cache __percpu *cache;
+
 	mempool_t bio_pool;
 	mempool_t bvec_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -715,6 +681,11 @@ struct bio_set {
 	struct bio_list rescue_list;
 	struct work_struct rescue_work;
 	struct workqueue_struct *rescue_workqueue;
+
+	/*
+	 * Hot un-plug notifier for the per-cpu cache, if used
+	 */
+	struct hlist_node cpuhp_dead;
 };
 
 static inline bool bioset_initialized(struct bio_set *bs)
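The two bio_set additions above (the __percpu cache and the CPU-hotplug node that tears it down) are what the new bio_alloc_kiocb() draws from. A minimal sketch of how a user might opt in, with hypothetical foo_* names; in this series the real consumer is the polled-I/O path, and the cache is only used for kiocbs flagged for it:

/* Sketch only, not part of the patch. */
static struct bio_set foo_bio_set;	/* hypothetical bioset */

static int foo_init(void)
{
	/* BIOSET_PERCPU_CACHE requests the new per-cpu alloc cache */
	return bioset_init(&foo_bio_set, BIO_POOL_SIZE, 0,
			   BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}

static struct bio *foo_bio_for(struct kiocb *iocb, unsigned short nr_vecs)
{
	/* falls back to the regular mempool path unless the kiocb
	 * permits cached allocation */
	return bio_alloc_kiocb(iocb, nr_vecs, &foo_bio_set);
}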
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index a36cfcec4e77..37f36dad18bd 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -227,6 +227,12 @@ unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, un
 int bitmap_print_to_pagebuf(bool list, char *buf,
 				   const unsigned long *maskp, int nmaskbits);
 
+extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+				      int nmaskbits, loff_t off, size_t count);
+
+extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+				      int nmaskbits, loff_t off, size_t count);
+
 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
 #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
 
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 26bf15e6cd35..5e62e2383b7f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,6 +4,7 @@
 #include <asm/types.h>
 #include <linux/bits.h>
+#include <linux/typecheck.h>
 
 #include <uapi/linux/kernel.h>
 
@@ -253,6 +254,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
 		__clear_bit(nr, addr);
 }
 
+/**
+ * __ptr_set_bit - Set bit in a pointer's value
+ * @nr: the bit to set
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ *	void *p = foo();
+ *	__ptr_set_bit(bit, &p);
+ */
+#define __ptr_set_bit(nr, addr)                         \
+	({                                              \
+		typecheck_pointer(*(addr));             \
+		__set_bit(nr, (unsigned long *)(addr)); \
+	})
+
+/**
+ * __ptr_clear_bit - Clear bit in a pointer's value
+ * @nr: the bit to clear
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ *	void *p = foo();
+ *	__ptr_clear_bit(bit, &p);
+ */
+#define __ptr_clear_bit(nr, addr)                         \
+	({                                                \
+		typecheck_pointer(*(addr));               \
+		__clear_bit(nr, (unsigned long *)(addr)); \
+	})
+
+/**
+ * __ptr_test_bit - Test bit in a pointer's value
+ * @nr: the bit to test
+ * @addr: the address of the pointer variable
+ *
+ * Example:
+ *	void *p = foo();
+ *	if (__ptr_test_bit(bit, &p)) {
+ *		...
+ *	} else {
+ *		...
+ *	}
+ */
+#define __ptr_test_bit(nr, addr)                       \
+	({                                             \
+		typecheck_pointer(*(addr));            \
+		test_bit(nr, (unsigned long *)(addr)); \
+	})
+
 #ifdef __KERNEL__
 
 #ifndef set_mask_bits
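Since __ptr_test_bit() only reports the tag, a reader still has to mask the bit off before dereferencing. A small sketch (hypothetical foo_* names, not part of the patch) of the round trip these helpers enable on an aligned pointer:

struct foo_node { int val; };	/* natural alignment keeps bit 0 free */

static void foo_mark(struct foo_node **slot)
{
	__ptr_set_bit(0, slot);			/* tag the stored pointer */
}

static struct foo_node *foo_deref(struct foo_node **slot)
{
	struct foo_node *p = *slot;

	if (__ptr_test_bit(0, slot))		/* tagged? strip bit 0 */
		p = (struct foo_node *)((unsigned long)p & ~1UL);
	return p;
}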
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 37048438872c..b4de2010fba5 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
 typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
-typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
-				      size_t size);
+typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
+				    struct seq_file *s);
 
 struct blkcg_policy {
 	int				plid;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1d18447ebebc..13ba1861e688 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -404,7 +404,13 @@ enum {
 	BLK_MQ_F_STACKING	= 1 << 2,
 	BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3,
 	BLK_MQ_F_BLOCKING	= 1 << 5,
+	/* Do not allow an I/O scheduler to be configured. */
 	BLK_MQ_F_NO_SCHED	= 1 << 6,
+	/*
+	 * Select 'none' during queue registration in case of a single hwq
+	 * or shared hwqs instead of 'mq-deadline'.
+	 */
+	BLK_MQ_F_NO_SCHED_BY_DEFAULT	= 1 << 7,
 	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
 	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 
@@ -426,18 +432,14 @@ enum {
 	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
 		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
+struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata,
+		struct lock_class_key *lkclass);
 #define blk_mq_alloc_disk(set, queuedata)				\
 ({									\
 	static struct lock_class_key __key;				\
-	struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata);	\
 									\
-	if (!IS_ERR(__disk))						\
-		lockdep_init_map(&__disk->lockdep_map,			\
-			"(bio completion)", &__key, 0);			\
-	__disk;								\
+	__blk_mq_alloc_disk(set, queuedata, &__key);			\
 })
-struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set,
-		void *queuedata);
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 290f9061b29a..be622b5a21ed 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -34,14 +34,10 @@ struct block_device {
 	void *			bd_holder;
 	int			bd_holders;
 	bool			bd_write_holder;
-#ifdef CONFIG_SYSFS
-	struct list_head	bd_holder_disks;
-#endif
 	struct kobject		*bd_holder_dir;
 	u8			bd_partno;
 	spinlock_t		bd_size_lock; /* for bd_inode->i_size updates */
 	struct gendisk *	bd_disk;
-	struct backing_dev_info *bd_bdi;
 
 	/* The counter of freeze processes */
 	int			bd_fsfreeze_count;
@@ -281,6 +277,7 @@ struct bio {
 };
 
 #define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
+#define BIO_MAX_SECTORS		(UINT_MAX >> SECTOR_SHIFT)
 
 /*
  * bio flags
@@ -301,6 +298,7 @@ enum {
 	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
 	BIO_REMAPPED,
 	BIO_ZONE_WRITE_LOCKED,	/* Owns a zoned device zone write lock */
+	BIO_PERCPU_CACHE,	/* can participate in per-cpu alloc cache */
 	BIO_FLAG_LAST
 };
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3177181c4326..c9cb12483e12 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -11,7 +11,6 @@
 #include <linux/minmax.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
-#include <linux/backing-dev-defs.h>
 #include <linux/wait.h>
 #include <linux/mempool.h>
 #include <linux/pfn.h>
@@ -57,7 +56,7 @@ struct blk_keyslot_manager;
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
  */
-#define BLKCG_MAX_POLS		5
+#define BLKCG_MAX_POLS		6
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
@@ -398,8 +397,6 @@ struct request_queue {
 	struct blk_mq_hw_ctx	**queue_hw_ctx;
 	unsigned int		nr_hw_queues;
 
-	struct backing_dev_info	*backing_dev_info;
-
 	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
@@ -424,6 +421,8 @@ struct request_queue {
 
 	spinlock_t		queue_lock;
 
+	struct gendisk		*disk;
+
 	/*
	 * queue kobject
	 */
@@ -664,8 +663,6 @@ extern void blk_clear_pm_only(struct request_queue *q);
 	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
 	(dir), (attrs))
 
-#define queue_to_disk(q)	(dev_to_disk(kobj_to_dev((q)->kobj.parent)))
-
 static inline bool queue_is_mq(struct request_queue *q)
 {
 	return q->mq_ops;
 }
@@ -941,6 +938,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 #define SECTOR_SIZE (1 << SECTOR_SHIFT)
 #endif
 
+#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_MASK		(PAGE_SECTORS - 1)
+
 /*
  * blk_rq_pos()			: the current sector
  * blk_rq_bytes()		: bytes left in the entire request
@@ -1139,7 +1140,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q,
 				      unsigned int size);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
-void blk_queue_update_readahead(struct request_queue *q);
+void disk_update_readahead(struct gendisk *disk);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
@@ -1521,6 +1522,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 	return offset << SECTOR_SHIFT;
 }
 
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and sends them to the controller together. The ranges
+ * need not be contiguous.
+ * Otherwise, the bios/requests will be handled as same as
+ * others which should be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	return false;
+}
+
 static inline int bdev_discard_alignment(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
@@ -1855,6 +1872,13 @@ struct block_device_operations {
 	char *(*devnode)(struct gendisk *disk, umode_t *mode);
 	struct module *owner;
 	const struct pr_ops *pr_ops;
+
+	/*
+	 * Special callback for probing GPT entry at a given sector.
+	 * Needed by Android devices, used by GPT scanner and MMC blk
+	 * driver.
+	 */
+	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
 };
 
 #ifdef CONFIG_COMPAT
@@ -1984,8 +2008,6 @@ void blkdev_put_no_open(struct block_device *bdev);
 
 struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
 void bdev_add(struct block_device *bdev, dev_t dev);
 struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdgrab(struct block_device *bdev);
-void bdput(struct block_device *);
 int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
 		loff_t lend);
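The new blk_discard_mergable() helper above captures the two DISCARD-merge cases its comment describes. A sketch of the kind of caller it is meant for (hypothetical foo_try_merge(), modeled on the block layer's merge decision logic; not part of the patch):

static enum elv_merge foo_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;	/* ranges may be discontiguous */
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;	/* everything else must be contiguous */
	return ELEVATOR_NO_MERGE;
}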
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 8b77d08d4b47..2746fd804216 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -23,22 +23,73 @@ struct ctl_table_header;
 struct task_struct;
 
 #ifdef CONFIG_CGROUP_BPF
+enum cgroup_bpf_attach_type {
+	CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+	CGROUP_INET_INGRESS = 0,
+	CGROUP_INET_EGRESS,
+	CGROUP_INET_SOCK_CREATE,
+	CGROUP_SOCK_OPS,
+	CGROUP_DEVICE,
+	CGROUP_INET4_BIND,
+	CGROUP_INET6_BIND,
+	CGROUP_INET4_CONNECT,
+	CGROUP_INET6_CONNECT,
+	CGROUP_INET4_POST_BIND,
+	CGROUP_INET6_POST_BIND,
+	CGROUP_UDP4_SENDMSG,
+	CGROUP_UDP6_SENDMSG,
+	CGROUP_SYSCTL,
+	CGROUP_UDP4_RECVMSG,
+	CGROUP_UDP6_RECVMSG,
+	CGROUP_GETSOCKOPT,
+	CGROUP_SETSOCKOPT,
+	CGROUP_INET4_GETPEERNAME,
+	CGROUP_INET6_GETPEERNAME,
+	CGROUP_INET4_GETSOCKNAME,
+	CGROUP_INET6_GETSOCKNAME,
+	CGROUP_INET_SOCK_RELEASE,
+	MAX_CGROUP_BPF_ATTACH_TYPE
+};
 
-extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
-#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
+#define CGROUP_ATYPE(type) \
+	case BPF_##type: return type
 
-#define BPF_CGROUP_STORAGE_NEST_MAX	8
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+	switch (attach_type) {
+	CGROUP_ATYPE(CGROUP_INET_INGRESS);
+	CGROUP_ATYPE(CGROUP_INET_EGRESS);
+	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+	CGROUP_ATYPE(CGROUP_SOCK_OPS);
+	CGROUP_ATYPE(CGROUP_DEVICE);
+	CGROUP_ATYPE(CGROUP_INET4_BIND);
+	CGROUP_ATYPE(CGROUP_INET6_BIND);
+	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+	CGROUP_ATYPE(CGROUP_SYSCTL);
+	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+	default:
+		return CGROUP_BPF_ATTACH_TYPE_INVALID;
+	}
+}
 
-struct bpf_cgroup_storage_info {
-	struct task_struct *task;
-	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
-};
+#undef CGROUP_ATYPE
 
-/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
- * to use bpf cgroup storage simultaneously.
- */
-DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
-		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
 
 #define for_each_cgroup_storage_type(stype) \
 	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@@ -80,15 +131,15 @@ struct bpf_prog_array;
 
 struct cgroup_bpf {
 	/* array of effective progs in this cgroup */
-	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
+	struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];
 
 	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
-	struct list_head progs[MAX_BPF_ATTACH_TYPE];
-	u32 flags[MAX_BPF_ATTACH_TYPE];
+	struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+	u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];
 
 	/* list of cgroup shared storages */
 	struct list_head storages;
@@ -128,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 
 int __cgroup_bpf_run_filter_skb(struct sock *sk,
 				struct sk_buff *skb,
-				enum bpf_attach_type type);
+				enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_sk(struct sock *sk,
-			       enum bpf_attach_type type);
+			       enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
 				      struct sockaddr *uaddr,
-				      enum bpf_attach_type type,
+				      enum cgroup_bpf_attach_type atype,
 				      void *t_ctx,
 				      u32 *flags);
 
 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
 				     struct bpf_sock_ops_kern *sock_ops,
-				     enum bpf_attach_type type);
+				     enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
-				      short access, enum bpf_attach_type type);
+				      short access, enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
 				   struct ctl_table *table, int write,
 				   char **buf, size_t *pcount, loff_t *ppos,
-				   enum bpf_attach_type type);
+				   enum cgroup_bpf_attach_type atype);
 
 int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
 				       int *optname, char __user *optval,
@@ -172,44 +223,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type(
 	return BPF_CGROUP_STORAGE_SHARED;
 }
 
-static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
-					 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
-{
-	enum bpf_cgroup_storage_type stype;
-	int i, err = 0;
-
-	preempt_disable();
-	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
-			continue;
-
-		this_cpu_write(bpf_cgroup_storage_info[i].task, current);
-		for_each_cgroup_storage_type(stype)
-			this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
-				       storage[stype]);
-		goto out;
-	}
-	err = -EBUSY;
-	WARN_ON_ONCE(1);
-
-out:
-	preempt_enable();
-	return err;
-}
-
-static inline void bpf_cgroup_storage_unset(void)
-{
-	int i;
-
-	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
-		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
-			continue;
-
-		this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
-		return;
-	}
-}
-
 struct bpf_cgroup_storage *
 cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
 		      void *key, bool locked);
@@ -230,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
 ({									      \
	int __ret = 0;							      \
-	if
(cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \ + if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ - BPF_CGROUP_INET_INGRESS); \ + CGROUP_INET_INGRESS); \ \ __ret; \ }) @@ -240,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ + if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ if (sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ - BPF_CGROUP_INET_EGRESS); \ + CGROUP_INET_EGRESS); \ } \ __ret; \ }) -#define BPF_CGROUP_RUN_SK_PROG(sk, type) \ +#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ - __ret = __cgroup_bpf_run_filter_sk(sk, type); \ + if (cgroup_bpf_enabled(atype)) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, atype); \ } \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND) -#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \ ({ \ u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + if (cgroup_bpf_enabled(atype)) \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ NULL, \ &__unused_flags); \ __ret; \ }) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \ ({ \ u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ + if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ t_ctx, \ &__unused_flags); \ release_sock(sk); \ @@ -300,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE). 
*/ -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \ +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \ ({ \ u32 __flags = 0; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ + if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ NULL, &__flags); \ release_sock(sk); \ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \ @@ -316,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \ - ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \ - cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \ + ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \ + cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \ (sk)->sk_prot->pre_connect) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT) + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL) /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a * fullsock and its parent fullsock cannot be traced by @@ -362,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \ + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \ sock_ops, \ - BPF_CGROUP_SOCK_OPS); \ + CGROUP_SOCK_OPS); \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ if (__sk && sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ sock_ops, \ - BPF_CGROUP_SOCK_OPS); \ + CGROUP_SOCK_OPS); \ } \ __ret; \ }) -#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \ ({ \ int __ret = 0; \ - if 
(cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \ - __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ + if (cgroup_bpf_enabled(CGROUP_DEVICE)) \ + __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \ access, \ - BPF_CGROUP_DEVICE); \ + CGROUP_DEVICE); \ \ __ret; \ }) @@ -397,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \ + if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ buf, count, pos, \ - BPF_CGROUP_SYSCTL); \ + CGROUP_SYSCTL); \ __ret; \ }) @@ -408,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, kernel_optval) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ optname, optval, \ optlen, \ @@ -419,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ get_user(__ret, optlen); \ __ret; \ }) @@ -428,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, max_optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \ tcp_bpf_bypass_getsockopt, \ @@ -443,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \ sock, level, optname, optval, optlen, retval); \ __ret; \ @@ -487,9 +500,6 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } -static inline int bpf_cgroup_storage_set( - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; } -static inline void bpf_cgroup_storage_unset(void) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( @@ -505,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, return 0; } -#define cgroup_bpf_enabled(type) (0) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; }) +#define cgroup_bpf_enabled(atype) (0) +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) @@ -524,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, 
							       uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)	({ 0; })
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
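The bpf-cgroup.h changes above replace the sparse UAPI-sized arrays with a dense, cgroup-local index. A sketch of the translation step attach-time code is expected to perform (hypothetical foo_attach(); the real attach path lives in kernel/bpf/cgroup.c; not part of the patch):

static int foo_attach(struct cgroup_bpf *cgrp_bpf, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype = to_cgroup_bpf_attach_type(type);

	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
		return -EINVAL;		/* not a cgroup attach type */

	/* progs[] and flags[] are now sized by MAX_CGROUP_BPF_ATTACH_TYPE */
	return list_empty(&cgrp_bpf->progs[atype]) ? 0 : -EEXIST;
}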
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f309fc1509f2..f4c16f19f83e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -168,6 +168,7 @@ struct bpf_map {
 	u32 max_entries;
 	u32 map_flags;
 	int spin_lock_off; /* >=0 valid offset, <0 error */
+	int timer_off; /* >=0 valid offset, <0 error */
 	u32 id;
 	int numa_node;
 	u32 btf_key_type_id;
@@ -197,30 +198,53 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
 	return map->spin_lock_off >= 0;
 }
 
-static inline void check_and_init_map_lock(struct bpf_map *map, void *dst)
+static inline bool map_value_has_timer(const struct bpf_map *map)
 {
-	if (likely(!map_value_has_spin_lock(map)))
-		return;
-	*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
-		(struct bpf_spin_lock){};
+	return map->timer_off >= 0;
 }
 
-/* copy everything but bpf_spin_lock */
+static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
+{
+	if (unlikely(map_value_has_spin_lock(map)))
+		*(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
+			(struct bpf_spin_lock){};
+	if (unlikely(map_value_has_timer(map)))
+		*(struct bpf_timer *)(dst + map->timer_off) =
+			(struct bpf_timer){};
+}
+
+/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
 static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
 {
+	u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0;
+
 	if (unlikely(map_value_has_spin_lock(map))) {
-		u32 off = map->spin_lock_off;
+		s_off = map->spin_lock_off;
+		s_sz = sizeof(struct bpf_spin_lock);
+	} else if (unlikely(map_value_has_timer(map))) {
+		t_off = map->timer_off;
+		t_sz = sizeof(struct bpf_timer);
+	}
 
-		memcpy(dst, src, off);
-		memcpy(dst + off + sizeof(struct bpf_spin_lock),
-		       src + off + sizeof(struct bpf_spin_lock),
-		       map->value_size - off - sizeof(struct bpf_spin_lock));
+	if (unlikely(s_sz || t_sz)) {
+		if (s_off < t_off || !s_sz) {
+			swap(s_off, t_off);
+			swap(s_sz, t_sz);
+		}
+		memcpy(dst, src, t_off);
+		memcpy(dst + t_off + t_sz,
+		       src + t_off + t_sz,
+		       s_off - t_off - t_sz);
+		memcpy(dst + s_off + s_sz,
+		       src + s_off + s_sz,
+		       map->value_size - s_off - s_sz);
 	} else {
 		memcpy(dst, src, map->value_size);
 	}
 }
 
 void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
 			   bool lock_src);
+void bpf_timer_cancel_and_free(void *timer);
 int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
 
 struct bpf_offload_dev;
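/*
 * Worked illustration, not part of the patch: what the swap() normalization
 * in copy_map_value() above buys. For a map value whose only special field
 * is a timer, e.g. the hypothetical layout
 *
 *	struct foo_value {
 *		long a;			// copied
 *		struct bpf_timer t;	// must not be copied
 *		long b;			// copied
 *	};
 *
 * only t_off/t_sz are set, so the "|| !s_sz" arm of the swap() moves the
 * timer into (s_off, s_sz) and zeroes (t_off, t_sz). The three memcpy()
 * calls then reduce to:
 *
 *	memcpy(dst, src, 0);				// no-op, t_* are zero
 *	memcpy(dst, src, s_off);			// copies 'a'
 *	memcpy(dst + s_off + s_sz, src + s_off + s_sz,
 *	       value_size - s_off - s_sz);		// copies 'b'
 *
 * i.e. everything except the struct bpf_timer bytes is copied, and one code
 * path serves whichever special field is present.
 */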
@@ -314,6 +338,7 @@ enum bpf_arg_type {
 	ARG_PTR_TO_FUNC,	/* pointer to a bpf program function */
 	ARG_PTR_TO_STACK_OR_NULL,	/* pointer to stack or NULL */
 	ARG_PTR_TO_CONST_STR,	/* pointer to a null terminated read-only string */
+	ARG_PTR_TO_TIMER,	/* pointer to bpf_timer */
 	__BPF_ARG_TYPE_MAX,
 };
 
@@ -554,6 +579,11 @@ struct btf_func_model {
  */
 #define BPF_TRAMP_F_SKIP_FRAME		BIT(2)
 
+/* Store IP address of the caller on the trampoline stack,
+ * so it's available for trampoline's programs.
+ */
+#define BPF_TRAMP_F_IP_ARG		BIT(3)
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86.  Pick a number to fit into BPF_IMAGE_SIZE / 2
  */
@@ -780,6 +810,7 @@ struct bpf_jit_poke_descriptor {
 	void *tailcall_target;
 	void *tailcall_bypass;
 	void *bypass_addr;
+	void *aux;
 	union {
 		struct {
 			struct bpf_map *map;
@@ -1072,7 +1103,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 /* an array of programs to be executed under rcu_lock.
  *
  * Typical usage:
- * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN);
+ * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run);
 *
 * the structure returned by bpf_prog_array_alloc() should be populated
 * with program pointers and the last pointer must be NULL.
@@ -1083,7 +1114,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
  */
 struct bpf_prog_array_item {
 	struct bpf_prog *prog;
-	struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+	union {
+		struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
+		u64 bpf_cookie;
+	};
 };
 
 struct bpf_prog_array {
@@ -1109,73 +1143,133 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array,
 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
 			struct bpf_prog *exclude_prog,
 			struct bpf_prog *include_prog,
+			u64 bpf_cookie,
 			struct bpf_prog_array **new_array);
 
+struct bpf_run_ctx {};
+
+struct bpf_cg_run_ctx {
+	struct bpf_run_ctx run_ctx;
+	const struct bpf_prog_array_item *prog_item;
+};
+
+struct bpf_trace_run_ctx {
+	struct bpf_run_ctx run_ctx;
+	u64 bpf_cookie;
+};
+
+static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx)
+{
+	struct bpf_run_ctx *old_ctx = NULL;
+
+#ifdef CONFIG_BPF_SYSCALL
+	old_ctx = current->bpf_ctx;
+	current->bpf_ctx = new_ctx;
+#endif
+	return old_ctx;
+}
+
+static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx)
+{
+#ifdef CONFIG_BPF_SYSCALL
+	current->bpf_ctx = old_ctx;
+#endif
+}
+
 /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */
 #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE	(1 << 0)
 /* BPF program asks to set CN on the packet. */
 #define BPF_RET_SET_CN				(1 << 0)
 
-/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY,
- * if bpf_cgroup_storage_set() failed, the rest of programs
- * will not execute. This should be a really rare scenario
- * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of
- * preemptions all between bpf_cgroup_storage_set() and
- * bpf_cgroup_storage_unset() on the same cpu.
- */
-#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags)		\
-	({								\
-		struct bpf_prog_array_item *_item;			\
-		struct bpf_prog *_prog;					\
-		struct bpf_prog_array *_array;				\
-		u32 _ret = 1;						\
-		u32 func_ret;						\
-		migrate_disable();					\
-		rcu_read_lock();					\
-		_array = rcu_dereference(array);			\
-		_item = &_array->items[0];				\
-		while ((_prog = READ_ONCE(_item->prog))) {		\
-			if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
-				break;					\
-			func_ret = func(_prog, ctx);			\
-			_ret &= (func_ret & 1);				\
-			*(ret_flags) |= (func_ret >> 1);		\
-			bpf_cgroup_storage_unset();			\
-			_item++;					\
-		}							\
-		rcu_read_unlock();					\
-		migrate_enable();					\
-		_ret;							\
-	 })
-
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage)	\
-	({						\
-		struct bpf_prog_array_item *_item;	\
-		struct bpf_prog *_prog;			\
-		struct bpf_prog_array *_array;		\
-		u32 _ret = 1;				\
-		migrate_disable();			\
-		rcu_read_lock();			\
-		_array = rcu_dereference(array);	\
-		if (unlikely(check_non_null && !_array))\
-			goto _out;			\
-		_item = &_array->items[0];		\
-		while ((_prog = READ_ONCE(_item->prog))) {		\
-			if (!set_cg_storage) {			\
-				_ret &= func(_prog, ctx);	\
-			} else {				\
-				if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage)))	\
-					break;			\
-				_ret &= func(_prog, ctx);	\
-				bpf_cgroup_storage_unset();	\
-			}				\
-			_item++;			\
-		}					\
-_out:							\
-		rcu_read_unlock();			\
-		migrate_enable();			\
-		_ret;					\
-	 })
+typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx);
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu,
+			    const void *ctx, bpf_prog_run_fn run_prog,
+			    u32 *ret_flags)
+{
+	const struct bpf_prog_array_item *item;
+	const struct bpf_prog *prog;
+	const struct bpf_prog_array *array;
+	struct bpf_run_ctx *old_run_ctx;
+	struct bpf_cg_run_ctx run_ctx;
+	u32 ret = 1;
+	u32 func_ret;
+
+	migrate_disable();
+	rcu_read_lock();
+	array = rcu_dereference(array_rcu);
+	item = &array->items[0];
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+	while ((prog = READ_ONCE(item->prog))) {
+		run_ctx.prog_item = item;
+		func_ret = run_prog(prog, ctx);
+		ret &= (func_ret & 1);
+		*(ret_flags) |= (func_ret >> 1);
+		item++;
+	}
+	bpf_reset_run_ctx(old_run_ctx);
+	rcu_read_unlock();
+	migrate_enable();
+	return ret;
+}
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu,
+		      const void *ctx, bpf_prog_run_fn run_prog)
+{
+	const struct bpf_prog_array_item *item;
+	const struct bpf_prog *prog;
+	const struct bpf_prog_array *array;
+	struct bpf_run_ctx *old_run_ctx;
+	struct bpf_cg_run_ctx run_ctx;
+	u32 ret = 1;
+
+	migrate_disable();
+	rcu_read_lock();
+	array = rcu_dereference(array_rcu);
+	item = &array->items[0];
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+	while ((prog = READ_ONCE(item->prog))) {
+		run_ctx.prog_item = item;
+		ret &= run_prog(prog, ctx);
+		item++;
+	}
+	bpf_reset_run_ctx(old_run_ctx);
+	rcu_read_unlock();
+	migrate_enable();
+	return ret;
+}
+
+static __always_inline u32
+BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu,
+		   const void *ctx, bpf_prog_run_fn run_prog)
+{
+	const struct bpf_prog_array_item *item;
+	const struct bpf_prog *prog;
+	const struct bpf_prog_array *array;
+	struct bpf_run_ctx *old_run_ctx;
+	struct bpf_trace_run_ctx run_ctx;
+	u32 ret = 1;
+
+	migrate_disable();
+	rcu_read_lock();
+	array = rcu_dereference(array_rcu);
+	if (unlikely(!array))
+		goto out;
+	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
+	item = &array->items[0];
+	while ((prog =
READ_ONCE(item->prog))) { + run_ctx.bpf_cookie = item->bpf_cookie; + ret &= run_prog(prog, ctx); + item++; + } + bpf_reset_run_ctx(old_run_ctx); +out: + rcu_read_unlock(); + migrate_enable(); + return ret; +} /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs * so BPF programs can request cwr for TCP packets. @@ -1204,7 +1298,7 @@ _out: \ u32 _flags = 0; \ bool _cn; \ u32 _ret; \ - _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \ + _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ if (_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ @@ -1213,12 +1307,6 @@ _out: \ _ret; \ }) -#define BPF_PROG_RUN_ARRAY(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) - -#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) - #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); extern struct mutex bpf_stats_enabled_mutex; @@ -1397,6 +1485,9 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, struct seq_file *seq); typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info); +typedef const struct bpf_func_proto * +(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, + const struct bpf_prog *prog); enum bpf_iter_feature { BPF_ITER_RESCHED = BIT(0), @@ -1409,6 +1500,7 @@ struct bpf_iter_reg { bpf_iter_detach_target_t detach_target; bpf_iter_show_fdinfo_t show_fdinfo; bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t get_func_proto; u32 ctx_arg_info_size; u32 feature; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; @@ -1431,6 +1523,8 @@ struct bpf_iter__bpf_map_elem { int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); bool bpf_iter_prog_supported(struct bpf_prog *prog); +const struct bpf_func_proto * +bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); int bpf_iter_new_fd(struct bpf_link *link); bool bpf_link_is_iter(struct bpf_link *link); @@ -1508,12 +1602,12 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, struct bpf_prog *xdp_prog, struct bpf_map *map, bool exclude_ingress); -bool dev_map_can_have_prog(struct bpf_map *map); void __cpu_map_flush(void); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); -bool cpu_map_prog_allowed(struct bpf_map *map); +int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, + struct sk_buff *skb); /* Return map's numa specified by userspace */ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) @@ -1710,6 +1804,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, return 0; } +static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, + struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + static inline bool cpu_map_prog_allowed(struct bpf_map *map) { return false; @@ -1851,6 +1951,12 @@ void bpf_map_offload_map_free(struct bpf_map *map); int bpf_prog_test_run_syscall(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); + +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type 
ptype); +int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); +void sock_map_unhash(struct sock *sk); +void sock_map_close(struct sock *sk, long timeout); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1883,24 +1989,6 @@ static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, { return -ENOTSUPP; } -#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ - -#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) -int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); -int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); -int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); -void sock_map_unhash(struct sock *sk); -void sock_map_close(struct sock *sk, long timeout); - -void bpf_sk_reuseport_detach(struct sock *sk); -int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, - void *value); -int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, - void *value, u64 map_flags); -#else -static inline void bpf_sk_reuseport_detach(struct sock *sk) -{ -} #ifdef CONFIG_BPF_SYSCALL static inline int sock_map_get_from_fd(const union bpf_attr *attr, @@ -1920,7 +2008,21 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void { return -EOPNOTSUPP; } +#endif /* CONFIG_BPF_SYSCALL */ +#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ +#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) +void bpf_sk_reuseport_detach(struct sock *sk); +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); +#else +static inline void bpf_sk_reuseport_detach(struct sock *sk) +{ +} + +#ifdef CONFIG_BPF_SYSCALL static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value) { @@ -1997,9 +2099,8 @@ extern const struct bpf_func_proto bpf_task_storage_get_proto; extern const struct bpf_func_proto bpf_task_storage_delete_proto; extern const struct bpf_func_proto bpf_for_each_map_elem_proto; extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; - -const struct bpf_func_proto *bpf_tracing_func_proto( - enum bpf_func_id func_id, const struct bpf_prog *prog); +extern const struct bpf_func_proto bpf_sk_setsockopt_proto; +extern const struct bpf_func_proto bpf_sk_getsockopt_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index a9db1eae6796..9c81724e4b98 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -134,4 +134,8 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup) BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) #ifdef CONFIG_NET BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) +BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) +#endif +#ifdef CONFIG_PERF_EVENTS +BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e774ecc1cd1f..5424124dbe36 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -53,7 +53,14 @@ struct bpf_reg_state { /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ - struct bpf_map *map_ptr; + struct { + struct bpf_map *map_ptr; + /* To distinguish map lookups from an outer map, + * the map_uid is non-zero for registers + * pointing to inner maps. 
+ */ + u32 map_uid; + }; /* for PTR_TO_BTF_ID */ struct { @@ -201,12 +208,19 @@ struct bpf_func_state { * zero == main subprog */ u32 subprogno; + /* Every bpf_timer_start will increment async_entry_cnt. + * It's used to distinguish: + * void foo(void) { for(;;); } + * void foo(void) { bpf_timer_set_callback(,foo); } + */ + u32 async_entry_cnt; + bool in_callback_fn; + bool in_async_callback_fn; /* The following fields should be last. See copy_func_state() */ int acquired_refs; struct bpf_reference_state *refs; int allocated_stack; - bool in_callback_fn; struct bpf_stack_state *stack; }; @@ -340,8 +354,8 @@ struct bpf_insn_aux_data { }; u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int sanitize_stack_off; /* stack slot to be cleared */ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ @@ -392,6 +406,7 @@ struct bpf_subprog_info { bool has_tail_call; bool tail_call_reachable; bool has_ld_abs; + bool is_async_cb; }; /* single container for all structs @@ -414,6 +429,7 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ + bool explore_alu_limits; bool allow_ptr_leaks; bool allow_uninit_stack; bool allow_ptr_to_map_access; diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index 5cdeab497cb3..546e27fc6d46 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -62,9 +62,17 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset, return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size); } -static inline void *memdup_bpfptr(bpfptr_t src, size_t len) +static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) { - return memdup_sockptr((sockptr_t) src, len); + void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_bpfptr(p, src, len)) { + kvfree(p); + return ERR_PTR(-EFAULT); + } + return p; } static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) diff --git a/include/linux/btf.h b/include/linux/btf.h index 94a0c976c90f..214fde93214b 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -99,6 +99,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); +int btf_find_timer(const struct btf *btf, const struct btf_type *t); bool btf_type_is_void(const struct btf_type *t); s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind); const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 57890b357f85..47d9abfbdb55 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl) #define BTF_ID_LIST_SINGLE(name, prefix, typename) \ BTF_ID_LIST(name) \ BTF_ID(prefix, typename) +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \ + BTF_ID_LIST_GLOBAL(name) \ + BTF_ID(prefix, typename) /* * The BTF_ID_UNUSED macro defines 4 zero bytes. 
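For context, the BTF_ID_LIST_GLOBAL_SINGLE() macro added above merely composes the two pre-existing macros visible in this hunk. A minimal sketch of what a declaration expands to, using a hypothetical list name:

/* One-line form; resolve_btfids patches in the real BTF ID at link time. */
BTF_ID_LIST_GLOBAL_SINGLE(foo_btf_ids, struct, task_struct)

/* Equivalent open-coded form using the pre-existing macros: */
BTF_ID_LIST_GLOBAL(foo_btf_ids)
BTF_ID(struct, task_struct)

The CONFIG_DEBUG_INFO_BTF=n stub in the next hunk reduces the same declaration to a plain one-element u32 array.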
@@ -148,6 +151,7 @@ extern struct btf_id_set name; #define BTF_ID_UNUSED #define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1]; +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1]; #define BTF_SET_START(name) static struct btf_id_set name = { 0 }; #define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 }; #define BTF_SET_END(name) @@ -172,7 +176,8 @@ extern struct btf_id_set name; BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) enum { #define BTF_SOCK_TYPE(name, str) name, @@ -184,4 +189,6 @@ MAX_BTF_SOCK_TYPE, extern u32 btf_sock_ids[]; #endif +extern u32 btf_task_struct_ids[]; + #endif diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ff832e698efb..0e9bdd42dafb 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -4,9 +4,10 @@ * * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> */ -#ifndef __LINUX_BVEC_ITER_H -#define __LINUX_BVEC_ITER_H +#ifndef __LINUX_BVEC_H +#define __LINUX_BVEC_H +#include <linux/highmem.h> #include <linux/bug.h> #include <linux/errno.h> #include <linux/limits.h> @@ -183,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec, } } -#endif /* __LINUX_BVEC_ITER_H */ +/** + * bvec_kmap_local - map a bvec into the kernel virtual address space + * @bvec: bvec to map + * + * Must be called on single-page bvecs only. Call kunmap_local on the returned + * address to unmap. + */ +static inline void *bvec_kmap_local(struct bio_vec *bvec) +{ + return kmap_local_page(bvec->bv_page) + bvec->bv_offset; +} + +/** + * memcpy_from_bvec - copy data from a bvec + * @bvec: bvec to copy from + * + * Must be called on single-page bvecs only. + */ +static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec) +{ + memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len); +} + +/** + * memcpy_to_bvec - copy data to a bvec + * @bvec: bvec to copy to + * + * Must be called on single-page bvecs only. + */ +static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from) +{ + memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len); +} + +/** + * memzero_bvec - zero all data in a bvec + * @bvec: bvec to zero + * + * Must be called on single-page bvecs only. + */ +static inline void memzero_bvec(struct bio_vec *bvec) +{ + memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len); +} + +/** + * bvec_virt - return the virtual address for a bvec + * @bvec: bvec to return the virtual address for + * + * Note: the caller must ensure that @bvec->bv_page is not a highmem page. + */ +static inline void *bvec_virt(struct bio_vec *bvec) +{ + WARN_ON_ONCE(PageHighMem(bvec->bv_page)); + return page_address(bvec->bv_page) + bvec->bv_offset; +} + +#endif /* __LINUX_BVEC_H */ diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index ae7a3411167c..9de6e9053e34 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -37,7 +37,7 @@ * quanta, from when the bit is sent on the TX pin to when it is * received on the RX pin of the transmitter. Possible options: * - * O: automatic mode. The controller dynamically measure @tdcv + * 0: automatic mode. The controller dynamically measures @tdcv * for each transmitted CAN FD frame. * * Other values: manual mode. 
Use the fixed provided value. @@ -45,7 +45,7 @@ * @tdco: Transmitter Delay Compensation Offset. Offset value, in time * quanta, defining the distance between the start of the bit * reception on the RX pin of the transceiver and the SSP - * position such as SSP = @tdcv + @tdco. + * position such that SSP = @tdcv + @tdco. * * If @tdco is zero, then TDC is disabled and both @tdcv and * @tdcf should be ignored. diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 27b275e463da..2413253e54c7 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -32,6 +32,12 @@ enum can_mode { CAN_MODE_SLEEP }; +enum can_termination_gpio { + CAN_TERMINATION_GPIO_DISABLED = 0, + CAN_TERMINATION_GPIO_ENABLED, + CAN_TERMINATION_GPIO_MAX, +}; + /* * CAN common private data */ @@ -55,6 +61,8 @@ struct can_priv { unsigned int termination_const_cnt; const u16 *termination_const; u16 termination; + struct gpio_desc *termination_gpio; + u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX]; enum can_state state; diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h new file mode 100644 index 000000000000..1b536fb999de --- /dev/null +++ b/include/linux/can/platform/flexcan.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CAN_PLATFORM_FLEXCAN_H +#define _CAN_PLATFORM_FLEXCAN_H + +struct flexcan_platform_data { + u32 clock_frequency; + u8 clk_src; +}; + +#endif /* _CAN_PLATFORM_FLEXCAN_H */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index 40882df7105e..c11477620403 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -20,6 +20,7 @@ struct can_rx_offload { bool drop); struct sk_buff_head skb_queue; + struct sk_buff_head skb_irq_queue; u32 skb_queue_len_max; unsigned int mb_first; @@ -48,14 +49,11 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, unsigned int *frame_len_ptr); int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb); +void can_rx_offload_irq_finish(struct can_rx_offload *offload); +void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); void can_rx_offload_enable(struct can_rx_offload *offload); -static inline void can_rx_offload_schedule(struct can_rx_offload *offload) -{ - napi_schedule(&offload->napi); -} - static inline void can_rx_offload_disable(struct can_rx_offload *offload) { napi_disable(&offload->napi); diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h deleted file mode 100644 index 68a541807bdf..000000000000 --- a/include/linux/cmdline-parser.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Parsing command line, get the partitions information. 
- * - * Written by Cai Zhiyong <caizhiyong@huawei.com> - * - */ -#ifndef CMDLINEPARSEH -#define CMDLINEPARSEH - -#include <linux/blkdev.h> -#include <linux/fs.h> -#include <linux/slab.h> - -/* partition flags */ -#define PF_RDONLY 0x01 /* Device is read only */ -#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */ - -struct cmdline_subpart { - char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */ - sector_t from; - sector_t size; - int flags; - struct cmdline_subpart *next_subpart; -}; - -struct cmdline_parts { - char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */ - unsigned int nr_subparts; - struct cmdline_subpart *subpart; - struct cmdline_parts *next_parts; -}; - -void cmdline_parts_free(struct cmdline_parts **parts); - -int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline); - -struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, - const char *bdev); - -int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, - int slot, - int (*add_part)(int, struct cmdline_subpart *, void *), - void *param); - -#endif /* CMDLINEPARSEH */ diff --git a/include/linux/compat.h b/include/linux/compat.h index c270124e4402..8e0598c7d1d1 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -20,11 +20,8 @@ #include <linux/unistd.h> #include <asm/compat.h> - -#ifdef CONFIG_COMPAT #include <asm/siginfo.h> #include <asm/signal.h> -#endif #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER /* @@ -95,8 +92,6 @@ struct compat_iovec { compat_size_t iov_len; }; -#ifdef CONFIG_COMPAT - #ifndef compat_user_stack_pointer #define compat_user_stack_pointer() current_user_stack_pointer() #endif @@ -131,9 +126,11 @@ struct compat_tms { #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) +#ifndef compat_sigset_t typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; +#endif int set_compat_user_sigmask(const compat_sigset_t __user *umask, size_t sigsetsize); @@ -384,6 +381,7 @@ struct compat_keyctl_kdf_params { __u32 __spare[8]; }; +struct compat_stat; struct compat_statfs; struct compat_statfs64; struct compat_old_linux_dirent; @@ -428,7 +426,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, unsigned int size) { /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ -#ifdef __BIG_ENDIAN +#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT) compat_sigset_t v; switch (_NSIG_WORDS) { case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; @@ -929,17 +927,6 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args); #endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ - -/* - * For most but not all architectures, "am I in a compat syscall?" and - * "am I a compat task?" are the same question. For architectures on which - * they aren't the same question, arch code can override in_compat_syscall. - */ - -#ifndef in_compat_syscall -static inline bool in_compat_syscall(void) { return is_compat_task(); } -#endif - /** * ns_to_old_timeval32 - Compat version of ns_to_timeval * @nsec: the nanoseconds value to be converted @@ -969,6 +956,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user * buf); +#ifdef CONFIG_COMPAT + +/* + * For most but not all architectures, "am I in a compat syscall?" and + * "am I a compat task?" are the same question. For architectures on which + * they aren't the same question, arch code can override in_compat_syscall. 
+ */ +#ifndef in_compat_syscall +static inline bool in_compat_syscall(void) { return is_compat_task(); } +#endif + #else /* !CONFIG_COMPAT */ #define is_compat_task() (0) diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 85008a65e21f..93a2922b7653 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -220,6 +220,10 @@ struct coresight_sysfs_link { * @nr_links: number of sysfs links created to other components from this * device. These will appear in the "connections" group. * @has_conns_grp: Have added a "connections" group for sysfs links. + * @feature_csdev_list: List of complex feature programming added to the device. + * @config_csdev_list: List of system configurations added to the device. + * @cscfg_csdev_lock: Protect the lists of configurations and features. + * @active_cscfg_ctxt: Context information for current active system configuration. */ struct coresight_device { struct coresight_platform_data *pdata; @@ -241,6 +245,11 @@ struct coresight_device { int nr_links; bool has_conns_grp; bool ect_enabled; /* true only if associated ect device is enabled */ + /* system configuration and feature lists */ + struct list_head feature_csdev_list; + struct list_head config_csdev_list; + spinlock_t cscfg_csdev_lock; + void *active_cscfg_ctxt; }; /* diff --git a/include/linux/counter.h b/include/linux/counter.h index 9dbd5df4cd34..d16ce2819b48 100644 --- a/include/linux/counter.h +++ b/include/linux/counter.h @@ -162,15 +162,15 @@ struct counter_count_ext { void *priv; }; -enum counter_count_function { - COUNTER_COUNT_FUNCTION_INCREASE = 0, - COUNTER_COUNT_FUNCTION_DECREASE, - COUNTER_COUNT_FUNCTION_PULSE_DIRECTION, - COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A, - COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B, - COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, - COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, - COUNTER_COUNT_FUNCTION_QUADRATURE_X4 +enum counter_function { + COUNTER_FUNCTION_INCREASE = 0, + COUNTER_FUNCTION_DECREASE, + COUNTER_FUNCTION_PULSE_DIRECTION, + COUNTER_FUNCTION_QUADRATURE_X1_A, + COUNTER_FUNCTION_QUADRATURE_X1_B, + COUNTER_FUNCTION_QUADRATURE_X2_A, + COUNTER_FUNCTION_QUADRATURE_X2_B, + COUNTER_FUNCTION_QUADRATURE_X4 }; /** @@ -192,7 +192,7 @@ struct counter_count { const char *name; size_t function; - const enum counter_count_function *functions_list; + const enum counter_function *functions_list; size_t num_functions; struct counter_synapse *synapses; @@ -290,16 +290,16 @@ struct counter_device_state { const struct attribute_group **groups; }; -enum counter_signal_value { - COUNTER_SIGNAL_LOW = 0, - COUNTER_SIGNAL_HIGH +enum counter_signal_level { + COUNTER_SIGNAL_LEVEL_LOW, + COUNTER_SIGNAL_LEVEL_HIGH, }; /** * struct counter_ops - Callbacks from driver * @signal_read: optional read callback for Signal attribute. The read - * value of the respective Signal should be passed back via - * the val parameter. + * level of the respective Signal should be passed back via + * the level parameter. * @count_read: optional read callback for Count attribute. The read * value of the respective Count should be passed back via * the val parameter. 
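To illustrate the renamed types above (enum counter_function, enum counter_signal_level) and the reworked signal_read() callback shown in the next hunk, which now reports a level rather than a value, here is a minimal driver-side sketch; the foo_* names and the pin-query helper are hypothetical:

static int foo_signal_read(struct counter_device *counter,
			   struct counter_signal *signal,
			   enum counter_signal_level *level)
{
	/* foo_pin_is_high() is a made-up helper reading the input pin. */
	*level = foo_pin_is_high(signal->id) ? COUNTER_SIGNAL_LEVEL_HIGH
					     : COUNTER_SIGNAL_LEVEL_LOW;
	return 0;
}

static const struct counter_ops foo_counter_ops = {
	.signal_read = foo_signal_read,
};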
@@ -324,7 +324,7 @@ enum counter_signal_value { struct counter_ops { int (*signal_read)(struct counter_device *counter, struct counter_signal *signal, - enum counter_signal_value *val); + enum counter_signal_level *level); int (*count_read)(struct counter_device *counter, struct counter_count *count, unsigned long *val); int (*count_write)(struct counter_device *counter, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f39b34b13871..95f88edc8f09 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -46,6 +46,7 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, + CPUHP_BIO_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, @@ -399,7 +400,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state, /** * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state - * without invoking the reatdown callback + * without invoking the teardown callback * @state: The state from which the instance is removed * @node: The node for this individual state. * diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index f3689a52bfd0..5d4d07a9e1ed 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -983,6 +983,44 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) nr_cpu_ids); } +/** + * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as + * hex values of cpumask + * + * @buf: the buffer to copy into + * @mask: the cpumask to copy + * @off: offset in the string from which we are copying; we copy to @buf + * @count: the maximum number of bytes to print + * + * The function prints the cpumask into the buffer as hex values of + * cpumask. Typically used by bin_attribute to export cpumask bitmask + * ABI. + * + * Returns the number of bytes that have been copied. + */ +static inline ssize_t +cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, + loff_t off, size_t count) +{ + return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask), + nr_cpu_ids, off, count); +} + +/** + * cpumap_print_list_to_buf - copies the cpumask into the buffer as + * comma-separated list of cpus + * + * Everything is the same as the above cpumap_print_bitmask_to_buf() + * except the print format. 
+ */ +static inline ssize_t +cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, + loff_t off, size_t count) +{ + return bitmap_print_list_to_buf(buf, cpumask_bits(mask), + nr_cpu_ids, off, count); +} + #if NR_CPUS <= BITS_PER_LONG #define CPU_MASK_ALL \ (cpumask_t) { { \ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 04c20de66afc..d2b9c41c8edf 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -15,6 +15,7 @@ #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/mm.h> +#include <linux/mmu_context.h> #include <linux/jump_label.h> #ifdef CONFIG_CPUSETS @@ -58,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void); extern void cpuset_read_lock(void); extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); -extern void cpuset_cpus_allowed_fallback(struct task_struct *p); +extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); @@ -184,11 +185,12 @@ static inline void cpuset_read_unlock(void) { } static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { - cpumask_copy(mask, cpu_possible_mask); + cpumask_copy(mask, task_cpu_possible_mask(p)); } -static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) +static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p) { + return false; } static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index edb5c186b0b7..3f49e65169c6 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -3,8 +3,7 @@ #define __LINUX_DEBUG_LOCKING_H #include <linux/atomic.h> -#include <linux/bug.h> -#include <linux/printk.h> +#include <linux/cache.h> struct task_struct; diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 7457d49acf9a..114553b487ef 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -31,7 +31,7 @@ enum dm_queue_mode { DM_TYPE_DAX_BIO_BASED = 3, }; -typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t; union map_info { void *ptr; @@ -151,7 +151,6 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, size_t nr_pages); -#define PAGE_SECTORS (PAGE_SIZE / 512) void dm_error(const char *message); @@ -603,6 +602,10 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) +#define DMEMIT_TARGET_NAME_VERSION(y) \ + DMEMIT("target_name=%s,target_version=%u.%u.%u", \ + (y)->name, (y)->version[0], (y)->version[1], (y)->version[2]) + /* * Definitions of return values from target end_io function. */ diff --git a/include/linux/device.h b/include/linux/device.h index 59940f1744c1..65d84b67b024 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -407,6 +407,7 @@ struct dev_links_info { * @em_pd: device's energy model performance domain * @pins: For device pin management. * See Documentation/driver-api/pin-control.rst for details. 
+ * @msi_lock: Lock to protect MSI mask cache and mask register * @msi_list: Hosts MSI descriptors * @msi_domain: The generic MSI domain this device is using. * @numa_node: NUMA node this device is close to. @@ -506,6 +507,7 @@ struct device { struct dev_pin_info *pins; #endif #ifdef CONFIG_GENERIC_MSI_IRQ + raw_spinlock_t msi_lock; struct list_head msi_list; #endif #ifdef CONFIG_DMA_OPS diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index 1ea5e1d1545b..062777a45a74 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -91,7 +91,7 @@ struct bus_type { int (*uevent)(struct device *dev, struct kobj_uevent_env *env); int (*probe)(struct device *dev); void (*sync_state)(struct device *dev); - int (*remove)(struct device *dev); + void (*remove)(struct device *dev); void (*shutdown)(struct device *dev); int (*online)(struct device *dev); diff --git a/include/linux/dfl.h b/include/linux/dfl.h index 6cc10982351a..431636a0dc78 100644 --- a/include/linux/dfl.h +++ b/include/linux/dfl.h @@ -38,6 +38,7 @@ struct dfl_device { int id; u16 type; u16 feature_id; + u8 revision; struct resource mmio_res; int *irqs; unsigned int num_irqs; diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index efdc56b9d95f..8b32b4bdd590 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -54,7 +54,7 @@ struct dma_buf_ops { * device), and otherwise need to fail the attach operation. * * The exporter should also in general check whether the current - * allocation fullfills the DMA constraints of the new device. If this + * allocation fulfills the DMA constraints of the new device. If this * is not the case, and the allocation cannot be moved, it should also * fail the attach operation. * @@ -96,6 +96,12 @@ struct dma_buf_ops { * This is called automatically for non-dynamic importers from * dma_buf_attach(). * + * Note that, similar to non-dynamic exporters in their @map_dma_buf + * callback, the driver must guarantee that the memory is available for + * use and cleared of any old data by the time this function returns. + * Drivers which pipeline their buffer moves internally must wait for + * all moves and clears to complete. + * * Returns: * * 0 on success, negative error code on failure. @@ -144,9 +150,18 @@ struct dma_buf_ops { * This is always called with the dmabuf->resv object locked when * the dynamic_mapping flag is true. * + * Note that for non-dynamic exporters the driver must guarantee + * that the memory is available for use and cleared of any old data by + * the time this function returns. Drivers which pipeline their buffer + * moves internally must wait for all moves and clears to complete. + * Dynamic exporters do not need to follow this rule: For non-dynamic + * importers the buffer is already pinned through @pin, which has the + * same requirements. Dynamic importers, on the other hand, are + * required to obey the dma_resv fences. + * * Returns: * - * A &sg_table scatter list of or the backing storage of the DMA buffer, + * A &sg_table scatter list of the backing storage of the DMA buffer, * already mapped into the device address space of the &device attached * with the provided &dma_buf_attachment. The addresses and lengths in * the scatter list are PAGE_SIZE aligned. @@ -168,7 +183,7 @@ struct dma_buf_ops { * * This is called by dma_buf_unmap_attachment() and should unmap and * release the &sg_table allocated in @map_dma_buf, and it is mandatory. 
- * For static dma_buf handling this might also unpins the backing + * For static dma_buf handling this might also unpin the backing * storage if this is the last mapping of the DMA buffer. */ void (*unmap_dma_buf)(struct dma_buf_attachment *, @@ -237,7 +252,7 @@ struct dma_buf_ops { * This callback is used by the dma_buf_mmap() function * * Note that the mapping needs to be incoherent, userspace is expected - * to braket CPU access using the DMA_BUF_IOCTL_SYNC interface. + * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface. * * Because dma-buf buffers have invariant size over their lifetime, the * dma-buf core checks whether a vma is too large and rejects such @@ -274,27 +289,6 @@ struct dma_buf_ops { /** * struct dma_buf - shared buffer object - * @size: size of the buffer; invariant over the lifetime of the buffer. - * @file: file pointer used for sharing buffers across, and for refcounting. - * @attachments: list of dma_buf_attachment that denotes all devices attached, - * protected by dma_resv lock. - * @ops: dma_buf_ops associated with this buffer object. - * @lock: used internally to serialize list manipulation, attach/detach and - * vmap/unmap - * @vmapping_counter: used internally to refcnt the vmaps - * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 - * @exp_name: name of the exporter; useful for debugging. - * @name: userspace-provided name; useful for accounting and debugging, - * protected by @resv. - * @name_lock: spinlock to protect name access - * @owner: pointer to exporter module; used for refcounting when exporter is a - * kernel module. - * @list_node: node for dma_buf accounting and debugging. - * @priv: exporter specific private data for this buffer object. - * @resv: reservation object linked to this dma-buf - * @poll: for userspace poll support - * @cb_excl: for userspace poll support - * @cb_shared: for userspace poll support * * This represents a shared buffer, created by calling dma_buf_export(). The * userspace representation is a normal file descriptor, which can be created by @@ -306,30 +300,152 @@ struct dma_buf_ops { * Device DMA access is handled by the separate &struct dma_buf_attachment. */ struct dma_buf { + /** + * @size: + * + * Size of the buffer; invariant over the lifetime of the buffer. + */ size_t size; + + /** + * @file: + * + * File pointer used for sharing buffers across, and for refcounting. + * See dma_buf_get() and dma_buf_put(). + */ struct file *file; + + /** + * @attachments: + * + * List of dma_buf_attachment that denotes all devices attached, + * protected by &dma_resv lock @resv. + */ struct list_head attachments; + + /** @ops: dma_buf_ops associated with this buffer object. */ const struct dma_buf_ops *ops; + + /** + * @lock: + * + * Used internally to serialize list manipulation, attach/detach and + * vmap/unmap. Note that in many cases this is superseded by + * dma_resv_lock() on @resv. + */ struct mutex lock; + + /** + * @vmapping_counter: + * + * Used internally to refcnt the vmaps returned by dma_buf_vmap(). + * Protected by @lock. + */ unsigned vmapping_counter; + + /** + * @vmap_ptr: + * The current vmap ptr if @vmapping_counter > 0. Protected by @lock. + */ struct dma_buf_map vmap_ptr; + + /** + * @exp_name: + * + * Name of the exporter; useful for debugging. See the + * DMA_BUF_SET_NAME IOCTL. + */ const char *exp_name; + + /** + * @name: + * + * Userspace-provided name; useful for accounting and debugging, + * protected by dma_resv_lock() on @resv and @name_lock for read access. 
+ */ const char *name; + + /** @name_lock: Spinlock to protect name access for read access. */ spinlock_t name_lock; + + /** + * @owner: + * + * Pointer to exporter module; used for refcounting when exporter is a + * kernel module. + */ struct module *owner; + + /** @list_node: node for dma_buf accounting and debugging. */ struct list_head list_node; + + /** @priv: exporter specific private data for this buffer object. */ void *priv; + + /** + * @resv: + * + * Reservation object linked to this dma-buf. + * + * IMPLICIT SYNCHRONIZATION RULES: + * + * Drivers which support implicit synchronization of buffer access as + * e.g. exposed in `Implicit Fence Poll Support`_ must follow the + * below rules. + * + * - Drivers must add a shared fence through dma_resv_add_shared_fence() + * for anything the userspace API considers a read access. This highly + * depends upon the API and window system. + * + * - Similarly, drivers must set the exclusive fence through + * dma_resv_add_excl_fence() for anything the userspace API considers + * write access. + * + * - Drivers may just always set the exclusive fence, since that only + * causes unnecessary synchronization, but no correctness issues. + * + * - Some drivers only expose a synchronous userspace API with no + * pipelining across drivers. These do not set any fences for their + * access. An example here is v4l. + * + * DYNAMIC IMPORTER RULES: + * + * Dynamic importers, see dma_buf_attachment_is_dynamic(), have + * additional constraints on how they set up fences: + * + * - Dynamic importers must obey the exclusive fence and wait for it to + * signal before allowing access to the buffer's underlying storage + * through the device. + * + * - Dynamic importers should set fences for any access that they can't + * disable immediately from their &dma_buf_attach_ops.move_notify + * callback. + */ struct dma_resv *resv; - /* poll support */ + /** @poll: for userspace poll support */ wait_queue_head_t poll; + /** @cb_excl: for userspace poll support */ + /** @cb_shared: for userspace poll support */ struct dma_buf_poll_cb_t { struct dma_fence_cb cb; wait_queue_head_t *poll; __poll_t active; } cb_excl, cb_shared; +#ifdef CONFIG_DMABUF_SYSFS_STATS + /** + * @sysfs_entry: + * + * For exposing information about this buffer in sysfs. See also + * `DMA-BUF statistics`_ for the uapi this enables. 
+ */ + struct dma_buf_sysfs_entry { + struct kobject kobj; + struct dma_buf *dmabuf; + } *sysfs_entry; +#endif }; /** @@ -464,7 +580,7 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf) /** * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic - * mappinsg + * mappings * @attach: the DMA-buf attachment to check * * Returns true if a DMA-buf importer wants to call the map/unmap functions with diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 10462a029da2..54fe3443fd2c 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -12,25 +12,41 @@ #include <linux/dma-fence.h> #include <linux/irq_work.h> +#include <linux/slab.h> /** * struct dma_fence_chain - fence to represent a node of a fence chain * @base: fence base class - * @lock: spinlock for fence handling * @prev: previous fence of the chain * @prev_seqno: original previous seqno before garbage collection * @fence: encapsulated fence - * @cb: callback structure for signaling - * @work: irq work item for signaling + * @lock: spinlock for fence handling */ struct dma_fence_chain { struct dma_fence base; - spinlock_t lock; struct dma_fence __rcu *prev; u64 prev_seqno; struct dma_fence *fence; - struct dma_fence_cb cb; - struct irq_work work; + union { + /** + * @cb: callback for signaling + * + * This is used to add the callback for signaling the + * completion of the fence chain. Never used at the same time + * as the irq work. + */ + struct dma_fence_cb cb; + + /** + * @work: irq work item for signaling + * + * Irq work structure to allow us to add the callback without + * running into lock inversion. Never used at the same time as + * the callback. + */ + struct irq_work work; + }; + spinlock_t lock; }; extern const struct dma_fence_ops dma_fence_chain_ops; @@ -52,6 +68,30 @@ to_dma_fence_chain(struct dma_fence *fence) } /** + * dma_fence_chain_alloc + * + * Returns a new struct dma_fence_chain object or NULL on failure. + */ +static inline struct dma_fence_chain *dma_fence_chain_alloc(void) +{ + return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); +}; + +/** + * dma_fence_chain_free + * @chain: chain node to free + * + * Frees up an allocated but not used struct dma_fence_chain object. This + * doesn't need an RCU grace period since the fence was never initialized nor + * published. After dma_fence_chain_init() has been called the fence must be + * released by calling dma_fence_put(), and not through this function. 
+ */ +static inline void dma_fence_chain_free(struct dma_fence_chain *chain) +{ + kfree(chain); +}; + +/** * dma_fence_chain_for_each - iterate over all fences in chain * @iter: current fence * @head: starting point diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 1587961f1a7b..c7fa4a3498fe 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -11,60 +11,48 @@ struct dsa_switch; struct sk_buff; struct net_device; -struct packet_type; -struct dsa_8021q_context; -struct dsa_8021q_crosschip_link { +struct dsa_tag_8021q_vlan { struct list_head list; int port; - struct dsa_8021q_context *other_ctx; - int other_port; + u16 vid; refcount_t refcount; }; -struct dsa_8021q_ops { - int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags); - int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid); -}; - struct dsa_8021q_context { - const struct dsa_8021q_ops *ops; struct dsa_switch *ds; - struct list_head crosschip_links; + struct list_head vlans; /* EtherType of RX VID, used for filtering on master interface */ __be16 proto; }; -#define DSA_8021Q_N_SUBVLAN 8 - -int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled); +int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto); -int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port); - -int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port); +void dsa_tag_8021q_unregister(struct dsa_switch *ds); struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); -void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, - int *subvlan); +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id); + +int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num); + +void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num); + +u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num); u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); -u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan); - int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); -u16 dsa_8021q_rx_subvlan(u16 vid); - bool vid_is_dsa_8021q_rxvlan(u16 vid); bool vid_is_dsa_8021q_txvlan(u16 vid); diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index b6089b88314c..171106202fe5 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -16,6 +16,8 @@ #define ETH_P_SJA1105_META 0x0008 #define ETH_P_SJA1110 0xdadc +#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1) + /* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */ #define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull #define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull @@ -59,14 +61,12 @@ struct sja1105_skb_cb { ((struct sja1105_skb_cb *)((skb)->cb)) struct sja1105_port { - u16 subvlan_map[DSA_8021Q_N_SUBVLAN]; struct kthread_worker *xmit_worker; struct kthread_work xmit_work; struct sk_buff_head xmit_queue; struct sja1105_tagger_data *data; struct dsa_port *dp; bool hwts_tx_en; - u16 xmit_tpid; }; enum sja1110_meta_tstamp { @@ -89,4 +89,22 @@ static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, #endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ +#if 
IS_ENABLED(CONFIG_NET_DSA_SJA1105) + +extern const struct dsa_switch_ops sja1105_switch_ops; + +static inline bool dsa_port_is_sja1105(struct dsa_port *dp) +{ + return dp->ds->ops == &sja1105_switch_ops; +} + +#else + +static inline bool dsa_port_is_sja1105(struct dsa_port *dp) +{ + return false; +} + +#endif + #endif /* _NET_DSA_SJA1105_H */ diff --git a/include/linux/edac.h b/include/linux/edac.h index 76d3562d3006..4207d06996a4 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -184,6 +184,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR5: Unbuffered DDR5 RAM * @MEM_NVDIMM: Non-volatile RAM * @MEM_WIO2: Wide I/O 2. + * @MEM_HBM2: High Bandwidth Memory Gen 2. */ enum mem_type { MEM_EMPTY = 0, @@ -212,6 +213,7 @@ enum mem_type { MEM_DDR5, MEM_NVDIMM, MEM_WIO2, + MEM_HBM2, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -239,6 +241,7 @@ enum mem_type { #define MEM_FLAG_DDR5 BIT(MEM_DDR5) #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) #define MEM_FLAG_WIO2 BIT(MEM_WIO2) +#define MEM_FLAG_HBM2 BIT(MEM_HBM2) /** * enum edac_type - Error Detection and Correction capabilities and mode diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 3f221dbf5f95..1834752c5617 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -53,6 +53,22 @@ struct em_perf_domain { #ifdef CONFIG_ENERGY_MODEL #define EM_MAX_POWER 0xFFFF +/* + * Increase resolution of energy estimation calculations for 64-bit + * architectures. The extra resolution improves decisions made by EAS for + * task placement when two Performance Domains might provide similar energy + * estimation values (without better resolution the values could be equal). + * + * We increase resolution only if we have enough bits to allow this increased + * resolution (i.e. 64-bit). The costs for increasing resolution on 32-bit + * are pretty high and the returns do not justify the increased costs. 
+ */ +#ifdef CONFIG_64BIT +#define em_scale_power(p) ((p) * 1000) +#else +#define em_scale_power(p) (p) +#endif + struct em_data_callback { /** * active_power() - Provide power at the next performance state of diff --git a/include/linux/errno.h b/include/linux/errno.h index d73f597a2484..8b0c754bab02 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -31,5 +31,6 @@ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ #define ERECALLCONFLICT 530 /* conflict with recalled state */ +#define ENOGRACE 531 /* NFS file lock reclaim refused */ #endif diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 29dbb603bc91..849524b55d89 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -15,10 +15,9 @@ #include <linux/bitmap.h> #include <linux/compat.h> +#include <linux/netlink.h> #include <uapi/linux/ethtool.h> -#ifdef CONFIG_COMPAT - struct compat_ethtool_rx_flow_spec { u32 flow_type; union ethtool_flow_union h_u; @@ -38,8 +37,6 @@ struct compat_ethtool_rxnfc { u32 rule_locs[]; }; -#endif /* CONFIG_COMPAT */ - #include <linux/rculist.h> /** @@ -176,6 +173,11 @@ extern int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings); +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; +}; + /** * ethtool_intersect_link_masks - Given two link masks, AND them together * @dst: first mask and where result is stored @@ -215,7 +217,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, #define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19) #define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20) #define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21) -#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0) +#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22) +#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23) +#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0) #define ETHTOOL_COALESCE_USECS \ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) @@ -241,6 +245,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL) +#define ETHTOOL_COALESCE_USE_CQE \ + (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX) #define ETHTOOL_STAT_NOT_SET (~0ULL) @@ -606,8 +612,14 @@ struct ethtool_ops { struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); - int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); - int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*get_coalesce)(struct net_device *, + struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, + struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, + struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, + struct netlink_ext_ack *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, @@ -758,6 +770,16 @@ ethtool_params_from_link_mode(struct ethtool_link_ksettings *link_ksettings, enum ethtool_link_mode_bit_indices link_mode); /** + * ethtool_get_phc_vclocks - Derive phc vclocks information; the caller + * is responsible for freeing the memory of @vclock_index + * @dev: pointer to net_device structure + * @vclock_index: pointer to pointer of vclock index + * + * Returns the number of phc vclocks + */ +int 
ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); + +/** * ethtool_sprintf - Write formatted string to ethtool string data * @data: Pointer to start of string to update * @fmt: Format of string to write diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index fa0a524baed0..305d5f19093b 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/percpu-defs.h> #include <linux/percpu.h> +#include <linux/sched.h> /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -43,11 +44,9 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w __u64 *cnt); void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); -DECLARE_PER_CPU(int, eventfd_wake_count); - -static inline bool eventfd_signal_count(void) +static inline bool eventfd_signal_allowed(void) { - return this_cpu_read(eventfd_wake_count); + return !current->in_eventfd_signal; } #else /* CONFIG_EVENTFD */ @@ -78,9 +77,9 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, return -ENOSYS; } -static inline bool eventfd_signal_count(void) +static inline bool eventfd_signal_allowed(void) { - return false; + return true; } static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index fe848901fcc3..3260fe714846 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -221,6 +221,8 @@ struct export_operations { #define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply atomic attribute updates */ +#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do + asynchronous blocking locks */ unsigned long flags; }; diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index a16dbeced152..eec3b7c40811 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -27,6 +27,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ #define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) +#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD) + /* * fanotify_init() flags that require CAP_SYS_ADMIN. * We do not allow unprivileged groups to request permission events. 
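The FAN_REPORT_PIDFD bit folded into FANOTIFY_INFO_MODES above, and added to the CAP_SYS_ADMIN-only init flags in the next hunk, lets a listener receive pidfd info records alongside events. A minimal userspace sketch, assuming uapi headers that already define FAN_REPORT_PIDFD; error handling is elided:

#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	/* Needs CAP_SYS_ADMIN, per FANOTIFY_ADMIN_INIT_FLAGS below. */
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_PIDFD,
			       O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		perror("fanotify_init");
	return fd < 0 ? 1 : 0;
}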
@@ -35,6 +37,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ */ #define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \ FAN_REPORT_TID | \ + FAN_REPORT_PIDFD | \ FAN_UNLIMITED_QUEUE | \ FAN_UNLIMITED_MARKS) diff --git a/include/linux/fb.h b/include/linux/fb.h index ecfbcc0553a5..5950f8f5dc74 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -2,6 +2,7 @@ #ifndef _LINUX_FB_H #define _LINUX_FB_H +#include <linux/refcount.h> #include <linux/kgdb.h> #include <uapi/linux/fb.h> @@ -435,7 +436,7 @@ struct fb_tile_ops { struct fb_info { - atomic_t count; + refcount_t count; int node; int flags; /* diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index 4e624c466583..c50882f19235 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h @@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, u64 phys, u64 len, u32 flags); -int generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, u64 start, u64 len, - get_block_t *get_block); - #endif /* _LINUX_FIEMAP_H 1 */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 472f97074da0..7d248941ecea 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -73,6 +73,11 @@ struct ctl_table_header; /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 +/* unused opcode to mark speculation barrier for mitigating + * Speculative Store Bypass + */ +#define BPF_NOSPEC 0xc0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. @@ -390,6 +395,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = 0, \ .imm = 0 }) +/* Speculation barrier */ + +#define BPF_ST_NOSPEC() \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_NOSPEC, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ @@ -559,7 +574,8 @@ struct bpf_prog { kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? 
*/ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ - call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_func_ip:1; /* Do we call get_func_ip() */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -584,25 +600,38 @@ struct sk_filter { DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); -#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \ - u32 __ret; \ - cant_migrate(); \ - if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ - struct bpf_prog_stats *__stats; \ - u64 __start = sched_clock(); \ - __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - __stats = this_cpu_ptr(prog->stats); \ - u64_stats_update_begin(&__stats->syncp); \ - __stats->cnt++; \ - __stats->nsecs += sched_clock() - __start; \ - u64_stats_update_end(&__stats->syncp); \ - } else { \ - __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - } \ - __ret; }) - -#define BPF_PROG_RUN(prog, ctx) \ - __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func) +typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx, + const struct bpf_insn *insnsi, + unsigned int (*bpf_func)(const void *, + const struct bpf_insn *)); + +static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, + const void *ctx, + bpf_dispatcher_fn dfunc) +{ + u32 ret; + + cant_migrate(); + if (static_branch_unlikely(&bpf_stats_enabled_key)) { + struct bpf_prog_stats *stats; + u64 start = sched_clock(); + + ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + stats = this_cpu_ptr(prog->stats); + u64_stats_update_begin(&stats->syncp); + stats->cnt++; + stats->nsecs += sched_clock() - start; + u64_stats_update_end(&stats->syncp); + } else { + ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + } + return ret; +} + +static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx) +{ + return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func); +} /* * Use in preemptible and therefore migratable context to make sure that @@ -621,7 +650,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, u32 ret; migrate_disable(); - ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func); + ret = bpf_prog_run(prog, ctx); migrate_enable(); return ret; } @@ -694,7 +723,7 @@ static inline void bpf_restore_data_end( cb->data_end = saved_data_end; } -static inline u8 *bpf_skb_cb(struct sk_buff *skb) +static inline u8 *bpf_skb_cb(const struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta * data between tail calls. 
Since this also needs to work with @@ -715,8 +744,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) /* Must be invoked with migration disabled */ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, - struct sk_buff *skb) + const void *ctx) { + const struct sk_buff *skb = ctx; u8 *cb_data = bpf_skb_cb(skb); u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; @@ -726,7 +756,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, memset(cb_data, 0, sizeof(cb_saved)); } - res = BPF_PROG_RUN(prog, skb); + res = bpf_prog_run(prog, skb); if (unlikely(prog->cb_access)) memcpy(cb_data, cb_saved, sizeof(cb_saved)); @@ -760,6 +790,10 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, DECLARE_BPF_DISPATCHER(xdp) +DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); + +u32 xdp_master_redirect(struct xdp_buff *xdp); + static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) { @@ -767,7 +801,14 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, * under local_bh_disable(), which provides the needed RCU protection * for accessing map entries. */ - return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + + if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { + if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) + act = xdp_master_redirect(xdp); + } + + return act; } void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); @@ -1413,7 +1454,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, }; u32 act; - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; @@ -1451,7 +1492,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, }; u32 act; - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 9d1a5c175065..56b426fe020c 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -52,6 +52,10 @@ #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U #define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U +/* Loader commands */ +#define PM_LOAD_PDI 0x701 +#define PDI_SRC_DDR 0xF + /* * Firmware FPGA Manager flags * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration @@ -411,6 +415,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, u32 *value); int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, u32 value); +int zynqmp_pm_load_pdi(const u32 src, const u64 address); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -622,6 +627,11 @@ static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, { return -ENODEV; } + +static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index ec2cd8bfceb0..474c1f506307 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -110,7 +110,7 @@ struct fpga_image_info { * @initial_header_size: Maximum number of bytes that should be passed into 
write_init * @state: returns an enum value of the FPGA's state * @status: returns status of the FPGA, including reconfiguration error code - * @write_init: prepare the FPGA to receive confuration data + * @write_init: prepare the FPGA to receive configuration data * @write: write count bytes of configuration data to the FPGA * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done diff --git a/include/linux/fs.h b/include/linux/fs.h index 640574294216..1c01f9f2b574 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -319,6 +319,8 @@ enum rw_hint { /* iocb->ki_waitq is valid */ #define IOCB_WAITQ (1 << 19) #define IOCB_NOIO (1 << 20) +/* can use bio alloc cache */ +#define IOCB_ALLOC_CACHE (1 << 21) struct kiocb { struct file *ki_filp; @@ -436,6 +438,10 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * struct address_space - Contents of a cacheable, mappable object. * @host: Owner, either the inode or the block_device. * @i_pages: Cached pages. + * @invalidate_lock: Guards coherency between page cache contents and + * file offset->disk block mappings in the filesystem during invalidates. + * It is also used to block modification of page cache contents through + * memory mappings. * @gfp_mask: Memory allocation flags to use for allocating pages. * @i_mmap_writable: Number of VM_SHARED mappings. * @nr_thps: Number of THPs in the pagecache (non-shmem only). @@ -453,6 +459,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping, struct address_space { struct inode *host; struct xarray i_pages; + struct rw_semaphore invalidate_lock; gfp_t gfp_mask; atomic_t i_mmap_writable; #ifdef CONFIG_READ_ONLY_THP_FOR_FS @@ -814,9 +821,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla down_read_nested(&inode->i_rwsem, subclass); } +static inline void filemap_invalidate_lock(struct address_space *mapping) +{ + down_write(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_unlock(struct address_space *mapping) +{ + up_write(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_lock_shared(struct address_space *mapping) +{ + down_read(&mapping->invalidate_lock); +} + +static inline int filemap_invalidate_trylock_shared( + struct address_space *mapping) +{ + return down_read_trylock(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_unlock_shared( + struct address_space *mapping) +{ + up_read(&mapping->invalidate_lock); +} + void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); +void filemap_invalidate_lock_two(struct address_space *mapping1, + struct address_space *mapping2); +void filemap_invalidate_unlock_two(struct address_space *mapping1, + struct address_space *mapping2); + + /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic @@ -997,6 +1037,7 @@ static inline struct file *get_file(struct file *f) #define FL_UNLOCK_PENDING 512 /* Lease is being broken */ #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ #define FL_LAYOUT 2048 /* outstanding pNFS layout */ +#define FL_RECLAIM 4096 /* reclaiming from a reboot server */ #define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) @@ -1507,8 +1548,11 @@ struct super_block { /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; - /* Pending fsnotify inode refs */ - atomic_long_t 
s_fsnotify_inode_refs; + /* + * Number of inode/mount/sb objects that are being watched, note that + * inodes objects are currently double-accounted. + */ + atomic_long_t s_fsnotify_connectors; /* Being remounted read-only */ int s_readonly_remount; @@ -2457,7 +2501,6 @@ static inline void file_accessed(struct file *file) extern int file_modified(struct file *file); -int sync_inode(struct inode *inode, struct writeback_control *wbc); int sync_inode_metadata(struct inode *inode, int wait); struct file_system_type { @@ -2487,6 +2530,7 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; }; @@ -2570,90 +2614,6 @@ extern struct kobject *fs_kobj; #define MAX_RW_COUNT (INT_MAX & PAGE_MASK) -#ifdef CONFIG_MANDATORY_FILE_LOCKING -extern int locks_mandatory_locked(struct file *); -extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); - -/* - * Candidates for mandatory locking have the setgid bit set - * but no group execute bit - an otherwise meaningless combination. - */ - -static inline int __mandatory_lock(struct inode *ino) -{ - return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; -} - -/* - * ... and these candidates should be on SB_MANDLOCK mounted fs, - * otherwise these will be advisory locks - */ - -static inline int mandatory_lock(struct inode *ino) -{ - return IS_MANDLOCK(ino) && __mandatory_lock(ino); -} - -static inline int locks_verify_locked(struct file *file) -{ - if (mandatory_lock(locks_inode(file))) - return locks_mandatory_locked(file); - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, - struct file *f, - loff_t size) -{ - if (!inode->i_flctx || !mandatory_lock(inode)) - return 0; - - if (size < inode->i_size) { - return locks_mandatory_area(inode, f, size, inode->i_size - 1, - F_WRLCK); - } else { - return locks_mandatory_area(inode, f, inode->i_size, size - 1, - F_WRLCK); - } -} - -#else /* !CONFIG_MANDATORY_FILE_LOCKING */ - -static inline int locks_mandatory_locked(struct file *file) -{ - return 0; -} - -static inline int locks_mandatory_area(struct inode *inode, struct file *filp, - loff_t start, loff_t end, unsigned char type) -{ - return 0; -} - -static inline int __mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int locks_verify_locked(struct file *file) -{ - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, struct file *filp, - size_t size) -{ - return 0; -} - -#endif /* CONFIG_MANDATORY_FILE_LOCKING */ - - #ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { @@ -2786,6 +2746,7 @@ static inline struct file *file_clone_open(struct file *file) extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname_flags(const char __user *, int, int *); +extern struct filename *getname_uflags(const char __user *, int); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); extern void putname(struct filename *name); @@ -2891,6 +2852,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); extern int filemap_check_errors(struct address_space *mapping); extern void __filemap_set_wb_err(struct address_space *mapping, int err); +int filemap_fdatawrite_wbc(struct address_space *mapping, + struct 
writeback_control *wbc); static inline int filemap_write_and_wait(struct address_space *mapping) { @@ -3246,10 +3209,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, struct iov_iter *iter); -/* fs/block_dev.c */ -extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, - int datasync); - /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index 37e1e8f7f08d..6b54982fc5f3 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -139,6 +139,9 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, extern int generic_parse_monolithic(struct fs_context *fc, void *data); extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); +extern int vfs_parse_fs_param_source(struct fs_context *fc, + struct fs_parameter *param); +extern void fc_drop_locked(struct fs_context *fc); /* * sget() wrappers to be called from the ->get_tree() op. diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 2ea1387bb497..e912ed9141d9 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -47,27 +47,128 @@ struct fscrypt_name { #define FSCRYPT_SET_CONTEXT_MAX_SIZE 40 #ifdef CONFIG_FS_ENCRYPTION + /* - * fscrypt superblock flags + * If set, the fscrypt bounce page pool won't be allocated (unless another + * filesystem needs it). Set this if the filesystem always uses its own bounce + * pages for writes and therefore won't need the fscrypt bounce page pool. */ #define FS_CFLG_OWN_PAGES (1U << 1) -/* - * crypto operations for filesystems - */ +/* Crypto operations for filesystems */ struct fscrypt_operations { + + /* Set of optional flags; see above for allowed flags */ unsigned int flags; + + /* + * If set, this is a filesystem-specific key description prefix that + * will be accepted for "logon" keys for v1 fscrypt policies, in + * addition to the generic prefix "fscrypt:". This functionality is + * deprecated, so new filesystems shouldn't set this field. + */ const char *key_prefix; + + /* + * Get the fscrypt context of the given inode. + * + * @inode: the inode whose context to get + * @ctx: the buffer into which to get the context + * @len: length of the @ctx buffer in bytes + * + * Return: On success, returns the length of the context in bytes; this + * may be less than @len. On failure, returns -ENODATA if the + * inode doesn't have a context, -ERANGE if the context is + * longer than @len, or another -errno code. + */ int (*get_context)(struct inode *inode, void *ctx, size_t len); + + /* + * Set an fscrypt context on the given inode. + * + * @inode: the inode whose context to set. The inode won't already have + * an fscrypt context. + * @ctx: the context to set + * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE) + * @fs_data: If called from fscrypt_set_context(), this will be the + * value the filesystem passed to fscrypt_set_context(). + * Otherwise (i.e. when called from + * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL. + * + * i_rwsem will be held for write. + * + * Return: 0 on success, -errno on failure. + */ int (*set_context)(struct inode *inode, const void *ctx, size_t len, void *fs_data); + + /* + * Get the dummy fscrypt policy in use on the filesystem (if any). 
+ * + * Filesystems only need to implement this function if they support the + * test_dummy_encryption mount option. + * + * Return: A pointer to the dummy fscrypt policy, if the filesystem is + * mounted with test_dummy_encryption; otherwise NULL. + */ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb); + + /* + * Check whether a directory is empty. i_rwsem will be held for write. + */ bool (*empty_dir)(struct inode *inode); + + /* The filesystem's maximum ciphertext filename length, in bytes */ unsigned int max_namelen; + + /* + * Check whether the filesystem's inode numbers and UUID are stable, + * meaning that they will never be changed even by offline operations + * such as filesystem shrinking and therefore can be used in the + * encryption without the possibility of files becoming unreadable. + * + * Filesystems only need to implement this function if they want to + * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These + * flags are designed to work around the limitations of UFS and eMMC + * inline crypto hardware, and they shouldn't be used in scenarios where + * such hardware isn't being used. + * + * Leaving this NULL is equivalent to always returning false. + */ bool (*has_stable_inodes)(struct super_block *sb); + + /* + * Get the number of bits that the filesystem uses to represent inode + * numbers and file logical block numbers. + * + * By default, both of these are assumed to be 64-bit. This function + * can be implemented to declare that either or both of these numbers is + * shorter, which may allow the use of the + * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of + * inline crypto hardware whose maximum DUN length is less than 64 bits + * (e.g., eMMC v5.2 spec compliant hardware). This function only needs + * to be implemented if support for one of these features is needed. + */ void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); + + /* + * Return the number of block devices to which the filesystem may write + * encrypted file contents. + * + * If the filesystem can use multiple block devices (other than block + * devices that aren't used for encrypted file contents, such as + * external journal devices), and wants to support inline encryption, + * then it must implement this function. Otherwise it's not needed. + */ int (*get_num_devices)(struct super_block *sb); + + /* + * If ->get_num_devices() returns a value greater than 1, then this + * function is called to get the array of request_queues that the + * filesystem is using -- one per block device. (There may be duplicate + * entries in this array, as block devices can share a request_queue.) 
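To make the fscrypt_operations contract documented above concrete, here is a minimal, hedged sketch of how a simple single-device filesystem might fill in the mandatory hooks. "examplefs" and its xattr helpers are hypothetical illustrations of the documented calling conventions, not code from this patch.

/* Hypothetical example, not from this patch. */
static int examplefs_get_context(struct inode *inode, void *ctx, size_t len)
{
        /*
         * examplefs_read_ctx_xattr() is an assumed helper; per the
         * ->get_context() contract above it must return the context
         * length, -ENODATA if no context exists, or -ERANGE if @len
         * is too small.
         */
        return examplefs_read_ctx_xattr(inode, ctx, len);
}

static int examplefs_set_context(struct inode *inode, const void *ctx,
                                 size_t len, void *fs_data)
{
        /* i_rwsem is held for write here, as the kernel-doc requires;
         * @fs_data is only non-NULL when called via fscrypt_set_context(). */
        return examplefs_write_ctx_xattr(inode, ctx, len);
}

static const struct fscrypt_operations examplefs_cryptops = {
        .get_context    = examplefs_get_context,
        .set_context    = examplefs_set_context,
        .empty_dir      = examplefs_empty_dir,  /* assumed helper */
        .max_namelen    = 255,
        /* Optional hooks (has_stable_inodes, get_devices, ...) left unset. */
};

The table would then be registered from the filesystem's mount path with fscrypt_set_ops(sb, &examplefs_cryptops), the helper visible further down in this header.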
+ */ void (*get_devices)(struct super_block *sb, struct request_queue **devs); }; @@ -253,6 +354,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done); +int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat); static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { @@ -583,6 +685,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode, return ERR_PTR(-EOPNOTSUPP); } +static inline int fscrypt_symlink_getattr(const struct path *path, + struct kstat *stat) +{ + return -EOPNOTSUPP; +} + static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 63b56aba925a..30ece3ae6df7 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -423,7 +423,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); -struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev); +struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, + u16 if_id); extern struct bus_type fsl_mc_bus_type; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f8acddcf54fb..12d3a7d308ab 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -30,6 +30,9 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask, struct inode *child, const struct qstr *name, u32 cookie) { + if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0) + return; + fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); } @@ -41,6 +44,9 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, static inline void fsnotify_inode(struct inode *inode, __u32 mask) { + if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0) + return; + if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; @@ -53,6 +59,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, { struct inode *inode = d_inode(dentry); + if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0) + return 0; + if (S_ISDIR(inode->i_mode)) { mask |= FS_ISDIR; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a69f363b61bf..832e65f06754 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -643,6 +643,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } extern int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr); +/** + * ftrace_need_init_nop - return whether nop call sites should be initialized + * + * Normally the compiler's -mnop-mcount generates suitable nops, so we don't + * need to call ftrace_init_nop() if the code is built with that flag. + * Architectures where this is not always the case may define their own + * condition. + * + * Return must be: + * 0 if ftrace_init_nop() should not be called + * Nonzero if ftrace_init_nop() should be called + */ + +#ifndef ftrace_need_init_nop +#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT)) +#endif /** * ftrace_init_nop - initialize a nop call site diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index bc738504ab4a..c285968e437a 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -8,34 +8,11 @@ /* All generic netlink requests are serialized by a global lock.
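One point worth illustrating from the ftrace_need_init_nop() hunk above: the condition is a macro precisely so that an architecture can override it before this header is included. A hedged sketch of such an override (the file path and constant are illustrative, modelled on the kind of definition an arch header could provide; they are not part of this patch):

/* arch/foo/include/asm/ftrace.h -- illustrative override only */
#define ftrace_need_init_nop()  (1)     /* always call ftrace_init_nop() */

With the default definition, kernels built with -mnop-mcount (so CC_USING_NOP_MCOUNT is defined) skip the per-call-site initialization entirely, since the compiler already emitted suitable nops.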
*/ extern void genl_lock(void); extern void genl_unlock(void); -#ifdef CONFIG_LOCKDEP -extern bool lockdep_genl_is_held(void); -#endif /* for synchronisation between af_netlink and genetlink */ extern atomic_t genl_sk_destructing_cnt; extern wait_queue_head_t genl_sk_destructing_waitq; -/** - * rcu_dereference_genl - rcu_dereference with debug checking - * @p: The pointer to read, prior to dereferencing - * - * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() - * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() - */ -#define rcu_dereference_genl(p) \ - rcu_dereference_check(p, lockdep_genl_is_held()) - -/** - * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex - * @p: The pointer to read, prior to dereferencing - * - * Return the value of the specified RCU-protected pointer, but omit - * the READ_ONCE(), because caller holds genl mutex. - */ -#define genl_dereference(p) \ - rcu_dereference_protected(p, lockdep_genl_is_held()) - #define MODULE_ALIAS_GENL_FAMILY(family)\ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 13b34177cc85..c68d83c87f83 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -60,9 +60,6 @@ struct partition_meta_info { * device. * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl. * - * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up", - * with a similar meaning to network interfaces. - * * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include * partition information in ``/proc/partitions`` or in the output of * printk_all_partitions(). @@ -97,7 +94,6 @@ struct partition_meta_info { /* 2 is unused (used to be GENHD_FL_DRIVERFS) */ /* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */ #define GENHD_FL_CD 0x0008 -#define GENHD_FL_UP 0x0010 #define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020 #define GENHD_FL_EXT_DEVT 0x0040 #define GENHD_FL_NATIVE_CAPACITY 0x0080 @@ -153,13 +149,15 @@ struct gendisk { unsigned long state; #define GD_NEED_PART_SCAN 0 #define GD_READ_ONLY 1 -#define GD_QUEUE_REF 2 struct mutex open_mutex; /* open/close mutex */ unsigned open_partitions; /* number of open partitions */ + struct backing_dev_info *bdi; struct kobject *slave_dir; - +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED + struct list_head slave_bdevs; +#endif struct timer_rand_state *random; atomic_t sync_io; /* RAID */ struct disk_events *ev; @@ -172,8 +170,14 @@ struct gendisk { int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; + u64 diskseq; }; +static inline bool disk_live(struct gendisk *disk) +{ + return !inode_unhashed(disk->part0->bd_inode); +} + /* * The gendisk is refcounted by the part0 block_device, and the bd_device * therein is also used for device model presentation in sysfs. 
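The genhd.h changes here and in the next hunk turn disk registration from a void API into one that can fail: device_add_disk() and add_disk() now return an int, and blk_alloc_disk() pairs allocation with a per-site lockdep class. A hedged sketch of the resulting driver-side pattern follows; the "sketchblk" names and fops table are hypothetical, only the gendisk API calls come from this header.

/* Hypothetical probe path showing error-checked disk registration. */
static int sketchblk_probe(struct platform_device *pdev)
{
        struct gendisk *disk;
        int ret;

        disk = blk_alloc_disk(NUMA_NO_NODE);    /* disk + queue in one call */
        if (!disk)
                return -ENOMEM;

        disk->fops = &sketchblk_fops;           /* assumed block_device_operations */
        strscpy(disk->disk_name, "sketchblk0", sizeof(disk->disk_name));
        set_capacity(disk, 2048);               /* capacity in 512-byte sectors */

        ret = add_disk(disk);                   /* may fail now; must be checked */
        if (ret)
                blk_cleanup_disk(disk);
        return ret;
}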
@@ -210,18 +214,12 @@ static inline dev_t disk_devt(struct gendisk *disk) void disk_uevent(struct gendisk *disk, enum kobject_action action); /* block/genhd.c */ -extern void device_add_disk(struct device *parent, struct gendisk *disk, - const struct attribute_group **groups); -static inline void add_disk(struct gendisk *disk) +int device_add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups); +static inline int add_disk(struct gendisk *disk) { - device_add_disk(NULL, disk, NULL); + return device_add_disk(NULL, disk, NULL); } -extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); -static inline void add_disk_no_queue_reg(struct gendisk *disk) -{ - device_add_disk_no_queue_reg(NULL, disk); -} - extern void del_gendisk(struct gendisk *gp); void set_disk_ro(struct gendisk *disk, bool read_only); @@ -236,6 +234,7 @@ extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); bool set_capacity_and_notify(struct gendisk *disk, sector_t size); +bool disk_force_media_change(struct gendisk *disk, unsigned int events); /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; @@ -259,26 +258,10 @@ static inline sector_t get_capacity(struct gendisk *disk) int bdev_disk_changed(struct gendisk *disk, bool invalidate); void blk_drop_partitions(struct gendisk *disk); -extern struct gendisk *__alloc_disk_node(int minors, int node_id); +struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, + struct lock_class_key *lkclass); extern void put_disk(struct gendisk *disk); - -#define alloc_disk_node(minors, node_id) \ -({ \ - static struct lock_class_key __key; \ - const char *__name; \ - struct gendisk *__disk; \ - \ - __name = "(gendisk_completion)"#minors"("#node_id")"; \ - \ - __disk = __alloc_disk_node(minors, node_id); \ - \ - if (__disk) \ - lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \ - \ - __disk; \ -}) - -#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) +struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); /** * blk_alloc_disk - allocate a gendisk structure @@ -291,15 +274,10 @@ extern void put_disk(struct gendisk *disk); */ #define blk_alloc_disk(node_id) \ ({ \ - struct gendisk *__disk = __blk_alloc_disk(node_id); \ static struct lock_class_key __key; \ \ - if (__disk) \ - lockdep_init_map(&__disk->lockdep_map, \ - "(bio completion)", &__key, 0); \ - __disk; \ + __blk_alloc_disk(node_id, &__key); \ }) -struct gendisk *__blk_alloc_disk(int node); void blk_cleanup_disk(struct gendisk *disk); int __register_blkdev(unsigned int major, const char *name, @@ -316,9 +294,10 @@ void set_capacity(struct gendisk *disk, sector_t size); int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); -#ifdef CONFIG_SYSFS +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); +int bd_register_pending_holders(struct gendisk *disk); #else static inline int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) @@ -329,9 +308,14 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) { } -#endif /* CONFIG_SYSFS */ +static inline int 
bd_register_pending_holders(struct gendisk *disk) +{ + return 0; +} +#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */ dev_t part_devt(struct gendisk *disk, u8 partno); +void inc_diskseq(struct gendisk *disk); dev_t blk_lookup_devt(const char *name, int partno); void blk_request_module(dev_t devt); #ifdef CONFIG_BLOCK diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index cacc4dd27794..630a388035f1 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -22,7 +22,7 @@ struct hdlc_proto { void (*start)(struct net_device *dev); /* if open & DCD */ void (*stop)(struct net_device *dev); /* if open & !DCD */ void (*detach)(struct net_device *dev); - int (*ioctl)(struct net_device *dev, struct ifreq *ifr); + int (*ioctl)(struct net_device *dev, struct if_settings *ifs); __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); int (*netif_rx)(struct sk_buff *skb); netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); @@ -54,7 +54,7 @@ typedef struct hdlc_device { /* Exported from hdlc module */ /* Called by hardware driver when a user requests HDLC service */ -int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs); /* Must be used by hardware driver on module startup/exit */ #define register_hdlc_device(dev) register_netdev(dev) diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h index d4d633a49d36..5d70c3f98f5b 100644 --- a/include/linux/hdlcdrv.h +++ b/include/linux/hdlcdrv.h @@ -79,7 +79,7 @@ struct hdlcdrv_ops { */ int (*open)(struct net_device *); int (*close)(struct net_device *); - int (*ioctl)(struct net_device *, struct ifreq *, + int (*ioctl)(struct net_device *, void __user *, struct hdlcdrv_ioctl *, int); }; diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 8c6e8e996c87..d9a606a9fc64 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -318,14 +318,16 @@ static inline void memcpy_to_page(struct page *page, size_t offset, VM_BUG_ON(offset + len > PAGE_SIZE); memcpy(to + offset, from, len); + flush_dcache_page(page); kunmap_local(to); } static inline void memzero_page(struct page *page, size_t offset, size_t len) { - char *addr = kmap_atomic(page); + char *addr = kmap_local_page(page); memset(addr + offset, 0, len); - kunmap_atomic(addr); + flush_dcache_page(page); + kunmap_local(addr); } #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 9b0487c88571..7bccf589aba7 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -170,6 +170,8 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, u32 syncpt_id); +struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold); + /* * host1x channel */ @@ -216,8 +218,8 @@ struct host1x_job { struct host1x_client *client; /* Gathers and their memory */ - struct host1x_job_gather *gathers; - unsigned int num_gathers; + struct host1x_job_cmd *cmds; + unsigned int num_cmds; /* Array of handles to be pinned & unpinned */ struct host1x_reloc *relocs; @@ -234,9 +236,15 @@ struct host1x_job { u32 syncpt_incrs; u32 syncpt_end; + /* Completion waiter ref */ + void *waiter; + /* Maximum time to wait for this job */ unsigned int timeout; + /* Job has timed out and should be released */ + bool cancelled; + /* Index and number of slots used in the push buffer */ unsigned int first_get; unsigned int num_slots; @@ -257,12 +265,25 @@ struct host1x_job { 
/* Add a channel wait for previous ops to complete */ bool serialize; + + /* Fast-forward syncpoint increments on job timeout */ + bool syncpt_recovery; + + /* Callback called when job is freed */ + void (*release)(struct host1x_job *job); + void *user_data; + + /* Whether host1x-side firewall should be run for this job or not */ + bool enable_firewall; }; struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, - u32 num_cmdbufs, u32 num_relocs); + u32 num_cmdbufs, u32 num_relocs, + bool skip_firewall); void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, unsigned int words, unsigned int offset); +void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh, + bool relative, u32 next_class); struct host1x_job *host1x_job_get(struct host1x_job *job); void host1x_job_put(struct host1x_job *job); int host1x_job_pin(struct host1x_job *job, struct device *dev); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index bb5e7b0a4274..0ee140176f10 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -318,16 +318,12 @@ struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); -extern void clock_was_set_delayed(void); - extern unsigned int hrtimer_resolution; #else #define hrtimer_resolution (unsigned int)LOW_RES_NSEC -static inline void clock_was_set_delayed(void) { } - #endif static inline ktime_t @@ -351,13 +347,13 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer) timer->base->get_time()); } -extern void clock_was_set(void); #ifdef CONFIG_TIMERFD extern void timerfd_clock_was_set(void); +extern void timerfd_resume(void); #else static inline void timerfd_clock_was_set(void) { } +static inline void timerfd_resume(void) { } #endif -extern void hrtimers_resume(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a6730072d13a..694264503119 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1088,6 +1088,48 @@ struct ieee80211_ext { } u; } __packed __aligned(2); +#define IEEE80211_TWT_CONTROL_NDP BIT(0) +#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1) +#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3) +#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4) +#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5) + +#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0) +#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1) +#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4) +#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5) +#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6) +#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7) +#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10) +#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15) + +enum ieee80211_twt_setup_cmd { + TWT_SETUP_CMD_REQUEST, + TWT_SETUP_CMD_SUGGEST, + TWT_SETUP_CMD_DEMAND, + TWT_SETUP_CMD_GROUPING, + TWT_SETUP_CMD_ACCEPT, + TWT_SETUP_CMD_ALTERNATE, + TWT_SETUP_CMD_DICTATE, + TWT_SETUP_CMD_REJECT, +}; + +struct ieee80211_twt_params { + __le16 req_type; + __le64 twt; + u8 min_twt_dur; + __le16 mantissa; + u8 channel; +} __packed; + +struct ieee80211_twt_setup { + u8 dialog_token; + u8 element_id; + u8 length; + u8 control; + u8 params[]; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1252,6 +1294,10 @@ struct ieee80211_mgmt { __le16 toa_error; u8 variable[0]; } __packed ftm; + struct { + u8 action_code; + u8 variable[]; + } __packed s1g; } u; } __packed action; } u; } __packed; @@ -2266,6 +2312,9 @@ ieee80211_he_ppe_size(u8
ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 +#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 +#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 + /** * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field * @primary: primary channel @@ -2282,6 +2331,7 @@ struct ieee80211_he_6ghz_oper { #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 #define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 +#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38 u8 control; u8 ccfs0; u8 ccfs1; @@ -2289,6 +2339,44 @@ struct ieee80211_he_6ghz_oper { } __packed; /* + * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021", + * it shows four types in "Table 9-275a-Maximum Transmit Power Interpretation + * subfield encoding", and two categories for each type in "Table E-12-Regulatory + * Info subfield encoding in the United States". + * So there can be at most 8 Transmit Power Envelope elements in total. + */ +#define IEEE80211_TPE_MAX_IE_COUNT 8 +/* + * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield" + * of "IEEE Std 802.11ax™‐2021", the max power level is 8. + */ +#define IEEE80211_MAX_NUM_PWR_LEVEL 8 + +#define IEEE80211_TPE_MAX_POWER_COUNT 8 + +/* transmit power interpretation type of transmit power envelope element */ +enum ieee80211_tx_power_intrpt_type { + IEEE80211_TPE_LOCAL_EIRP, + IEEE80211_TPE_LOCAL_EIRP_PSD, + IEEE80211_TPE_REG_CLIENT_EIRP, + IEEE80211_TPE_REG_CLIENT_EIRP_PSD, +}; + +/** + * struct ieee80211_tx_pwr_env + * + * This structure represents the "Transmit Power Envelope element" + */ +struct ieee80211_tx_pwr_env { + u8 tx_power_info; + s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT]; +} __packed; + +#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7 +#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38 +#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0 + +/* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the HE Operations IE, starting from the byte * after the ext ID byte.
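As a concrete reading of the Transmit Power Envelope definitions above, the sketch below pulls the three subfields out of tx_power_info using the masks just defined. The function, its pr_debug() output, and the assumption that the element was already length-validated are illustrative only; the precise semantics of the count subfield are given by the 802.11ax tables cited above.

/* Illustrative TPE decode, not code from this patch. */
static void sketch_parse_tpe(const struct ieee80211_tx_pwr_env *env)
{
        u8 info = env->tx_power_info;
        u8 count = info & IEEE80211_TX_PWR_ENV_INFO_COUNT;                 /* bits 0-2 */
        u8 interpret = (info & IEEE80211_TX_PWR_ENV_INFO_INTERPRET) >> 3;  /* bits 3-5 */
        u8 category = (info & IEEE80211_TX_PWR_ENV_INFO_CATEGORY) >> 6;    /* bits 6-7 */
        int i;

        if (interpret == IEEE80211_TPE_LOCAL_EIRP ||
            interpret == IEEE80211_TPE_REG_CLIENT_EIRP)
                pr_debug("TPE: EIRP limits, regulatory category %u\n", category);

        /* How many tx_power[] entries 'count' implies depends on the
         * interpretation; the array bound below is the hard limit. */
        for (i = 0; i <= count && i < IEEE80211_TPE_MAX_POWER_COUNT; i++)
                pr_debug("TPE power[%d] = %d (raw s8 value)\n",
                         i, env->tx_power[i]);
}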
It is assumed that he_oper_ie has at least @@ -2869,7 +2957,7 @@ enum ieee80211_eid { WLAN_EID_VHT_OPERATION = 192, WLAN_EID_EXTENDED_BSS_LOAD = 193, WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, - WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, + WLAN_EID_TX_POWER_ENVELOPE = 195, WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, WLAN_EID_AID = 197, WLAN_EID_QUIET_CHANNEL = 198, @@ -2881,6 +2969,7 @@ enum ieee80211_eid { WLAN_EID_AID_RESPONSE = 211, WLAN_EID_S1G_BCN_COMPAT = 213, WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, + WLAN_EID_S1G_TWT = 216, WLAN_EID_S1G_CAPABILITIES = 217, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, @@ -2950,6 +3039,7 @@ enum ieee80211_category { WLAN_CATEGORY_FST = 18, WLAN_CATEGORY_UNPROT_DMG = 20, WLAN_CATEGORY_VHT = 21, + WLAN_CATEGORY_S1G = 22, WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, WLAN_CATEGORY_VENDOR_SPECIFIC = 127, }; @@ -3023,6 +3113,20 @@ enum ieee80211_key_len { WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; +enum ieee80211_s1g_actioncode { + WLAN_S1G_AID_SWITCH_REQUEST, + WLAN_S1G_AID_SWITCH_RESPONSE, + WLAN_S1G_SYNC_CONTROL, + WLAN_S1G_STA_INFO_ANNOUNCE, + WLAN_S1G_EDCA_PARAM_SET, + WLAN_S1G_EL_OPERATION, + WLAN_S1G_TWT_SETUP, + WLAN_S1G_TWT_TEARDOWN, + WLAN_S1G_SECT_GROUP_ID_LIST, + WLAN_S1G_SECT_ID_FEEDBACK, + WLAN_S1G_TWT_INFORMATION = 11, +}; + #define IEEE80211_WEP_IV_LEN 4 #define IEEE80211_WEP_ICV_LEN 4 #define IEEE80211_CCMP_HDR_LEN 8 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index b651c5e32a28..509e18c7e740 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -57,10 +57,16 @@ struct br_ip_list { #define BR_MRP_AWARE BIT(17) #define BR_MRP_LOST_CONT BIT(18) #define BR_MRP_LOST_IN_CONT BIT(19) +#define BR_TX_FWD_OFFLOAD BIT(20) #define BR_DEFAULT_AGEING_TIME (300 * HZ) -extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); +struct net_bridge; +void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, + unsigned int cmd, struct ifreq *ifr, + void __user *uarg)); +int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, + struct ifreq *ifr, void __user *uarg); #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, @@ -70,9 +76,6 @@ bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); bool br_multicast_has_router_adjacent(struct net_device *dev, int proto); bool br_multicast_enabled(const struct net_device *dev); bool br_multicast_router(const struct net_device *dev); -int br_mdb_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -104,13 +107,6 @@ static inline bool br_multicast_router(const struct net_device *dev) { return false; } -static inline int br_mdb_replay(const struct net_device *br_dev, - const struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) -{ - return -EOPNOTSUPP; -} #endif #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) @@ -120,9 +116,8 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); -int br_vlan_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, 
bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack); +int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -150,12 +145,10 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, return -EINVAL; } -static inline int br_vlan_replay(struct net_device *br_dev, - struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) +static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo) { - return -EOPNOTSUPP; + return -EINVAL; } #endif @@ -167,8 +160,6 @@ void br_fdb_clear_offload(const struct net_device *dev, u16 vid); bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); u8 br_port_get_stp_state(const struct net_device *dev); clock_t br_get_ageing_time(const struct net_device *br_dev); -int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, @@ -197,13 +188,6 @@ static inline clock_t br_get_ageing_time(const struct net_device *br_dev) { return 0; } - -static inline int br_fdb_replay(const struct net_device *br_dev, - const struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb) -{ - return -EOPNOTSUPP; -} #endif #endif diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 64ce8cd1cfaf..93c262ecbdc9 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -41,9 +41,6 @@ struct ip_sf_socklist { __be32 sl_addr[]; }; -#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ - (count) * sizeof(__be32)) - #define IP_SFBLOCK 10 /* allocate this many at once */ /* ip_mc_socklist is real list now. 
Speed is not argument; diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 53aa0343bf69..a038feb63f23 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -41,7 +41,7 @@ struct in_device { unsigned long mr_qri; /* Query Response Interval */ unsigned char mr_qrv; /* Query Robustness Variable */ unsigned char mr_gq_running; - unsigned char mr_ifc_count; + u32 mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ struct timer_list mr_ifc_timer; /* interface change timer */ @@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); +#ifdef CONFIG_INET +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); +#else +static inline int inet_gifconf(struct net_device *dev, char __user *buf, + int len, int size) +{ + return 0; +} +#endif void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 25e2b4e80502..aee8ff4739b1 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -81,6 +81,8 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device, /* Get the device * from ishtp device instance */ struct device *ishtp_device(struct ishtp_cl_device *cl_device); +/* wait for IPC resume */ +bool ishtp_wait_resume(struct ishtp_device *dev); /* Trace interface for clients */ ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device); /* Get device pointer of PCI device for DMA access */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 2ed65b01c961..1f22a30c0963 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -13,6 +13,7 @@ #include <linux/hrtimer.h> #include <linux/kref.h> #include <linux/workqueue.h> +#include <linux/jump_label.h> #include <linux/atomic.h> #include <asm/ptrace.h> @@ -474,12 +475,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, #ifdef CONFIG_IRQ_FORCED_THREADING # ifdef CONFIG_PREEMPT_RT -# define force_irqthreads (true) +# define force_irqthreads() (true) # else -extern bool force_irqthreads; +DECLARE_STATIC_KEY_FALSE(force_irqthreads_key); +# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key)) # endif #else -#define force_irqthreads (0) +#define force_irqthreads() (false) #endif #ifndef local_softirq_pending diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 04b650bcbbe5..649a4d7c241b 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -7,17 +7,18 @@ #if defined(CONFIG_IO_URING) struct sock *io_uring_get_socket(struct file *file); -void __io_uring_cancel(struct files_struct *files); +void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { if (current->io_uring) - __io_uring_cancel(files); + __io_uring_cancel(false); } static inline void io_uring_task_cancel(void) { - return io_uring_files_cancel(NULL); + if (current->io_uring) + __io_uring_cancel(true); } static inline void io_uring_free(struct task_struct *tsk) { @@ -32,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file
*file) static inline void io_uring_task_cancel(void) { } -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { } static inline void io_uring_free(struct task_struct *tsk) diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h new file mode 100644 index 000000000000..94a24b36998f --- /dev/null +++ b/include/linux/ioam6.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_H +#define _LINUX_IOAM6_H + +#include <uapi/linux/ioam6.h> + +#endif /* _LINUX_IOAM6_H */ diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h new file mode 100644 index 000000000000..176e67919de3 --- /dev/null +++ b/include/linux/ioam6_genl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM Generic Netlink API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_GENL_H +#define _LINUX_IOAM6_GENL_H + +#include <uapi/linux/ioam6_genl.h> + +#endif /* _LINUX_IOAM6_GENL_H */ diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h new file mode 100644 index 000000000000..07d9dfedd29d --- /dev/null +++ b/include/linux/ioam6_iptunnel.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM Lightweight Tunnel API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_IPTUNNEL_H +#define _LINUX_IOAM6_IPTUNNEL_H + +#include <uapi/linux/ioam6_iptunnel.h> + +#endif /* _LINUX_IOAM6_IPTUNNEL_H */ diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 479c1da3e221..24f8489583ca 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -91,13 +91,30 @@ struct iomap { const struct iomap_page_ops *page_ops; }; -static inline sector_t -iomap_sector(struct iomap *iomap, loff_t pos) +static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos) { return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; } /* + * Returns the inline data pointer for logical offset @pos. + */ +static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos) +{ + return iomap->inline_data + pos - iomap->offset; +} + +/* + * Check if the mapping's length is within the valid range for inline data. + * This is used to guard against accessing data beyond the page inline_data + * points at. + */ +static inline bool iomap_inline_data_valid(const struct iomap *iomap) +{ + return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data); +} + +/* * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare * and page_done will be called for each page written to. This only applies to * buffered writes as unbuffered writes will not typically have pages @@ -108,10 +125,9 @@ iomap_sector(struct iomap *iomap, loff_t pos) * associated page could not be obtained. 
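The two inline-data helpers introduced above are meant to be used together: validate first, then translate the file offset into a pointer. A hedged sketch of a consumer follows (not from this patch; the caller is assumed to have already checked that pos and count lie within the mapping).

/* Illustrative only: copy inline data out of an IOMAP_INLINE mapping. */
static int sketch_read_inline(const struct iomap *iomap, loff_t pos,
                              void *buf, size_t count)
{
        if (WARN_ON_ONCE(!iomap_inline_data_valid(iomap)))
                return -EIO;    /* inline data would overrun its page */

        /* iomap_inline_data() maps the file offset to the in-memory buffer. */
        memcpy(buf, iomap_inline_data(iomap, pos), count);
        return 0;
}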
*/ struct iomap_page_ops { - int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len, - struct iomap *iomap); + int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len); void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, - struct page *page, struct iomap *iomap); + struct page *page); }; /* @@ -124,6 +140,7 @@ struct iomap_page_ops { #define IOMAP_DIRECT (1 << 4) /* direct I/O */ #define IOMAP_NOWAIT (1 << 5) /* do not block */ #define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */ +#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */ struct iomap_ops { /* @@ -145,15 +162,61 @@ struct iomap_ops { ssize_t written, unsigned flags, struct iomap *iomap); }; -/* - * Main iomap iterator function. +/** + * struct iomap_iter - Iterate through a range of a file + * @inode: Set at the start of the iteration and should not change. + * @pos: The current file position we are operating on. It is updated by + * calls to iomap_iter(). Treat as read-only in the body. + * @len: The remaining length of the file segment we're operating on. + * It is updated at the same time as @pos. + * @processed: The number of bytes processed by the body in the most recent + * iteration, or a negative errno. 0 causes the iteration to stop. + * @flags: Zero or more of the iomap_begin flags above. + * @iomap: Map describing the I/O iteration + * @srcmap: Source map for COW operations */ -typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len, - void *data, struct iomap *iomap, struct iomap *srcmap); +struct iomap_iter { + struct inode *inode; + loff_t pos; + u64 len; + s64 processed; + unsigned flags; + struct iomap iomap; + struct iomap srcmap; +}; -loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, - unsigned flags, const struct iomap_ops *ops, void *data, - iomap_actor_t actor); +int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops); + +/** + * iomap_length - length of the current iomap iteration + * @iter: iteration structure + * + * Returns the length that the operation applies to for the current iteration. + */ +static inline u64 iomap_length(const struct iomap_iter *iter) +{ + u64 end = iter->iomap.offset + iter->iomap.length; + + if (iter->srcmap.type != IOMAP_HOLE) + end = min(end, iter->srcmap.offset + iter->srcmap.length); + return min(iter->len, end - iter->pos); +} + +/** + * iomap_iter_srcmap - return the source map for the current iomap iteration + * @i: iteration structure + * + * Write operations on file systems with reflink support might require a + * source and a destination map. This function returns the source map + * for a given operation, which may or may not be identical to the destination + * map in &i->iomap.
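The iomap_iter structure above replaces the old iomap_apply()/actor callback model with a plain loop that the caller drives. A hedged sketch of the new calling convention (the walk and actor below are illustrative; IOMAP_REPORT is one of the pre-existing iomap flags not shown in this hunk):

/* Illustrative iteration loop, not from this patch. */
static s64 sketch_actor(const struct iomap_iter *iter)
{
        /* Operate on at most iomap_length(iter) bytes starting at iter->pos. */
        return iomap_length(iter);      /* report: everything was processed */
}

static int sketch_walk(struct inode *inode, loff_t pos, u64 len,
                       const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode  = inode,
                .pos    = pos,
                .len    = len,
                .flags  = IOMAP_REPORT,
        };
        int ret;

        /*
         * iomap_iter() maps the next extent and advances pos/len by the
         * previous round's iter.processed; a return of 0 or an error ends
         * the walk, exactly as the @processed kernel-doc above describes.
         */
        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = sketch_actor(&iter);
        return ret;
}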
+ */ +static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) +{ + if (i->srcmap.type != IOMAP_HOLE) + return &i->srcmap; + return &i->iomap; +} ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); @@ -250,8 +313,8 @@ int iomap_writepages(struct address_space *mapping, struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); - blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap, - struct bio *bio, loff_t file_offset); + blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio, + loff_t file_offset); }; /* diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index e9bfe6972aed..3f53bc27a19b 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -6,46 +6,22 @@ #include <linux/sched/rt.h> #include <linux/iocontext.h> -/* - * Gives us 8 prio classes with 13-bits of data for each class - */ -#define IOPRIO_CLASS_SHIFT (13) -#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) - -#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) -#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) -#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data) - -#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) +#include <uapi/linux/ioprio.h> /* - * These are the io priority groups as implemented by CFQ. RT is the realtime - * class, it always gets premium service. BE is the best-effort scheduling - * class, the default for any process. IDLE is the idle scheduling class, it - * is only served when no one else is using the disk. + * Default IO priority. */ -enum { - IOPRIO_CLASS_NONE, - IOPRIO_CLASS_RT, - IOPRIO_CLASS_BE, - IOPRIO_CLASS_IDLE, -}; +#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM) /* - * 8 best effort priority levels are supported + * Check that a priority value has a valid class. */ -#define IOPRIO_BE_NR (8) - -enum { - IOPRIO_WHO_PROCESS = 1, - IOPRIO_WHO_PGRP, - IOPRIO_WHO_USER, -}; +static inline bool ioprio_valid(unsigned short ioprio) +{ + unsigned short class = IOPRIO_PRIO_CLASS(ioprio); -/* - * Fallback BE priority - */ -#define IOPRIO_NORM (4) + return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE; +} /* * if process has set io priority explicitly, use that. 
if not, convert @@ -80,7 +56,7 @@ static inline int get_current_ioprio(void) if (ioc) return ioc->ioprio; - return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + return IOPRIO_DEFAULT; } /* diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 70b2ad3b9884..ef4a69865737 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -76,6 +76,9 @@ struct ipv6_devconf { __s32 disable_policy; __s32 ndisc_tclass; __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; struct ctl_table_header *sysctl_header; }; diff --git a/include/linux/irq.h b/include/linux/irq.h index 8e9a9ae471a6..c8293c817646 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -569,6 +569,7 @@ struct irq_chip { * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs * in the suspend path if they are in disabled state + * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -581,6 +582,7 @@ enum { IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), IRQCHIP_SUPPORTS_NMI = (1 << 8), IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), + IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), }; #include <linux/irqdesc.h> diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index b7b45ca82bea..790e7fcfc1a6 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h @@ -13,26 +13,22 @@ #ifndef ISCSI_IBFT_H #define ISCSI_IBFT_H -#include <linux/acpi.h> +#include <linux/types.h> /* - * Logical location of iSCSI Boot Format Table. - * If the value is NULL there is no iBFT on the machine. + * Physical location of iSCSI Boot Format Table. + * If the value is 0 there is no iBFT on the machine. */ -extern struct acpi_table_ibft *ibft_addr; +extern phys_addr_t ibft_phys_addr; /* * Routine used to find and reserve the iSCSI Boot Format Table. The - * mapped address is set in the ibft_addr variable. + * physical address is set in the ibft_phys_addr variable. */ #ifdef CONFIG_ISCSI_IBFT_FIND -unsigned long find_ibft_region(unsigned long *sizep); +void reserve_ibft_region(void); #else -static inline unsigned long find_ibft_region(unsigned long *sizep) -{ - *sizep = 0; - return 0; -} +static inline void reserve_ibft_region(void) {} #endif #endif /* ISCSI_IBFT_H */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 5310e217bd74..dd874a1ee862 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -3,6 +3,7 @@ #define _LINUX_KASAN_H #include <linux/bug.h> +#include <linux/kernel.h> #include <linux/static_key.h> #include <linux/types.h> diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 9e8ca8743c26..1093abf7c28c 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -98,6 +98,11 @@ struct kernfs_elem_dir { * better directly in kernfs_node but is here to save space. */ struct kernfs_root *root; + /* + * Monotonic revision counter, used to identify if a directory + * node has changed during negative dentry revalidation. 
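The kernfs revision counter above enables a classic negative-dentry optimization: record the directory's revision when a lookup fails, and later treat the cached miss as valid only while the revision is unchanged. A generic, hedged sketch of the pattern (hypothetical types and helpers, not the kernfs implementation, which lives in fs/kernfs/):

/* Pattern sketch only. */
struct sketch_dir {
        unsigned long rev;              /* bumped on every create/remove */
};

struct sketch_neg_entry {
        struct sketch_dir *parent;
        unsigned long seen_rev;         /* parent->rev at the failed lookup */
};

static bool sketch_neg_entry_valid(const struct sketch_neg_entry *e)
{
        /*
         * If the directory has not changed since the miss was cached,
         * the name still cannot exist, so revalidation can succeed
         * without retaking any locks or repeating the lookup.
         */
        return e->seen_rev == READ_ONCE(e->parent->rev);
}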
+ */ + unsigned long rev; }; struct kernfs_elem_symlink { @@ -188,7 +193,7 @@ struct kernfs_root { u32 id_highbits; struct kernfs_syscall_ops *syscall_ops; - /* list of kernfs_super_info of this root, protected by kernfs_mutex */ + /* list of kernfs_super_info of this root, protected by kernfs_rwsem */ struct list_head supers; wait_queue_head_t deactivate_waitq; diff --git a/include/linux/kfence.h b/include/linux/kfence.h index a70d1ea03532..3fe6dd8a18c1 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate; static __always_inline bool is_kfence_address(const void *addr) { /* - * The non-NULL check is required in case the __kfence_pool pointer was - * never initialized; keep it in the slow-path after the range-check. + * The __kfence_pool != NULL check is required to deal with the case + * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in + * the slow-path after the range-check! */ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); } /** diff --git a/include/linux/leds.h b/include/linux/leds.h index 329fd914cf24..a0b730be40ad 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -33,6 +33,12 @@ enum led_brightness { LED_FULL = 255, }; +enum led_default_state { + LEDS_DEFSTATE_OFF = 0, + LEDS_DEFSTATE_ON = 1, + LEDS_DEFSTATE_KEEP = 2, +}; + struct led_init_data { /* device fwnode handle */ struct fwnode_handle *fwnode; @@ -520,9 +526,9 @@ struct gpio_led { /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ struct gpio_desc *gpiod; }; -#define LEDS_GPIO_DEFSTATE_OFF 0 -#define LEDS_GPIO_DEFSTATE_ON 1 -#define LEDS_GPIO_DEFSTATE_KEEP 2 +#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF +#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON +#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP struct gpio_led_platform_data { int num_leds; diff --git a/include/linux/libata.h b/include/linux/libata.h index 3fcd24236793..860e63f5667b 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -161,6 +161,10 @@ enum { ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ + ATA_DFLAG_FEATURES_MASK = ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \ + ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \ + ATA_DFLAG_NCQ_PRIO, + ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ @@ -535,6 +539,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes) extern struct device_attribute dev_attr_unload_heads; #ifdef CONFIG_SATA_HOST extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_ncq_prio_supported; extern struct device_attribute dev_attr_ncq_prio_enable; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; @@ -1454,7 +1459,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) static inline bool ata_is_host_link(const struct ata_link *link) { - return 1; + return true; } #endif /* CONFIG_SATA_PMP */ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h deleted file mode 100644 index 0908abda9c1b..000000000000 --- a/include/linux/lightnvm.h +++ /dev/null @@ -1,697 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef NVM_H -#define NVM_H - -#include <linux/blkdev.h> -#include 
<linux/types.h> -#include <uapi/linux/lightnvm.h> - -enum { - NVM_IO_OK = 0, - NVM_IO_REQUEUE = 1, - NVM_IO_DONE = 2, - NVM_IO_ERR = 3, - - NVM_IOTYPE_NONE = 0, - NVM_IOTYPE_GC = 1, -}; - -/* common format */ -#define NVM_GEN_CH_BITS (8) -#define NVM_GEN_LUN_BITS (8) -#define NVM_GEN_BLK_BITS (16) -#define NVM_GEN_RESERVED (32) - -/* 1.2 format */ -#define NVM_12_PG_BITS (16) -#define NVM_12_PL_BITS (4) -#define NVM_12_SEC_BITS (4) -#define NVM_12_RESERVED (8) - -/* 2.0 format */ -#define NVM_20_SEC_BITS (24) -#define NVM_20_RESERVED (8) - -enum { - NVM_OCSSD_SPEC_12 = 12, - NVM_OCSSD_SPEC_20 = 20, -}; - -struct ppa_addr { - /* Generic structure for all addresses */ - union { - /* generic device format */ - struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 blk : NVM_GEN_BLK_BITS; - u64 reserved : NVM_GEN_RESERVED; - } a; - - /* 1.2 device format */ - struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 blk : NVM_GEN_BLK_BITS; - u64 pg : NVM_12_PG_BITS; - u64 pl : NVM_12_PL_BITS; - u64 sec : NVM_12_SEC_BITS; - u64 reserved : NVM_12_RESERVED; - } g; - - /* 2.0 device format */ - struct { - u64 grp : NVM_GEN_CH_BITS; - u64 pu : NVM_GEN_LUN_BITS; - u64 chk : NVM_GEN_BLK_BITS; - u64 sec : NVM_20_SEC_BITS; - u64 reserved : NVM_20_RESERVED; - } m; - - struct { - u64 line : 63; - u64 is_cached : 1; - } c; - - u64 ppa; - }; -}; - -struct nvm_rq; -struct nvm_id; -struct nvm_dev; -struct nvm_tgt_dev; -struct nvm_chk_meta; - -typedef int (nvm_id_fn)(struct nvm_dev *); -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); -typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, - struct nvm_chk_meta *); -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *); -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); -typedef void (nvm_destroy_dma_pool_fn)(void *); -typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, - dma_addr_t *); -typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); - -struct nvm_dev_ops { - nvm_id_fn *identity; - nvm_op_bb_tbl_fn *get_bb_tbl; - nvm_op_set_bb_fn *set_bb_tbl; - - nvm_get_chk_meta_fn *get_chk_meta; - - nvm_submit_io_fn *submit_io; - - nvm_create_dma_pool_fn *create_dma_pool; - nvm_destroy_dma_pool_fn *destroy_dma_pool; - nvm_dev_dma_alloc_fn *dev_dma_alloc; - nvm_dev_dma_free_fn *dev_dma_free; -}; - -#ifdef CONFIG_NVM - -#include <linux/file.h> -#include <linux/dmapool.h> - -enum { - /* HW Responsibilities */ - NVM_RSP_L2P = 1 << 0, - NVM_RSP_ECC = 1 << 1, - - /* Physical Adressing Mode */ - NVM_ADDRMODE_LINEAR = 0, - NVM_ADDRMODE_CHANNEL = 1, - - /* Plane programming mode for LUN */ - NVM_PLANE_SINGLE = 1, - NVM_PLANE_DOUBLE = 2, - NVM_PLANE_QUAD = 4, - - /* Status codes */ - NVM_RSP_SUCCESS = 0x0, - NVM_RSP_NOT_CHANGEABLE = 0x1, - NVM_RSP_ERR_FAILWRITE = 0x40ff, - NVM_RSP_ERR_EMPTYPAGE = 0x42ff, - NVM_RSP_ERR_FAILECC = 0x4281, - NVM_RSP_ERR_FAILCRC = 0x4004, - NVM_RSP_WARN_HIGHECC = 0x4700, - - /* Device opcodes */ - NVM_OP_PWRITE = 0x91, - NVM_OP_PREAD = 0x92, - NVM_OP_ERASE = 0x90, - - /* PPA Command Flags */ - NVM_IO_SNGL_ACCESS = 0x0, - NVM_IO_DUAL_ACCESS = 0x1, - NVM_IO_QUAD_ACCESS = 0x2, - - /* NAND Access Modes */ - NVM_IO_SUSPEND = 0x80, - NVM_IO_SLC_MODE = 0x100, - NVM_IO_SCRAMBLE_ENABLE = 0x200, - - /* Block Types */ - NVM_BLK_T_FREE = 0x0, - NVM_BLK_T_BAD = 0x1, - NVM_BLK_T_GRWN_BAD = 0x2, - NVM_BLK_T_DEV = 0x4, - NVM_BLK_T_HOST = 0x8, - - /* 
Memory capabilities */ - NVM_ID_CAP_SLC = 0x1, - NVM_ID_CAP_CMD_SUSPEND = 0x2, - NVM_ID_CAP_SCRAMBLE = 0x4, - NVM_ID_CAP_ENCRYPT = 0x8, - - /* Memory types */ - NVM_ID_FMTYPE_SLC = 0, - NVM_ID_FMTYPE_MLC = 1, - - /* Device capabilities */ - NVM_ID_DCAP_BBLKMGMT = 0x1, - NVM_UD_DCAP_ECC = 0x2, -}; - -struct nvm_id_lp_mlc { - u16 num_pairs; - u8 pairs[886]; -}; - -struct nvm_id_lp_tbl { - __u8 id[8]; - struct nvm_id_lp_mlc mlc; -}; - -struct nvm_addrf_12 { - u8 ch_len; - u8 lun_len; - u8 blk_len; - u8 pg_len; - u8 pln_len; - u8 sec_len; - - u8 ch_offset; - u8 lun_offset; - u8 blk_offset; - u8 pg_offset; - u8 pln_offset; - u8 sec_offset; - - u64 ch_mask; - u64 lun_mask; - u64 blk_mask; - u64 pg_mask; - u64 pln_mask; - u64 sec_mask; -}; - -struct nvm_addrf { - u8 ch_len; - u8 lun_len; - u8 chk_len; - u8 sec_len; - u8 rsv_len[2]; - - u8 ch_offset; - u8 lun_offset; - u8 chk_offset; - u8 sec_offset; - u8 rsv_off[2]; - - u64 ch_mask; - u64 lun_mask; - u64 chk_mask; - u64 sec_mask; - u64 rsv_mask[2]; -}; - -enum { - /* Chunk states */ - NVM_CHK_ST_FREE = 1 << 0, - NVM_CHK_ST_CLOSED = 1 << 1, - NVM_CHK_ST_OPEN = 1 << 2, - NVM_CHK_ST_OFFLINE = 1 << 3, - - /* Chunk types */ - NVM_CHK_TP_W_SEQ = 1 << 0, - NVM_CHK_TP_W_RAN = 1 << 1, - NVM_CHK_TP_SZ_SPEC = 1 << 4, -}; - -/* - * Note: The structure size is linked to nvme_nvm_chk_meta such that the same - * buffer can be used when converting from little endian to cpu addressing. - */ -struct nvm_chk_meta { - u8 state; - u8 type; - u8 wi; - u8 rsvd[5]; - u64 slba; - u64 cnlb; - u64 wp; -}; - -struct nvm_target { - struct list_head list; - struct nvm_tgt_dev *dev; - struct nvm_tgt_type *type; - struct gendisk *disk; -}; - -#define ADDR_EMPTY (~0ULL) - -#define NVM_TARGET_DEFAULT_OP (101) -#define NVM_TARGET_MIN_OP (3) -#define NVM_TARGET_MAX_OP (80) - -#define NVM_VERSION_MAJOR 1 -#define NVM_VERSION_MINOR 0 -#define NVM_VERSION_PATCH 0 - -#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ - -struct nvm_rq; -typedef void (nvm_end_io_fn)(struct nvm_rq *); - -struct nvm_rq { - struct nvm_tgt_dev *dev; - - struct bio *bio; - - union { - struct ppa_addr ppa_addr; - dma_addr_t dma_ppa_list; - }; - - struct ppa_addr *ppa_list; - - void *meta_list; - dma_addr_t dma_meta_list; - - nvm_end_io_fn *end_io; - - uint8_t opcode; - uint16_t nr_ppas; - uint16_t flags; - - u64 ppa_status; /* ppa media status */ - int error; - - int is_seq; /* Sequential hint flag. 1.2 only */ - - void *private; -}; - -static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) -{ - return pdu - sizeof(struct nvm_rq); -} - -static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) -{ - return rqdata + 1; -} - -static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd) -{ - return (rqd->nr_ppas > 1) ? 
rqd->ppa_list : &rqd->ppa_addr; -} - -enum { - NVM_BLK_ST_FREE = 0x1, /* Free block */ - NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ - NVM_BLK_ST_BAD = 0x8, /* Bad block */ -}; - -/* Instance geometry */ -struct nvm_geo { - /* device reported version */ - u8 major_ver_id; - u8 minor_ver_id; - - /* kernel short version */ - u8 version; - - /* instance specific geometry */ - int num_ch; - int num_lun; /* per channel */ - - /* calculated values */ - int all_luns; /* across channels */ - int all_chunks; /* across channels */ - - int op; /* over-provision in instance */ - - sector_t total_secs; /* across channels */ - - /* chunk geometry */ - u32 num_chk; /* chunks per lun */ - u32 clba; /* sectors per chunk */ - u16 csecs; /* sector size */ - u16 sos; /* out-of-band area size */ - bool ext; /* metadata in extended data buffer */ - u32 mdts; /* Max data transfer size*/ - - /* device write constrains */ - u32 ws_min; /* minimum write size */ - u32 ws_opt; /* optimal write size */ - u32 mw_cunits; /* distance required for successful read */ - u32 maxoc; /* maximum open chunks */ - u32 maxocpu; /* maximum open chunks per parallel unit */ - - /* device capabilities */ - u32 mccap; - - /* device timings */ - u32 trdt; /* Avg. Tread (ns) */ - u32 trdm; /* Max Tread (ns) */ - u32 tprt; /* Avg. Tprog (ns) */ - u32 tprm; /* Max Tprog (ns) */ - u32 tbet; /* Avg. Terase (ns) */ - u32 tbem; /* Max Terase (ns) */ - - /* generic address format */ - struct nvm_addrf addrf; - - /* 1.2 compatibility */ - u8 vmnt; - u32 cap; - u32 dom; - - u8 mtype; - u8 fmtype; - - u16 cpar; - u32 mpos; - - u8 num_pln; - u8 pln_mode; - u16 num_pg; - u16 fpg_sz; -}; - -/* sub-device structure */ -struct nvm_tgt_dev { - /* Device information */ - struct nvm_geo geo; - - /* Base ppas for target LUNs */ - struct ppa_addr *luns; - - struct request_queue *q; - - struct nvm_dev *parent; - void *map; -}; - -struct nvm_dev { - struct nvm_dev_ops *ops; - - struct list_head devices; - - /* Device information */ - struct nvm_geo geo; - - unsigned long *lun_map; - void *dma_pool; - - /* Backend device */ - struct request_queue *q; - char name[DISK_NAME_LEN]; - void *private_data; - - struct kref ref; - void *rmap; - - struct mutex mlock; - spinlock_t lock; - - /* target management */ - struct list_head area_list; - struct list_head targets; -}; - -static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr l; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; - l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; - l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; - l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; - l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; - l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; - l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; - l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; - l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; - } - - return l; -} - -static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr l; - - l.ppa = 0; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; - l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; - l.g.blk = 
(r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; - l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; - l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; - l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; - l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; - l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; - l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; - } - - return l; -} - -static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf, - struct ppa_addr p) -{ - struct nvm_geo *geo = &dev->geo; - u64 caddr; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf; - - caddr = (u64)p.g.pg << ppaf->pg_offset; - caddr |= (u64)p.g.pl << ppaf->pln_offset; - caddr |= (u64)p.g.sec << ppaf->sec_offset; - } else { - caddr = p.m.sec; - } - - return caddr; -} - -static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev, - void *addrf, u32 ppa32) -{ - struct ppa_addr ppa64; - - ppa64.ppa = 0; - - if (ppa32 == -1) { - ppa64.ppa = ADDR_EMPTY; - } else if (ppa32 & (1U << 31)) { - ppa64.c.line = ppa32 & ((~0U) >> 1); - ppa64.c.is_cached = 1; - } else { - struct nvm_geo *geo = &dev->geo; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - - ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> - ppaf->ch_offset; - ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> - ppaf->lun_offset; - ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> - ppaf->blk_offset; - ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> - ppaf->pg_offset; - ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> - ppaf->pln_offset; - ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> - ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> - lbaf->ch_offset; - ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> - lbaf->lun_offset; - ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> - lbaf->chk_offset; - ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> - lbaf->sec_offset; - } - } - - return ppa64; -} - -static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev, - void *addrf, struct ppa_addr ppa64) -{ - u32 ppa32 = 0; - - if (ppa64.ppa == ADDR_EMPTY) { - ppa32 = ~0U; - } else if (ppa64.c.is_cached) { - ppa32 |= ppa64.c.line; - ppa32 |= 1U << 31; - } else { - struct nvm_geo *geo = &dev->geo; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - - ppa32 |= ppa64.g.ch << ppaf->ch_offset; - ppa32 |= ppa64.g.lun << ppaf->lun_offset; - ppa32 |= ppa64.g.blk << ppaf->blk_offset; - ppa32 |= ppa64.g.pg << ppaf->pg_offset; - ppa32 |= ppa64.g.pl << ppaf->pln_offset; - ppa32 |= ppa64.g.sec << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa32 |= ppa64.m.grp << lbaf->ch_offset; - ppa32 |= ppa64.m.pu << lbaf->lun_offset; - ppa32 |= ppa64.m.chk << lbaf->chk_offset; - ppa32 |= ppa64.m.sec << lbaf->sec_offset; - } - } - - return ppa32; -} - -static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev, - struct ppa_addr *ppa) -{ - struct nvm_geo *geo = &dev->geo; - int last = 0; - - if (geo->version == NVM_OCSSD_SPEC_12) { - int sec = ppa->g.sec; - - sec++; - if (sec == geo->ws_min) { - int pg = ppa->g.pg; - - sec = 0; - pg++; - if (pg == geo->num_pg) { - int pl = ppa->g.pl; - - pg = 0; - pl++; - if (pl == geo->num_pln) - last = 1; - - ppa->g.pl = pl; - } - ppa->g.pg = pg; - } - ppa->g.sec = sec; - } else { - ppa->m.sec++; - if (ppa->m.sec == geo->clba) - last = 1; - } - - return last; -} - -typedef 
sector_t (nvm_tgt_capacity_fn)(void *); -typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, - int flags); -typedef void (nvm_tgt_exit_fn)(void *, bool); -typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); -typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); - -enum { - NVM_TGT_F_DEV_L2P = 0, - NVM_TGT_F_HOST_L2P = 1 << 0, -}; - -struct nvm_tgt_type { - const char *name; - unsigned int version[3]; - int flags; - - /* target entry points */ - const struct block_device_operations *bops; - nvm_tgt_capacity_fn *capacity; - - /* module-specific init/teardown */ - nvm_tgt_init_fn *init; - nvm_tgt_exit_fn *exit; - - /* sysfs */ - nvm_tgt_sysfs_init_fn *sysfs_init; - nvm_tgt_sysfs_exit_fn *sysfs_exit; - - /* For internal use */ - struct list_head list; - struct module *owner; -}; - -extern int nvm_register_tgt_type(struct nvm_tgt_type *); -extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); - -extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); -extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); - -extern struct nvm_dev *nvm_alloc_dev(int); -extern int nvm_register(struct nvm_dev *); -extern void nvm_unregister(struct nvm_dev *); - -extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr, - int, struct nvm_chk_meta *); -extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *, - int, int); -extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern void nvm_end_io(struct nvm_rq *); - -#else /* CONFIG_NVM */ -struct nvm_dev_ops; - -static inline struct nvm_dev *nvm_alloc_dev(int node) -{ - return ERR_PTR(-EINVAL); -} -static inline int nvm_register(struct nvm_dev *dev) -{ - return -EINVAL; -} -static inline void nvm_unregister(struct nvm_dev *dev) {} -#endif /* CONFIG_NVM */ -#endif /* LIGHTNVM.H */ diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h index 17b5943727d5..fd3d0b358f22 100644 --- a/include/linux/linear_range.h +++ b/include/linux/linear_range.h @@ -41,6 +41,8 @@ int linear_range_get_selector_low(const struct linear_range *r, int linear_range_get_selector_high(const struct linear_range *r, unsigned int val, unsigned int *selector, bool *found); +void linear_range_get_selector_within(const struct linear_range *r, + unsigned int val, unsigned int *selector); int linear_range_get_selector_low_array(const struct linear_range *r, int ranges, unsigned int val, unsigned int *selector, bool *found); diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index ded90b097e6e..975e33b793a7 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -6,6 +6,8 @@ #include <linux/percpu-defs.h> #include <linux/lockdep.h> +#ifndef CONFIG_PREEMPT_RT + typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -14,29 +16,14 @@ typedef struct { } local_lock_t; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LL_DEP_MAP_INIT(lockname) \ +# define LOCAL_LOCK_DEBUG_INIT(lockname) \ .dep_map = { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_CONFIG, \ - .lock_type = LD_LOCK_PERCPU, \ - } -#else -# define LL_DEP_MAP_INIT(lockname) -#endif - -#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) } - -#define __local_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ - lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \ - 
LD_WAIT_CONFIG, LD_WAIT_INV, \ - LD_LOCK_PERCPU); \ -} while (0) + .lock_type = LD_LOCK_PERCPU, \ + }, \ + .owner = NULL, -#ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void local_lock_acquire(local_lock_t *l) { lock_map_acquire(&l->dep_map); @@ -51,11 +38,30 @@ static inline void local_lock_release(local_lock_t *l) lock_map_release(&l->dep_map); } +static inline void local_lock_debug_init(local_lock_t *l) +{ + l->owner = NULL; +} #else /* CONFIG_DEBUG_LOCK_ALLOC */ +# define LOCAL_LOCK_DEBUG_INIT(lockname) static inline void local_lock_acquire(local_lock_t *l) { } static inline void local_lock_release(local_lock_t *l) { } +static inline void local_lock_debug_init(local_lock_t *l) { } #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ +#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) } + +#define __local_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \ + 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ + LD_LOCK_PERCPU); \ + local_lock_debug_init(lock); \ +} while (0) + #define __local_lock(lock) \ do { \ preempt_disable(); \ @@ -91,3 +97,45 @@ static inline void local_lock_release(local_lock_t *l) { } local_lock_release(this_cpu_ptr(lock)); \ local_irq_restore(flags); \ } while (0) + +#else /* !CONFIG_PREEMPT_RT */ + +/* + * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the + * critical section while staying preemptible. + */ +typedef spinlock_t local_lock_t; + +#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname)) + +#define __local_lock_init(l) \ + do { \ + local_spin_lock_init((l)); \ + } while (0) + +#define __local_lock(__lock) \ + do { \ + migrate_disable(); \ + spin_lock(this_cpu_ptr((__lock))); \ + } while (0) + +#define __local_lock_irq(lock) __local_lock(lock) + +#define __local_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __local_lock(lock); \ + } while (0) + +#define __local_unlock(__lock) \ + do { \ + spin_unlock(this_cpu_ptr((__lock))); \ + migrate_enable(); \ + } while (0) + +#define __local_unlock_irq(lock) __local_unlock(lock) + +#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock) + +#endif /* CONFIG_PREEMPT_RT */ diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 0520c0cd73f4..3bc9f7410e21 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -27,7 +27,8 @@ struct rpc_task; struct nlmsvc_binding { __be32 (*fopen)(struct svc_rqst *, struct nfs_fh *, - struct file **); + struct file **, + int mode); void (*fclose)(struct file *); }; diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 666f5f310a04..c4ae6506b8b3 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -10,6 +10,8 @@ #ifndef LINUX_LOCKD_LOCKD_H #define LINUX_LOCKD_LOCKD_H +/* XXX: a lot of this should really be under fs/lockd. 
*/ + #include <linux/in.h> #include <linux/in6.h> #include <net/ipv6.h> @@ -154,7 +156,8 @@ struct nlm_rqst { struct nlm_file { struct hlist_node f_list; /* linked list */ struct nfs_fh f_handle; /* NFS file handle */ - struct file * f_file; /* VFS file pointer */ + struct file * f_file[2]; /* VFS file pointers, + indexed by O_ flags */ struct nlm_share * f_shares; /* DOS shares */ struct list_head f_blocks; /* blocked locks */ unsigned int f_locks; /* guesstimate # of locks */ @@ -267,6 +270,7 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); /* * Server-side lock handling */ +int lock_to_openmode(struct file_lock *); __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, int, struct nlm_cookie *, int); @@ -286,7 +290,7 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); * File handling for the server personality */ __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, - struct nfs_fh *); + struct nlm_lock *); void nlm_release_file(struct nlm_file *); void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); @@ -301,7 +305,8 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) { - return locks_inode(file->f_file); + return locks_inode(file->f_file[O_RDONLY] ? + file->f_file[O_RDONLY] : file->f_file[O_WRONLY]); } static inline int __nlm_privileged_request4(const struct sockaddr *sap) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 5cf387813754..9fe165beb0f9 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -306,31 +306,29 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); #define lockdep_depth(tsk) (debug_locks ? 
(tsk)->lockdep_depth : 0) -#define lockdep_assert_held(l) do { \ - WARN_ON(debug_locks && \ - lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \ - } while (0) +#define lockdep_assert(cond) \ + do { WARN_ON(debug_locks && !(cond)); } while (0) -#define lockdep_assert_not_held(l) do { \ - WARN_ON(debug_locks && \ - lockdep_is_held(l) == LOCK_STATE_HELD); \ - } while (0) +#define lockdep_assert_once(cond) \ + do { WARN_ON_ONCE(debug_locks && !(cond)); } while (0) -#define lockdep_assert_held_write(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held_type(l, 0)); \ - } while (0) +#define lockdep_assert_held(l) \ + lockdep_assert(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) -#define lockdep_assert_held_read(l) do { \ - WARN_ON(debug_locks && !lockdep_is_held_type(l, 1)); \ - } while (0) +#define lockdep_assert_not_held(l) \ + lockdep_assert(lockdep_is_held(l) != LOCK_STATE_HELD) -#define lockdep_assert_held_once(l) do { \ - WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ - } while (0) +#define lockdep_assert_held_write(l) \ + lockdep_assert(lockdep_is_held_type(l, 0)) -#define lockdep_assert_none_held_once() do { \ - WARN_ON_ONCE(debug_locks && current->lockdep_depth); \ - } while (0) +#define lockdep_assert_held_read(l) \ + lockdep_assert(lockdep_is_held_type(l, 1)) + +#define lockdep_assert_held_once(l) \ + lockdep_assert_once(lockdep_is_held(l) != LOCK_STATE_NOT_HELD) + +#define lockdep_assert_none_held_once() \ + lockdep_assert_once(!current->lockdep_depth) #define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) @@ -407,6 +405,9 @@ extern int lock_is_held(const void *); extern int lockdep_is_held(const void *); #define lockdep_is_held_type(l, r) (1) +#define lockdep_assert(c) do { } while (0) +#define lockdep_assert_once(c) do { } while (0) + #define lockdep_assert_held(l) do { (void)(l); } while (0) #define lockdep_assert_not_held(l) do { (void)(l); } while (0) #define lockdep_assert_held_write(l) do { (void)(l); } while (0) diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index acee44b9db26..0f06c2287b52 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -22,14 +22,10 @@ #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +#define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 #define MARVELL_PHY_ID_88X2222 0x01410f10 -/* PHY IDs and mask for Alaska 10G PHYs */ -#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 -#define MARVELL_PHY_ID_88X3340 0x002b09a8 - /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 07f5ef8fc456..c6786c12b207 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -91,12 +91,13 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); mei_cldev_driver_register,\ mei_cldev_driver_unregister) -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, + size_t length); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); -ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, - u8 vtag); +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag); ssize_t 
mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, @@ -114,6 +115,6 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); int mei_cldev_enable(struct mei_cl_device *cldev); int mei_cldev_disable(struct mei_cl_device *cldev); -bool mei_cldev_enabled(struct mei_cl_device *cldev); +bool mei_cldev_enabled(const struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index cbf46f56d105..4a53c3ca86bd 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -209,7 +209,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range(i, p_start, p_end) \ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) /** * for_each_mem_range_rev - reverse iterate through memblock areas from @@ -220,7 +220,7 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range_rev(i, p_start, p_end) \ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_NONE, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) /** * for_each_reserved_mem_range - iterate over all reserved memblock areas diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bfe5c486f4ad..20151c4f1e0e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -612,12 +612,15 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return; /* * There is no reclaim protection applied to a targeted reclaim. 
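The mem_cgroup_protection() rework that begins above (and concludes in the next hunk) drops the single return value selected by the old in_low_reclaim flag: the function now hands back both thresholds through output parameters and leaves the policy choice to the caller. A minimal sketch of the new calling convention — the helper name is illustrative, not from this diff:

	static unsigned long effective_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
	{
		unsigned long min, low;

		/* One call now yields both memory.min and memory.low. */
		mem_cgroup_protection(root, memcg, &min, &low);

		/* The old single-value policy, reconstructed caller-side. */
		return in_low_reclaim ? min : max(min, low);
	}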
@@ -653,13 +656,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return; - return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); } void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1147,11 +1147,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1581,7 +1582,8 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; -bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, + gfp_t gfp_mask); void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); #ifdef CONFIG_MEMCG extern struct static_key_false memcg_sockets_enabled_key; diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h index 2660226138b8..e5b8dbf828b6 100644 --- a/include/linux/mfd/hi6421-spmi-pmic.h +++ b/include/linux/mfd/hi6421-spmi-pmic.h @@ -19,11 +19,6 @@ struct hi6421_spmi_pmic { struct resource *res; struct device *dev; void __iomem *regs; - spinlock_t lock; - struct irq_domain *domain; - int irq; - int gpio; - unsigned int *irqs; struct regmap *regmap; }; diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 944aa3aa3035..723985879035 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -303,6 +303,7 @@ struct mhi_controller_config { * @rddm_size: RAM dump size that host should allocate for debugging purpose * @sbl_size: SBL image size downloaded through BHIe (optional) * @seg_len: BHIe vector size (optional) + * @reg_len: Length of the MHI MMIO region (required) * @fbc_image: Points to firmware image buffer * @rddm_image: Points to RAM dump buffer * @mhi_chan: Points to the channel configuration table @@ -356,6 +357,7 @@ struct mhi_controller_config { * @fbc_download: MHI host needs to do complete image transfer (optional) * @wake_set: Device wakeup set flag * @irq_flags: irq flags passed to request_irq (optional) + * @mru: the default MRU for the MHI device * * Fields marked as (required) need to be populated by the controller driver * before calling mhi_register_controller(). 
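Since @reg_len above is documented as (required), a controller driver now records the size of its MMIO window next to the existing registers pointer, and may seed the new @mru default, before registering. A rough probe-time sketch — the resource handling and the 3500-byte MRU are assumptions; only reg_len, mru, and mhi_register_controller() come from this header:

	mhi_cntrl = mhi_alloc_controller();
	if (!mhi_cntrl)
		return -ENOMEM;

	mhi_cntrl->regs = ioremap(res->start, resource_size(res));
	mhi_cntrl->reg_len = resource_size(res);	/* new, required */
	mhi_cntrl->mru = 3500;				/* assumed default MRU */

	ret = mhi_register_controller(mhi_cntrl, config);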
For the fields marked as (optional) @@ -386,6 +388,7 @@ struct mhi_controller { size_t rddm_size; size_t sbl_size; size_t seg_len; + size_t reg_len; struct image_info *fbc_image; struct image_info *rddm_image; struct mhi_chan *mhi_chan; @@ -448,6 +451,7 @@ struct mhi_controller { bool fbc_download; bool wake_set; unsigned long irq_flags; + u32 mru; }; /** diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 9b7b7cd3bae9..23dadf7aeba8 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -51,7 +51,6 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page); extern int migrate_page_move_mapping(struct address_space *mapping, struct page *newpage, struct page *page, int extra_count); -extern void copy_huge_page(struct page *dst, struct page *src); #else static inline void putback_movable_pages(struct list_head *l) {} @@ -77,10 +76,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, { return -ENOSYS; } - -static inline void copy_huge_page(struct page *dst, struct page *src) -{ -} #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_COMPACTION diff --git a/include/linux/mii.h b/include/linux/mii.h index 219b93cad1dd..12ea29e04293 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -32,7 +32,7 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); -extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern void mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0025913505ab..66eaf0aa7f69 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1038,7 +1038,7 @@ enum { struct mlx5_mkey_seg { /* This is a two bit field occupying bits 31-30. * bit 31 is always 0, - * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation */ u8 status; u8 pcie_control; @@ -1157,6 +1157,9 @@ enum mlx5_cap_mode { HCA_CAP_OPMOD_GET_CUR = 1, }; +/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate + * capability memory. 
+ */ enum mlx5_cap_type { MLX5_CAP_GENERAL = 0, MLX5_CAP_ETHERNET_OFFLOADS, @@ -1213,55 +1216,55 @@ enum mlx5_qcam_feature_groups { /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) #define MLX5_CAP_GEN_64(mdev, cap) \ - MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap) #define MLX5_CAP_GEN_2(mdev, cap) \ - MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) #define MLX5_CAP_GEN_2_64(mdev, cap) \ - MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) + MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) #define MLX5_CAP_GEN_2_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap) + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap) #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap) #define MLX5_CAP_ROCE(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) #define MLX5_CAP64_FLOWTABLE(mdev, cap) \ - MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap) #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) @@ -1301,11 +1304,11 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - 
mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) @@ -1327,31 +1330,31 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET64(flow_table_eswitch_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap) #define MLX5_CAP_ODP(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) #define MLX5_CAP_ODP_MAX(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap) #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ - mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) + mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap) #define MLX5_CAP_QOS(mdev, cap)\ - MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) + MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap) #define MLX5_CAP_DEBUG(mdev, cap)\ - MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) + MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap) #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) @@ -1387,27 +1390,27 @@ enum mlx5_qcam_feature_groups { MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) #define MLX5_CAP_DEV_MEM(mdev, cap)\ - MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) #define MLX5_CAP64_DEV_MEM(mdev, cap)\ - MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) #define MLX5_CAP_TLS(mdev, cap) \ - MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) + MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap) #define MLX5_CAP_DEV_EVENT(mdev, cap)\ - MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap) #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ MLX5_GET(virtio_emulation_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) + (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ MLX5_GET64(virtio_emulation_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) + (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) #define MLX5_CAP_IPSEC(mdev, cap)\ - MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap) + MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap) enum { MLX5_CMD_STAT_OK = 0x0, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1efe37466969..e23417424373 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -581,7 +581,7 @@ struct mlx5_priv { /* end: qp staff */ /* start: alloc staff */ - /* protect buffer alocation according to numa node */ + /* protect buffer allocation according to numa node */ struct mutex alloc_mutex; int numa_node; @@ -623,8 +623,7 @@ struct mlx5_priv 
{ }; enum mlx5_device_state { - MLX5_DEVICE_STATE_UNINITIALIZED, - MLX5_DEVICE_STATE_UP, + MLX5_DEVICE_STATE_UP = 1, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; @@ -730,6 +729,11 @@ struct mlx5_profile { } mr_cache[MAX_MR_CACHE_ENTRIES]; }; +struct mlx5_hca_cap { + u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; + u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; +}; + struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; @@ -741,8 +745,7 @@ struct mlx5_core_dev { char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct { - u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; - u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; @@ -1044,8 +1047,7 @@ void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1111,7 +1113,7 @@ static inline u8 mlx5_mkey_variant(u32 mkey) } /* Async-atomic event notifier used by mlx5 core to forward FW - * evetns recived from event queue to mlx5 consumers. + * evetns received from event queue to mlx5 consumers. * Optimise event queue dipatching. */ int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); @@ -1138,6 +1140,8 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); +bool mlx5_lag_is_master(struct mlx5_core_dev *dev); +bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, struct net_device *slave); @@ -1145,6 +1149,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, size_t *offsets); +struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index bc7db2e059eb..4ab5c1fc1270 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -29,11 +29,20 @@ enum { REP_LOADED, }; +enum mlx5_switchdev_event { + MLX5_SWITCHDEV_EVENT_PAIR, + MLX5_SWITCHDEV_EVENT_UNPAIR, +}; + struct mlx5_eswitch_rep; struct mlx5_eswitch_rep_ops { int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep); void (*unload)(struct mlx5_eswitch_rep *rep); void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); + int (*event)(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + enum mlx5_switchdev_event event, + void *data); }; struct mlx5_eswitch_rep_data { @@ -63,6 +72,7 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, void 
*mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, + struct mlx5_eswitch *from_esw, struct mlx5_eswitch_rep *rep, u32 sqn); #ifdef CONFIG_MLX5_ESWITCH @@ -128,6 +138,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); +struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw); #else /* CONFIG_MLX5_ESWITCH */ @@ -171,6 +182,11 @@ static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) return 0; } +static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) +{ + return NULL; +} + #endif /* CONFIG_MLX5_ESWITCH */ static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 77746f7e35b8..0106c67e8ccb 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -38,6 +38,8 @@ #define MLX5_FS_DEFAULT_FLOW_TAG 0x0 +#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) + enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index b0009aa3647f..f3638d09ba77 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -865,7 +865,8 @@ struct mlx5_ifc_qos_cap_bits { u8 nic_bw_share[0x1]; u8 nic_rate_limit[0x1]; u8 packet_pacing_uid[0x1]; - u8 reserved_at_c[0x14]; + u8 log_esw_max_sched_depth[0x4]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0xb]; u8 log_max_qos_nic_queue_group[0x5]; @@ -921,7 +922,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 scatter_fcs[0x1]; u8 enhanced_multi_pkt_send_wqe[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; - u8 reserved_at_1c[0x2]; + u8 tunnel_lro_gre[0x1]; + u8 tunnel_lro_vxlan[0x1]; u8 tunnel_stateless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; @@ -1651,7 +1653,13 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_geneve_tlv_option_data_len[0x5]; u8 reserved_at_570[0x10]; - u8 reserved_at_580[0x33]; + u8 reserved_at_580[0xb]; + u8 log_max_dci_stream_channels[0x5]; + u8 reserved_at_590[0x3]; + u8 log_max_dci_errored_streams[0x5]; + u8 reserved_at_598[0x8]; + + u8 reserved_at_5a0[0x13]; u8 log_max_dek[0x5]; u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; @@ -3020,10 +3028,12 @@ struct mlx5_ifc_qpc_bits { u8 reserved_at_3c0[0x8]; u8 next_send_psn[0x18]; - u8 reserved_at_3e0[0x8]; + u8 reserved_at_3e0[0x3]; + u8 log_num_dci_stream_channels[0x5]; u8 cqn_snd[0x18]; - u8 reserved_at_400[0x8]; + u8 reserved_at_400[0x3]; + u8 log_num_dci_errored_streams[0x5]; u8 deth_sqpn[0x18]; u8 reserved_at_420[0x20]; @@ -3911,7 +3921,7 @@ struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x2]; u8 dbr_umem_valid[0x1]; - u8 apu_thread_cq[0x1]; + u8 apu_cq[0x1]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_at_c[0x1]; @@ -3937,8 +3947,7 @@ struct mlx5_ifc_cqc_bits { u8 cq_period[0xc]; u8 cq_max_count[0x10]; - u8 reserved_at_a0[0x18]; - u8 c_eqn[0x8]; + u8 c_eqn_or_apu_element[0x20]; u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 98b56b75c625..1a9c9d94cb59 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -11,13 +11,15 @@ enum { }; enum { - 
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps? - MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, }; enum { - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT), + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED), }; struct mlx5_ifc_virtio_q_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 57453dba41b9..7ca22e6e694a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -906,6 +906,7 @@ void __put_page(struct page *page); void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); +void copy_huge_page(struct page *dst, struct page *src); /* * Compound pages have a destructor function. Provide a diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 52bbd2b7cb46..7f8ee09c711f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -103,11 +103,19 @@ struct page { unsigned long pp_magic; struct page_pool *pp; unsigned long _pp_mapping_pad; - /** - * @dma_addr: might require a 64-bit value on - * 32-bit architectures. - */ - unsigned long dma_addr[2]; + unsigned long dma_addr; + union { + /** + * dma_addr_upper: might require a 64-bit + * value on 32-bit architectures. + */ + unsigned long dma_addr_upper; + /** + * For frag page support, not supported in + * 32-bit architectures with 64-bit DMA. + */ + atomic_long_t pp_frag_count; + }; }; struct { /* slab, slob and slub */ union { diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 74e6c0624d27..37f975875102 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -109,6 +109,7 @@ struct mmc_ext_csd { u8 raw_hc_erase_gap_size; /* 221 */ u8 raw_erase_timeout_mult; /* 223 */ u8 raw_hc_erase_grp_size; /* 224 */ + u8 raw_boot_mult; /* 226 */ u8 raw_sec_trim_mult; /* 229 */ u8 raw_sec_erase_mult; /* 230 */ u8 raw_sec_feature_support;/* 231 */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index ab19245e9945..71101d1ec825 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -164,9 +164,8 @@ struct mmc_request { int tag; #ifdef CONFIG_MMC_CRYPTO - bool crypto_enabled; + const struct bio_crypt_ctx *crypto_ctx; int crypto_key_slot; - u32 data_unit_num; #endif }; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0abd47e9ef9b..0c0c9a0fdf57 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -153,7 +153,7 @@ struct mmc_host_ops { int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); - /* Check if the card is pulling dat[0:3] low */ + /* Check if the card is pulling dat[0] low */ int (*card_busy)(struct mmc_host *host); /* The tuning command opcode value is different for SD and eMMC cards */ @@ -398,6 +398,7 @@ struct mmc_host { #else #define MMC_CAP2_CRYPTO 0 #endif +#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */ int fixed_drv_type; /* fixed driver type for non-removable media */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 12036619346c..a85c9f0bd470 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -75,6 +75,7 @@ #define 
SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8 #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index 03dee12d2b61..b9b970f7ab45 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -14,4 +14,18 @@ static inline void leave_mm(int cpu) { } #endif +/* + * CPUs that are capable of running user task @p. Must contain at least one + * active CPU. It is assumed that the kernel can run on all CPUs, so calling + * this for a kernel thread is pointless. + * + * By default, we assume a sane, homogeneous system. + */ +#ifndef task_cpu_possible_mask +# define task_cpu_possible_mask(p) cpu_possible_mask +# define task_cpu_possible(cpu, p) true +#else +# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p)) +#endif + #endif diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index eed280fae433..962cd41a2cb5 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -431,6 +431,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp); extern const struct kernel_param_ops param_ops_uint; extern int param_set_uint(const char *val, const struct kernel_param *kp); extern int param_get_uint(char *buffer, const struct kernel_param *kp); +int param_set_uint_minmax(const char *val, const struct kernel_param *kp, + unsigned int min, unsigned int max); #define param_check_uint(name, p) __param_check(name, p, unsigned int) extern const struct kernel_param_ops param_ops_long; diff --git a/include/linux/msi.h b/include/linux/msi.h index 6aff469e511d..49cf6eb222e7 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -107,7 +107,8 @@ struct ti_sci_inta_msi_desc { * address or data changes * @write_msi_msg_data: Data parameter for the callback. * - * @masked: [PCI MSI/X] Mask bits + * @msi_mask: [PCI MSI] MSI cached mask bits + * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits * @is_msix: [PCI MSI/X] True if MSI-X * @multiple: [PCI MSI/X] log2 num of messages allocated * @multi_cap: [PCI MSI/X] log2 num of messages supported @@ -139,7 +140,10 @@ struct msi_desc { union { /* PCI MSI/X specific data */ struct { - u32 masked; + union { + u32 msi_mask; + u32 msix_ctrl; + }; struct { u8 is_msix : 1; u8 multiple : 3; @@ -232,11 +236,13 @@ void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); void pci_msi_mask_irq(struct irq_data *data); void pci_msi_unmask_irq(struct irq_data *data); +const struct attribute_group **msi_populate_sysfs(struct device *dev); +void msi_destroy_sysfs(struct device *dev, + const struct attribute_group **msi_irq_groups); + /* * The arch hooks to setup up msi irqs. 
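The param_set_uint_minmax() declaration added to moduleparam.h above pairs the stock unsigned-int parsing with range enforcement, so a driver can clamp a module parameter without hand-rolling a setter. A plausible wiring — the parameter name and bounds are invented for illustration:

	static int param_set_retries(const char *val,
				     const struct kernel_param *kp)
	{
		/* Reject anything outside 1..16 at parse time. */
		return param_set_uint_minmax(val, kp, 1, 16);
	}

	static const struct kernel_param_ops retries_ops = {
		.set = param_set_retries,
		.get = param_get_uint,
	};

	static unsigned int retries = 4;
	module_param_cb(retries, &retries_ops, &retries, 0644);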
Default functions are implemented * as weak symbols so that they /can/ be overriden by architecture specific diff --git a/include/linux/mutex.h b/include/linux/mutex.h index e19323521f9c..8f226d460f51 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -20,8 +20,17 @@ #include <linux/osq_lock.h> #include <linux/debug_locks.h> -struct ww_class; -struct ww_acquire_ctx; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* * Simple, straightforward mutexes with strict semantics: @@ -53,7 +62,7 @@ struct ww_acquire_ctx; */ struct mutex { atomic_long_t owner; - spinlock_t wait_lock; + raw_spinlock_t wait_lock; #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif @@ -66,27 +75,6 @@ struct mutex { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - -/* - * This is the control structure for tasks blocked on mutex, - * which resides on the blocked task's kernel stack: - */ -struct mutex_waiter { - struct list_head list; - struct task_struct *task; - struct ww_acquire_ctx *ww_ctx; -#ifdef CONFIG_DEBUG_MUTEXES - void *magic; -#endif -}; - #ifdef CONFIG_DEBUG_MUTEXES #define __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -117,19 +105,9 @@ do { \ __mutex_init((mutex), #mutex, &__key); \ } while (0) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .owner = ATOMIC_LONG_INIT(0) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } @@ -148,6 +126,50 @@ extern void __mutex_init(struct mutex *lock, const char *name, */ extern bool mutex_is_locked(struct mutex *lock); +#else /* !CONFIG_PREEMPT_RT */ +/* + * Preempt-RT variant based on rtmutexes. + */ +#include <linux/rtmutex.h> + +struct mutex { + struct rt_mutex_base rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ +} + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_rt_init(struct mutex *lock, const char *name, + struct lock_class_key *key); +extern int mutex_trylock(struct mutex *lock); + +static inline void mutex_destroy(struct mutex *lock) { } + +#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex) + +#define __mutex_init(mutex, name, key) \ +do { \ + rt_mutex_base_init(&(mutex)->rtmutex); \ + __mutex_rt_init((mutex), name, key); \ +} while (0) + +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) +#endif /* CONFIG_PREEMPT_RT */ + /* * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. 
diff --git a/include/linux/namei.h b/include/linux/namei.h index be9a2b349ca7..e89329bb3134 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -68,6 +68,7 @@ extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int); +struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int); extern int follow_down_one(struct path *); extern int follow_down(struct path *); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eaf5bb008aa9..7c41593c1d6a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -47,6 +47,7 @@ #include <uapi/linux/if_bonding.h> #include <uapi/linux/pkt_cls.h> #include <linux/hashtable.h> +#include <linux/rbtree.h> struct netpoll_info; struct device; @@ -208,6 +209,7 @@ struct sk_buff; struct netdev_hw_addr { struct list_head list; + struct rb_node node; unsigned char addr[MAX_ADDR_LEN]; unsigned char type; #define NETDEV_HW_ADDR_T_LAN 1 @@ -224,6 +226,9 @@ struct netdev_hw_addr { struct netdev_hw_addr_list { struct list_head list; int count; + + /* Auxiliary tree for faster lookup on addition and deletion */ + struct rb_root tree; }; #define netdev_hw_addr_list_count(l) ((l)->count) @@ -295,18 +300,6 @@ enum netdev_state_t { }; -/* - * This structure holds boot-time configured netdevice settings. They - * are then used in the device probing. - */ -struct netdev_boot_setup { - char name[IFNAMSIZ]; - struct ifmap map; -}; -#define NETDEV_BOOT_SETUP_MAX 8 - -int __init netdev_boot_setup(char *str); - struct gro_list { struct list_head list; int count; @@ -734,13 +727,13 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, /* This structure contains an instance of an RX queue. */ struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_RPS struct rps_map __rcu *rps_map; struct rps_dev_flow_table __rcu *rps_flow_table; #endif struct kobject kobj; struct net_device *dev; - struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif @@ -1086,9 +1079,18 @@ struct netdev_net_notifier { * Test if Media Access Control address is valid for the device. * * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - * Called when a user requests an ioctl which can't be handled by - * the generic interface code. If not defined ioctls return - * not supported error code. + * Old-style ioctl entry point. This is used internally by the + * appletalk and ieee802154 subsystems but is no longer called by + * the device ioctl handler. + * + * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Used by the bonding driver for its device specific ioctls: + * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, + * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY + * + * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, + * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. * * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); * Used to set network devices bus interface parameters. This interface @@ -1321,6 +1323,9 @@ struct netdev_net_notifier { * that got dropped are freed/returned via xdp_return_frame(). 
* Returns negative number, means general error invoking ndo, meaning * no frames were xmit'ed and core-caller will free all frames. + * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, + * struct xdp_buff *xdp); + * Get the xmit slave of master device based on the xdp_buff. * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); * This function is used to wake up the softirq, ksoftirqd or kthread * responsible for sending and/or receiving packets on a specific @@ -1361,6 +1366,15 @@ struct net_device_ops { int (*ndo_validate_addr)(struct net_device *dev); int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + int (*ndo_eth_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_siocbond)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_siocwandev)(struct net_device *dev, + struct if_settings *ifs); + int (*ndo_siocdevprivate)(struct net_device *dev, + struct ifreq *ifr, + void __user *data, int cmd); int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); int (*ndo_change_mtu)(struct net_device *dev, @@ -1539,6 +1553,8 @@ struct net_device_ops { int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, u32 flags); + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, + struct xdp_buff *xdp); int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); @@ -1805,6 +1821,7 @@ enum netdev_ml_priv_type { * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network * device struct * @mpls_ptr: mpls_dev struct pointer + * @mctp_ptr: MCTP specific data * * @dev_addr: Hw address (before bcast, * because most packets are unicast) @@ -2092,6 +2109,9 @@ struct net_device { #if IS_ENABLED(CONFIG_MPLS_ROUTING) struct mpls_dev __rcu *mpls_ptr; #endif +#if IS_ENABLED(CONFIG_MCTP) + struct mctp_dev __rcu *mctp_ptr; +#endif /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -2917,7 +2937,6 @@ static inline struct net_device *first_net_device_rcu(struct net *net) } int netdev_boot_setup_check(struct net_device *dev); -unsigned long netdev_boot_base(const char *prefix, int unit); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); @@ -3289,14 +3308,6 @@ static inline bool dev_has_header(const struct net_device *dev) return dev->header_ops && dev->header_ops->create; } -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, - int len, int size); -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); -static inline int unregister_gifconf(unsigned int family) -{ - return register_gifconf(family, NULL); -} - #ifdef CONFIG_NET_FLOW_LIMIT #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { @@ -3915,6 +3926,8 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, return 0; } #endif +int netif_set_real_num_queues(struct net_device *dev, + unsigned int txq, unsigned int rxq); static inline struct netdev_rx_queue * __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) @@ -3948,7 +3961,7 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); /* * It is not allowed to call kfree_skb() or consume_skb() from hardware * interrupt context or with hardware interrupts being disabled. 
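With the ioctl split documented above, a driver that only needs the MII and hardware-timestamping ioctls moves its handler from .ndo_do_ioctl to .ndo_eth_ioctl. A sketch for a phylib-managed NIC (driver names hypothetical; phy_mii_ioctl() is the stock phylib forwarder):

static int example_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev) || !dev->phydev)
		return -EINVAL;

	/* covers SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG per the doc above */
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_open	= example_open,		/* hypothetical */
	.ndo_stop	= example_stop,		/* hypothetical */
	.ndo_eth_ioctl	= example_eth_ioctl,	/* was .ndo_do_ioctl */
};

SIOCSHWTSTAMP/SIOCGHWTSTAMP requests reach the same hook, so drivers with timestamping support would dispatch on cmd before forwarding to phylib.
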
- * (in_irq() || irqs_disabled()) + * (in_hardirq() || irqs_disabled()) * * We provide four helpers that can be used in following contexts : * @@ -3984,6 +3997,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); } +u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog); void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); @@ -4012,10 +4027,16 @@ int netdev_rx_handler_register(struct net_device *dev, void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); +static inline bool is_socket_ioctl_cmd(unsigned int cmd) +{ + return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; +} +int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); +int put_user_ifreq(struct ifreq *ifr, void __user *arg); int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, - bool *need_copyout); -int dev_ifconf(struct net *net, struct ifconf *, int); -int dev_ethtool(struct net *net, struct ifreq *); + void __user *data, bool *need_copyout); +int dev_ifconf(struct net *net, struct ifconf __user *ifc); +int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); @@ -4069,6 +4090,7 @@ typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +u8 dev_xdp_prog_count(struct net_device *dev); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); @@ -4136,11 +4158,13 @@ void netdev_run_todo(void); */ static inline void dev_put(struct net_device *dev) { + if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_dec(*dev->pcpu_refcnt); + this_cpu_dec(*dev->pcpu_refcnt); #else - refcount_dec(&dev->dev_refcnt); + refcount_dec(&dev->dev_refcnt); #endif + } } /** @@ -4151,11 +4175,13 @@ static inline void dev_put(struct net_device *dev) */ static inline void dev_hold(struct net_device *dev) { + if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_inc(*dev->pcpu_refcnt); + this_cpu_inc(*dev->pcpu_refcnt); #else - refcount_inc(&dev->dev_refcnt); + refcount_inc(&dev->dev_refcnt); #endif + } } /* Carrier loss detection, dial on demand. 
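dev_hold() and dev_put() above now tolerate a NULL argument, so error and teardown paths no longer need their own checks. A sketch (hypothetical context struct, not from the patch):

#include <linux/netdevice.h>
#include <linux/slab.h>

struct example_ctx {
	struct net_device *dev;	/* may still be NULL after a failed setup */
};

static void example_ctx_destroy(struct example_ctx *ctx)
{
	dev_put(ctx->dev);	/* no 'if (ctx->dev)' guard needed any more */
	kfree(ctx);
}
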
The functions netif_carrier_on diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 10279c4830ac..ada1296c87d5 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -196,6 +196,9 @@ struct ip_set_region { u32 elements; /* Number of elements vs timeout */ }; +/* Max range where every element is added/deleted in one step */ +#define IPSET_MAX_RANGE (1<<20) + /* The max revision number supported by any set type + 1 */ #define IPSET_REVISION_MAX 9 diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 28d7027cd460..5897f3dbaf7c 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -238,9 +238,6 @@ struct xt_table { u_int8_t af; /* address/protocol family */ int priority; /* hook order */ - /* called when table is needed in the given netns */ - int (*table_init)(struct net *net); - /* A unique name... */ const char name[XT_TABLE_MAXNAMELEN]; }; @@ -452,6 +449,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); +int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net)); +void xt_unregister_template(const struct xt_table *t); + #ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include <net/compat.h> diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index a8178253ce53..10a01978bc0d 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -127,4 +127,6 @@ static inline bool ebt_invalid_target(int target) return (target < -NUM_STANDARD_TARGETS || target >= 0); } +int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net)); +void ebt_unregister_template(const struct ebt_table *t); #endif diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 2fb373a5c1ed..87069b8459af 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -168,8 +168,6 @@ extern int raw_notifier_call_chain(struct raw_notifier_head *nh, extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v); -extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh, - unsigned long val_up, unsigned long val_down, void *v); extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, unsigned long val_up, unsigned long val_down, void *v); extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, diff --git a/include/linux/nubus.h b/include/linux/nubus.h index eba50b057f6f..392fc6c53e96 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h @@ -86,7 +86,7 @@ extern struct list_head nubus_func_rsrcs; struct nubus_driver { struct device_driver driver; int (*probe)(struct nubus_board *board); - int (*remove)(struct nubus_board *board); + void (*remove)(struct nubus_board *board); }; extern struct bus_type nubus_bus_type; diff --git a/include/linux/of.h b/include/linux/of.h index 9c2e71e202d1..0e786b60bd5d 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -946,6 +946,11 @@ static inline int of_machine_is_compatible(const char *compat) return 0; } +static inline int of_add_property(struct device_node *np, struct property *prop) +{ + return 0; +} + static inline int of_remove_property(struct device_node *np, struct property *prop) { return 0; diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h 
index 3d8db1f6a5db..0f4a8903922a 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -70,6 +70,9 @@ enum OID { OID_spnego, /* 1.3.6.1.5.5.2 */ + OID_IAKerb, /* 1.3.6.1.5.2.5 */ + OID_PKU2U, /* 1.3.5.1.5.2.7 */ + OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ @@ -104,6 +107,10 @@ enum OID { OID_authorityKeyIdentifier, /* 2.5.29.35 */ OID_extKeyUsage, /* 2.5.29.37 */ + /* Heimdal mechanisms */ + OID_NetlogonMechanism, /* 1.2.752.43.14.2 */ + OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */ + /* EC-RDSA */ OID_gostCPSignA, /* 1.2.643.2.2.35.1 */ OID_gostCPSignB, /* 1.2.643.2.2.35.2 */ diff --git a/include/linux/once.h b/include/linux/once.h index 9225ee6d96c7..ae6f4eb41cbe 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -7,7 +7,7 @@ bool __do_once_start(bool *done, unsigned long *flags); void __do_once_done(bool *done, struct static_key_true *once_key, - unsigned long *flags); + unsigned long *flags, struct module *mod); /* Call a function exactly once. The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only @@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key, if (unlikely(___ret)) { \ func(__VA_ARGS__); \ __do_once_done(&___done, &___once_key, \ - &___flags); \ + &___flags, THIS_MODULE); \ } \ } \ ___ret; \ diff --git a/include/linux/padata.h b/include/linux/padata.h index a433f13fc4bf..495b16b6b4d7 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -12,6 +12,7 @@ #ifndef PADATA_H #define PADATA_H +#include <linux/refcount.h> #include <linux/compiler_types.h> #include <linux/workqueue.h> #include <linux/spinlock.h> @@ -96,7 +97,7 @@ struct parallel_data { struct padata_shell *ps; struct padata_list __percpu *reorder_list; struct padata_serial_queue __percpu *squeue; - atomic_t refcnt; + refcount_t refcnt; unsigned int seq_nr; unsigned int processed; int cpu; diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 2debc27ba95e..8292420426f3 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -85,7 +85,7 @@ struct pci_epf_ops { */ struct pci_epf_driver { int (*probe)(struct pci_epf *epf); - int (*remove)(struct pci_epf *epf); + void (*remove)(struct pci_epf *epf); struct device_driver driver; struct pci_epf_ops *ops; diff --git a/include/linux/pci.h b/include/linux/pci.h index 540b377ca8f6..947430637cac 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1620,6 +1620,16 @@ static inline bool pci_aer_available(void) { return false; } bool pci_ats_disabled(void); +#ifdef CONFIG_PCIE_PTM +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +bool pcie_ptm_enabled(struct pci_dev *dev); +#else +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } +static inline bool pcie_ptm_enabled(struct pci_dev *dev) +{ return false; } +#endif + void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4bac1831de80..06eccef155ad 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -555,6 +555,7 @@ #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 +#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c #define 
PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 @@ -1121,6 +1122,7 @@ #define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a #define PCI_VENDOR_ID_AL 0x10b9 +#define PCI_DEVICE_ID_AL_M1489 0x1489 #define PCI_DEVICE_ID_AL_M1533 0x1533 #define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 @@ -2643,6 +2645,7 @@ #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 +#define PCI_DEVICE_ID_INTEL_82425 0x0486 #define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 #define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 #define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2d510ad750ed..fe156a8170aa 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -762,6 +762,7 @@ struct perf_event { #ifdef CONFIG_BPF_SYSCALL perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; + u64 bpf_cookie; #endif #ifdef CONFIG_EVENT_TRACING diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index d147480cdefc..e24d2c992b11 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1397,34 +1397,10 @@ static inline int p4d_clear_huge(p4d_t *p4d) } #endif /* !__PAGETABLE_P4D_FOLDED */ -#ifndef __PAGETABLE_PUD_FOLDED int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); -int pud_clear_huge(pud_t *pud); -#else -static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int pud_clear_huge(pud_t *pud) -{ - return 0; -} -#endif /* !__PAGETABLE_PUD_FOLDED */ - -#ifndef __PAGETABLE_PMD_FOLDED int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); +int pud_clear_huge(pud_t *pud); int pmd_clear_huge(pmd_t *pmd); -#else -static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) -{ - return 0; -} -static inline int pmd_clear_huge(pmd_t *pmd) -{ - return 0; -} -#endif /* !__PAGETABLE_PMD_FOLDED */ - int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); int pud_free_pmd_page(pud_t *pud, unsigned long addr); int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); diff --git a/include/linux/phy.h b/include/linux/phy.h index 3b80dc3ed68b..736e1d1a47c4 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1431,6 +1431,7 @@ static inline int phy_device_register(struct phy_device *phy) static inline void phy_device_free(struct phy_device *phydev) { } #endif /* CONFIG_PHYLIB */ void phy_device_remove(struct phy_device *phydev); +int phy_get_c45_ids(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); diff --git a/include/linux/pid.h b/include/linux/pid.h index fa10acb8d6a4..af308e15f174 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -78,6 +78,7 @@ struct file; extern struct pid *pidfd_pid(const struct file *file); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); +int pidfd_create(struct pid *pid, unsigned int flags); static inline struct pid *get_pid(struct pid *pid) { diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 5d2705f1d01c..fc5642431b92 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -48,6 +48,7 @@ struct pipe_buffer { * @files: number of struct file referring this pipe (protected by ->i_lock) * @r_counter: reader counter * @w_counter: writer counter + * @poll_usage: is this pipe used for epoll, which has 
crazy wakeups? * @fasync_readers: reader side fasync * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers @@ -70,6 +71,7 @@ struct pipe_inode_info { unsigned int files; unsigned int r_counter; unsigned int w_counter; + unsigned int poll_usage; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 45f53afc46e2..271bd87bff0a 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4228,6 +4228,7 @@ enum ec_device_event { EC_DEVICE_EVENT_TRACKPAD, EC_DEVICE_EVENT_DSP, EC_DEVICE_EVENT_WIFI, + EC_DEVICE_EVENT_WLC, }; enum ec_device_event_param { @@ -5460,6 +5461,72 @@ struct ec_response_rollback_info { /* Issue AP reset */ #define EC_CMD_AP_RESET 0x0125 +/** + * Get the number of peripheral charge ports + */ +#define EC_CMD_PCHG_COUNT 0x0134 + +#define EC_PCHG_MAX_PORTS 8 + +struct ec_response_pchg_count { + uint8_t port_count; +} __ec_align1; + +/** + * Get the status of a peripheral charge port + */ +#define EC_CMD_PCHG 0x0135 + +struct ec_params_pchg { + uint8_t port; +} __ec_align1; + +struct ec_response_pchg { + uint32_t error; /* enum pchg_error */ + uint8_t state; /* enum pchg_state state */ + uint8_t battery_percentage; + uint8_t unused0; + uint8_t unused1; + /* Fields added in version 1 */ + uint32_t fw_version; + uint32_t dropped_event_count; +} __ec_align2; + +enum pchg_state { + /* Charger is reset and not initialized. */ + PCHG_STATE_RESET = 0, + /* Charger is initialized or disabled. */ + PCHG_STATE_INITIALIZED, + /* Charger is enabled and ready to detect a device. */ + PCHG_STATE_ENABLED, + /* Device is in proximity. */ + PCHG_STATE_DETECTED, + /* Device is being charged. */ + PCHG_STATE_CHARGING, + /* Device is fully charged. It implies DETECTED (& not charging). */ + PCHG_STATE_FULL, + /* In download (a.k.a. firmware update) mode */ + PCHG_STATE_DOWNLOAD, + /* In download mode. Ready for receiving data. */ + PCHG_STATE_DOWNLOADING, + /* Device is ready for data communication. 
*/ + PCHG_STATE_CONNECTED, + /* Put no more entry below */ + PCHG_STATE_COUNT, +}; + +#define EC_PCHG_STATE_TEXT { \ + [PCHG_STATE_RESET] = "RESET", \ + [PCHG_STATE_INITIALIZED] = "INITIALIZED", \ + [PCHG_STATE_ENABLED] = "ENABLED", \ + [PCHG_STATE_DETECTED] = "DETECTED", \ + [PCHG_STATE_CHARGING] = "CHARGING", \ + [PCHG_STATE_FULL] = "FULL", \ + [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \ + [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \ + [PCHG_STATE_CONNECTED] = "CONNECTED", \ + } + /*****************************************************************************/ /* Voltage regulator controls */ diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 5d1fb0d78a22..76b13ef67562 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -96,6 +96,7 @@ enum { MCASP_VERSION_2, /* DA8xx/OMAPL1x */ MCASP_VERSION_3, /* TI81xx/AM33xx */ MCASP_VERSION_4, /* DRA7xxx */ + MCASP_VERSION_OMAP, /* OMAP4/5 */ }; enum mcbsp_clk_input_pin { diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h deleted file mode 100644 index cba1184b364c..000000000000 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2010 Wolfram Sang <kernel@pengutronix.de> - */ - -#ifndef __ASM_ARCH_IMX_ESDHC_H -#define __ASM_ARCH_IMX_ESDHC_H - -#include <linux/types.h> - -enum wp_types { - ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ - ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ - ESDHC_WP_GPIO, /* external gpio pin for WP */ -}; - -enum cd_types { - ESDHC_CD_NONE, /* no CD, neither controller nor gpio */ - ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ - ESDHC_CD_GPIO, /* external gpio pin for CD */ - ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ -}; - -/** - * struct esdhc_platform_data - platform data for esdhc on i.MX - * - * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35. 
- * - * @wp_type: type of write_protect method (see wp_types enum above) - * @cd_type: type of card_detect method (see cd_types enum above) - */ - -struct esdhc_platform_data { - enum wp_types wp_type; - enum cd_types cd_type; - int max_bus_width; - unsigned int delay_line; - unsigned int tuning_step; /* The delay cell steps in tuning procedure */ - unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ - unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */ -}; -#endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h index 65fd5ffd257c..f0db674f07b8 100644 --- a/include/linux/platform_data/spi-mt65xx.h +++ b/include/linux/platform_data/spi-mt65xx.h @@ -12,5 +12,6 @@ /* Board specific platform_data */ struct mtk_chip_config { u32 sample_sel; + u32 tick_delay; }; #endif diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index ed42ea9f60ba..7c96f169d274 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -197,8 +197,6 @@ extern int platform_device_add_resources(struct platform_device *pdev, unsigned int num); extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); -extern int platform_device_add_properties(struct platform_device *pdev, - const struct property_entry *properties); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 21a0577305ef..67017c9390c8 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -198,6 +198,7 @@ struct generic_pm_domain_data { struct notifier_block *power_nb; int cpu; unsigned int performance_state; + unsigned int default_pstate; unsigned int rpm_pstate; ktime_t next_wakeup; void *data; diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 896c16d2c5fb..00fef0064355 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -82,12 +82,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head, return timerqueue_add(head, &ctmr->node); } -static inline void cpu_timer_dequeue(struct cpu_timer *ctmr) +static inline bool cpu_timer_queued(struct cpu_timer *ctmr) { - if (ctmr->head) { + return !!ctmr->head; +} + +static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr) +{ + if (cpu_timer_queued(ctmr)) { timerqueue_del(ctmr->head, &ctmr->node); ctmr->head = NULL; + return true; } + return false; } static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index d55c746ac56e..dd24756a8af7 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -69,7 +69,7 @@ enum max17042_register { MAX17042_RelaxCFG = 0x2A, MAX17042_MiscCFG = 0x2B, MAX17042_TGAIN = 0x2C, - MAx17042_TOFF = 0x2D, + MAX17042_TOFF = 0x2D, MAX17042_CGAIN = 0x2E, MAX17042_COFF = 0x2F, @@ -110,13 +110,14 @@ enum max17042_register { MAX17042_VFSOC = 0xFF, }; +/* Registers specific to max17055 only */ enum max17055_register { MAX17055_QRes = 0x0C, + MAX17055_RCell = 0x14, MAX17055_TTF = 0x20, - MAX17055_V_empty = 0x3A, - MAX17055_TIMER = 0x3E, + MAX17055_DieTemp = 0x34, MAX17055_USER_MEM = 0x40, - MAX17055_RGAIN = 0x42, + MAX17055_RGAIN = 0x43, 
MAX17055_ConvgCfg = 0x49, MAX17055_VFRemCap = 0x4A, @@ -155,13 +156,14 @@ enum max17055_register { MAX17055_AtAvCap = 0xDF, }; -/* Registers specific to max17047/50 */ +/* Registers specific to max17047/50/55 */ enum max17047_register { MAX17047_QRTbl00 = 0x12, MAX17047_FullSOCThr = 0x13, MAX17047_QRTbl10 = 0x22, MAX17047_QRTbl20 = 0x32, MAX17047_V_empty = 0x3A, + MAX17047_TIMER = 0x3E, MAX17047_QRTbl30 = 0x42, }; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index be203985ecdd..9ca1f120a211 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -352,6 +352,7 @@ struct power_supply_resistance_temp_table { */ struct power_supply_battery_info { + unsigned int technology; /* from the enum above */ int energy_full_design_uwh; /* microWatt-hours */ int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 9881eac0698f..4d244e295e85 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -121,7 +121,11 @@ /* * The preempt_count offset after spin_lock() */ +#if !defined(CONFIG_PREEMPT_RT) #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET +#else +#define PREEMPT_LOCK_OFFSET 0 +#endif /* * The preempt_count offset needed for things like: diff --git a/include/linux/property.h b/include/linux/property.h index 073e680c35e2..357513a977e5 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -484,8 +484,6 @@ void software_node_unregister_node_group(const struct software_node **node_group int software_node_register(const struct software_node *node); void software_node_unregister(const struct software_node *node); -int software_node_notify(struct device *dev, unsigned long action); - struct fwnode_handle * fwnode_create_software_node(const struct property_entry *properties, const struct fwnode_handle *parent); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index aba237c0b3a2..2e5565067355 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -11,7 +11,10 @@ #include <linux/device.h> #include <linux/pps_kernel.h> #include <linux/ptp_clock.h> +#include <linux/timecounter.h> +#include <linux/skbuff.h> +#define PTP_CLOCK_NAME_LEN 32 /** * struct ptp_clock_request - request PTP clock event * @@ -134,7 +137,7 @@ struct ptp_system_timestamp { struct ptp_clock_info { struct module *owner; - char name[16]; + char name[PTP_CLOCK_NAME_LEN]; s32 max_adj; int n_alarm; int n_ext_ts; @@ -212,7 +215,7 @@ static inline long scaled_ppm_to_ppb(long ppm) return (long)ppb; } -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) /** * ptp_clock_register() - register a PTP hardware clock driver @@ -323,6 +326,40 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, { return -EOPNOTSUPP; } static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } +#endif + +#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK) +/* + * These are called by the network core, and don't work if PTP is in + * a loadable module. + */ + +/** + * ptp_get_vclocks_index() - get all vclocks index on pclock, and + * caller is responsible to free memory + * of vclock_index + * + * @pclock_index: phc index of ptp pclock. + * @vclock_index: pointer to pointer of vclock index. + * + * return number of vclocks. 
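Per the IS_BUILTIN() guard above, the vclock helpers only do real work when PTP support is built in. A hedged sketch of a caller enumerating the virtual clocks on a physical clock (the in-tree user is the networking timestamping core; this standalone function is illustrative only):

#include <linux/ptp_clock_kernel.h>
#include <linux/slab.h>

static void example_list_vclocks(int pclock_index)
{
	int *vclock_index = NULL;
	int i, num;

	num = ptp_get_vclocks_index(pclock_index, &vclock_index);
	for (i = 0; i < num; i++)
		pr_info("ptp%d -> vclock ptp%d\n",
			pclock_index, vclock_index[i]);

	/* per the kernel-doc, the caller frees vclock_index */
	kfree(vclock_index);
}
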
+ */ +int ptp_get_vclocks_index(int pclock_index, int **vclock_index); + +/** + * ptp_convert_timestamp() - convert timestamp to a ptp vclock time + * + * @hwtstamps: skb_shared_hwtstamps structure pointer + * @vclock_index: phc index of ptp vclock. + */ +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index); +#else +static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ return 0; } +static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ } #endif diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d31ecaf4fdd3..235047d7a1b5 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -17,24 +17,14 @@ #ifndef _LINUX_RBTREE_H #define _LINUX_RBTREE_H +#include <linux/rbtree_types.h> + #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/rcupdate.h> -struct rb_node { - unsigned long __rb_parent_color; - struct rb_node *rb_right; - struct rb_node *rb_left; -} __attribute__((aligned(sizeof(long)))); - /* The alignment might seem pointless, but allegedly CRIS needs it */ - -struct rb_root { - struct rb_node *rb_node; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) -#define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) -/* - * Leftmost-cached rbtrees. - * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. - */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } - /* Same as rb_first(), but O(1) */ #define rb_first_cached(root) (root)->rb_leftmost diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h new file mode 100644 index 000000000000..45b6ecde3665 --- /dev/null +++ b/include/linux/rbtree_types.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_RBTREE_TYPES_H +#define _LINUX_RBTREE_TYPES_H + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); +/* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. 
+ */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT (struct rb_root) { NULL, } +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +#endif diff --git a/include/linux/rculist.h b/include/linux/rculist.h index f8633d37e358..d29740be4833 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -11,15 +11,6 @@ #include <linux/rcupdate.h> /* - * Why is there no list_empty_rcu()? Because list_empty() serves this - * purpose. The list_empty() function fetches the RCU-protected pointer - * and compares it to the address of the list head, but neither dereferences - * this pointer itself nor provides this pointer to the caller. Therefore, - * it is not necessary to use rcu_dereference(), so that list_empty() can - * be used anywhere you would want to use a list_empty_rcu(). - */ - -/* * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers * @list: list to be initialized * @@ -318,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, /* * Where are list_empty_rcu() and list_first_entry_rcu()? * - * Implementing those functions following their counterparts list_empty() and - * list_first_entry() is not advisable because they lead to subtle race - * conditions as the following snippet shows: + * They do not exist because they would lead to subtle race conditions: * * if (!list_empty_rcu(mylist)) { * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); * do_something(bar); * } * - * The list may not be empty when list_empty_rcu checks it, but it may be when - * list_first_entry_rcu rereads the ->next pointer. - * - * Rereading the ->next pointer is not a problem for list_empty() and - * list_first_entry() because they would be protected by a lock that blocks - * writers. + * The list might be non-empty when list_empty_rcu() checks it, but it + * might have become empty by the time that list_first_entry_rcu() rereads + * the ->next pointer, which would result in a SEGV. + * + * When not using RCU, it is OK for list_first_entry() to re-read that + * pointer because both functions should be protected by some lock that + * blocks writers. + * + * When using RCU, list_empty() uses READ_ONCE() to fetch the + * RCU-protected ->next pointer and then compares it to the address of the + * list head. However, it neither dereferences this pointer nor provides + * this pointer to its caller. Thus, READ_ONCE() suffices (that is, + * rcu_dereference() is not needed), which means that list_empty() can be + * used anywhere you would want to use list_empty_rcu(). Just don't + * expect anything useful to happen if you do a subsequent lockless + * call to list_first_entry_rcu()!!! * * See list_first_or_null_rcu for an alternative. */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d9680b798b21..434d12fe2d4f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -53,7 +53,7 @@ void __rcu_read_unlock(void); * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. 
*/ -#define rcu_preempt_depth() (current->rcu_read_lock_nesting) +#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void); # define synchronize_rcu_tasks synchronize_rcu # endif -# ifdef CONFIG_TASKS_RCU_TRACE +# ifdef CONFIG_TASKS_TRACE_RCU # define rcu_tasks_trace_qs(t) \ do { \ if (!likely(READ_ONCE((t)->trc_reader_checked)) && \ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 953e70fafe38..9be015305f9f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -14,9 +14,6 @@ #include <asm/param.h> /* for HZ */ -/* Never flag non-existent other CPUs! */ -static inline bool rcu_eqs_special_set(int cpu) { return false; } - unsigned long get_state_synchronize_rcu(void); unsigned long start_poll_synchronize_rcu(void); bool poll_state_synchronize_rcu(unsigned long oldstate); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index f5f08dd0a116..e3c9a25a853a 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -344,6 +344,7 @@ typedef void (*regmap_unlock)(void *); * @ranges: Array of configuration entries for virtual address ranges. * @num_ranges: Number of range configuration entries. * @use_hwlock: Indicate if a hardware spinlock should be used. + * @use_raw_spinlock: Indicate if a raw spinlock should be used. * @hwlock_id: Specify the hardware spinlock id. * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, * HWLOCK_IRQ or 0. @@ -403,6 +404,7 @@ struct regmap_config { unsigned int num_ranges; bool use_hwlock; + bool use_raw_spinlock; unsigned int hwlock_id; unsigned int hwlock_mode; @@ -1269,12 +1271,13 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field); int regmap_field_bulk_alloc(struct regmap *regmap, struct regmap_field **rm_field, - struct reg_field *reg_field, + const struct reg_field *reg_field, int num_fields); void regmap_field_bulk_free(struct regmap_field *field); int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap, struct regmap_field **field, - struct reg_field *reg_field, int num_fields); + const struct reg_field *reg_field, + int num_fields); void devm_regmap_field_bulk_free(struct device *dev, struct regmap_field *field); diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index f72ca73631be..bbf6590a6dec 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -222,17 +222,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev, int devm_regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id); -void devm_regulator_unregister_supply_alias(struct device *dev, - const char *id); int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, const char *const *alias_id, int num_id); -void devm_regulator_bulk_unregister_supply_alias(struct device *dev, - const char *const *id, - int num_id); /* regulator output control and status */ int __must_check regulator_enable(struct regulator *regulator); @@ -408,11 +403,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev, return 0; } -static inline void devm_regulator_unregister_supply_alias(struct device *dev, - const char *id) -{ -} - static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, @@ -422,11 +412,6 @@ static 
inline int devm_regulator_bulk_register_supply_alias(struct device *dev, return 0; } -static inline void devm_regulator_bulk_unregister_supply_alias( - struct device *dev, const char *const *id, int num_id) -{ -} - static inline int regulator_enable(struct regulator *regulator) { return 0; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 4aec20387857..bd7a73db2e66 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -337,6 +337,12 @@ enum regulator_type { * @pull_down_val_on: Enabling value for control when using regmap * set_pull_down * + * @ramp_reg: Register for controlling the regulator ramp-rate. + * @ramp_mask: Bitmask for the ramp-rate control register. + * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values + * should be given in units of V/S (uV/uS). See the + * regulator_set_ramp_delay_regmap(). + * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator * @@ -462,7 +468,7 @@ struct regulator_err_state { }; /** - * struct regulator_irq_data - regulator error/notification status date + * struct regulator_irq_data - regulator error/notification status data * * @states: Status structs for each of the associated regulators. * @num_states: Amount of associated regulators. @@ -521,8 +527,8 @@ struct regulator_irq_data { * active events as core does not clean the map data. * REGULATOR_FAILED_RETRY can be returned to indicate that the * status reading from IC failed. If this is repeated for - * fatal_cnt times the core will call die() callback or BUG() - * as a last resort to protect the HW. + * fatal_cnt times the core will call die() callback or power-off + * the system as a last resort to protect the HW. * @renable: Optional callback to check status (if HW supports that) before * re-enabling IRQ. If implemented this should clear the error * flags so that errors fetched by regulator_get_error_flags() @@ -531,7 +537,8 @@ struct regulator_irq_data { * REGULATOR_FAILED_RETRY can be returned to * indicate that the status reading from IC failed. If this is * repeated for 'fatal_cnt' times the core will call die() - * callback or BUG() as a last resort to protect the HW. + * callback or if die() is not populated then attempt to power-off + * the system as a last resort to protect the HW. * Returning zero indicates that the problem in HW has been solved * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON * indicates the error condition is still active and keeps IRQ @@ -645,7 +652,6 @@ devm_regulator_register(struct device *dev, const struct regulator_desc *regulator_desc, const struct regulator_config *config); void regulator_unregister(struct regulator_dev *rdev); -void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev); int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 68b4a514a410..621b7f4a3639 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -112,7 +112,7 @@ struct notification_limit { * @over_voltage_limits: Limits for acting on over voltage. * @under_voltage_limits: Limits for acting on under voltage. * @temp_limits: Limits for acting on over temperature. 
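The @ramp_reg/@ramp_mask/@ramp_delay_table trio documented in the regulator/driver.h hunk above lets a regmap-backed driver describe its ramp-rate control declaratively and reuse regulator_set_ramp_delay_regmap(). A sketch (register offset, mask and table values are hypothetical):

static const unsigned int example_ramp_table[] = {
	25000, 12500, 6250, 3125,	/* uV/uS; hypothetical values */
};

static const struct regulator_ops example_ops = {
	.set_ramp_delay	= regulator_set_ramp_delay_regmap,
	/* ...enable/disable/voltage ops elided... */
};

static const struct regulator_desc example_desc = {
	.name			= "example-buck",
	.ops			= &example_ops,
	.ramp_reg		= 0x10,			/* hypothetical */
	.ramp_mask		= GENMASK(1, 0),
	.ramp_delay_table	= example_ramp_table,
	.n_ramp_values		= ARRAY_SIZE(example_ramp_table),
};
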
- + * * @max_spread: Max possible spread between coupled regulators * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 9b05af9b3e28..21deb5212bbd 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -2,6 +2,8 @@ #ifndef _RESCTRL_H #define _RESCTRL_H +#include <linux/kernel.h> +#include <linux/list.h> #include <linux/pid.h> #ifdef CONFIG_PROC_CPU_RESCTRL @@ -13,4 +15,186 @@ int proc_resctrl_show(struct seq_file *m, #endif +/** + * enum resctrl_conf_type - The type of configuration. + * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. + * @CDP_CODE: Configuration applies to instruction fetches. + * @CDP_DATA: Configuration applies to reads and writes. + */ +enum resctrl_conf_type { + CDP_NONE, + CDP_CODE, + CDP_DATA, +}; + +#define CDP_NUM_TYPES (CDP_DATA + 1) + +/** + * struct resctrl_staged_config - parsed configuration to be applied + * @new_ctrl: new ctrl value to be loaded + * @have_new_ctrl: whether the user provided new_ctrl is valid + */ +struct resctrl_staged_config { + u32 new_ctrl; + bool have_new_ctrl; +}; + +/** + * struct rdt_domain - group of CPUs sharing a resctrl resource + * @list: all instances of this resource + * @id: unique id for this instance + * @cpu_mask: which CPUs share this resource + * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold + * @mbm_total: saved state for MBM total bandwidth + * @mbm_local: saved state for MBM local bandwidth + * @mbm_over: worker to periodically read MBM h/w counters + * @cqm_limbo: worker to periodically read CQM h/w counters + * @mbm_work_cpu: worker CPU for MBM h/w counters + * @cqm_work_cpu: worker CPU for CQM h/w counters + * @plr: pseudo-locked region (if any) associated with domain + * @staged_config: parsed configuration to be applied + */ +struct rdt_domain { + struct list_head list; + int id; + struct cpumask cpu_mask; + unsigned long *rmid_busy_llc; + struct mbm_state *mbm_total; + struct mbm_state *mbm_local; + struct delayed_work mbm_over; + struct delayed_work cqm_limbo; + int mbm_work_cpu; + int cqm_work_cpu; + struct pseudo_lock_region *plr; + struct resctrl_staged_config staged_config[CDP_NUM_TYPES]; +}; + +/** + * struct resctrl_cache - Cache allocation related data + * @cbm_len: Length of the cache bit mask + * @min_cbm_bits: Minimum number of consecutive bits to be set + * @shareable_bits: Bitmask of shareable resource with other + * executing entities + * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_empty_bitmaps: True if the '0' bitmap is valid. + * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache + * level has CPU scope. 
+ */ +struct resctrl_cache { + unsigned int cbm_len; + unsigned int min_cbm_bits; + unsigned int shareable_bits; + bool arch_has_sparse_bitmaps; + bool arch_has_empty_bitmaps; + bool arch_has_per_cpu_cfg; +}; + +/** + * enum membw_throttle_mode - System's memory bandwidth throttling mode + * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system + * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core + * always using smallest bandwidth percentage + * assigned to threads, aka "max throttling" + * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread + */ +enum membw_throttle_mode { + THREAD_THROTTLE_UNDEFINED = 0, + THREAD_THROTTLE_MAX, + THREAD_THROTTLE_PER_THREAD, +}; + +/** + * struct resctrl_membw - Memory bandwidth allocation related data + * @min_bw: Minimum memory bandwidth percentage user can request + * @bw_gran: Granularity at which the memory bandwidth is allocated + * @delay_linear: True if memory B/W delay is in linear scale + * @arch_needs_linear: True if we can't configure non-linear resources + * @throttle_mode: Bandwidth throttling mode when threads request + * different memory bandwidths + * @mba_sc: True if MBA software controller(mba_sc) is enabled + * @mb_map: Mapping of memory B/W percentage to memory B/W delay + */ +struct resctrl_membw { + u32 min_bw; + u32 bw_gran; + u32 delay_linear; + bool arch_needs_linear; + enum membw_throttle_mode throttle_mode; + bool mba_sc; + u32 *mb_map; +}; + +struct rdt_parse_data; +struct resctrl_schema; + +/** + * struct rdt_resource - attributes of a resctrl resource + * @rid: The index of the resource + * @alloc_enabled: Is allocation enabled on this machine + * @mon_enabled: Is monitoring enabled for this feature + * @alloc_capable: Is allocation available on this machine + * @mon_capable: Is monitor feature available on this machine + * @num_rmid: Number of RMIDs available + * @cache_level: Which cache level defines scope of this resource + * @cache: Cache allocation related data + * @membw: If the component has bandwidth controls, their properties. + * @domains: All domains for this resource + * @name: Name to use in "schemata" file. + * @data_width: Character width of data when displaying + * @default_ctrl: Specifies default cache cbm or memory B/W percent. + * @format_str: Per resource format string to show domain value + * @parse_ctrlval: Per resource function pointer to parse control values + * @evt_list: List of monitoring events + * @fflags: flags to choose base and info files + * @cdp_capable: Is the CDP feature available on this resource + */ +struct rdt_resource { + int rid; + bool alloc_enabled; + bool mon_enabled; + bool alloc_capable; + bool mon_capable; + int num_rmid; + int cache_level; + struct resctrl_cache cache; + struct resctrl_membw membw; + struct list_head domains; + char *name; + int data_width; + u32 default_ctrl; + const char *format_str; + int (*parse_ctrlval)(struct rdt_parse_data *data, + struct resctrl_schema *s, + struct rdt_domain *d); + struct list_head evt_list; + unsigned long fflags; + bool cdp_capable; +}; + +/** + * struct resctrl_schema - configuration abilities of a resource presented to + * user-space + * @list: Member of resctrl_schema_all. + * @name: The name to use in the "schemata" file. + * @conf_type: Whether this schema is specific to code/data. + * @res: The resource structure exported by the architecture to describe + * the hardware that is configured by this schema. + * @num_closid: The number of closid that can be used with this schema. 
When + * features like CDP are enabled, this will be lower than the + * hardware supports for the resource. + */ +struct resctrl_schema { + struct list_head list; + char name[8]; + enum resctrl_conf_type conf_type; + struct rdt_resource *res; + u32 num_closid; +}; + +/* The number of closid supported by this resource regardless of CDP */ +u32 resctrl_arch_get_num_closid(struct rdt_resource *r); +int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type type); + #endif /* _RESCTRL_H */ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 83fb86133fe1..c976cc6de257 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -291,7 +291,9 @@ static inline int page_referenced(struct page *page, int is_locked, return 0; } -#define try_to_unmap(page, refs) false +static inline void try_to_unmap(struct page *page, enum ttu_flags flags) +{ +} static inline int page_mkclean(struct page *page) { diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index d1672de9ca89..9deedfeec2b1 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -13,12 +13,39 @@ #ifndef __LINUX_RT_MUTEX_H #define __LINUX_RT_MUTEX_H +#include <linux/compiler.h> #include <linux/linkage.h> -#include <linux/rbtree.h> -#include <linux/spinlock_types.h> +#include <linux/rbtree_types.h> +#include <linux/spinlock_types_raw.h> extern int max_lock_depth; /* for sysctl */ +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \ +{ \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \ + .waiters = RB_ROOT_CACHED, \ + .owner = NULL \ +} + +/** + * rt_mutex_base_is_locked - is the rtmutex locked + * @lock: the mutex to be queried + * + * Returns true if the mutex is locked, false if unlocked. + */ +static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock) +{ + return READ_ONCE(lock->owner) != NULL; +} + +extern void rt_mutex_base_init(struct rt_mutex_base *rtb); + /** * The rt_mutex structure * @@ -28,9 +55,7 @@ extern int max_lock_depth; /* for sysctl */ * @owner: the mutex owner */ struct rt_mutex { - raw_spinlock_t wait_lock; - struct rb_root_cached waiters; - struct task_struct *owner; + struct rt_mutex_base rtmutex; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -52,32 +77,24 @@ do { \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ - , .dep_map = { .name = #mutexname } +#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ + .dep_map = { \ + .name = #mutexname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } #else #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT_CACHED \ - , .owner = NULL \ - __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} +#define __RT_MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \ + __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ +} #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -/** - * rt_mutex_is_locked - is the mutex locked - * @lock: the mutex to be queried - * - * Returns 1 if the mutex is locked, 0 if unlocked. 
- */ -static inline int rt_mutex_is_locked(struct rt_mutex *lock) -{ - return lock->owner != NULL; -} - extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h new file mode 100644 index 000000000000..1d264dd08625 --- /dev/null +++ b/include/linux/rwbase_rt.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef _LINUX_RWBASE_RT_H +#define _LINUX_RWBASE_RT_H + +#include <linux/rtmutex.h> +#include <linux/atomic.h> + +#define READER_BIAS (1U << 31) +#define WRITER_BIAS (1U << 30) + +struct rwbase_rt { + atomic_t readers; + struct rt_mutex_base rtmutex; +}; + +#define __RWBASE_INITIALIZER(name) \ +{ \ + .readers = ATOMIC_INIT(READER_BIAS), \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \ +} + +#define init_rwbase_rt(rwbase) \ + do { \ + rt_mutex_base_init(&(rwbase)->rtmutex); \ + atomic_set(&(rwbase)->readers, READER_BIAS); \ + } while (0) + + +static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) != READER_BIAS; +} + +static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) > 0; +} + +#endif /* _LINUX_RWBASE_RT_H */ diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 index 000000000000..49c1f3842ed5 --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_RT_H +#error Do not #include directly. Use <linux/spinlock.h>. +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name, + struct lock_class_key *key); +#else +static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name, + struct lock_class_key *key) +{ +} +#endif + +#define rwlock_init(rwl) \ +do { \ + static struct lock_class_key __key; \ + \ + init_rwbase_rt(&(rwl)->rwbase); \ + __rt_rwlock_init(rwl, #rwl, &__key); \ +} while (0) + +extern void rt_read_lock(rwlock_t *rwlock); +extern int rt_read_trylock(rwlock_t *rwlock); +extern void rt_read_unlock(rwlock_t *rwlock); +extern void rt_write_lock(rwlock_t *rwlock); +extern int rt_write_trylock(rwlock_t *rwlock); +extern void rt_write_unlock(rwlock_t *rwlock); + +static __always_inline void read_lock(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_irq(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_read_lock(lock); \ + flags = 0; \ + } while (0) + +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) + +static __always_inline void read_unlock(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_bh(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void read_unlock_irq(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void write_lock(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +static __always_inline void write_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_write_lock(rwlock); +} + +static 
__always_inline void write_lock_irq(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_write_lock(lock); \ + flags = 0; \ + } while (0) + +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = write_trylock(lock); \ + __locked; \ +}) + +static __always_inline void write_unlock(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_bh(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void write_unlock_irq(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_write_unlock(rwlock); +} + +#define rwlock_is_contended(lock) (((void)(lock), 0)) + +#endif /* __LINUX_RWLOCK_RT_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 3bd03e18061c..1948442e7750 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,9 +1,23 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) +# error "Do not include directly, include spinlock_types.h" +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* - * include/linux/rwlock_types.h - generic rwlock type definitions - * and initializers + * generic rwlock type definitions and initializers * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). 
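As the *_irqsave() macros above make explicit, the RT rwlock substitution keeps the locking API source-compatible but neither disables interrupts nor produces meaningful flags (they are simply set to 0); readers and writers block on the underlying rwbase_rt instead. Caller code is unchanged, e.g. (hypothetical sketch):

static DEFINE_RWLOCK(example_rwlock);
static int example_value;

static int example_read(void)
{
	unsigned long flags;
	int v;

	read_lock_irqsave(&example_rwlock, flags);	/* RT: no irq-off, flags = 0 */
	v = example_value;
	read_unlock_irqrestore(&example_rwlock, flags);
	return v;
}

static void example_write(int v)
{
	write_lock(&example_rwlock);
	example_value = v;
	write_unlock(&example_rwlock);
}
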
@@ -21,16 +35,6 @@ typedef struct { #define RWLOCK_MAGIC 0xdeaf1eed -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RW_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ @@ -46,4 +50,29 @@ typedef struct { #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +typedef struct { + struct rwbase_rt rwbase; + atomic_t readers; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define __RWLOCK_RT_INITIALIZER(name) \ +{ \ + .rwbase = __RWBASE_INITIALIZER(name), \ + RW_DEP_MAP_INIT(name) \ +} + +#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) + +#define DEFINE_RWLOCK(name) \ + rwlock_t name = __RW_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index a66038d88878..426e98e0b675 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -16,6 +16,19 @@ #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/err.h> + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + }, +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT + #ifdef CONFIG_RWSEM_SPIN_ON_OWNER #include <linux/osq_lock.h> #endif @@ -64,16 +77,6 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) /* Common initializer macros and functions */ -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - }, -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_RWSEMS # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname, #else @@ -119,6 +122,61 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +struct rw_semaphore { + struct rwbase_rt rwbase; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __RWSEM_INITIALIZER(name) \ + { \ + .rwbase = __RWBASE_INITIALIZER(name), \ + __RWSEM_DEP_MAP_INIT(name) \ + } + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); +#else +static inline void __rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key) +{ +} +#endif + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + init_rwbase_rt(&(sem)->rwbase); \ + __rwsem_init((sem), #sem, &__key); \ +} while (0) + +static __always_inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return rw_base_is_locked(&sem->rwbase); +} + +static __always_inline int rwsem_is_contended(struct rw_semaphore *sem) +{ + return rw_base_is_contended(&sem->rwbase); +} + +#endif /* CONFIG_PREEMPT_RT */ + +/* + * The functions below are the same for all rwsem implementations including + * the RT specific variant. 
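Likewise for rw_semaphore: the RT variant above keeps the same initializers and queries, so consumers are untouched whichever arm of the #ifdef is compiled. A minimal sketch (down_write()/up_write() are the standard rwsem entry points, not shown in this hunk):

static DECLARE_RWSEM(example_sem);

static void example_update(void)
{
	down_write(&example_sem);	/* rwbase_rt writer under PREEMPT_RT */
	/* ...modify shared data... */
	up_write(&example_sem);
}

static bool example_busy(void)
{
	return rwsem_is_locked(&example_sem);
}
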
+ */ + /* * lock for reading */ diff --git a/include/linux/sched.h b/include/linux/sched.h index ec8d07d88641..e12b524426b0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -42,6 +42,7 @@ struct backing_dev_info; struct bio_list; struct blk_plug; struct bpf_local_storage; +struct bpf_run_ctx; struct capture_control; struct cfs_rq; struct fs_struct; @@ -95,7 +96,9 @@ struct task_group; #define TASK_WAKING 0x0200 #define TASK_NOLOAD 0x0400 #define TASK_NEW 0x0800 -#define TASK_STATE_MAX 0x1000 +/* RT specific auxiliary flag to mark RT lock waiters */ +#define TASK_RTLOCK_WAIT 0x1000 +#define TASK_STATE_MAX 0x2000 /* Convenience macros for the sake of set_current_state: */ #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) @@ -121,8 +124,6 @@ struct task_group; #define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP - /* * Special states are those that do not use the normal wait-loop pattern. See * the comment with set_special_state(). @@ -130,30 +131,37 @@ struct task_group; #define is_special_task_state(state) \ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) -#define __set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - WRITE_ONCE(current->__state, (state_value)); \ - } while (0) - -#define set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - smp_store_mb(current->__state, (state_value)); \ +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +# define debug_normal_state_change(state_value) \ + do { \ + WARN_ON_ONCE(is_special_task_state(state_value)); \ + current->task_state_change = _THIS_IP_; \ } while (0) -#define set_special_state(state_value) \ +# define debug_special_state_change(state_value) \ do { \ - unsigned long flags; /* may shadow */ \ WARN_ON_ONCE(!is_special_task_state(state_value)); \ - raw_spin_lock_irqsave(&current->pi_lock, flags); \ current->task_state_change = _THIS_IP_; \ - WRITE_ONCE(current->__state, (state_value)); \ - raw_spin_unlock_irqrestore(&current->pi_lock, flags); \ } while (0) + +# define debug_rtlock_wait_set_state() \ + do { \ + current->saved_state_change = current->task_state_change;\ + current->task_state_change = _THIS_IP_; \ + } while (0) + +# define debug_rtlock_wait_restore_state() \ + do { \ + current->task_state_change = current->saved_state_change;\ + } while (0) + #else +# define debug_normal_state_change(cond) do { } while (0) +# define debug_special_state_change(cond) do { } while (0) +# define debug_rtlock_wait_set_state() do { } while (0) +# define debug_rtlock_wait_restore_state() do { } while (0) +#endif + /* * set_current_state() includes a barrier so that the write of current->state * is correctly serialised wrt the caller's subsequent test of whether to @@ -192,26 +200,77 @@ struct task_group; * Also see the comments of try_to_wake_up().
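For reference, the normal wait-loop pattern that these debug hooks instrument via set_current_state() looks like the following sketch (the condition flag is illustrative):

#include <linux/sched.h>

static bool example_condition;	/* set by the waker before wake-up */

static void example_wait(void)
{
	/*
	 * Canonical wait loop; debug_normal_state_change() fires on each
	 * set_current_state() when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.
	 */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(example_condition))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}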
*/ #define __set_current_state(state_value) \ - WRITE_ONCE(current->__state, (state_value)) + do { \ + debug_normal_state_change((state_value)); \ + WRITE_ONCE(current->__state, (state_value)); \ + } while (0) #define set_current_state(state_value) \ - smp_store_mb(current->__state, (state_value)) + do { \ + debug_normal_state_change((state_value)); \ + smp_store_mb(current->__state, (state_value)); \ + } while (0) /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must - * serialize against wakeups such that any possible in-flight TASK_RUNNING stores - * will not collide with our state change. + * serialize against wakeups such that any possible in-flight TASK_RUNNING + * stores will not collide with our state change. */ #define set_special_state(state_value) \ do { \ unsigned long flags; /* may shadow */ \ + \ raw_spin_lock_irqsave(&current->pi_lock, flags); \ + debug_special_state_change((state_value)); \ WRITE_ONCE(current->__state, (state_value)); \ raw_spin_unlock_irqrestore(&current->pi_lock, flags); \ } while (0) -#endif +/* + * PREEMPT_RT specific variants for "sleeping" spin/rwlocks + * + * RT's spin/rwlock substitutions are state preserving. The state of the + * task when blocking on the lock is saved in task_struct::saved_state and + * restored after the lock has been acquired. These operations are + * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT + * lock related wakeups while the task is blocked on the lock are + * redirected to operate on task_struct::saved_state to ensure that these + * are not dropped. On restore task_struct::saved_state is set to + * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail. + * + * The lock operation looks like this: + * + * current_save_and_set_rtlock_wait_state(); + * for (;;) { + * if (try_lock()) + * break; + * raw_spin_unlock_irq(&lock->wait_lock); + * schedule_rtlock(); + * raw_spin_lock_irq(&lock->wait_lock); + * set_current_state(TASK_RTLOCK_WAIT); + * } + * current_restore_rtlock_saved_state(); + */ +#define current_save_and_set_rtlock_wait_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(&current->pi_lock); \ + current->saved_state = current->__state; \ + debug_rtlock_wait_set_state(); \ + WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \ + raw_spin_unlock(&current->pi_lock); \ + } while (0) + +#define current_restore_rtlock_saved_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(&current->pi_lock); \ + debug_rtlock_wait_restore_state(); \ + WRITE_ONCE(current->__state, current->saved_state); \ + current->saved_state = TASK_RUNNING; \ + raw_spin_unlock(&current->pi_lock); \ + } while (0) #define get_current_state() READ_ONCE(current->__state) @@ -230,6 +289,9 @@ extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); asmlinkage void preempt_schedule_irq(void); +#ifdef CONFIG_PREEMPT_RT +extern void schedule_rtlock(void); +#endif extern int __must_check io_schedule_prepare(void); extern void io_schedule_finish(int token); @@ -668,6 +730,11 @@ struct task_struct { #endif unsigned int __state; +#ifdef CONFIG_PREEMPT_RT + /* saved state for "spinlock sleepers" */ + unsigned int saved_state; +#endif + /* * This begins the randomizable portion of task_struct. Only * scheduling-critical items should be added above here.
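Putting the pieces together, a compressed sketch of how an RT lock slowpath is expected to use this pair of macros, following the template in the comment above (the lock structure and example_try_lock() are placeholders, not an API introduced by this patch):

#include <linux/sched.h>
#include <linux/spinlock.h>

struct example_rtlock {
	raw_spinlock_t wait_lock;
	/* owner bookkeeping elided */
};

static bool example_try_lock(struct example_rtlock *lock);	/* placeholder */

/* Entered with lock->wait_lock held and interrupts disabled. */
static void example_rtlock_slowpath(struct example_rtlock *lock)
{
	current_save_and_set_rtlock_wait_state();
	for (;;) {
		if (example_try_lock(lock))
			break;
		raw_spin_unlock_irq(&lock->wait_lock);
		schedule_rtlock();
		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}
	current_restore_rtlock_saved_state();
}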
@@ -748,6 +815,7 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; cpumask_t cpus_mask; void *migration_pending; #ifdef CONFIG_SMP @@ -863,6 +931,10 @@ struct task_struct { /* Used by page_owner=on to detect recursion in page tracking. */ unsigned in_page_owner:1; #endif +#ifdef CONFIG_EVENTFD + /* Recursion prevention for eventfd_signal() */ + unsigned in_eventfd_signal:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ @@ -1357,6 +1429,9 @@ struct task_struct { struct kmap_ctrl kmap_ctrl; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; +# ifdef CONFIG_PREEMPT_RT + unsigned long saved_state_change; +# endif #endif int pagefault_disabled; #ifdef CONFIG_MMU @@ -1379,6 +1454,8 @@ struct task_struct { #ifdef CONFIG_BPF_SYSCALL /* Used by BPF task local storage */ struct bpf_local_storage __rcu *bpf_storage; + /* Used for BPF run context */ + struct bpf_run_ctx *bpf_ctx; #endif #ifdef CONFIG_GCC_PLUGIN_STACKLEAK @@ -1400,6 +1477,16 @@ struct task_struct { struct llist_head kretprobe_instances; #endif +#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH + /* + * If L1D flush is supported on mm context switch + * then we use this callback head to queue kill work + * to kill tasks that are not running on SMT disabled + * cores + */ + struct callback_head l1d_flush_kill; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. @@ -1705,6 +1792,11 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_ #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); +extern void release_user_cpus_ptr(struct task_struct *p); +extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); +extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); +extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -1715,6 +1807,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma return -EINVAL; return 0; } +static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) +{ + if (src->user_cpus_ptr) + return -EINVAL; + return 0; +} +static inline void release_user_cpus_ptr(struct task_struct *p) +{ + WARN_ON(p->user_cpus_ptr); +} + +static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) +{ + return 0; +} #endif extern int yield_to(struct task_struct *p, bool preempt); diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 928e0025d358..548986c7d233 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -709,6 +709,12 @@ static inline void unlock_task_sighand(struct task_struct *task, spin_unlock_irqrestore(&task->sighand->siglock, *flags); } +#ifdef CONFIG_LOCKDEP +extern void lockdep_assert_task_sighand_held(struct task_struct *task); +#else +static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { } +#endif + static inline unsigned long task_rlimit(const struct task_struct *task, unsigned int limit) { diff --git a/include/linux/sched/sysctl.h 
b/include/linux/sched/sysctl.h index db2c0f34aaaf..304f431178fd 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -28,30 +28,12 @@ enum { sysctl_hung_task_timeout_secs = 0 }; extern unsigned int sysctl_sched_child_runs_first; -extern unsigned int sysctl_sched_latency; -extern unsigned int sysctl_sched_min_granularity; -extern unsigned int sysctl_sched_wakeup_granularity; - enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE, SCHED_TUNABLESCALING_LOG, SCHED_TUNABLESCALING_LINEAR, SCHED_TUNABLESCALING_END, }; -extern unsigned int sysctl_sched_tunable_scaling; - -extern unsigned int sysctl_numa_balancing_scan_delay; -extern unsigned int sysctl_numa_balancing_scan_period_min; -extern unsigned int sysctl_numa_balancing_scan_period_max; -extern unsigned int sysctl_numa_balancing_scan_size; - -#ifdef CONFIG_SCHED_DEBUG -extern __read_mostly unsigned int sysctl_sched_migration_cost; -extern __read_mostly unsigned int sysctl_sched_nr_migrate; - -extern int sysctl_resched_latency_warn_ms; -extern int sysctl_resched_latency_warn_once; -#endif /* * control realtime throttling: diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 26a2013ac39c..06cd8fb2f409 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -42,8 +42,11 @@ struct wake_q_head { #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } +#define WAKE_Q_HEAD_INITIALIZER(name) \ + { WAKE_Q_TAIL, &name.first } + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name) static inline void wake_q_init(struct wake_q_head *head) { diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 79d0a1237e6c..80e781c51ddc 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -101,6 +101,10 @@ struct scmi_clk_proto_ops { * to sustained performance level mapping * @est_power_get: gets the estimated power cost for a given performance domain * at a given frequency + * @fast_switch_possible: indicates if fast DVFS switching is possible or not + * for a given device + * @power_scale_mw_get: indicates if the power values provided are in milliWatts + * or in some other (abstract) scale */ struct scmi_perf_proto_ops { int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain, @@ -153,7 +157,7 @@ struct scmi_power_proto_ops { }; /** - * scmi_sensor_reading - represent a timestamped read + * struct scmi_sensor_reading - represents a timestamped read * * Used by @reading_get_timestamped method. * @@ -167,7 +171,7 @@ struct scmi_sensor_reading { }; /** - * scmi_range_attrs - specifies a sensor or axis values' range + * struct scmi_range_attrs - specifies a sensor or axis values' range * @min_range: The minimum value which can be represented by the sensor/axis. * @max_range: The maximum value which can be represented by the sensor/axis. */ @@ -177,7 +181,7 @@ struct scmi_range_attrs { }; /** - * scmi_sensor_axis_info - describes one sensor axes + * struct scmi_sensor_axis_info - describes one sensor axis * @id: The axes ID. * @type: Axes type. Chosen amongst one of @enum scmi_sensor_class. * @scale: Power-of-10 multiplier applied to the axis unit.
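Splitting out WAKE_Q_HEAD_INITIALIZER() is presumably aimed at callers that embed a wake_q_head inside a larger on-stack structure, where DEFINE_WAKE_Q() alone cannot reach; a hypothetical wrapper in that spirit:

#include <linux/sched/wake_q.h>

struct example_wake_q {
	struct wake_q_head head;
	unsigned int flags;	/* invented extra state */
};

#define DEFINE_EXAMPLE_WAKE_Q(name)				\
	struct example_wake_q name = {				\
		.head = WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.flags = 0,					\
	}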
@@ -205,8 +209,8 @@ struct scmi_sensor_axis_info { }; /** - * scmi_sensor_intervals_info - describes number and type of available update - * intervals + * struct scmi_sensor_intervals_info - describes number and type of available + * update intervals * @segmented: Flag for segmented intervals' representation. When True there * will be exactly 3 intervals in @desc, with each entry * representing a member of a segment in this order: diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index afbf8037d8db..d2176a56828a 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -51,6 +51,14 @@ struct scpi_sensor_info { * OPP is an index to the list return by @dvfs_get_info * @dvfs_get_info: returns the DVFS capabilities of the given power * domain. It includes the OPP list and the latency information + * @device_domain_id: gets the scpi domain id for a given device + * @get_transition_latency: gets the DVFS transition latency for a given device + * @add_opps_to_device: adds all the OPPs for a given device + * @sensor_get_capability: get the list of capabilities for the sensors + * @sensor_get_info: get the information of the specified sensor + * @sensor_get_value: gets the current value of the sensor + * @device_get_power_state: gets the power state of a power domain + * @device_set_power_state: sets the power state of a power domain */ struct scpi_ops { u32 (*get_version)(void); diff --git a/include/linux/security.h b/include/linux/security.h index 24eda04221e9..5b7288521300 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -120,10 +120,11 @@ enum lockdown_reason { LOCKDOWN_MMIOTRACE, LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, + LOCKDOWN_BPF_WRITE_USER, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, - LOCKDOWN_BPF_READ, + LOCKDOWN_BPF_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, diff --git a/include/linux/serdev.h b/include/linux/serdev.h index 9f14f9c12ec4..3368c261ab62 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -327,4 +327,18 @@ static inline int serdev_tty_port_unregister(struct tty_port *port) } #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ +struct acpi_resource; +struct acpi_resource_uart_serialbus; + +#ifdef CONFIG_ACPI +bool serdev_acpi_get_uart_resource(struct acpi_resource *ares, + struct acpi_resource_uart_serialbus **uart); +#else +static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares, + struct acpi_resource_uart_serialbus **uart) +{ + return false; +} +#endif /* CONFIG_ACPI */ + #endif /*_LINUX_SERDEV_H */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 52d7fb92a69d..c58cc142d23f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -518,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) if (sysrq_ch) handle_sysrq(sysrq_ch); } + +static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + int sysrq_ch; + + if (!port->has_sysrq) { + spin_unlock_irqrestore(&port->lock, flags); + return; + } + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, flags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} #else /* CONFIG_MAGIC_SYSRQ_SERIAL */ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { @@ -531,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { spin_unlock(&port->lock); } +static inline void 
uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + spin_unlock_irqrestore(&port->lock, flags); +} #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ /* diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index f6c3323fc4c5..cf0de4a86640 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -27,6 +27,15 @@ #define S3C2410_UERSTAT (0x14) #define S3C2410_UFSTAT (0x18) #define S3C2410_UMSTAT (0x1C) +#define USI_CON (0xC4) +#define USI_OPTION (0xC8) + +#define USI_CON_RESET (1<<0) +#define USI_CON_RESET_MASK (1<<0) + +#define USI_OPTION_HWACG_CLKREQ_ON (1<<1) +#define USI_OPTION_HWACG_CLKSTOP_ON (1<<2) +#define USI_OPTION_HWACG_MASK (3<<1) #define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3)) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b2db9cd9a73f..6bdb0db3e825 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -689,6 +689,7 @@ typedef unsigned char *sk_buff_data_t; * CHECKSUM_UNNECESSARY (max 3) * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB + * @slow_gro: state present at GRO time, slower prepare step required * @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @secmark: security marking @@ -863,13 +864,14 @@ struct sk_buff { __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; #endif -#ifdef CONFIG_NET_REDIRECT __u8 redirected:1; +#ifdef CONFIG_NET_REDIRECT __u8 from_ingress:1; #endif #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; #endif + __u8 slow_gro:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -990,6 +992,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) */ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) { + skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst; } @@ -1006,6 +1009,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } @@ -1179,6 +1183,7 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); +struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, @@ -4216,6 +4221,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb) static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) + skb->slow_gro |= !!nfct; skb->_nfct = nfct; #endif } @@ -4375,6 +4381,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(dst)); #endif + dst->slow_gro = src->slow_gro; __nf_copy(dst, src, true); } @@ -4664,17 +4671,13 @@ static inline __wsum lco_csum(struct sk_buff *skb) static inline bool skb_is_redirected(const struct sk_buff *skb) { -#ifdef CONFIG_NET_REDIRECT return skb->redirected; -#else - return false; -#endif } static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) { -#ifdef 
CONFIG_NET_REDIRECT skb->redirected = 1; +#ifdef CONFIG_NET_REDIRECT skb->from_ingress = from_ingress; if (skb->from_ingress) skb->tstamp = 0; @@ -4683,9 +4686,7 @@ static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) static inline void skb_reset_redirect(struct sk_buff *skb) { -#ifdef CONFIG_NET_REDIRECT skb->redirected = 0; -#endif } static inline bool skb_csum_is_sctp(struct sk_buff *skb) @@ -4711,11 +4712,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb) } #ifdef CONFIG_PAGE_POOL -static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page, - struct page_pool *pp) +static inline void skb_mark_for_recycle(struct sk_buff *skb) { skb->pp_recycle = 1; - page_pool_store_mem_info(page, pp); } #endif diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 96f319099744..14ab0c0bc924 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk) return rcu_dereference_sk_user_data(sk); } +static inline void sk_psock_set_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + set_bit(bit, &psock->state); +} + +static inline void sk_psock_clear_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + clear_bit(bit, &psock->state); +} + +static inline bool sk_psock_test_state(const struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + return test_bit(bit, &psock->state); +} + +static inline void sock_drop(struct sock *sk, struct sk_buff *skb) +{ + sk_drops_add(sk, skb); + kfree_skb(skb); +} + +static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg) +{ + if (msg->skb) + sock_drop(psock->sk, msg->skb); + kfree(msg); +} + static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { spin_lock_bh(&psock->ingress_lock); - list_add_tail(&msg->list, &psock->ingress_msg); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + list_add_tail(&msg->list, &psock->ingress_msg); + else + drop_sk_msg(psock, msg); spin_unlock_bh(&psock->ingress_lock); } @@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk, psock->psock_update_sk_prot(sk, psock, true); } -static inline void sk_psock_set_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - set_bit(bit, &psock->state); -} - -static inline void sk_psock_clear_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - clear_bit(bit, &psock->state); -} - -static inline bool sk_psock_test_state(const struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - return test_bit(bit, &psock->state); -} - static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; diff --git a/include/linux/socket.h b/include/linux/socket.h index 0d8e3dcb7f88..041d6032a348 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -223,8 +223,11 @@ struct ucred { * reuses AF_INET address family */ #define AF_XDP 44 /* XDP sockets */ +#define AF_MCTP 45 /* Management component + * transport protocol + */ -#define AF_MAX 45 /* For now.. */ +#define AF_MAX 46 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -274,6 +277,7 @@ struct ucred { #define PF_QIPCRTR AF_QIPCRTR #define PF_SMC AF_SMC #define PF_XDP AF_XDP +#define PF_MCTP AF_MCTP #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
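With the family number reserved, opening an MCTP socket from userspace would follow the usual pattern; a hypothetical sketch (the datagram socket type is an assumption, since the MCTP uapi definitions are outside this diff):

#include <sys/socket.h>

#ifndef AF_MCTP
#define AF_MCTP 45	/* matches the value added above */
#endif

static int example_open_mctp(void)
{
	return socket(AF_MCTP, SOCK_DGRAM, 0);
}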
*/ @@ -421,6 +425,9 @@ extern int __sys_accept4_file(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags, unsigned long nofile); +extern struct file *do_accept(struct file *file, unsigned file_flags, + struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index a48ac3e77301..76ce3f3ac0f2 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -661,6 +661,8 @@ struct sdw_slave_ops { * initialized * @first_interrupt_done: status flag tracking if the interrupt handling * for a Slave happens for the first time after enumeration + * @is_mockup_device: status flag used to squelch errors in the command/control + * protocol for SoundWire mockup devices */ struct sdw_slave { struct sdw_slave_id id; @@ -683,6 +685,7 @@ struct sdw_slave { struct completion initialization_complete; u32 unattach_request; bool first_interrupt_done; + bool is_mockup_device; }; #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 1ebea7764011..8a463b8fc12a 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -7,6 +7,85 @@ #include <linux/irqreturn.h> #include <linux/soundwire/sdw.h> +#define SDW_SHIM_BASE 0x2C000 +#define SDW_ALH_BASE 0x2C800 +#define SDW_LINK_BASE 0x30000 +#define SDW_LINK_SIZE 0x10000 + +/* Intel SHIM Registers Definition */ +#define SDW_SHIM_LCAP 0x0 +#define SDW_SHIM_LCTL 0x4 +#define SDW_SHIM_IPPTR 0x8 +#define SDW_SHIM_SYNC 0xC + +#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x)) +#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x)) +#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x)) +#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x)) +#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x)) +#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x)) + +#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y))) +#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y))) +#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * (x)) +#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x)) +#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x)) + +#define SDW_SHIM_WAKEEN 0x190 +#define SDW_SHIM_WAKESTS 0x192 + +#define SDW_SHIM_LCTL_SPA BIT(0) +#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0) +#define SDW_SHIM_LCTL_CPA BIT(8) +#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8) + +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0) +#define SDW_SHIM_SYNC_SYNCCPU BIT(15) +#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16) +#define SDW_SHIM_SYNC_CMDSYNC BIT(16) +#define SDW_SHIM_SYNC_SYNCGO BIT(24) + +#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0) +#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4) +#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8) + +#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0) +#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4) +#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8) +#define SDW_SHIM_PCMSYCM_DIR BIT(15) + +#define SDW_SHIM_PDMSCAP_ISS GENMASK(3, 0) +#define SDW_SHIM_PDMSCAP_OSS GENMASK(7, 4) +#define SDW_SHIM_PDMSCAP_BSS GENMASK(12, 8) +#define SDW_SHIM_PDMSCAP_CPSS GENMASK(15, 13) + +#define 
SDW_SHIM_IOCTL_MIF BIT(0) +#define SDW_SHIM_IOCTL_CO BIT(1) +#define SDW_SHIM_IOCTL_COE BIT(2) +#define SDW_SHIM_IOCTL_DO BIT(3) +#define SDW_SHIM_IOCTL_DOE BIT(4) +#define SDW_SHIM_IOCTL_BKE BIT(5) +#define SDW_SHIM_IOCTL_WPDD BIT(6) +#define SDW_SHIM_IOCTL_CIBD BIT(8) +#define SDW_SHIM_IOCTL_DIBD BIT(9) + +#define SDW_SHIM_CTMCTL_DACTQE BIT(0) +#define SDW_SHIM_CTMCTL_DODS BIT(1) +#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3) + +#define SDW_SHIM_WAKEEN_ENABLE BIT(0) +#define SDW_SHIM_WAKESTS_STATUS BIT(0) + +/* Intel ALH Register definitions */ +#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x))) +#define SDW_ALH_NUM_STREAMS 64 + +#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3 +#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0) +#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16) + /** * struct sdw_intel_stream_params_data: configuration passed during * the @params_stream callback, e.g. for interaction with DSP @@ -116,6 +195,8 @@ struct sdw_intel_slave_id { * @link_list: list to handle interrupts across all links * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers. * @shim_mask: flags to track initialization of SHIM shared registers + * @shim_base: sdw shim base. + * @alh_base: sdw alh base. */ struct sdw_intel_ctx { int count; @@ -128,6 +209,8 @@ struct sdw_intel_ctx { struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ u32 shim_mask; + u32 shim_base; + u32 alh_base; }; /** @@ -146,6 +229,8 @@ struct sdw_intel_ctx { * machine-specific quirks are handled in the DSP driver. * @clock_stop_quirks: mask array of possible behaviors requested by the * DSP driver. The quirks are common for all links for now. + * @shim_base: sdw shim base. + * @alh_base: sdw alh base. */ struct sdw_intel_res { int count; @@ -157,6 +242,8 @@ struct sdw_intel_res { struct device *dev; u32 link_mask; u32 clock_stop_quirks; + u32 shim_base; + u32 alh_base; }; /* diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 97b8d12b5f2b..8371bca13729 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * not using a GPIO line) * @word_delay: delay to be inserted between consecutive * words of a transfer - * + * @cs_setup: delay to be introduced by the controller after CS is asserted + * @cs_hold: delay to be introduced by the controller before CS is deasserted + * @cs_inactive: delay to be introduced by the controller after CS is + * deasserted. If @cs_change_delay is used from @spi_transfer, then the + * two delays will be added up. 
* @statistics: statistics for the spi_device * * A @spi_device is used to interchange data between an SPI slave @@ -188,6 +192,10 @@ struct spi_device { int cs_gpio; /* LEGACY: chip select gpio */ struct gpio_desc *cs_gpiod; /* chip select gpio desc */ struct spi_delay word_delay; /* inter-word delay */ + /* CS delays */ + struct spi_delay cs_setup; + struct spi_delay cs_hold; + struct spi_delay cs_inactive; /* the statistics */ struct spi_statistics statistics; @@ -339,6 +347,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @max_speed_hz: Highest supported transfer speed * @flags: other constraints relevant to this driver * @slave: indicates that this is an SPI slave controller + * @devm_allocated: whether the allocation of this struct is devres-managed * @max_transfer_size: function that returns the max transfer size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. * @max_message_size: function that returns the max message size for @@ -412,11 +421,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * controller has native support for memory like operations. * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_setup: delay to be introduced by the controller after CS is asserted - * @cs_hold: delay to be introduced by the controller before CS is deasserted - * @cs_inactive: delay to be introduced by the controller after CS is - * deasserted. If @cs_change_delay is used from @spi_transfer, then the - * two delays will be added up. * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per * CS number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods @@ -511,7 +515,7 @@ struct spi_controller { #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ - /* flag indicating this is a non-devres managed controller */ + /* flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; /* flag indicating this is an SPI slave controller */ @@ -550,8 +554,7 @@ struct spi_controller { * to configure specific CS timing through spi_set_cs_timing() after * spi_setup(). */ - int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup, - struct spi_delay *hold, struct spi_delay *inactive); + int (*set_cs_timing)(struct spi_device *spi); /* bidirectional bulk transfers * @@ -638,11 +641,6 @@ struct spi_controller { /* Optimized handlers for SPI memory-like operations. */ const struct spi_controller_mem_ops *mem_ops; - /* CS delays */ - struct spi_delay cs_setup; - struct spi_delay cs_hold; - struct spi_delay cs_inactive; - /* gpio chip select */ int *cs_gpios; struct gpio_desc **cs_gpiods; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 79897841a2cc..45310ea1b1d7 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -12,6 +12,8 @@ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * + * linux/spinlock_types_raw: + * The raw types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -31,6 +33,8 @@ * contains the generic, simplified UP spinlock type. 
* (which is an empty structure on non-debug builds) * + * linux/spinlock_types_raw: + * The raw RT types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -308,8 +312,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) 1 : ({ local_irq_restore(flags); 0; }); \ }) -/* Include rwlock functions */ +#ifndef CONFIG_PREEMPT_RT +/* Include rwlock functions for !RT */ #include <linux/rwlock.h> +#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -320,6 +326,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include <linux/spinlock_api_up.h> #endif +/* Non PREEMPT_RT kernel, map to raw spinlocks: */ +#ifndef CONFIG_PREEMPT_RT + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -454,6 +463,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock) #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) +#else /* !CONFIG_PREEMPT_RT */ +# include <linux/spinlock_rt.h> +#endif /* CONFIG_PREEMPT_RT */ + /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 19a9be9d97ee..6b8e1a0b137b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -187,6 +187,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } +/* PREEMPT_RT has its own rwlock implementation */ +#ifndef CONFIG_PREEMPT_RT #include <linux/rwlock_api_smp.h> +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 index 000000000000..835aedaf68ac --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_SPINLOCK_RT_H +#define __LINUX_SPINLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu); +#else +static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu) +{ +} +#endif + +#define spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, false); \ +} while (0) + +#define local_spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, true); \ +} while (0) + +extern void rt_spin_lock(spinlock_t *lock); +extern void rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock); +extern void rt_spin_unlock(spinlock_t *lock); +extern void rt_spin_lock_unlock(spinlock_t *lock); +extern int rt_spin_trylock_bh(spinlock_t *lock); +extern int rt_spin_trylock(spinlock_t *lock); + +static __always_inline void spin_lock(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#ifdef CONFIG_LOCKDEP +# define __spin_lock_nested(lock, subclass) \ + rt_spin_lock_nested(lock, subclass) + +# define __spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __spin_lock_nested(lock, subclass); \ + } while (0) + +#else + /* + * Always evaluate the 'subclass' argument to avoid that the compiler + * warns about set-but-not-used variables when building with + * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. + */ +# define __spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + spin_lock_irqsave(((void)(subclass), (lock)), flags) +#endif + +#define spin_lock_nested(lock, subclass) \ + __spin_lock_nested(lock, subclass) + +#define spin_lock_nest_lock(lock, nest_lock) \ + __spin_lock_nest_lock(lock, nest_lock) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + __spin_lock_irqsave_nested(lock, flags, subclass) + +static __always_inline void spin_lock_bh(spinlock_t *lock) +{ + /* Investigate: Drop bh when blocking ? 
*/ + local_bh_disable(); + rt_spin_lock(lock); +} + +static __always_inline void spin_lock_irq(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) + +static __always_inline void spin_unlock(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_bh(spinlock_t *lock) +{ + rt_spin_unlock(lock); + local_bh_enable(); +} + +static __always_inline void spin_unlock_irq(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, + unsigned long flags) +{ + rt_spin_unlock(lock); +} + +#define spin_trylock(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define spin_trylock_bh(lock) \ + __cond_lock(lock, rt_spin_trylock_bh(lock)) + +#define spin_trylock_irq(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define __spin_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = spin_trylock(lock); \ + __locked; \ +}) + +#define spin_trylock_irqsave(lock, flags) \ + __cond_lock(lock, __spin_trylock_irqsave(lock, flags)) + +#define spin_is_contended(lock) (((void)(lock), 0)) + +static inline int spin_is_locked(spinlock_t *lock) +{ + return rt_mutex_base_is_locked(&lock->lock); +} + +#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock)) + +#include <linux/rwlock_rt.h> + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index b981caafe8bf..2dfa35ffec76 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,65 +9,11 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_SMP) -# include <asm/spinlock_types.h> -#else -# include <linux/spinlock_types_up.h> -#endif - -#include <linux/lockdep_types.h> +#include <linux/spinlock_types_raw.h> -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RAW_SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SPIN, \ - } -# define SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RAW_SPIN_DEP_MAP_INIT(lockname) -# define SPIN_DEP_MAP_INIT(lockname) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, -#else -# define SPIN_DEBUG_INIT(lockname) -#endif - -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - RAW_SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +#ifndef CONFIG_PREEMPT_RT +/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ typedef struct spinlock { union { struct raw_spinlock rlock; @@ -96,6 +42,35 @@ typedef struct spinlock { #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +/* PREEMPT_RT kernels map spinlock to rt_mutex */ +#include 
<linux/rtmutex.h> + +typedef struct spinlock { + struct rt_mutex_base lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#define __SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + SPIN_DEP_MAP_INIT(name) \ + } + +#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + LOCAL_SPIN_DEP_MAP_INIT(name) \ + } + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #include <linux/rwlock_types.h> #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 index 000000000000..91cb36b65a17 --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,73 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H + +#include <linux/types.h> + +#if defined(CONFIG_SMP) +# include <asm/spinlock_types.h> +#else +# include <linux/spinlock_types_up.h> +#endif + +#include <linux/lockdep_types.h> + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RAW_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SPIN, \ + } +# define SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } + +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + .lock_type = LD_LOCK_PERCPU, \ + } +#else +# define RAW_SPIN_DEP_MAP_INIT(lockname) +# define SPIN_DEP_MAP_INIT(lockname) +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +{ \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + RAW_SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */ diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 0e0cf4d6a72a..6cfaa0a9a9b9 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -61,7 +61,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp) int idx; idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; - WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); + WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1); return idx; } @@ -81,11 +81,11 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp, { int idx; - idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; + idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1; pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", tt, tf, idx, - READ_ONCE(ssp->srcu_lock_nesting[!idx]), - READ_ONCE(ssp->srcu_lock_nesting[idx])); + data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])), + data_race(READ_ONCE(ssp->srcu_lock_nesting[idx]))); } 
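The data_race()/READ_ONCE() pairing above is the general idiom for diagnostics that are allowed to observe racy values; a minimal sketch outside SRCU (the counter and message are invented):

#include <linux/compiler.h>
#include <linux/printk.h>

static int example_events;	/* updated locklessly elsewhere */

static void example_report(void)
{
	/* data_race() tells KCSAN this racy diagnostic read is intentional. */
	pr_info("events so far: %d\n", data_race(READ_ONCE(example_events)));
}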
#endif diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 0d5a2691e7e9..f9b53acb4e02 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pci.h> -#include <linux/gpio.h> +#include <linux/gpio/driver.h> #include <linux/mod_devicetable.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h index 3f8bc973d67d..19253bfacd1a 100644 --- a/include/linux/ssb/ssb_driver_extif.h +++ b/include/linux/ssb/ssb_driver_extif.h @@ -197,7 +197,7 @@ struct ssb_extif { static inline bool ssb_extif_available(struct ssb_extif *extif) { - return 0; + return false; } static inline diff --git a/include/linux/static_call.h b/include/linux/static_call.h index fc94faa53b5b..3e56a9751c06 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -17,11 +17,17 @@ * DECLARE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL_NULL(name, typename); + * DEFINE_STATIC_CALL_RET0(name, typename); + * + * __static_call_return0; + * * static_call(name)(args...); * static_call_cond(name)(args...); * static_call_update(name, func); * static_call_query(name); * + * EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}() + * * Usage example: * * # Start with the following functions (with identical prototypes): @@ -96,6 +102,33 @@ * To query which function is currently set to be called, use: * * func = static_call_query(name); + * + * + * DEFINE_STATIC_CALL_RET0 / __static_call_return0: + * + * Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the + * conditional void function call, DEFINE_STATIC_CALL_RET0 / + * __static_call_return0 optimize the do nothing return 0 function. + * + * This feature is strictly UB per the C standard (since it casts a function + * pointer to a different signature) and relies on the architecture ABI to + * make things work. In particular it relies on Caller Stack-cleanup and the + * whole return register being clobbered for short return values. All normal + * CDECL style ABIs conform. + * + * In particular the x86_64 implementation replaces the 5 byte CALL + * instruction at the callsite with a 5 byte clear of the RAX register, + * completely eliding any function call overhead. + * + * Notably argument setup is unconditional. + * + * + * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP(): + * + * The difference is that the _TRAMP variant tries to only export the + * trampoline with the result that a module can use static_call{,_cond}() but + * not static_call_update(). 
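As a sketch of the RET0 flavour described above, with invented names (mirroring the usage conventions listed at the top of this comment):

#include <linux/static_call.h>

static int example_handler(int arg)
{
	return arg * 2;	/* arbitrary real implementation */
}

DEFINE_STATIC_CALL_RET0(example_call, example_handler);

static int example_call_site(void)
{
	/* Returns 0 (no call emitted on x86_64) until updated. */
	return static_call(example_call)(42);
}

static void example_enable(void)
{
	static_call_update(example_call, example_handler);
}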
+ * */ #include <linux/types.h> diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index d5ae621d66ba..a6f03b36fc4f 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -115,7 +115,9 @@ struct stmmac_axi { #define EST_GCL 1024 struct stmmac_est { + struct mutex lock; int enable; + u32 btr_reserve[2]; u32 btr_offset[2]; u32 btr[2]; u32 ctr[2]; diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 938c2bf29db8..02117ed0fa2e 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -20,6 +20,7 @@ enum rpc_auth_flavors { RPC_AUTH_DES = 3, RPC_AUTH_KRB = 4, RPC_AUTH_GSS = 6, + RPC_AUTH_TLS = 7, RPC_AUTH_MAXFLAVOR = 8, /* pseudoflavors: */ RPC_AUTH_GSS_KRB5 = 390003, diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index e91d51ea028b..f0f846fa396e 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -19,6 +19,7 @@ #include <linux/sunrpc/svcauth.h> #include <linux/wait.h> #include <linux/mm.h> +#include <linux/pagevec.h> /* statistics for svc_pool structures */ struct svc_pool_stats { @@ -256,6 +257,7 @@ struct svc_rqst { struct page * *rq_next_page; /* next reply page to use */ struct page * *rq_page_end; /* one past the last page */ + struct pagevec rq_pvec; struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; @@ -502,6 +504,8 @@ struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node); struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node); +void svc_rqst_replace_page(struct svc_rqst *rqstp, + struct page *page); void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); unsigned int svc_pool_map_get(void); @@ -523,6 +527,7 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +const char * svc_proc_name(const struct svc_rqst *rqstp); int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 3184465de3a0..24aa159d29a7 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -90,9 +90,9 @@ struct svcxprt_rdma { struct ib_pd *sc_pd; spinlock_t sc_send_lock; - struct list_head sc_send_ctxts; + struct llist_head sc_send_ctxts; spinlock_t sc_rw_ctxt_lock; - struct list_head sc_rw_ctxts; + struct llist_head sc_rw_ctxts; u32 sc_pending_recvs; u32 sc_recv_batch; @@ -150,7 +150,7 @@ struct svc_rdma_recv_ctxt { }; struct svc_rdma_send_ctxt { - struct list_head sc_list; + struct llist_node sc_node; struct rpc_rdma_cid sc_cid; struct ib_send_wr sc_send_wr; @@ -207,6 +207,7 @@ extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, struct svc_rdma_recv_ctxt *rctxt, int status); +extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index a965cbc136ad..b519609af1d0 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -95,6 +95,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_auth_unix 
cpu_to_be32(RPC_AUTH_UNIX) #define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT) #define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS) +#define rpc_auth_tls cpu_to_be32(RPC_AUTH_TLS) #define rpc_call cpu_to_be32(RPC_CALL) #define rpc_reply cpu_to_be32(RPC_REPLY) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index c8c39f22d3b1..b15c1f07162d 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -288,7 +288,6 @@ struct rpc_xprt { const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *debugfs; /* debugfs directory */ - atomic_t inject_disconnect; #endif struct rcu_head rcu; const struct xprt_class *xprt_class; @@ -502,21 +501,4 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) return test_and_set_bit(XPRT_BINDING, &xprt->state); } -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -extern unsigned int rpc_inject_disconnect; -static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) -{ - if (!rpc_inject_disconnect) - return; - if (atomic_dec_return(&xprt->inject_disconnect)) - return; - atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); - xprt->ops->inject_disconnect(xprt); -} -#else -static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) -{ -} -#endif - #endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index d99ca99837de..1fa2b69c6fc3 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -48,6 +48,8 @@ typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dobool(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h new file mode 100644 index 000000000000..b0dcfa26d07b --- /dev/null +++ b/include/linux/sysfb.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_SYSFB_H +#define _LINUX_SYSFB_H + +/* + * Generic System Framebuffers on x86 + * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com> + */ + +#include <linux/kernel.h> +#include <linux/platform_data/simplefb.h> +#include <linux/screen_info.h> + +enum { + M_I17, /* 17-Inch iMac */ + M_I20, /* 20-Inch iMac */ + M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ + M_I24, /* 24-Inch iMac */ + M_I24_8_1, /* 24-Inch iMac, 8,1th gen */ + M_I24_10_1, /* 24-Inch iMac, 10,1th gen */ + M_I27_11_1, /* 27-Inch iMac, 11,1th gen */ + M_MINI, /* Mac Mini */ + M_MINI_3_1, /* Mac Mini, 3,1th gen */ + M_MINI_4_1, /* Mac Mini, 4,1th gen */ + M_MB, /* MacBook */ + M_MB_2, /* MacBook, 2nd rev. */ + M_MB_3, /* MacBook, 3rd rev. */ + M_MB_5_1, /* MacBook, 5th rev. */ + M_MB_6_1, /* MacBook, 6th rev. */ + M_MB_7_1, /* MacBook, 7th rev. 
*/ + M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ + M_MBA, /* MacBook Air */ + M_MBA_3, /* MacBook Air, 3rd rev */ + M_MBP, /* MacBook Pro */ + M_MBP_2, /* MacBook Pro 2nd gen */ + M_MBP_2_2, /* MacBook Pro 2,2nd gen */ + M_MBP_SR, /* MacBook Pro (Santa Rosa) */ + M_MBP_4, /* MacBook Pro, 4th gen */ + M_MBP_5_1, /* MacBook Pro, 5,1th gen */ + M_MBP_5_2, /* MacBook Pro, 5,2th gen */ + M_MBP_5_3, /* MacBook Pro, 5,3rd gen */ + M_MBP_6_1, /* MacBook Pro, 6,1th gen */ + M_MBP_6_2, /* MacBook Pro, 6,2th gen */ + M_MBP_7_1, /* MacBook Pro, 7,1th gen */ + M_MBP_8_2, /* MacBook Pro, 8,2nd gen */ + M_UNKNOWN /* placeholder */ +}; + +struct efifb_dmi_info { + char *optname; + unsigned long base; + int stride; + int width; + int height; + int flags; +}; + +#ifdef CONFIG_EFI + +extern struct efifb_dmi_info efifb_dmi_list[]; +void sysfb_apply_efi_quirks(struct platform_device *pd); + +#else /* CONFIG_EFI */ + +static inline void sysfb_apply_efi_quirks(struct platform_device *pd) +{ +} + +#endif /* CONFIG_EFI */ + +#ifdef CONFIG_SYSFB_SIMPLEFB + +bool sysfb_parse_mode(const struct screen_info *si, + struct simplefb_platform_data *mode); +int sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode); + +#else /* CONFIG_SYSFB_SIMPLEFB */ + +static inline bool sysfb_parse_mode(const struct screen_info *si, + struct simplefb_platform_data *mode) +{ + return false; +} + +static inline int sysfb_create_simplefb(const struct screen_info *si, + const struct simplefb_platform_data *mode) +{ + return -EINVAL; +} + +#endif /* CONFIG_SYSFB_SIMPLEFB */ + +#endif /* _LINUX_SYSFB_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index a12556a4b93a..e3f1e8ac1f85 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -176,7 +176,7 @@ struct bin_attribute { struct attribute attr; size_t size; void *private; - struct address_space *mapping; + struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 54269e47ac9a..3ebfea0781f1 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -27,6 +27,7 @@ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ #define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ +#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ struct device; struct tee_device; @@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev); * @returns a pointer to 'struct tee_shm' */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** * tee_shm_register() - Register shared memory buffer diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index e7c96c37174f..124e13cb1469 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -468,6 +468,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) * @interrupt_work: Work scheduled to handle ring interrupt when no * MSI-X is used. * @hop_count: Number of rings (end point hops) supported by NHI.
+ * @quirks: NHI specific quirks if any */ struct tb_nhi { spinlock_t lock; @@ -480,6 +481,7 @@ struct tb_nhi { bool going_away; struct work_struct interrupt_work; u32 hop_count; + unsigned long quirks; }; /** diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ad413b382a3c..8e0631a4b046 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -675,7 +675,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) #ifdef CONFIG_BPF_EVENTS unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); -int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); +int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); @@ -692,7 +692,7 @@ static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *c } static inline int -perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) +perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie) { return -EOPNOTSUPP; } @@ -803,6 +803,9 @@ extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void *record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); +int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); +void perf_event_free_bpf_prog(struct perf_event *event); + void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, diff --git a/include/linux/tty.h b/include/linux/tty.h index 19dc1097e09c..168e57e40bbb 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -6,11 +6,12 @@ #include <linux/major.h> #include <linux/termios.h> #include <linux/workqueue.h> +#include <linux/tty_buffer.h> #include <linux/tty_driver.h> #include <linux/tty_ldisc.h> +#include <linux/tty_port.h> #include <linux/mutex.h> #include <linux/tty_flags.h> -#include <linux/seq_file.h> #include <uapi/linux/tty.h> #include <linux/rwsem.h> #include <linux/llist.h> @@ -31,54 +32,6 @@ */ #define __DISABLED_CHAR '\0' -struct tty_buffer { - union { - struct tty_buffer *next; - struct llist_node free; - }; - int used; - int size; - int commit; - int read; - int flags; - /* Data points here */ - unsigned long data[]; -}; - -/* Values for .flags field of tty_buffer */ -#define TTYB_NORMAL 1 /* buffer has no flags buffer */ - -static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) -{ - return ((unsigned char *)b->data) + ofs; -} - -static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) -{ - return (char *)char_buf_ptr(b, ofs) + b->size; -} - -struct tty_bufhead { - struct tty_buffer *head; /* Queue head */ - struct work_struct work; - struct mutex lock; - atomic_t priority; - struct tty_buffer sentinel; - struct llist_head free; /* Free queue head */ - atomic_t mem_used; /* In-use buffers excluding free list */ - int mem_limit; - struct tty_buffer *tail; /* Active buffer */ -}; -/* - * When a break, frame error, or parity error happens, these codes are - * stuffed into the flags buffer. 
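Editor's note: the trace_events.h hunks above thread a caller-chosen u64 bpf_cookie through perf_event_set_bpf_prog()/perf_event_attach_bpf_prog(). The cookie is opaque to the kernel; a program attached at several points can read it back at run time to tell its attach points apart. A sketch of the caller side (event/prog acquisition elided; the cookie value is arbitrary):

#include <linux/bpf.h>
#include <linux/perf_event.h>
#include <linux/trace_events.h>

static int example_attach(struct perf_event *event, struct bpf_prog *prog)
{
	u64 cookie = 42;	/* arbitrary per-attachment tag */

	/* Later torn down with perf_event_free_bpf_prog(event). */
	return perf_event_set_bpf_prog(event, prog, cookie);
}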
- */ -#define TTY_NORMAL 0 -#define TTY_BREAK 1 -#define TTY_FRAME 2 -#define TTY_PARITY 3 -#define TTY_OVERRUN 4 - #define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR]) #define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT]) #define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE]) @@ -164,85 +117,6 @@ struct tty_bufhead { struct device; struct signal_struct; - -/* - * Port level information. Each device keeps its own port level information - * so provide a common structure for those ports wanting to use common support - * routines. - * - * The tty port has a different lifetime to the tty so must be kept apart. - * In addition be careful as tty -> port mappings are valid for the life - * of the tty object but in many cases port -> tty mappings are valid only - * until a hangup so don't use the wrong path. - */ - -struct tty_port; - -struct tty_port_operations { - /* Return 1 if the carrier is raised */ - int (*carrier_raised)(struct tty_port *port); - /* Control the DTR line */ - void (*dtr_rts)(struct tty_port *port, int raise); - /* Called when the last close completes or a hangup finishes - IFF the port was initialized. Do not use to free resources. Called - under the port mutex to serialize against activate/shutdowns */ - void (*shutdown)(struct tty_port *port); - /* Called under the port mutex from tty_port_open, serialized using - the port mutex */ - /* FIXME: long term getting the tty argument *out* of this would be - good for consoles */ - int (*activate)(struct tty_port *port, struct tty_struct *tty); - /* Called on the final put of a port */ - void (*destruct)(struct tty_port *port); -}; - -struct tty_port_client_operations { - int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); - void (*write_wakeup)(struct tty_port *port); -}; - -extern const struct tty_port_client_operations tty_port_default_client_ops; - -struct tty_port { - struct tty_bufhead buf; /* Locked internally */ - struct tty_struct *tty; /* Back pointer */ - struct tty_struct *itty; /* internal back ptr */ - const struct tty_port_operations *ops; /* Port operations */ - const struct tty_port_client_operations *client_ops; /* Port client operations */ - spinlock_t lock; /* Lock protecting tty field */ - int blocked_open; /* Waiting to open */ - int count; /* Usage count */ - wait_queue_head_t open_wait; /* Open waiters */ - wait_queue_head_t delta_msr_wait; /* Modem status change */ - unsigned long flags; /* User TTY flags ASYNC_ */ - unsigned long iflags; /* Internal flags TTY_PORT_ */ - unsigned char console:1; /* port is a console */ - struct mutex mutex; /* Locking */ - struct mutex buf_mutex; /* Buffer alloc lock */ - unsigned char *xmit_buf; /* Optional buffer */ - unsigned int close_delay; /* Close port delay */ - unsigned int closing_wait; /* Delay for output */ - int drain_delay; /* Set to zero if no pure time - based drain is needed else - set to size of fifo */ - struct kref kref; /* Ref counter */ - void *client_data; -}; - -/* tty_port::iflags bits -- use atomic bit ops */ -#define TTY_PORT_INITIALIZED 0 /* device is initialized */ -#define TTY_PORT_SUSPENDED 1 /* device is suspended */ -#define TTY_PORT_ACTIVE 2 /* device is open */ - -/* - * uart drivers: use the uart_port::status field and the UPSTAT_* defines - * for s/w-based flow control steering and carrier detection status - */ -#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ -#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ -#define TTY_PORT_KOPENED 5 /* device exclusively opened by - kernel 
*/ - struct tty_operations; /** @@ -446,15 +320,6 @@ extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern void stop_tty(struct tty_struct *tty); extern void start_tty(struct tty_struct *tty); -extern int tty_register_driver(struct tty_driver *driver); -extern void tty_unregister_driver(struct tty_driver *driver); -extern struct device *tty_register_device(struct tty_driver *driver, - unsigned index, struct device *dev); -extern struct device *tty_register_device_attr(struct tty_driver *driver, - unsigned index, struct device *device, - void *drvdata, - const struct attribute_group **attr_grp); -extern void tty_unregister_device(struct tty_driver *driver, unsigned index); extern void tty_write_message(struct tty_struct *tty, char *msg); extern int tty_send_xchar(struct tty_struct *tty, char ch); extern int tty_put_char(struct tty_struct *tty, unsigned char c); @@ -502,13 +367,7 @@ extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); -extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); -extern void tty_ldisc_deref(struct tty_ldisc *); -extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); -extern const struct seq_operations tty_ldiscs_seq_ops; - extern void tty_wakeup(struct tty_struct *tty); -extern void tty_ldisc_flush(struct tty_struct *tty); extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); @@ -522,128 +381,6 @@ extern int tty_standard_install(struct tty_driver *driver, extern struct mutex tty_mutex; -extern void tty_port_init(struct tty_port *port); -extern void tty_port_link_device(struct tty_port *port, - struct tty_driver *driver, unsigned index); -extern struct device *tty_port_register_device(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device); -extern struct device *tty_port_register_device_attr(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device, void *drvdata, - const struct attribute_group **attr_grp); -extern struct device *tty_port_register_device_serdev(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device); -extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device, void *drvdata, - const struct attribute_group **attr_grp); -extern void tty_port_unregister_device(struct tty_port *port, - struct tty_driver *driver, unsigned index); -extern int tty_port_alloc_xmit_buf(struct tty_port *port); -extern void tty_port_free_xmit_buf(struct tty_port *port); -extern void tty_port_destroy(struct tty_port *port); -extern void tty_port_put(struct tty_port *port); - -static inline struct tty_port *tty_port_get(struct tty_port *port) -{ - if (port && kref_get_unless_zero(&port->kref)) - return port; - return NULL; -} - -/* If the cts flow control is enabled, return true. 
*/ -static inline bool tty_port_cts_enabled(const struct tty_port *port) -{ - return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); -} - -static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); -} - -static inline bool tty_port_active(const struct tty_port *port) -{ - return test_bit(TTY_PORT_ACTIVE, &port->iflags); -} - -static inline void tty_port_set_active(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); -} - -static inline bool tty_port_check_carrier(const struct tty_port *port) -{ - return test_bit(TTY_PORT_CHECK_CD, &port->iflags); -} - -static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); -} - -static inline bool tty_port_suspended(const struct tty_port *port) -{ - return test_bit(TTY_PORT_SUSPENDED, &port->iflags); -} - -static inline void tty_port_set_suspended(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); -} - -static inline bool tty_port_initialized(const struct tty_port *port) -{ - return test_bit(TTY_PORT_INITIALIZED, &port->iflags); -} - -static inline void tty_port_set_initialized(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); -} - -static inline bool tty_port_kopened(const struct tty_port *port) -{ - return test_bit(TTY_PORT_KOPENED, &port->iflags); -} - -static inline void tty_port_set_kopened(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_KOPENED, &port->iflags, val); -} - -extern struct tty_struct *tty_port_tty_get(struct tty_port *port); -extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); -extern int tty_port_carrier_raised(struct tty_port *port); -extern void tty_port_raise_dtr_rts(struct tty_port *port); -extern void tty_port_lower_dtr_rts(struct tty_port *port); -extern void tty_port_hangup(struct tty_port *port); -extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); -extern void tty_port_tty_wakeup(struct tty_port *port); -extern int tty_port_block_til_ready(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern int tty_port_close_start(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); -extern void tty_port_close(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern int tty_port_install(struct tty_port *port, struct tty_driver *driver, - struct tty_struct *tty); -extern int tty_port_open(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -static inline int tty_port_users(struct tty_port *port) -{ - return port->count + port->blocked_open; -} - -extern int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); -extern void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); -extern int tty_set_ldisc(struct tty_struct *tty, int disc); -extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, - const char *f, int count); - /* n_tty.c */ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); #ifdef CONFIG_TTY @@ -691,12 +428,4 @@ extern void tty_lock_slave(struct tty_struct *tty); extern void tty_unlock_slave(struct tty_struct *tty); extern void tty_set_lock_subclass(struct tty_struct *tty); -#ifdef CONFIG_PROC_FS -extern void proc_tty_register_driver(struct tty_driver *); -extern void proc_tty_unregister_driver(struct tty_driver *); -#else -static inline 
void proc_tty_register_driver(struct tty_driver *d) {} -static inline void proc_tty_unregister_driver(struct tty_driver *d) {} -#endif - #endif diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h new file mode 100644 index 000000000000..3b9d77604291 --- /dev/null +++ b/include/linux/tty_buffer.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TTY_BUFFER_H +#define _LINUX_TTY_BUFFER_H + +#include <linux/atomic.h> +#include <linux/llist.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + int used; + int size; + int commit; + int read; + int flags; + /* Data points here */ + unsigned long data[]; +}; + +/* Values for .flags field of tty_buffer */ +#define TTYB_NORMAL 1 /* buffer has no flags buffer */ + +static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) +{ + return ((unsigned char *)b->data) + ofs; +} + +static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) +{ + return (char *)char_buf_ptr(b, ofs) + b->size; +} + +struct tty_bufhead { + struct tty_buffer *head; /* Queue head */ + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; /* Free queue head */ + atomic_t mem_used; /* In-use buffers excluding free list */ + int mem_limit; + struct tty_buffer *tail; /* Active buffer */ +}; + +/* + * When a break, frame error, or parity error happens, these codes are + * stuffed into the flags buffer. + */ +#define TTY_NORMAL 0 +#define TTY_BREAK 1 +#define TTY_FRAME 2 +#define TTY_PARITY 3 +#define TTY_OVERRUN 4 + +#endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 448f8ee6db6e..c20431d8def8 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -233,6 +233,7 @@ #include <linux/export.h> #include <linux/fs.h> +#include <linux/kref.h> #include <linux/list.h> #include <linux/cdev.h> #include <linux/termios.h> @@ -328,9 +329,6 @@ extern struct list_head tty_drivers; extern struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags); -extern void put_tty_driver(struct tty_driver *driver); -extern void tty_set_operations(struct tty_driver *driver, - const struct tty_operations *op); extern struct tty_driver *tty_find_polling_driver(char *name, int *line); extern void tty_driver_kref_put(struct tty_driver *driver); @@ -339,24 +337,18 @@ extern void tty_driver_kref_put(struct tty_driver *driver); #define tty_alloc_driver(lines, flags) \ __tty_alloc_driver(lines, THIS_MODULE, flags) -/* - * DEPRECATED Do not use this in new code, use tty_alloc_driver instead. - * (And change the return value checks.) 
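Editor's note: in the new tty_buffer.h above, the layout trick is easy to miss: each tty_buffer carries size data bytes and, unless TTYB_NORMAL is set, a second parallel array of size flag bytes, which is why flag_buf_ptr() is simply char_buf_ptr() plus b->size. A sketch of draining one buffer, modelled on (but not copied from) the flush path in tty_buffer.c; example_receive() is a hypothetical consumer:

#include <linux/tty_buffer.h>

void example_receive(const unsigned char *cp, const char *fp, int count);

static void example_drain(struct tty_buffer *b)
{
	unsigned char *chars = char_buf_ptr(b, b->read);
	char *flags = NULL;	/* NULL means "all TTY_NORMAL" */
	int count = b->commit - b->read;

	if (~b->flags & TTYB_NORMAL)
		flags = flag_buf_ptr(b, b->read);

	example_receive(chars, flags, count);
	b->read += count;
}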
- */ -static inline struct tty_driver *alloc_tty_driver(unsigned int lines) -{ - struct tty_driver *ret = tty_alloc_driver(lines, 0); - if (IS_ERR(ret)) - return NULL; - return ret; -} - static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; } +static inline void tty_set_operations(struct tty_driver *driver, + const struct tty_operations *op) +{ + driver->ops = op; +} + /* tty driver magic number */ #define TTY_DRIVER_MAGIC 0x5402 @@ -434,4 +426,21 @@ static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) /* serial subtype definitions */ #define SERIAL_TYPE_NORMAL 1 +int tty_register_driver(struct tty_driver *driver); +void tty_unregister_driver(struct tty_driver *driver); +struct device *tty_register_device(struct tty_driver *driver, unsigned index, + struct device *dev); +struct device *tty_register_device_attr(struct tty_driver *driver, + unsigned index, struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +void tty_unregister_device(struct tty_driver *driver, unsigned index); + +#ifdef CONFIG_PROC_FS +void proc_tty_register_driver(struct tty_driver *); +void proc_tty_unregister_driver(struct tty_driver *); +#else +static inline void proc_tty_register_driver(struct tty_driver *d) {} +static inline void proc_tty_unregister_driver(struct tty_driver *d) {} +#endif + #endif /* #ifdef _LINUX_TTY_DRIVER_H */ diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 67d78dc553e1..32284992b31a 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -2,7 +2,10 @@ #ifndef _LINUX_TTY_FLIP_H #define _LINUX_TTY_FLIP_H -#include <linux/tty.h> +#include <linux/tty_buffer.h> +#include <linux/tty_port.h> + +struct tty_ldisc; extern int tty_buffer_set_limit(struct tty_port *port, int limit); extern unsigned int tty_buffer_space_avail(struct tty_port *port); @@ -39,6 +42,9 @@ static inline int tty_insert_flip_string(struct tty_port *port, return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size); } +int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, + const char *f, int count); + extern void tty_buffer_lock_exclusive(struct tty_port *port); extern void tty_buffer_unlock_exclusive(struct tty_port *port); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index fbe9de278629..b1d812e902aa 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -2,6 +2,8 @@ #ifndef _LINUX_TTY_LDISC_H #define _LINUX_TTY_LDISC_H +struct tty_struct; + /* * This structure defines the interface between the tty line discipline * implementation and the tty routines. 
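Editor's note: with alloc_tty_driver()/put_tty_driver() removed above, allocation goes through tty_alloc_driver(), which returns an ERR_PTR() rather than NULL, and the reference is dropped with tty_driver_kref_put(). A condensed registration sketch (names and flags are placeholders; a real driver also fills type, major, init_termios and friends):

#include <linux/err.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static const struct tty_operations example_ops;	/* placeholder ops table */

static struct tty_driver *example_register(void)
{
	struct tty_driver *drv;
	int ret;

	drv = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW);
	if (IS_ERR(drv))			/* note: ERR_PTR, not NULL */
		return drv;

	drv->driver_name = "example";
	drv->name = "ttyEX";
	tty_set_operations(drv, &example_ops);

	ret = tty_register_driver(drv);
	if (ret) {
		tty_driver_kref_put(drv);	/* replaces put_tty_driver() */
		return ERR_PTR(ret);
	}
	return drv;
}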
The following routines can be @@ -126,6 +128,9 @@ #include <linux/fs.h> #include <linux/wait.h> #include <linux/atomic.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/seq_file.h> /* * the semaphore definition @@ -220,4 +225,16 @@ struct tty_ldisc { #define MODULE_ALIAS_LDISC(ldisc) \ MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) +extern const struct seq_operations tty_ldiscs_seq_ops; + +struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); +void tty_ldisc_deref(struct tty_ldisc *); +struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); + +void tty_ldisc_flush(struct tty_struct *tty); + +int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); +void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); +int tty_set_ldisc(struct tty_struct *tty, int disc); + #endif /* _LINUX_TTY_LDISC_H */ diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h new file mode 100644 index 000000000000..6e86e9e118b6 --- /dev/null +++ b/include/linux/tty_port.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TTY_PORT_H +#define _LINUX_TTY_PORT_H + +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/tty_buffer.h> +#include <linux/wait.h> + +/* + * Port level information. Each device keeps its own port level information + * so provide a common structure for those ports wanting to use common support + * routines. + * + * The tty port has a different lifetime to the tty so must be kept apart. + * In addition be careful as tty -> port mappings are valid for the life + * of the tty object but in many cases port -> tty mappings are valid only + * until a hangup so don't use the wrong path. + */ + +struct attribute_group; +struct tty_driver; +struct tty_port; +struct tty_struct; + +struct tty_port_operations { + /* Return 1 if the carrier is raised */ + int (*carrier_raised)(struct tty_port *port); + /* Control the DTR line */ + void (*dtr_rts)(struct tty_port *port, int raise); + /* Called when the last close completes or a hangup finishes + IFF the port was initialized. Do not use to free resources. 
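Editor's note: the reference-counting helpers consolidated into tty_ldisc.h above follow one rule: every successful tty_ldisc_ref()/tty_ldisc_ref_wait() must be paired with tty_ldisc_deref(), because the line discipline can be swapped out underneath the tty. A sketch:

#include <linux/tty.h>
#include <linux/tty_ldisc.h>

static void example_flush_ldisc(struct tty_struct *tty)
{
	struct tty_ldisc *ld;

	ld = tty_ldisc_ref(tty);	/* NULL while the ldisc is changing */
	if (!ld)
		return;

	if (ld->ops->flush_buffer)
		ld->ops->flush_buffer(tty);

	tty_ldisc_deref(ld);		/* always pair with the ref */
}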
Called + under the port mutex to serialize against activate/shutdowns */ + void (*shutdown)(struct tty_port *port); + /* Called under the port mutex from tty_port_open, serialized using + the port mutex */ + /* FIXME: long term getting the tty argument *out* of this would be + good for consoles */ + int (*activate)(struct tty_port *port, struct tty_struct *tty); + /* Called on the final put of a port */ + void (*destruct)(struct tty_port *port); +}; + +struct tty_port_client_operations { + int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); + void (*write_wakeup)(struct tty_port *port); +}; + +extern const struct tty_port_client_operations tty_port_default_client_ops; + +struct tty_port { + struct tty_bufhead buf; /* Locked internally */ + struct tty_struct *tty; /* Back pointer */ + struct tty_struct *itty; /* internal back ptr */ + const struct tty_port_operations *ops; /* Port operations */ + const struct tty_port_client_operations *client_ops; /* Port client operations */ + spinlock_t lock; /* Lock protecting tty field */ + int blocked_open; /* Waiting to open */ + int count; /* Usage count */ + wait_queue_head_t open_wait; /* Open waiters */ + wait_queue_head_t delta_msr_wait; /* Modem status change */ + unsigned long flags; /* User TTY flags ASYNC_ */ + unsigned long iflags; /* Internal flags TTY_PORT_ */ + unsigned char console:1; /* port is a console */ + struct mutex mutex; /* Locking */ + struct mutex buf_mutex; /* Buffer alloc lock */ + unsigned char *xmit_buf; /* Optional buffer */ + unsigned int close_delay; /* Close port delay */ + unsigned int closing_wait; /* Delay for output */ + int drain_delay; /* Set to zero if no pure time + based drain is needed else + set to size of fifo */ + struct kref kref; /* Ref counter */ + void *client_data; +}; + +/* tty_port::iflags bits -- use atomic bit ops */ +#define TTY_PORT_INITIALIZED 0 /* device is initialized */ +#define TTY_PORT_SUSPENDED 1 /* device is suspended */ +#define TTY_PORT_ACTIVE 2 /* device is open */ + +/* + * uart drivers: use the uart_port::status field and the UPSTAT_* defines + * for s/w-based flow control steering and carrier detection status + */ +#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ +#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ +#define TTY_PORT_KOPENED 5 /* device exclusively opened by + kernel */ + +void tty_port_init(struct tty_port *port); +void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, + unsigned index); +struct device *tty_port_register_device(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +struct device *tty_port_register_device_attr(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +struct device *tty_port_register_device_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +struct device *tty_port_register_device_attr_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +void tty_port_unregister_device(struct tty_port *port, + struct tty_driver *driver, unsigned index); +int tty_port_alloc_xmit_buf(struct tty_port *port); +void tty_port_free_xmit_buf(struct tty_port *port); +void tty_port_destroy(struct tty_port *port); +void tty_port_put(struct tty_port *port); + +static inline struct tty_port 
*tty_port_get(struct tty_port *port) +{ + if (port && kref_get_unless_zero(&port->kref)) + return port; + return NULL; +} + +/* If the cts flow control is enabled, return true. */ +static inline bool tty_port_cts_enabled(const struct tty_port *port) +{ + return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); +} + +static inline bool tty_port_active(const struct tty_port *port) +{ + return test_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline void tty_port_set_active(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); +} + +static inline bool tty_port_check_carrier(const struct tty_port *port) +{ + return test_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); +} + +static inline bool tty_port_suspended(const struct tty_port *port) +{ + return test_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline void tty_port_set_suspended(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); +} + +static inline bool tty_port_initialized(const struct tty_port *port) +{ + return test_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +static inline void tty_port_set_initialized(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); +} + +static inline bool tty_port_kopened(const struct tty_port *port) +{ + return test_bit(TTY_PORT_KOPENED, &port->iflags); +} + +static inline void tty_port_set_kopened(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_KOPENED, &port->iflags, val); +} + +struct tty_struct *tty_port_tty_get(struct tty_port *port); +void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); +int tty_port_carrier_raised(struct tty_port *port); +void tty_port_raise_dtr_rts(struct tty_port *port); +void tty_port_lower_dtr_rts(struct tty_port *port); +void tty_port_hangup(struct tty_port *port); +void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); +void tty_port_tty_wakeup(struct tty_port *port); +int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); +void tty_port_close(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +int tty_port_install(struct tty_port *port, struct tty_driver *driver, + struct tty_struct *tty); +int tty_port_open(struct tty_port *port, struct tty_struct *tty, + struct file *filp); + +static inline int tty_port_users(struct tty_port *port) +{ + return port->count + port->blocked_open; +} + +#endif diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h index 20d310331eb5..46b15e2aaefb 100644 --- a/include/linux/typecheck.h +++ b/include/linux/typecheck.h @@ -22,4 +22,13 @@ (void)__tmp; \ }) +/* + * Check at compile time that something is a pointer type. 
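Editor's note: two conventions in the tty_port.h block above are worth spelling out: tty_port_get() can return NULL (it relies on kref_get_unless_zero()), and the TTY_PORT_* state bits are only ever touched through the test_bit()/assign_bit() accessors, never read directly from iflags. A sketch:

#include <linux/tty_port.h>

static void example_unsuspend(struct tty_port *port)
{
	port = tty_port_get(port);	/* NULL once the last ref is gone */
	if (!port)
		return;

	if (tty_port_suspended(port))
		tty_port_set_suspended(port, false);

	tty_port_put(port);		/* balance tty_port_get() */
}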
+ */ +#define typecheck_pointer(x) \ +({ typeof(x) __dummy; \ + (void)sizeof(*__dummy); \ + 1; \ +}) + #endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/units.h b/include/linux/units.h index dcc30a53fa93..4a25e0cc8fb3 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -4,6 +4,22 @@ #include <linux/math.h> +/* Metric prefixes in accordance with Système international (d'unités) */ +#define PETA 1000000000000000ULL +#define TERA 1000000000000ULL +#define GIGA 1000000000UL +#define MEGA 1000000UL +#define KILO 1000UL +#define HECTO 100UL +#define DECA 10UL +#define DECI 10UL +#define CENTI 100UL +#define MILLI 1000UL +#define MICRO 1000000UL +#define NANO 1000000000UL +#define PICO 1000000000000ULL +#define FEMTO 1000000000000000ULL + #define MILLIWATT_PER_WATT 1000L #define MICROWATT_PER_MILLIWATT 1000L #define MICROWATT_PER_WATT 1000000L diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index ead8c9a47c6a..8fc2abd7aecb 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -156,6 +156,20 @@ struct uac2_feature_unit_descriptor { __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); +#define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4) + +/* As above, but more useful for defining your own descriptors: */ +#define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct uac2_feature_unit_descriptor_##ch { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bUnitID; \ + __u8 bSourceID; \ + __le32 bmaControls[ch + 1]; \ + __u8 iFeature; \ +} __packed + /* 4.7.2.10 Effect Unit Descriptor */ struct uac2_effect_unit_descriptor { diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 78e006355557..c892c5bc6638 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -45,6 +45,7 @@ struct ehci_caps { #define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */ #define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ +#define HCS_N_PORTS_MAX 15 /* N_PORTS valid 0x1-0xF */ u32 hcc_params; /* HCCPARAMS - offset 0x8 */ /* EHCI 1.1 addendum */ @@ -126,8 +127,9 @@ struct ehci_regs { u32 configured_flag; #define FLAG_CF (1<<0) /* true: we'll support "high speed" */ - /* PORTSC: offset 0x44 */ - u32 port_status[0]; /* up to N_PORTS */ + union { + /* PORTSC: offset 0x44 */ + u32 port_status[HCS_N_PORTS_MAX]; /* up to N_PORTS */ /* EHCI 1.1 addendum */ #define PORTSC_SUSPEND_STS_ACK 0 #define PORTSC_SUSPEND_STS_NYET 1 @@ -164,28 +166,35 @@ struct ehci_regs { #define PORT_CSC (1<<1) /* connect status change */ #define PORT_CONNECT (1<<0) /* device connected */ #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) - - u32 reserved3[9]; - - /* USBMODE: offset 0x68 */ - u32 usbmode; /* USB Device mode */ + struct { + u32 reserved3[9]; + /* USBMODE: offset 0x68 */ + u32 usbmode; /* USB Device mode */ + }; #define USBMODE_SDIS (1<<3) /* Stream disable */ #define USBMODE_BE (1<<2) /* BE/LE endianness select */ #define USBMODE_CM_HC (3<<0) /* host controller mode */ #define USBMODE_CM_IDLE (0<<0) /* idle state */ - - u32 reserved4[6]; + }; /* Moorestown has some non-standard registers, partially due to the fact that * its EHCI controller has both TT and LPM support. 
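Editor's note: typecheck_pointer() above is a pure compile-time assertion: sizeof(*__dummy) is never evaluated, so it costs nothing at run time but breaks the build when the argument is not a pointer. A usage sketch in the style of the existing typecheck() users (the macro below is hypothetical):

#include <linux/typecheck.h>

/* Advance a pointer, rejecting non-pointer arguments at build time. */
#define example_advance(p, n) ({	\
	typecheck_pointer(p);		\
	(p) + (n);			\
})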
HOSTPCx are extensions to * PORTSCx */ - /* HOSTPC: offset 0x84 */ - u32 hostpc[0]; /* HOSTPC extension */ + union { + struct { + u32 reserved4; + /* HOSTPC: offset 0x84 */ + u32 hostpc[HCS_N_PORTS_MAX]; #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ + }; + + /* Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x80 */ + u32 brcm_insnreg[4]; + }; - u32 reserved5[17]; + u32 reserved5[2]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 75c7538e350a..10fe57cf40be 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -329,6 +329,7 @@ struct usb_gadget_ops { struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); + int (*check_config)(struct usb_gadget *gadget); }; /** @@ -491,7 +492,7 @@ extern char *usb_get_gadget_udc_name(void); */ static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) { - int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; + int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc); return round_up(len, max_packet_size); } @@ -608,6 +609,7 @@ int usb_gadget_connect(struct usb_gadget *gadget); int usb_gadget_disconnect(struct usb_gadget *gadget); int usb_gadget_deactivate(struct usb_gadget *gadget); int usb_gadget_activate(struct usb_gadget *gadget); +int usb_gadget_check_config(struct usb_gadget *gadget); #else static inline int usb_gadget_frame_number(struct usb_gadget *gadget) { return 0; } @@ -631,6 +633,8 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) { return 0; } +static inline int usb_gadget_check_config(struct usb_gadget *gadget) +{ return 0; } #endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 3aee78dda16d..784659d4dc99 100644 --- a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -196,6 +196,7 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; + bool hnp_work_inited; bool state_changed; }; diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 7ceeecbb9e02..6475f880be37 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -128,5 +128,6 @@ enum usb_dr_mode { * and returns the corresponding enum usb_dr_mode */ extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); +extern enum usb_dr_mode usb_get_role_switch_default_mode(struct device *dev); #endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 3357ac98878d..8cfe49d201dd 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -277,6 +277,17 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, size_t size, const char *name); +/** + * vdpa_alloc_device - allocate and initialize a vDPA device + * + * @dev_struct: the type of the parent structure + * @member: the name of struct vdpa_device within the @dev_struct + * @parent: the parent device + * @config: the bus operations that are supported by this device + * @name: name of the vdpa device + * + * Return allocated data structure or ERR_PTR upon error + */ #define vdpa_alloc_device(dev_struct, member, parent, config, name) \ container_of(__vdpa_alloc_device( \ parent, config, \
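Editor's note: the kerneldoc added above documents the container_of() pattern behind vdpa_alloc_device(): __vdpa_alloc_device() allocates the whole parent structure in one go and the macro hands back the containing type. A hedged driver-side sketch (struct my_vdpa and its fields are hypothetical; error handling elided):

#include <linux/vdpa.h>

struct my_vdpa {
	struct vdpa_device vdpa;	/* must match the 'member' argument */
	void __iomem *regs;		/* hypothetical private state */
};

static struct my_vdpa *example_alloc(struct device *parent,
				     const struct vdpa_config_ops *ops)
{
	/* Allocates sizeof(struct my_vdpa) and returns the container. */
	return vdpa_alloc_device(struct my_vdpa, vdpa, parent, ops, "my_vdpa0");
}

diff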
--git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index dc6ddce92066..b4b9137f9792 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -33,6 +33,8 @@ #include <video/vga.h> +struct pci_dev; + /* Legacy VGA regions */ #define VGA_RSRC_NONE 0x00 #define VGA_RSRC_LEGACY_IO 0x01 @@ -42,42 +44,45 @@ #define VGA_RSRC_NORMAL_IO 0x04 #define VGA_RSRC_NORMAL_MEM 0x08 -/* Passing that instead of a pci_dev to use the system "default" - * device, that is the one used by vgacon. Archs will probably - * have to provide their own vga_default_device(); - */ -#define VGA_DEFAULT_DEVICE (NULL) - -struct pci_dev; - -/* For use by clients */ - -/** - * vga_set_legacy_decoding - * - * @pdev: pci device of the VGA card - * @decodes: bit mask of what legacy regions the card decodes - * - * Indicates to the arbiter if the card decodes legacy VGA IOs, - * legacy VGA Memory, both, or none. All cards default to both, - * the card driver (fbdev for example) should tell the arbiter - * if it has disabled legacy decoding, so the card can be left - * out of the arbitration process (and can be safe to take - * interrupts at any time. - */ -#if defined(CONFIG_VGA_ARB) -extern void vga_set_legacy_decoding(struct pci_dev *pdev, - unsigned int decodes); -#else +#ifdef CONFIG_VGA_ARB +void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes); +int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); +void vga_put(struct pci_dev *pdev, unsigned int rsrc); +struct pci_dev *vga_default_device(void); +void vga_set_default_device(struct pci_dev *pdev); +int vga_remove_vgacon(struct pci_dev *pdev); +int vga_client_register(struct pci_dev *pdev, + unsigned int (*set_decode)(struct pci_dev *pdev, bool state)); +#else /* CONFIG_VGA_ARB */ static inline void vga_set_legacy_decoding(struct pci_dev *pdev, - unsigned int decodes) { }; -#endif - -#if defined(CONFIG_VGA_ARB) -extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); -#else -static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } -#endif + unsigned int decodes) +{ +}; +static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, + int interruptible) +{ + return 0; +} +static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc) +{ +} +static inline struct pci_dev *vga_default_device(void) +{ + return NULL; +} +static inline void vga_set_default_device(struct pci_dev *pdev) +{ +} +static inline int vga_remove_vgacon(struct pci_dev *pdev) +{ + return 0; +} +static inline int vga_client_register(struct pci_dev *pdev, + unsigned int (*set_decode)(struct pci_dev *pdev, bool state)) +{ + return 0; +} +#endif /* CONFIG_VGA_ARB */ /** * vga_get_interruptible @@ -109,48 +114,9 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev, return vga_get(pdev, rsrc, 0); } -#if defined(CONFIG_VGA_ARB) -extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); -#else -static inline void vga_put(struct pci_dev *pdev, unsigned int rsrc) +static inline void vga_client_unregister(struct pci_dev *pdev) { + vga_client_register(pdev, NULL); } -#endif - - -#ifdef CONFIG_VGA_ARB -extern struct pci_dev *vga_default_device(void); -extern void vga_set_default_device(struct pci_dev *pdev); -extern int vga_remove_vgacon(struct pci_dev *pdev); -#else -static inline struct pci_dev *vga_default_device(void) { return NULL; } -static inline void vga_set_default_device(struct pci_dev *pdev) { } -static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; } 
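Editor's note: the vgaarb.h rework above (continuing below) collapses the old three-callback vga_client_register() into a single set_decode hook keyed on the pci_dev, with unregistration now just a NULL callback via the new vga_client_unregister() wrapper. A sketch of the GPU-driver side (example_set_decode is hypothetical):

#include <linux/pci.h>
#include <linux/vgaarb.h>

static unsigned int example_set_decode(struct pci_dev *pdev, bool state)
{
	/* Report which legacy VGA ranges this card still decodes. */
	return state ? VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM
		     : VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int example_bind(struct pci_dev *pdev)
{
	return vga_client_register(pdev, example_set_decode);
}

static void example_unbind(struct pci_dev *pdev)
{
	vga_client_unregister(pdev);	/* i.e. vga_client_register(pdev, NULL) */
}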
-#endif - -/* - * Architectures should define this if they have several - * independent PCI domains that can afford concurrent VGA - * decoding - */ -#ifndef __ARCH_HAS_VGA_CONFLICT -static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) -{ - return 1; -} -#endif - -#if defined(CONFIG_VGA_ARB) -int vga_client_register(struct pci_dev *pdev, void *cookie, - void (*irq_set_state)(void *cookie, bool state), - unsigned int (*set_vga_decode)(void *cookie, bool state)); -#else -static inline int vga_client_register(struct pci_dev *pdev, void *cookie, - void (*irq_set_state)(void *cookie, bool state), - unsigned int (*set_vga_decode)(void *cookie, bool state)) -{ - return 0; -} -#endif #endif /* LINUX_VGA_H */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index b1894e0323fa..41edbc01ffa4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -110,6 +110,7 @@ struct virtio_device { bool config_enabled; bool config_change_pending; spinlock_t config_lock; + spinlock_t vqs_list_lock; /* Protects VQs list access */ struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 84db7b8f912f..212892cf9822 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -14,6 +14,7 @@ #include <linux/virtio_byteorder.h> #include <linux/uio.h> #include <linux/slab.h> +#include <linux/spinlock.h> #if IS_REACHABLE(CONFIG_VHOST_IOTLB) #include <linux/dma-direction.h> #include <linux/vhost_iotlb.h> diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 0da94a6dee15..b5ab452fca5b 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -148,26 +148,26 @@ void hide_boot_cursor(bool hide); /* keyboard provided interfaces */ int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm); -int vt_do_kdskbmode(int console, unsigned int arg); -int vt_do_kdskbmeta(int console, unsigned int arg); +int vt_do_kdskbmode(unsigned int console, unsigned int arg); +int vt_do_kdskbmeta(unsigned int console, unsigned int arg); int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, int perm); int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, - int console); + unsigned int console); int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm); -int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm); -int vt_do_kdgkbmode(int console); -int vt_do_kdgkbmeta(int console); -void vt_reset_unicode(int console); +int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm); +int vt_do_kdgkbmode(unsigned int console); +int vt_do_kdgkbmeta(unsigned int console); +void vt_reset_unicode(unsigned int console); int vt_get_shift_state(void); -void vt_reset_keyboard(int console); -int vt_get_leds(int console, int flag); -int vt_get_kbd_mode_bit(int console, int bit); -void vt_set_kbd_mode_bit(int console, int bit); -void vt_clr_kbd_mode_bit(int console, int bit); -void vt_set_led_state(int console, int leds); -void vt_kbd_con_start(int console); -void vt_kbd_con_stop(int console); +void vt_reset_keyboard(unsigned int console); +int vt_get_leds(unsigned int console, int flag); +int vt_get_kbd_mode_bit(unsigned int console, int bit); +void vt_set_kbd_mode_bit(unsigned int console, int bit); +void vt_clr_kbd_mode_bit(unsigned int console, int bit); +void vt_set_led_state(unsigned int console, int leds); +void vt_kbd_con_start(unsigned int console); +void vt_kbd_con_stop(unsigned int console); void 
vc_scrolldelta_helper(struct vc_data *c, int lines, unsigned int rolled_over, void *_base, unsigned int size); diff --git a/include/linux/wait.h b/include/linux/wait.h index 6598ae35e1b5..93dab0e9580f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -56,7 +56,7 @@ struct task_struct; #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ - .head = { &(name).head, &(name).head } } + .head = LIST_HEAD_INIT(name.head) } #define DECLARE_WAIT_QUEUE_HEAD(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d15a7730ee18..2ebef6b1a3d6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -29,7 +29,7 @@ void delayed_work_timer_fn(struct timer_list *t); enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ - WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ + WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */ WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK @@ -42,7 +42,7 @@ enum { WORK_STRUCT_COLOR_BITS = 4, WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, - WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, + WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK @@ -51,19 +51,14 @@ enum { WORK_STRUCT_STATIC = 0, #endif - /* - * The last color is no color used for works which don't - * participate in workqueue flushing. - */ - WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, - WORK_NO_COLOR = WORK_NR_COLORS, + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), /* not bound to any CPU, prefer the local CPU */ WORK_CPU_UNBOUND = NR_CPUS, /* * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. - * This makes pwqs aligned to 256 bytes and allows 15 workqueue + * This makes pwqs aligned to 256 bytes and allows 16 workqueue * flush colors. */ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + @@ -324,7 +319,7 @@ enum { * to execute and tries to keep idle cores idle to conserve power; * however, for example, a per-cpu work item scheduled from an * interrupt handler on an idle CPU will force the scheduler to - * excute the work item on that CPU breaking the idleness, which in + * execute the work item on that CPU breaking the idleness, which in * turn may lead to more scheduling choices which are sub-optimal * in terms of power consumption. 
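Editor's note: the wait.h change above swaps the open-coded two-pointer initializer for LIST_HEAD_INIT, which expands to the same self-referencing list head, so existing static declarations behave identically. For illustration (example_ready would be set and the queue woken elsewhere):

#include <linux/wait.h>

/* Expands via __WAIT_QUEUE_HEAD_INITIALIZER, now using LIST_HEAD_INIT. */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_ready;

static int example_wait(void)
{
	return wait_event_interruptible(example_wq, example_ready);
}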
* diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 667e86cfbdcf..270677dc4f36 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -336,14 +336,9 @@ static inline void cgroup_writeback_umount(void) /* * mm/page-writeback.c */ -#ifdef CONFIG_BLOCK void laptop_io_completion(struct backing_dev_info *info); void laptop_sync_completion(void); -void laptop_mode_sync(struct work_struct *work); void laptop_mode_timer_fn(struct timer_list *t); -#else -static inline void laptop_sync_completion(void) { } -#endif bool node_dirty_ok(struct pglist_data *pgdat); int wb_domain_init(struct wb_domain *dom, gfp_t gfp); #ifdef CONFIG_CGROUP_WRITEBACK diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index b77f39f319ad..29db736af86d 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -18,6 +18,24 @@ #define __LINUX_WW_MUTEX_H #include <linux/mutex.h> +#include <linux/rtmutex.h> + +#if defined(CONFIG_DEBUG_MUTEXES) || \ + (defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES)) +#define DEBUG_WW_MUTEXES +#endif + +#ifndef CONFIG_PREEMPT_RT +#define WW_MUTEX_BASE mutex +#define ww_mutex_base_init(l,n,k) __mutex_init(l,n,k) +#define ww_mutex_base_trylock(l) mutex_trylock(l) +#define ww_mutex_base_is_locked(b) mutex_is_locked((b)) +#else +#define WW_MUTEX_BASE rt_mutex +#define ww_mutex_base_init(l,n,k) __rt_mutex_init(l,n,k) +#define ww_mutex_base_trylock(l) rt_mutex_trylock(l) +#define ww_mutex_base_is_locked(b) rt_mutex_base_is_locked(&(b)->rtmutex) +#endif struct ww_class { atomic_long_t stamp; @@ -28,16 +46,24 @@ struct ww_class { unsigned int is_wait_die; }; +struct ww_mutex { + struct WW_MUTEX_BASE base; + struct ww_acquire_ctx *ctx; +#ifdef DEBUG_WW_MUTEXES + struct ww_class *ww_class; +#endif +}; + struct ww_acquire_ctx { struct task_struct *task; unsigned long stamp; unsigned int acquired; unsigned short wounded; unsigned short is_wait_die; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES unsigned int done_acquire; struct ww_class *ww_class; - struct ww_mutex *contending_lock; + void *contending_lock; #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -74,9 +100,9 @@ struct ww_acquire_ctx { static inline void ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) { - __mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); + ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key); lock->ctx = NULL; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES lock->ww_class = ww_class; #endif } @@ -113,7 +139,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, ctx->acquired = 0; ctx->wounded = false; ctx->is_wait_die = ww_class->is_wait_die; -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES ctx->ww_class = ww_class; ctx->done_acquire = 0; ctx->contending_lock = NULL; @@ -143,7 +169,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, */ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) { -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES lockdep_assert_held(ctx); DEBUG_LOCKS_WARN_ON(ctx->done_acquire); @@ -163,7 +189,7 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) #ifdef CONFIG_DEBUG_LOCK_ALLOC mutex_release(&ctx->dep_map, _THIS_IP_); #endif -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(ctx->acquired); if (!IS_ENABLED(CONFIG_PROVE_LOCKING)) /* @@ -269,7 +295,7 @@ static inline void ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { int ret; 
-#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif ret = ww_mutex_lock(lock, ctx); @@ -305,7 +331,7 @@ static inline int __must_check ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) { -#ifdef CONFIG_DEBUG_MUTEXES +#ifdef DEBUG_WW_MUTEXES DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); #endif return ww_mutex_lock_interruptible(lock, ctx); @@ -322,7 +348,7 @@ extern void ww_mutex_unlock(struct ww_mutex *lock); */ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) { - return mutex_trylock(&lock->base); + return ww_mutex_base_trylock(&lock->base); } /*** @@ -335,7 +361,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock) */ static inline void ww_mutex_destroy(struct ww_mutex *lock) { +#ifndef CONFIG_PREEMPT_RT mutex_destroy(&lock->base); +#endif } /** @@ -346,7 +374,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock) */ static inline bool ww_mutex_is_locked(struct ww_mutex *lock) { - return mutex_is_locked(&lock->base); + return ww_mutex_base_is_locked(&lock->base); } #endif diff --git a/include/linux/zorro.h b/include/linux/zorro.h index e2e4de188d84..db7416ed6057 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h @@ -29,7 +29,6 @@ struct zorro_dev { struct ExpansionRom rom; zorro_id id; - struct zorro_driver *driver; /* which driver has allocated this device */ struct device dev; /* Generic device interface */ u16 slotaddr; u16 slotsize;
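Editor's note: to close, a sketch of the classic wound/backoff pattern against the ww_mutex.h changes above; the public API is unchanged, it merely sits on an rt_mutex base under CONFIG_PREEMPT_RT. The class and helper below are placeholders:

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);

static int example_lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);

	ret = ww_mutex_lock(a, &ctx);
	if (ret)
		goto out;

	ret = ww_mutex_lock(b, &ctx);
	if (ret == -EDEADLK) {
		/* Wounded: drop what we hold, sleep on the contended lock,
		 * then take the other one in the now-safe order. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		ret = ww_mutex_lock(a, &ctx);
		if (ret) {
			ww_mutex_unlock(b);
			goto out;
		}
	}

	ww_acquire_done(&ctx);
	/* ... critical section covering both resources ... */
	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ret = 0;
out:
	ww_acquire_fini(&ctx);
	return ret;
}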