author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2020-11-18 05:13:23 +0300
---|---|---
committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2020-11-18 05:13:23 +0300
commit | 05909cd9a0c8811731b38697af13075e8954314f (patch) |
tree | aa5306e756666407c95067b20b21b32ff27fd11a /include/linux |
parent | c7f0169e3bd274e576f6aaeee86ad2adf7bb14b5 (diff) |
parent | bbf5c979011a099af5dc76498918ed7df445635b (diff) |
download | linux-05909cd9a0c8811731b38697af13075e8954314f.tar.xz |
Merge tag 'v5.9' into next
Sync up with mainline to bring in the latest DTS files.
Diffstat (limited to 'include/linux')
703 files changed, 22998 insertions, 8130 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d661cd0ee64d..64ae25c59d55 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -905,6 +905,13 @@ static inline int acpi_dma_configure(struct device *dev,
         return 0;
 }
 
+static inline int acpi_dma_configure_id(struct device *dev,
+                                        enum dev_dma_attr attr,
+                                        const u32 *input_id)
+{
+        return 0;
+}
+
 #define ACPI_PTR(_ptr)  (NULL)
 
 static inline void acpi_device_set_enumerated(struct acpi_device *adev)
@@ -951,7 +958,7 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
 acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
                                            u32 val_a, u32 val_b);
 
-#ifdef CONFIG_X86
+#ifndef CONFIG_IA64
 void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
 #else
 static inline void arch_reserve_mem_area(acpi_physical_address addr,
@@ -1143,16 +1150,27 @@ struct acpi_probe_entry {
         kernel_ulong_t driver_data;
 };
 
-#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
+#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \
+                                 valid, data, fn) \
+        static const struct acpi_probe_entry __acpi_probe_##name \
+                __used __section(__##table##_acpi_probe_table) = { \
+                        .id = table_id, \
+                        .type = subtable, \
+                        .subtable_valid = valid, \
+                        .probe_table = fn, \
+                        .driver_data = data, \
+                }
+
+#define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \
+                                          subtable, valid, data, fn) \
         static const struct acpi_probe_entry __acpi_probe_##name \
-                __used __section(__##table##_acpi_probe_table) \
-                = { \
+                __used __section(__##table##_acpi_probe_table) = { \
                         .id = table_id, \
                         .type = subtable, \
                         .subtable_valid = valid, \
-                        .probe_table = (acpi_tbl_table_handler)fn, \
-                        .driver_data = data, \
-                }
+                        .probe_subtbl = fn, \
+                        .driver_data = data, \
+                }
 
 #define ACPI_PROBE_TABLE(name)          __##name##_acpi_probe_table
 #define ACPI_PROBE_TABLE_END(name)      __##name##_acpi_probe_table_end
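The ACPI_DECLARE_PROBE_ENTRY rework above keeps the kernel's usual linker-section "probe table" trick: each macro invocation emits a static struct into a named section, and the table is later walked as an array between linker-provided boundary symbols (ACPI_PROBE_TABLE/ACPI_PROBE_TABLE_END). A minimal user-space sketch of the same technique follows; the names (probe_entry, REGISTER_PROBE, my_probe_table) are hypothetical stand-ins, and it relies on GNU ld synthesizing __start_<section>/__stop_<section> symbols for sections whose names are valid C identifiers.

/* Sketch of the linker-section registration pattern (GCC/Clang + GNU ld). */
#include <stdio.h>

struct probe_entry {
        const char *id;
        int (*probe)(void);
};

#define REGISTER_PROBE(name, table_id, fn)                      \
        static const struct probe_entry __probe_##name          \
        __attribute__((used, section("my_probe_table"))) = {    \
                .id = table_id,                                 \
                .probe = fn,                                    \
        }

/* Boundary symbols emitted by the linker for the named section. */
extern const struct probe_entry __start_my_probe_table[];
extern const struct probe_entry __stop_my_probe_table[];

static int probe_a(void) { puts("probe A"); return 0; }
static int probe_b(void) { puts("probe B"); return 0; }

REGISTER_PROBE(a, "TBLA", probe_a);
REGISTER_PROBE(b, "TBLB", probe_b);

int main(void)
{
        /* Walk every entry the linker gathered into the section. */
        for (const struct probe_entry *p = __start_my_probe_table;
             p < __stop_my_probe_table; p++)
                p->probe();
        return 0;
}

The point of the change in the hunk itself is type safety: instead of casting every handler to acpi_tbl_table_handler, subtable probes now get their own macro filling .probe_subtbl.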
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 8e7e2ec37f1b..20a32120bb88 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -28,27 +28,29 @@ void iort_deregister_domain_token(int trans_id);
 struct fwnode_handle *iort_find_domain_token(int trans_id);
 #ifdef CONFIG_ACPI_IORT
 void acpi_iort_init(void);
-u32 iort_msi_map_rid(struct device *dev, u32 req_id);
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+u32 iort_msi_map_id(struct device *dev, u32 id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+                                          enum irq_domain_bus_token bus_token);
 void acpi_configure_pmsi_domain(struct device *dev);
 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
 /* IOMMU interface */
 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure(struct device *dev);
+const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
+                                                const u32 *id_in);
 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
 #else
 static inline void acpi_iort_init(void) { }
-static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
-{ return req_id; }
-static inline struct irq_domain *iort_get_device_domain(struct device *dev,
-                                                         u32 req_id)
+static inline u32 iort_msi_map_id(struct device *dev, u32 id)
+{ return id; }
+static inline struct irq_domain *iort_get_device_domain(
+        struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
 { return NULL; }
 static inline void acpi_configure_pmsi_domain(struct device *dev) { }
 /* IOMMU interface */
 static inline
 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size) { }
-static inline const struct iommu_ops *iort_iommu_configure(
-                struct device *dev)
+static inline const struct iommu_ops *iort_iommu_configure_id(
+                struct device *dev, const u32 *id_in)
 { return NULL; }
 static inline int iort_iommu_msi_get_resv_regions(struct device *dev,
                                                   struct list_head *head)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 0566cb3314ef..69b1dabe39dc 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -39,8 +39,8 @@ static inline unsigned long topology_get_thermal_pressure(int cpu)
         return per_cpu(thermal_pressure, cpu);
 }
 
-void arch_set_thermal_pressure(struct cpumask *cpus,
-                               unsigned long th_pressure);
+void topology_set_thermal_pressure(const struct cpumask *cpus,
+                                   unsigned long th_pressure);
 
 struct cpu_topology {
         int thread_id;
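The acpi_iort.h hunks above follow the kernel's standard pattern for optional subsystems: real prototypes live under #ifdef CONFIG_ACPI_IORT, and the #else branch supplies static inline stubs with identical signatures, so callers compile unchanged whether the feature is built in or not. A hedged sketch of the idiom with a hypothetical CONFIG_FOO feature and foo_map_id() helper:

/* CONFIG-gated stub idiom: callers never need #ifdefs of their own. */
struct device;          /* opaque to this header */

#ifdef CONFIG_FOO
unsigned int foo_map_id(struct device *dev, unsigned int id);
#else
static inline unsigned int foo_map_id(struct device *dev, unsigned int id)
{
        return id;      /* identity mapping when the feature is compiled out */
}
#endif

Note how the stubs mirror the semantic change in the real API here: iort_msi_map_rid()'s stub returned req_id unchanged, and the renamed iort_msi_map_id() stub likewise returns id.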
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 59494df0f55b..15c706fb0a37 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -5,12 +5,15 @@
 #ifndef __LINUX_ARM_SMCCC_H
 #define __LINUX_ARM_SMCCC_H
 
+#include <linux/init.h>
 #include <uapi/linux/const.h>
 
 /*
  * This file provides common defines for ARM SMC Calling Convention as
  * specified in
- * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ * https://developer.arm.com/docs/den0028/latest
+ *
+ * This code is up-to-date with version DEN 0028 C
  */
 
 #define ARM_SMCCC_STD_CALL      _AC(0,U)
@@ -56,6 +59,7 @@
 
 #define ARM_SMCCC_VERSION_1_0           0x10000
 #define ARM_SMCCC_VERSION_1_1           0x10001
+#define ARM_SMCCC_VERSION_1_2           0x10002
 
 #define ARM_SMCCC_VERSION_FUNC_ID \
         ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
@@ -67,6 +71,11 @@
                            ARM_SMCCC_SMC_32, \
                            0, 1)
 
+#define ARM_SMCCC_ARCH_SOC_ID \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                           ARM_SMCCC_SMC_32, \
+                           0, 2)
+
 #define ARM_SMCCC_ARCH_WORKAROUND_1 \
         ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
                            ARM_SMCCC_SMC_32, \
@@ -77,6 +86,28 @@
                            ARM_SMCCC_SMC_32, \
                            0, 0x7fff)
 
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                           ARM_SMCCC_SMC_64, \
+                           ARM_SMCCC_OWNER_STANDARD_HYP, \
+                           0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+                           ARM_SMCCC_SMC_64, \
+                           ARM_SMCCC_OWNER_STANDARD_HYP, \
+                           0x21)
+
+/*
+ * Return codes defined in ARM DEN 0070A
+ * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
+ */
+#define SMCCC_RET_SUCCESS                       0
+#define SMCCC_RET_NOT_SUPPORTED                 -1
+#define SMCCC_RET_NOT_REQUIRED                  -2
+#define SMCCC_RET_INVALID_PARAMETER             -3
+
 #ifndef __ASSEMBLY__
 
 #include <linux/linkage.h>
@@ -98,6 +129,19 @@ enum arm_smccc_conduit {
 enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
 
 /**
+ * arm_smccc_get_version()
+ *
+ * Returns the version to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 or above is not present, returns SMCCCv1.0, but this
+ * does not imply the presence of firmware or a valid conduit. Caller
+ * handling SMCCCv1.0 must determine the conduit by other means.
+ */
+u32 arm_smccc_get_version(void);
+
+void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit);
+
+/**
  * struct arm_smccc_res - Result from SMC/HVC call
  * @a0-a3 result values from registers 0 to 3
  */
@@ -314,11 +358,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
  */
 #define arm_smccc_1_1_hvc(...)  __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
 
-/* Return codes defined in ARM DEN 0070A */
-#define SMCCC_RET_SUCCESS                       0
-#define SMCCC_RET_NOT_SUPPORTED                 -1
-#define SMCCC_RET_NOT_REQUIRED                  -2
-
 /*
  * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
  * Used when the SMCCC conduit is not defined. The empty asm statement
@@ -364,18 +403,5 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
                 method; \
         })
 
-/* Paravirtualised time calls (defined by ARM DEN0057A) */
-#define ARM_SMCCC_HV_PV_TIME_FEATURES \
-        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
-                           ARM_SMCCC_SMC_64, \
-                           ARM_SMCCC_OWNER_STANDARD_HYP, \
-                           0x20)
-
-#define ARM_SMCCC_HV_PV_TIME_ST \
-        ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
-                           ARM_SMCCC_SMC_64, \
-                           ARM_SMCCC_OWNER_STANDARD_HYP, \
-                           0x21)
-
 #endif /*__ASSEMBLY__*/
 #endif /*__LINUX_ARM_SMCCC_H*/
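The new ARM_SMCCC_ARCH_SOC_ID define above is built with ARM_SMCCC_CALL_VAL, which packs a function ID according to the SMCCC spec layout: bit 31 selects a fast call, bit 30 the 64-bit calling convention, bits 29:24 the owning entity, and bits 15:0 the function number. A small stand-alone sketch of that bit packing; the SMCCC_* constants here are local stand-ins, not the kernel's macros:

#include <stdint.h>
#include <stdio.h>

#define SMCCC_FAST_CALL         1U
#define SMCCC_SMC_32            0U
#define SMCCC_OWNER_ARCH        0U

static uint32_t smccc_call_val(uint32_t type, uint32_t convention,
                               uint32_t owner, uint32_t func_num)
{
        /* bit 31: fast/yielding, bit 30: SMC32/SMC64,
         * bits 29:24: owner, bits 15:0: function number */
        return (type << 31) | (convention << 30) |
               ((owner & 0x3f) << 24) | (func_num & 0xffff);
}

int main(void)
{
        /* ARM_SMCCC_ARCH_SOC_ID: fast, SMC32, owner 0, function 2 */
        printf("SOC_ID fid = 0x%08x\n",
               smccc_call_val(SMCCC_FAST_CALL, SMCCC_SMC_32,
                              SMCCC_OWNER_ARCH, 2));    /* 0x80000002 */
        return 0;
}

The rest of the hunk is a reshuffle: the PV-time call IDs and the SMCCC_RET_* return codes move out of the __ASSEMBLY__-guarded region so assembly code can use them, and SMCCC_RET_INVALID_PARAMETER is added alongside the existing codes.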
\ + __atomic_op_fence(arch_xchg, __VA_ARGS__) +#endif + +#endif /* arch_xchg_relaxed */ + +#ifndef arch_cmpxchg_relaxed +#define arch_cmpxchg_relaxed arch_cmpxchg +#define arch_cmpxchg_acquire arch_cmpxchg +#define arch_cmpxchg_release arch_cmpxchg +#else /* arch_cmpxchg_relaxed */ + +#ifndef arch_cmpxchg_acquire +#define arch_cmpxchg_acquire(...) \ + __atomic_op_acquire(arch_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg_release +#define arch_cmpxchg_release(...) \ + __atomic_op_release(arch_cmpxchg, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg +#define arch_cmpxchg(...) \ + __atomic_op_fence(arch_cmpxchg, __VA_ARGS__) +#endif + +#endif /* arch_cmpxchg_relaxed */ + +#ifndef arch_cmpxchg64_relaxed +#define arch_cmpxchg64_relaxed arch_cmpxchg64 +#define arch_cmpxchg64_acquire arch_cmpxchg64 +#define arch_cmpxchg64_release arch_cmpxchg64 +#else /* arch_cmpxchg64_relaxed */ + +#ifndef arch_cmpxchg64_acquire +#define arch_cmpxchg64_acquire(...) \ + __atomic_op_acquire(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64_release +#define arch_cmpxchg64_release(...) \ + __atomic_op_release(arch_cmpxchg64, __VA_ARGS__) +#endif + +#ifndef arch_cmpxchg64 +#define arch_cmpxchg64(...) \ + __atomic_op_fence(arch_cmpxchg64, __VA_ARGS__) +#endif + +#endif /* arch_cmpxchg64_relaxed */ + +#ifndef arch_atomic_read_acquire +static __always_inline int +arch_atomic_read_acquire(const atomic_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define arch_atomic_read_acquire arch_atomic_read_acquire +#endif + +#ifndef arch_atomic_set_release +static __always_inline void +arch_atomic_set_release(atomic_t *v, int i) +{ + smp_store_release(&(v)->counter, i); +} +#define arch_atomic_set_release arch_atomic_set_release +#endif + +#ifndef arch_atomic_add_return_relaxed +#define arch_atomic_add_return_acquire arch_atomic_add_return +#define arch_atomic_add_return_release arch_atomic_add_return +#define arch_atomic_add_return_relaxed arch_atomic_add_return +#else /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_add_return_acquire +static __always_inline int +arch_atomic_add_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_add_return_acquire arch_atomic_add_return_acquire +#endif + +#ifndef arch_atomic_add_return_release +static __always_inline int +arch_atomic_add_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_add_return_relaxed(i, v); +} +#define arch_atomic_add_return_release arch_atomic_add_return_release +#endif + +#ifndef arch_atomic_add_return +static __always_inline int +arch_atomic_add_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_add_return arch_atomic_add_return +#endif + +#endif /* arch_atomic_add_return_relaxed */ + +#ifndef arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add +#define arch_atomic_fetch_add_release arch_atomic_fetch_add +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add +#else /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_fetch_add_acquire +static __always_inline int +arch_atomic_fetch_add_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add_acquire +#endif + +#ifndef 
arch_atomic_fetch_add_release +static __always_inline int +arch_atomic_fetch_add_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_add_relaxed(i, v); +} +#define arch_atomic_fetch_add_release arch_atomic_fetch_add_release +#endif + +#ifndef arch_atomic_fetch_add +static __always_inline int +arch_atomic_fetch_add(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_add arch_atomic_fetch_add +#endif + +#endif /* arch_atomic_fetch_add_relaxed */ + +#ifndef arch_atomic_sub_return_relaxed +#define arch_atomic_sub_return_acquire arch_atomic_sub_return +#define arch_atomic_sub_return_release arch_atomic_sub_return +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return +#else /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_sub_return_acquire +static __always_inline int +arch_atomic_sub_return_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_sub_return_acquire arch_atomic_sub_return_acquire +#endif + +#ifndef arch_atomic_sub_return_release +static __always_inline int +arch_atomic_sub_return_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_sub_return_relaxed(i, v); +} +#define arch_atomic_sub_return_release arch_atomic_sub_return_release +#endif + +#ifndef arch_atomic_sub_return +static __always_inline int +arch_atomic_sub_return(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_sub_return arch_atomic_sub_return +#endif + +#endif /* arch_atomic_sub_return_relaxed */ + +#ifndef arch_atomic_fetch_sub_relaxed +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub +#else /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_fetch_sub_acquire +static __always_inline int +arch_atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub_acquire +#endif + +#ifndef arch_atomic_fetch_sub_release +static __always_inline int +arch_atomic_fetch_sub_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_sub_relaxed(i, v); +} +#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub_release +#endif + +#ifndef arch_atomic_fetch_sub +static __always_inline int +arch_atomic_fetch_sub(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_sub arch_atomic_fetch_sub +#endif + +#endif /* arch_atomic_fetch_sub_relaxed */ + +#ifndef arch_atomic_inc +static __always_inline void +arch_atomic_inc(atomic_t *v) +{ + arch_atomic_add(1, v); +} +#define arch_atomic_inc arch_atomic_inc +#endif + +#ifndef arch_atomic_inc_return_relaxed +#ifdef arch_atomic_inc_return +#define arch_atomic_inc_return_acquire arch_atomic_inc_return +#define arch_atomic_inc_return_release arch_atomic_inc_return +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return +#endif /* arch_atomic_inc_return */ + +#ifndef arch_atomic_inc_return +static __always_inline int 
+arch_atomic_inc_return(atomic_t *v) +{ + return arch_atomic_add_return(1, v); +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + return arch_atomic_add_return_acquire(1, v); +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + return arch_atomic_add_return_release(1, v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return_relaxed +static __always_inline int +arch_atomic_inc_return_relaxed(atomic_t *v) +{ + return arch_atomic_add_return_relaxed(1, v); +} +#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed +#endif + +#else /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_inc_return_acquire +static __always_inline int +arch_atomic_inc_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_inc_return_acquire arch_atomic_inc_return_acquire +#endif + +#ifndef arch_atomic_inc_return_release +static __always_inline int +arch_atomic_inc_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_inc_return_relaxed(v); +} +#define arch_atomic_inc_return_release arch_atomic_inc_return_release +#endif + +#ifndef arch_atomic_inc_return +static __always_inline int +arch_atomic_inc_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_inc_return arch_atomic_inc_return +#endif + +#endif /* arch_atomic_inc_return_relaxed */ + +#ifndef arch_atomic_fetch_inc_relaxed +#ifdef arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc +#endif /* arch_atomic_fetch_inc */ + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + return arch_atomic_fetch_add(1, v); +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + return arch_atomic_fetch_add_acquire(1, v); +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + return arch_atomic_fetch_add_release(1, v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc_relaxed +static __always_inline int +arch_atomic_fetch_inc_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_add_relaxed(1, v); +} +#define arch_atomic_fetch_inc_relaxed arch_atomic_fetch_inc_relaxed +#endif + +#else /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_fetch_inc_acquire +static __always_inline int +arch_atomic_fetch_inc_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_inc_acquire arch_atomic_fetch_inc_acquire +#endif + +#ifndef arch_atomic_fetch_inc_release +static __always_inline int +arch_atomic_fetch_inc_release(atomic_t *v) +{ + __atomic_release_fence(); + return 
arch_atomic_fetch_inc_relaxed(v); +} +#define arch_atomic_fetch_inc_release arch_atomic_fetch_inc_release +#endif + +#ifndef arch_atomic_fetch_inc +static __always_inline int +arch_atomic_fetch_inc(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_inc arch_atomic_fetch_inc +#endif + +#endif /* arch_atomic_fetch_inc_relaxed */ + +#ifndef arch_atomic_dec +static __always_inline void +arch_atomic_dec(atomic_t *v) +{ + arch_atomic_sub(1, v); +} +#define arch_atomic_dec arch_atomic_dec +#endif + +#ifndef arch_atomic_dec_return_relaxed +#ifdef arch_atomic_dec_return +#define arch_atomic_dec_return_acquire arch_atomic_dec_return +#define arch_atomic_dec_return_release arch_atomic_dec_return +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return +#endif /* arch_atomic_dec_return */ + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + return arch_atomic_sub_return(1, v); +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + return arch_atomic_sub_return_acquire(1, v); +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + return arch_atomic_sub_return_release(1, v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return_relaxed +static __always_inline int +arch_atomic_dec_return_relaxed(atomic_t *v) +{ + return arch_atomic_sub_return_relaxed(1, v); +} +#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed +#endif + +#else /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_dec_return_acquire +static __always_inline int +arch_atomic_dec_return_acquire(atomic_t *v) +{ + int ret = arch_atomic_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_dec_return_acquire arch_atomic_dec_return_acquire +#endif + +#ifndef arch_atomic_dec_return_release +static __always_inline int +arch_atomic_dec_return_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_dec_return_relaxed(v); +} +#define arch_atomic_dec_return_release arch_atomic_dec_return_release +#endif + +#ifndef arch_atomic_dec_return +static __always_inline int +arch_atomic_dec_return(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_dec_return arch_atomic_dec_return +#endif + +#endif /* arch_atomic_dec_return_relaxed */ + +#ifndef arch_atomic_fetch_dec_relaxed +#ifdef arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec +#endif /* arch_atomic_fetch_dec */ + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + return arch_atomic_fetch_sub(1, v); +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + return arch_atomic_fetch_sub_acquire(1, v); +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + 
+#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + return arch_atomic_fetch_sub_release(1, v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec_relaxed +static __always_inline int +arch_atomic_fetch_dec_relaxed(atomic_t *v) +{ + return arch_atomic_fetch_sub_relaxed(1, v); +} +#define arch_atomic_fetch_dec_relaxed arch_atomic_fetch_dec_relaxed +#endif + +#else /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_dec_acquire +static __always_inline int +arch_atomic_fetch_dec_acquire(atomic_t *v) +{ + int ret = arch_atomic_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_dec_acquire arch_atomic_fetch_dec_acquire +#endif + +#ifndef arch_atomic_fetch_dec_release +static __always_inline int +arch_atomic_fetch_dec_release(atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_dec_relaxed(v); +} +#define arch_atomic_fetch_dec_release arch_atomic_fetch_dec_release +#endif + +#ifndef arch_atomic_fetch_dec +static __always_inline int +arch_atomic_fetch_dec(atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_dec_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_dec arch_atomic_fetch_dec +#endif + +#endif /* arch_atomic_fetch_dec_relaxed */ + +#ifndef arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and +#define arch_atomic_fetch_and_release arch_atomic_fetch_and +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and +#else /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_fetch_and_acquire +static __always_inline int +arch_atomic_fetch_and_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and_acquire +#endif + +#ifndef arch_atomic_fetch_and_release +static __always_inline int +arch_atomic_fetch_and_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_and_relaxed(i, v); +} +#define arch_atomic_fetch_and_release arch_atomic_fetch_and_release +#endif + +#ifndef arch_atomic_fetch_and +static __always_inline int +arch_atomic_fetch_and(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_and arch_atomic_fetch_and +#endif + +#endif /* arch_atomic_fetch_and_relaxed */ + +#ifndef arch_atomic_andnot +static __always_inline void +arch_atomic_andnot(int i, atomic_t *v) +{ + arch_atomic_and(~i, v); +} +#define arch_atomic_andnot arch_atomic_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +#ifdef arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot +#endif /* arch_atomic_fetch_andnot */ + +#ifndef arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + return arch_atomic_fetch_and(~i, v); +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_acquire(~i, v); +} +#define arch_atomic_fetch_andnot_acquire 
arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_release(~i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef arch_atomic_fetch_andnot_relaxed +static __always_inline int +arch_atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + return arch_atomic_fetch_and_relaxed(~i, v); +} +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#endif + +#else /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_andnot_acquire +static __always_inline int +arch_atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire +#endif + +#ifndef arch_atomic_fetch_andnot_release +static __always_inline int +arch_atomic_fetch_andnot_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_andnot_relaxed(i, v); +} +#define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot_release +#endif + +#ifndef arch_atomic_fetch_andnot +static __always_inline int +arch_atomic_fetch_andnot(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot +#endif + +#endif /* arch_atomic_fetch_andnot_relaxed */ + +#ifndef arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or +#define arch_atomic_fetch_or_release arch_atomic_fetch_or +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or +#else /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_or_acquire +static __always_inline int +arch_atomic_fetch_or_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or_acquire +#endif + +#ifndef arch_atomic_fetch_or_release +static __always_inline int +arch_atomic_fetch_or_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_or_relaxed(i, v); +} +#define arch_atomic_fetch_or_release arch_atomic_fetch_or_release +#endif + +#ifndef arch_atomic_fetch_or +static __always_inline int +arch_atomic_fetch_or(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_or arch_atomic_fetch_or +#endif + +#endif /* arch_atomic_fetch_or_relaxed */ + +#ifndef arch_atomic_fetch_xor_relaxed +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor +#else /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_fetch_xor_acquire +static __always_inline int +arch_atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + int ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor_acquire +#endif + +#ifndef arch_atomic_fetch_xor_release +static __always_inline int +arch_atomic_fetch_xor_release(int i, atomic_t *v) +{ + __atomic_release_fence(); + return arch_atomic_fetch_xor_relaxed(i, v); +} +#define arch_atomic_fetch_xor_release 
arch_atomic_fetch_xor_release +#endif + +#ifndef arch_atomic_fetch_xor +static __always_inline int +arch_atomic_fetch_xor(int i, atomic_t *v) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_fetch_xor arch_atomic_fetch_xor +#endif + +#endif /* arch_atomic_fetch_xor_relaxed */ + +#ifndef arch_atomic_xchg_relaxed +#define arch_atomic_xchg_acquire arch_atomic_xchg +#define arch_atomic_xchg_release arch_atomic_xchg +#define arch_atomic_xchg_relaxed arch_atomic_xchg +#else /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_xchg_acquire +static __always_inline int +arch_atomic_xchg_acquire(atomic_t *v, int i) +{ + int ret = arch_atomic_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire +#endif + +#ifndef arch_atomic_xchg_release +static __always_inline int +arch_atomic_xchg_release(atomic_t *v, int i) +{ + __atomic_release_fence(); + return arch_atomic_xchg_relaxed(v, i); +} +#define arch_atomic_xchg_release arch_atomic_xchg_release +#endif + +#ifndef arch_atomic_xchg +static __always_inline int +arch_atomic_xchg(atomic_t *v, int i) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_xchg arch_atomic_xchg +#endif + +#endif /* arch_atomic_xchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_relaxed +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg +#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg +#else /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_cmpxchg_acquire +static __always_inline int +arch_atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + int ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#endif + +#ifndef arch_atomic_cmpxchg_release +static __always_inline int +arch_atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + __atomic_release_fence(); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release +#endif + +#ifndef arch_atomic_cmpxchg +static __always_inline int +arch_atomic_cmpxchg(atomic_t *v, int old, int new) +{ + int ret; + __atomic_pre_full_fence(); + ret = arch_atomic_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_cmpxchg arch_atomic_cmpxchg +#endif + +#endif /* arch_atomic_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_relaxed +#ifdef arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg +#endif /* arch_atomic_try_cmpxchg */ + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define 
arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + int r, o = *old; + r = arch_atomic_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic_try_cmpxchg_relaxed arch_atomic_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_try_cmpxchg_acquire +static __always_inline bool +arch_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + bool ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg_acquire arch_atomic_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic_try_cmpxchg_release +static __always_inline bool +arch_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + __atomic_release_fence(); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic_try_cmpxchg_release arch_atomic_try_cmpxchg_release +#endif + +#ifndef arch_atomic_try_cmpxchg +static __always_inline bool +arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +#endif + +#endif /* arch_atomic_try_cmpxchg_relaxed */ + +#ifndef arch_atomic_sub_and_test +/** + * arch_atomic_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_sub_and_test(int i, atomic_t *v) +{ + return arch_atomic_sub_return(i, v) == 0; +} +#define arch_atomic_sub_and_test arch_atomic_sub_and_test +#endif + +#ifndef arch_atomic_dec_and_test +/** + * arch_atomic_dec_and_test - decrement and test + * @v: pointer of type atomic_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic_dec_and_test(atomic_t *v) +{ + return arch_atomic_dec_return(v) == 0; +} +#define arch_atomic_dec_and_test arch_atomic_dec_and_test +#endif + +#ifndef arch_atomic_inc_and_test +/** + * arch_atomic_inc_and_test - increment and test + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic_inc_and_test(atomic_t *v) +{ + return arch_atomic_inc_return(v) == 0; +} +#define arch_atomic_inc_and_test arch_atomic_inc_and_test +#endif + +#ifndef arch_atomic_add_negative +/** + * arch_atomic_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. 
+ */ +static __always_inline bool +arch_atomic_add_negative(int i, atomic_t *v) +{ + return arch_atomic_add_return(i, v) < 0; +} +#define arch_atomic_add_negative arch_atomic_add_negative +#endif + +#ifndef arch_atomic_fetch_add_unless +/** + * arch_atomic_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. + * Returns original value of @v + */ +static __always_inline int +arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#endif + +#ifndef arch_atomic_add_unless +/** + * arch_atomic_add_unless - add unless the number is already a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static __always_inline bool +arch_atomic_add_unless(atomic_t *v, int a, int u) +{ + return arch_atomic_fetch_add_unless(v, a, u) != u; +} +#define arch_atomic_add_unless arch_atomic_add_unless +#endif + +#ifndef arch_atomic_inc_not_zero +/** + * arch_atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +static __always_inline bool +arch_atomic_inc_not_zero(atomic_t *v) +{ + return arch_atomic_add_unless(v, 1, 0); +} +#define arch_atomic_inc_not_zero arch_atomic_inc_not_zero +#endif + +#ifndef arch_atomic_inc_unless_negative +static __always_inline bool +arch_atomic_inc_unless_negative(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative +#endif + +#ifndef arch_atomic_dec_unless_positive +static __always_inline bool +arch_atomic_dec_unless_positive(atomic_t *v) +{ + int c = arch_atomic_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!arch_atomic_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive +#endif + +#ifndef arch_atomic_dec_if_positive +static __always_inline int +arch_atomic_dec_if_positive(atomic_t *v) +{ + int dec, c = arch_atomic_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!arch_atomic_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive +#endif + +#ifdef CONFIG_GENERIC_ATOMIC64 +#include <asm-generic/atomic64.h> +#endif + +#ifndef arch_atomic64_read_acquire +static __always_inline s64 +arch_atomic64_read_acquire(const atomic64_t *v) +{ + return smp_load_acquire(&(v)->counter); +} +#define arch_atomic64_read_acquire arch_atomic64_read_acquire +#endif + +#ifndef arch_atomic64_set_release +static __always_inline void +arch_atomic64_set_release(atomic64_t *v, s64 i) +{ + smp_store_release(&(v)->counter, i); +} +#define arch_atomic64_set_release arch_atomic64_set_release +#endif + +#ifndef arch_atomic64_add_return_relaxed +#define arch_atomic64_add_return_acquire arch_atomic64_add_return +#define arch_atomic64_add_return_release 
arch_atomic64_add_return +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return +#else /* arch_atomic64_add_return_relaxed */ + +#ifndef arch_atomic64_add_return_acquire +static __always_inline s64 +arch_atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_add_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_add_return_acquire arch_atomic64_add_return_acquire +#endif + +#ifndef arch_atomic64_add_return_release +static __always_inline s64 +arch_atomic64_add_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_add_return_relaxed(i, v); +} +#define arch_atomic64_add_return_release arch_atomic64_add_return_release +#endif + +#ifndef arch_atomic64_add_return +static __always_inline s64 +arch_atomic64_add_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_add_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_add_return arch_atomic64_add_return +#endif + +#endif /* arch_atomic64_add_return_relaxed */ + +#ifndef arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add +#else /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_fetch_add_acquire +static __always_inline s64 +arch_atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add_acquire +#endif + +#ifndef arch_atomic64_fetch_add_release +static __always_inline s64 +arch_atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_add_relaxed(i, v); +} +#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add_release +#endif + +#ifndef arch_atomic64_fetch_add +static __always_inline s64 +arch_atomic64_fetch_add(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_add_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#endif + +#endif /* arch_atomic64_fetch_add_relaxed */ + +#ifndef arch_atomic64_sub_return_relaxed +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return +#define arch_atomic64_sub_return_release arch_atomic64_sub_return +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return +#else /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_sub_return_acquire +static __always_inline s64 +arch_atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return_acquire +#endif + +#ifndef arch_atomic64_sub_return_release +static __always_inline s64 +arch_atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_sub_return_relaxed(i, v); +} +#define arch_atomic64_sub_return_release arch_atomic64_sub_return_release +#endif + +#ifndef arch_atomic64_sub_return +static __always_inline s64 +arch_atomic64_sub_return(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_sub_return_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_sub_return arch_atomic64_sub_return 
+#endif + +#endif /* arch_atomic64_sub_return_relaxed */ + +#ifndef arch_atomic64_fetch_sub_relaxed +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub +#else /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_fetch_sub_acquire +static __always_inline s64 +arch_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_sub_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub_acquire +#endif + +#ifndef arch_atomic64_fetch_sub_release +static __always_inline s64 +arch_atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_sub_relaxed(i, v); +} +#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub_release +#endif + +#ifndef arch_atomic64_fetch_sub +static __always_inline s64 +arch_atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_sub_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#endif + +#endif /* arch_atomic64_fetch_sub_relaxed */ + +#ifndef arch_atomic64_inc +static __always_inline void +arch_atomic64_inc(atomic64_t *v) +{ + arch_atomic64_add(1, v); +} +#define arch_atomic64_inc arch_atomic64_inc +#endif + +#ifndef arch_atomic64_inc_return_relaxed +#ifdef arch_atomic64_inc_return +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return +#define arch_atomic64_inc_return_release arch_atomic64_inc_return +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return +#endif /* arch_atomic64_inc_return */ + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + return arch_atomic64_add_return(1, v); +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + return arch_atomic64_add_return_acquire(1, v); +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + return arch_atomic64_add_return_release(1, v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return_relaxed +static __always_inline s64 +arch_atomic64_inc_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_add_return_relaxed(1, v); +} +#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed +#endif + +#else /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_inc_return_acquire +static __always_inline s64 +arch_atomic64_inc_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_inc_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_inc_return_acquire arch_atomic64_inc_return_acquire +#endif + +#ifndef arch_atomic64_inc_return_release +static __always_inline s64 +arch_atomic64_inc_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_inc_return_relaxed(v); +} +#define arch_atomic64_inc_return_release arch_atomic64_inc_return_release +#endif + +#ifndef arch_atomic64_inc_return +static __always_inline s64 +arch_atomic64_inc_return(atomic64_t *v) +{ + s64 ret; + 
__atomic_pre_full_fence(); + ret = arch_atomic64_inc_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_inc_return arch_atomic64_inc_return +#endif + +#endif /* arch_atomic64_inc_return_relaxed */ + +#ifndef arch_atomic64_fetch_inc_relaxed +#ifdef arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc +#endif /* arch_atomic64_fetch_inc */ + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + return arch_atomic64_fetch_add(1, v); +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_add_acquire(1, v); +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + return arch_atomic64_fetch_add_release(1, v); +} +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc_relaxed +static __always_inline s64 +arch_atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_add_relaxed(1, v); +} +#define arch_atomic64_fetch_inc_relaxed arch_atomic64_fetch_inc_relaxed +#endif + +#else /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_fetch_inc_acquire +static __always_inline s64 +arch_atomic64_fetch_inc_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_inc_acquire arch_atomic64_fetch_inc_acquire +#endif + +#ifndef arch_atomic64_fetch_inc_release +static __always_inline s64 +arch_atomic64_fetch_inc_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_inc_relaxed(v); +} +#define arch_atomic64_fetch_inc_release arch_atomic64_fetch_inc_release +#endif + +#ifndef arch_atomic64_fetch_inc +static __always_inline s64 +arch_atomic64_fetch_inc(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_inc_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_inc arch_atomic64_fetch_inc +#endif + +#endif /* arch_atomic64_fetch_inc_relaxed */ + +#ifndef arch_atomic64_dec +static __always_inline void +arch_atomic64_dec(atomic64_t *v) +{ + arch_atomic64_sub(1, v); +} +#define arch_atomic64_dec arch_atomic64_dec +#endif + +#ifndef arch_atomic64_dec_return_relaxed +#ifdef arch_atomic64_dec_return +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return +#define arch_atomic64_dec_return_release arch_atomic64_dec_return +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return +#endif /* arch_atomic64_dec_return */ + +#ifndef arch_atomic64_dec_return +static __always_inline s64 +arch_atomic64_dec_return(atomic64_t *v) +{ + return arch_atomic64_sub_return(1, v); +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + return arch_atomic64_sub_return_acquire(1, v); +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 
+arch_atomic64_dec_return_release(atomic64_t *v) +{ + return arch_atomic64_sub_return_release(1, v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return_relaxed +static __always_inline s64 +arch_atomic64_dec_return_relaxed(atomic64_t *v) +{ + return arch_atomic64_sub_return_relaxed(1, v); +} +#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed +#endif + +#else /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_dec_return_acquire +static __always_inline s64 +arch_atomic64_dec_return_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_dec_return_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_dec_return_acquire arch_atomic64_dec_return_acquire +#endif + +#ifndef arch_atomic64_dec_return_release +static __always_inline s64 +arch_atomic64_dec_return_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_dec_return_relaxed(v); +} +#define arch_atomic64_dec_return_release arch_atomic64_dec_return_release +#endif + +#ifndef arch_atomic64_dec_return +static __always_inline s64 +arch_atomic64_dec_return(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_dec_return_relaxed(v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_dec_return arch_atomic64_dec_return +#endif + +#endif /* arch_atomic64_dec_return_relaxed */ + +#ifndef arch_atomic64_fetch_dec_relaxed +#ifdef arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec +#endif /* arch_atomic64_fetch_dec */ + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + return arch_atomic64_fetch_sub(1, v); +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_acquire(1, v); +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_release(1, v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec_relaxed +static __always_inline s64 +arch_atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(1, v); +} +#define arch_atomic64_fetch_dec_relaxed arch_atomic64_fetch_dec_relaxed +#endif + +#else /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_dec_acquire +static __always_inline s64 +arch_atomic64_fetch_dec_acquire(atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_dec_relaxed(v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_dec_acquire arch_atomic64_fetch_dec_acquire +#endif + +#ifndef arch_atomic64_fetch_dec_release +static __always_inline s64 +arch_atomic64_fetch_dec_release(atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_dec_relaxed(v); +} +#define arch_atomic64_fetch_dec_release arch_atomic64_fetch_dec_release +#endif + +#ifndef arch_atomic64_fetch_dec +static __always_inline s64 +arch_atomic64_fetch_dec(atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_dec_relaxed(v); + 
__atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_dec arch_atomic64_fetch_dec +#endif + +#endif /* arch_atomic64_fetch_dec_relaxed */ + +#ifndef arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and +#else /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_fetch_and_acquire +static __always_inline s64 +arch_atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_and_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and_acquire +#endif + +#ifndef arch_atomic64_fetch_and_release +static __always_inline s64 +arch_atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_and_relaxed(i, v); +} +#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and_release +#endif + +#ifndef arch_atomic64_fetch_and +static __always_inline s64 +arch_atomic64_fetch_and(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_and_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#endif + +#endif /* arch_atomic64_fetch_and_relaxed */ + +#ifndef arch_atomic64_andnot +static __always_inline void +arch_atomic64_andnot(s64 i, atomic64_t *v) +{ + arch_atomic64_and(~i, v); +} +#define arch_atomic64_andnot arch_atomic64_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +#ifdef arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot +#endif /* arch_atomic64_fetch_andnot */ + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and(~i, v); +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_acquire(~i, v); +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_release(~i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot_relaxed +static __always_inline s64 +arch_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + return arch_atomic64_fetch_and_relaxed(~i, v); +} +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#endif + +#else /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_andnot_acquire +static __always_inline s64 +arch_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot_acquire arch_atomic64_fetch_andnot_acquire +#endif + +#ifndef arch_atomic64_fetch_andnot_release +static __always_inline s64 +arch_atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return 
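/*
 * andnot clears the bits set in @i: the fallback simply rewrites it as
 * and(~i, v), and fetch_andnot returns the pre-clear value, so a caller
 * can tell whether a flag was actually set. A hedged sketch, where
 * FLAG_BUSY is a hypothetical bit mask:
 *
 *	s64 old = arch_atomic64_fetch_andnot(FLAG_BUSY, &state);
 *	if (old & FLAG_BUSY)
 *		handle_busy_cleared();	// we are the one that cleared it
 */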
arch_atomic64_fetch_andnot_relaxed(i, v); +} +#define arch_atomic64_fetch_andnot_release arch_atomic64_fetch_andnot_release +#endif + +#ifndef arch_atomic64_fetch_andnot +static __always_inline s64 +arch_atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_andnot_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot +#endif + +#endif /* arch_atomic64_fetch_andnot_relaxed */ + +#ifndef arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or +#else /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_or_acquire +static __always_inline s64 +arch_atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or_acquire +#endif + +#ifndef arch_atomic64_fetch_or_release +static __always_inline s64 +arch_atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_or_relaxed(i, v); +} +#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or_release +#endif + +#ifndef arch_atomic64_fetch_or +static __always_inline s64 +arch_atomic64_fetch_or(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_or_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#endif + +#endif /* arch_atomic64_fetch_or_relaxed */ + +#ifndef arch_atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor +#else /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_fetch_xor_acquire +static __always_inline s64 +arch_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + s64 ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor_acquire +#endif + +#ifndef arch_atomic64_fetch_xor_release +static __always_inline s64 +arch_atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + __atomic_release_fence(); + return arch_atomic64_fetch_xor_relaxed(i, v); +} +#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor_release +#endif + +#ifndef arch_atomic64_fetch_xor +static __always_inline s64 +arch_atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_fetch_xor_relaxed(i, v); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor +#endif + +#endif /* arch_atomic64_fetch_xor_relaxed */ + +#ifndef arch_atomic64_xchg_relaxed +#define arch_atomic64_xchg_acquire arch_atomic64_xchg +#define arch_atomic64_xchg_release arch_atomic64_xchg +#define arch_atomic64_xchg_relaxed arch_atomic64_xchg +#else /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_xchg_acquire +static __always_inline s64 +arch_atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + s64 ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_xchg_acquire arch_atomic64_xchg_acquire +#endif + +#ifndef arch_atomic64_xchg_release +static __always_inline s64 
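/*
 * fetch_or's old value gives a cheap test-and-set: the caller that
 * observes the bit still clear in the return value is the one that set
 * it. A hedged sketch, where PENDING is a hypothetical mask:
 *
 *	if (!(arch_atomic64_fetch_or(PENDING, &flags) & PENDING))
 *		kick_worker();	// first setter schedules the work
 */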
+arch_atomic64_xchg_release(atomic64_t *v, s64 i) +{ + __atomic_release_fence(); + return arch_atomic64_xchg_relaxed(v, i); +} +#define arch_atomic64_xchg_release arch_atomic64_xchg_release +#endif + +#ifndef arch_atomic64_xchg +static __always_inline s64 +arch_atomic64_xchg(atomic64_t *v, s64 i) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_xchg_relaxed(v, i); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_xchg arch_atomic64_xchg +#endif + +#endif /* arch_atomic64_xchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_relaxed +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg +#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg +#else /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_cmpxchg_acquire +static __always_inline s64 +arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + s64 ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_cmpxchg_acquire arch_atomic64_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_cmpxchg_release +static __always_inline s64 +arch_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_cmpxchg_release arch_atomic64_cmpxchg_release +#endif + +#ifndef arch_atomic64_cmpxchg +static __always_inline s64 +arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + s64 ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg +#endif + +#endif /* arch_atomic64_cmpxchg_relaxed */ + +#ifndef arch_atomic64_try_cmpxchg_relaxed +#ifdef arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg +#endif /* arch_atomic64_try_cmpxchg */ + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_acquire(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_release(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg_relaxed +static __always_inline bool +arch_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + s64 r, o = *old; + r = arch_atomic64_cmpxchg_relaxed(v, o, new); + if (unlikely(r != o)) + *old = r; + return likely(r == o); +} +#define arch_atomic64_try_cmpxchg_relaxed arch_atomic64_try_cmpxchg_relaxed +#endif + +#else /* arch_atomic64_try_cmpxchg_relaxed */ 
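/*
 * try_cmpxchg folds the failure re-read into the primitive: on failure
 * it writes the observed value back through @old and returns false, so
 * CAS loops need only one explicit atomic load up front. A hedged
 * sketch of the resulting loop shape (illustrative caller; compute()
 * is a hypothetical helper):
 *
 *	s64 old = arch_atomic64_read(v), new;
 *	do {
 *		new = compute(old);
 *	} while (!arch_atomic64_try_cmpxchg(v, &old, new));
 */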
+ +#ifndef arch_atomic64_try_cmpxchg_acquire +static __always_inline bool +arch_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + bool ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_acquire_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg_acquire arch_atomic64_try_cmpxchg_acquire +#endif + +#ifndef arch_atomic64_try_cmpxchg_release +static __always_inline bool +arch_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + __atomic_release_fence(); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} +#define arch_atomic64_try_cmpxchg_release arch_atomic64_try_cmpxchg_release +#endif + +#ifndef arch_atomic64_try_cmpxchg +static __always_inline bool +arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + bool ret; + __atomic_pre_full_fence(); + ret = arch_atomic64_try_cmpxchg_relaxed(v, old, new); + __atomic_post_full_fence(); + return ret; +} +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +#endif + +#endif /* arch_atomic64_try_cmpxchg_relaxed */ + +#ifndef arch_atomic64_sub_and_test +/** + * arch_atomic64_sub_and_test - subtract value from variable and test result + * @i: integer value to subtract + * @v: pointer of type atomic64_t + * + * Atomically subtracts @i from @v and returns + * true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + return arch_atomic64_sub_return(i, v) == 0; +} +#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test +#endif + +#ifndef arch_atomic64_dec_and_test +/** + * arch_atomic64_dec_and_test - decrement and test + * @v: pointer of type atomic64_t + * + * Atomically decrements @v by 1 and + * returns true if the result is 0, or false for all other + * cases. + */ +static __always_inline bool +arch_atomic64_dec_and_test(atomic64_t *v) +{ + return arch_atomic64_dec_return(v) == 0; +} +#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test +#endif + +#ifndef arch_atomic64_inc_and_test +/** + * arch_atomic64_inc_and_test - increment and test + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1 + * and returns true if the result is zero, or false for all + * other cases. + */ +static __always_inline bool +arch_atomic64_inc_and_test(atomic64_t *v) +{ + return arch_atomic64_inc_return(v) == 0; +} +#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test +#endif + +#ifndef arch_atomic64_add_negative +/** + * arch_atomic64_add_negative - add and test if negative + * @i: integer value to add + * @v: pointer of type atomic64_t + * + * Atomically adds @i to @v and returns true + * if the result is negative, or false when + * result is greater than or equal to zero. + */ +static __always_inline bool +arch_atomic64_add_negative(s64 i, atomic64_t *v) +{ + return arch_atomic64_add_return(i, v) < 0; +} +#define arch_atomic64_add_negative arch_atomic64_add_negative +#endif + +#ifndef arch_atomic64_fetch_add_unless +/** + * arch_atomic64_fetch_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as @v was not already @u. 
+ * Returns original value of @v + */ +static __always_inline s64 +arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c == u)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); + + return c; +} +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#endif + +#ifndef arch_atomic64_add_unless +/** + * arch_atomic64_add_unless - add unless the number is already a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, if @v was not already @u. + * Returns true if the addition was done. + */ +static __always_inline bool +arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + return arch_atomic64_fetch_add_unless(v, a, u) != u; +} +#define arch_atomic64_add_unless arch_atomic64_add_unless +#endif + +#ifndef arch_atomic64_inc_not_zero +/** + * arch_atomic64_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic64_t + * + * Atomically increments @v by 1, if @v is non-zero. + * Returns true if the increment was done. + */ +static __always_inline bool +arch_atomic64_inc_not_zero(atomic64_t *v) +{ + return arch_atomic64_add_unless(v, 1, 0); +} +#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero +#endif + +#ifndef arch_atomic64_inc_unless_negative +static __always_inline bool +arch_atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c < 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c + 1)); + + return true; +} +#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative +#endif + +#ifndef arch_atomic64_dec_unless_positive +static __always_inline bool +arch_atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 c = arch_atomic64_read(v); + + do { + if (unlikely(c > 0)) + return false; + } while (!arch_atomic64_try_cmpxchg(v, &c, c - 1)); + + return true; +} +#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive +#endif + +#ifndef arch_atomic64_dec_if_positive +static __always_inline s64 +arch_atomic64_dec_if_positive(atomic64_t *v) +{ + s64 dec, c = arch_atomic64_read(v); + + do { + dec = c - 1; + if (unlikely(dec < 0)) + break; + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); + + return dec; +} +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive +#endif + +#endif /* _LINUX_ATOMIC_FALLBACK_H */ +// 90cd26cfd69d2250303d654955a0cc12620fb91b diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h index a7d240e465c0..fd525c71d676 100644 --- a/include/linux/atomic-fallback.h +++ b/include/linux/atomic-fallback.h @@ -6,6 +6,8 @@ #ifndef _LINUX_ATOMIC_FALLBACK_H #define _LINUX_ATOMIC_FALLBACK_H +#include <linux/compiler.h> + #ifndef xchg_relaxed #define xchg_relaxed xchg #define xchg_acquire xchg @@ -75,8 +77,11 @@ #endif /* cmpxchg64_relaxed */ +#define arch_atomic_read atomic_read +#define arch_atomic_read_acquire atomic_read_acquire + #ifndef atomic_read_acquire -static inline int +static __always_inline int atomic_read_acquire(const atomic_t *v) { return smp_load_acquire(&(v)->counter); @@ -84,8 +89,11 @@ atomic_read_acquire(const atomic_t *v) #define atomic_read_acquire atomic_read_acquire #endif +#define arch_atomic_set atomic_set +#define arch_atomic_set_release atomic_set_release + #ifndef atomic_set_release -static inline void +static __always_inline void atomic_set_release(atomic_t *v, int i) { 
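/*
 * The conditional ops above (fetch_add_unless, inc_not_zero,
 * inc_unless_negative, dec_if_positive, ...) are all built from the
 * same try_cmpxchg loop, bailing out when the guard condition fails. A
 * hedged lookup-side sketch of inc_not_zero (hypothetical object
 * lifetime code):
 *
 *	if (!arch_atomic64_inc_not_zero(&obj->refs))
 *		return NULL;	// object already on its way out
 *	return obj;
 */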
smp_store_release(&(v)->counter, i); @@ -93,6 +101,13 @@ atomic_set_release(atomic_t *v, int i) #define atomic_set_release atomic_set_release #endif +#define arch_atomic_add atomic_add + +#define arch_atomic_add_return atomic_add_return +#define arch_atomic_add_return_acquire atomic_add_return_acquire +#define arch_atomic_add_return_release atomic_add_return_release +#define arch_atomic_add_return_relaxed atomic_add_return_relaxed + #ifndef atomic_add_return_relaxed #define atomic_add_return_acquire atomic_add_return #define atomic_add_return_release atomic_add_return @@ -100,7 +115,7 @@ atomic_set_release(atomic_t *v, int i) #else /* atomic_add_return_relaxed */ #ifndef atomic_add_return_acquire -static inline int +static __always_inline int atomic_add_return_acquire(int i, atomic_t *v) { int ret = atomic_add_return_relaxed(i, v); @@ -111,7 +126,7 @@ atomic_add_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_add_return_release -static inline int +static __always_inline int atomic_add_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -121,7 +136,7 @@ atomic_add_return_release(int i, atomic_t *v) #endif #ifndef atomic_add_return -static inline int +static __always_inline int atomic_add_return(int i, atomic_t *v) { int ret; @@ -135,6 +150,11 @@ atomic_add_return(int i, atomic_t *v) #endif /* atomic_add_return_relaxed */ +#define arch_atomic_fetch_add atomic_fetch_add +#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire +#define arch_atomic_fetch_add_release atomic_fetch_add_release +#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed + #ifndef atomic_fetch_add_relaxed #define atomic_fetch_add_acquire atomic_fetch_add #define atomic_fetch_add_release atomic_fetch_add @@ -142,7 +162,7 @@ atomic_add_return(int i, atomic_t *v) #else /* atomic_fetch_add_relaxed */ #ifndef atomic_fetch_add_acquire -static inline int +static __always_inline int atomic_fetch_add_acquire(int i, atomic_t *v) { int ret = atomic_fetch_add_relaxed(i, v); @@ -153,7 +173,7 @@ atomic_fetch_add_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_add_release -static inline int +static __always_inline int atomic_fetch_add_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -163,7 +183,7 @@ atomic_fetch_add_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_add -static inline int +static __always_inline int atomic_fetch_add(int i, atomic_t *v) { int ret; @@ -177,6 +197,13 @@ atomic_fetch_add(int i, atomic_t *v) #endif /* atomic_fetch_add_relaxed */ +#define arch_atomic_sub atomic_sub + +#define arch_atomic_sub_return atomic_sub_return +#define arch_atomic_sub_return_acquire atomic_sub_return_acquire +#define arch_atomic_sub_return_release atomic_sub_return_release +#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed + #ifndef atomic_sub_return_relaxed #define atomic_sub_return_acquire atomic_sub_return #define atomic_sub_return_release atomic_sub_return @@ -184,7 +211,7 @@ atomic_fetch_add(int i, atomic_t *v) #else /* atomic_sub_return_relaxed */ #ifndef atomic_sub_return_acquire -static inline int +static __always_inline int atomic_sub_return_acquire(int i, atomic_t *v) { int ret = atomic_sub_return_relaxed(i, v); @@ -195,7 +222,7 @@ atomic_sub_return_acquire(int i, atomic_t *v) #endif #ifndef atomic_sub_return_release -static inline int +static __always_inline int atomic_sub_return_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -205,7 +232,7 @@ atomic_sub_return_release(int i, atomic_t *v) #endif #ifndef atomic_sub_return -static inline 
int +static __always_inline int atomic_sub_return(int i, atomic_t *v) { int ret; @@ -219,6 +246,11 @@ atomic_sub_return(int i, atomic_t *v) #endif /* atomic_sub_return_relaxed */ +#define arch_atomic_fetch_sub atomic_fetch_sub +#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire +#define arch_atomic_fetch_sub_release atomic_fetch_sub_release +#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed + #ifndef atomic_fetch_sub_relaxed #define atomic_fetch_sub_acquire atomic_fetch_sub #define atomic_fetch_sub_release atomic_fetch_sub @@ -226,7 +258,7 @@ atomic_sub_return(int i, atomic_t *v) #else /* atomic_fetch_sub_relaxed */ #ifndef atomic_fetch_sub_acquire -static inline int +static __always_inline int atomic_fetch_sub_acquire(int i, atomic_t *v) { int ret = atomic_fetch_sub_relaxed(i, v); @@ -237,7 +269,7 @@ atomic_fetch_sub_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub_release -static inline int +static __always_inline int atomic_fetch_sub_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -247,7 +279,7 @@ atomic_fetch_sub_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_sub -static inline int +static __always_inline int atomic_fetch_sub(int i, atomic_t *v) { int ret; @@ -261,8 +293,10 @@ atomic_fetch_sub(int i, atomic_t *v) #endif /* atomic_fetch_sub_relaxed */ +#define arch_atomic_inc atomic_inc + #ifndef atomic_inc -static inline void +static __always_inline void atomic_inc(atomic_t *v) { atomic_add(1, v); @@ -270,6 +304,11 @@ atomic_inc(atomic_t *v) #define atomic_inc atomic_inc #endif +#define arch_atomic_inc_return atomic_inc_return +#define arch_atomic_inc_return_acquire atomic_inc_return_acquire +#define arch_atomic_inc_return_release atomic_inc_return_release +#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed + #ifndef atomic_inc_return_relaxed #ifdef atomic_inc_return #define atomic_inc_return_acquire atomic_inc_return @@ -278,7 +317,7 @@ atomic_inc(atomic_t *v) #endif /* atomic_inc_return */ #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { return atomic_add_return(1, v); @@ -287,7 +326,7 @@ atomic_inc_return(atomic_t *v) #endif #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { return atomic_add_return_acquire(1, v); @@ -296,7 +335,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { return atomic_add_return_release(1, v); @@ -305,7 +344,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return_relaxed -static inline int +static __always_inline int atomic_inc_return_relaxed(atomic_t *v) { return atomic_add_return_relaxed(1, v); @@ -316,7 +355,7 @@ atomic_inc_return_relaxed(atomic_t *v) #else /* atomic_inc_return_relaxed */ #ifndef atomic_inc_return_acquire -static inline int +static __always_inline int atomic_inc_return_acquire(atomic_t *v) { int ret = atomic_inc_return_relaxed(v); @@ -327,7 +366,7 @@ atomic_inc_return_acquire(atomic_t *v) #endif #ifndef atomic_inc_return_release -static inline int +static __always_inline int atomic_inc_return_release(atomic_t *v) { __atomic_release_fence(); @@ -337,7 +376,7 @@ atomic_inc_return_release(atomic_t *v) #endif #ifndef atomic_inc_return -static inline int +static __always_inline int atomic_inc_return(atomic_t *v) { int ret; @@ -351,6 +390,11 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_inc_return_relaxed 
*/ +#define arch_atomic_fetch_inc atomic_fetch_inc +#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire +#define arch_atomic_fetch_inc_release atomic_fetch_inc_release +#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed + #ifndef atomic_fetch_inc_relaxed #ifdef atomic_fetch_inc #define atomic_fetch_inc_acquire atomic_fetch_inc @@ -359,7 +403,7 @@ atomic_inc_return(atomic_t *v) #endif /* atomic_fetch_inc */ #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { return atomic_fetch_add(1, v); @@ -368,7 +412,7 @@ atomic_fetch_inc(atomic_t *v) #endif #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { return atomic_fetch_add_acquire(1, v); @@ -377,7 +421,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { return atomic_fetch_add_release(1, v); @@ -386,7 +430,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc_relaxed -static inline int +static __always_inline int atomic_fetch_inc_relaxed(atomic_t *v) { return atomic_fetch_add_relaxed(1, v); @@ -397,7 +441,7 @@ atomic_fetch_inc_relaxed(atomic_t *v) #else /* atomic_fetch_inc_relaxed */ #ifndef atomic_fetch_inc_acquire -static inline int +static __always_inline int atomic_fetch_inc_acquire(atomic_t *v) { int ret = atomic_fetch_inc_relaxed(v); @@ -408,7 +452,7 @@ atomic_fetch_inc_acquire(atomic_t *v) #endif #ifndef atomic_fetch_inc_release -static inline int +static __always_inline int atomic_fetch_inc_release(atomic_t *v) { __atomic_release_fence(); @@ -418,7 +462,7 @@ atomic_fetch_inc_release(atomic_t *v) #endif #ifndef atomic_fetch_inc -static inline int +static __always_inline int atomic_fetch_inc(atomic_t *v) { int ret; @@ -432,8 +476,10 @@ atomic_fetch_inc(atomic_t *v) #endif /* atomic_fetch_inc_relaxed */ +#define arch_atomic_dec atomic_dec + #ifndef atomic_dec -static inline void +static __always_inline void atomic_dec(atomic_t *v) { atomic_sub(1, v); @@ -441,6 +487,11 @@ atomic_dec(atomic_t *v) #define atomic_dec atomic_dec #endif +#define arch_atomic_dec_return atomic_dec_return +#define arch_atomic_dec_return_acquire atomic_dec_return_acquire +#define arch_atomic_dec_return_release atomic_dec_return_release +#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed + #ifndef atomic_dec_return_relaxed #ifdef atomic_dec_return #define atomic_dec_return_acquire atomic_dec_return @@ -449,7 +500,7 @@ atomic_dec(atomic_t *v) #endif /* atomic_dec_return */ #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { return atomic_sub_return(1, v); @@ -458,7 +509,7 @@ atomic_dec_return(atomic_t *v) #endif #ifndef atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { return atomic_sub_return_acquire(1, v); @@ -467,7 +518,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { return atomic_sub_return_release(1, v); @@ -476,7 +527,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return_relaxed -static inline int +static __always_inline int atomic_dec_return_relaxed(atomic_t *v) { return atomic_sub_return_relaxed(1, v); @@ -487,7 +538,7 @@ atomic_dec_return_relaxed(atomic_t *v) #else /* atomic_dec_return_relaxed */ #ifndef 
atomic_dec_return_acquire -static inline int +static __always_inline int atomic_dec_return_acquire(atomic_t *v) { int ret = atomic_dec_return_relaxed(v); @@ -498,7 +549,7 @@ atomic_dec_return_acquire(atomic_t *v) #endif #ifndef atomic_dec_return_release -static inline int +static __always_inline int atomic_dec_return_release(atomic_t *v) { __atomic_release_fence(); @@ -508,7 +559,7 @@ atomic_dec_return_release(atomic_t *v) #endif #ifndef atomic_dec_return -static inline int +static __always_inline int atomic_dec_return(atomic_t *v) { int ret; @@ -522,6 +573,11 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_dec_return_relaxed */ +#define arch_atomic_fetch_dec atomic_fetch_dec +#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire +#define arch_atomic_fetch_dec_release atomic_fetch_dec_release +#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed + #ifndef atomic_fetch_dec_relaxed #ifdef atomic_fetch_dec #define atomic_fetch_dec_acquire atomic_fetch_dec @@ -530,7 +586,7 @@ atomic_dec_return(atomic_t *v) #endif /* atomic_fetch_dec */ #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { return atomic_fetch_sub(1, v); @@ -539,7 +595,7 @@ atomic_fetch_dec(atomic_t *v) #endif #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { return atomic_fetch_sub_acquire(1, v); @@ -548,7 +604,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { return atomic_fetch_sub_release(1, v); @@ -557,7 +613,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec_relaxed -static inline int +static __always_inline int atomic_fetch_dec_relaxed(atomic_t *v) { return atomic_fetch_sub_relaxed(1, v); @@ -568,7 +624,7 @@ atomic_fetch_dec_relaxed(atomic_t *v) #else /* atomic_fetch_dec_relaxed */ #ifndef atomic_fetch_dec_acquire -static inline int +static __always_inline int atomic_fetch_dec_acquire(atomic_t *v) { int ret = atomic_fetch_dec_relaxed(v); @@ -579,7 +635,7 @@ atomic_fetch_dec_acquire(atomic_t *v) #endif #ifndef atomic_fetch_dec_release -static inline int +static __always_inline int atomic_fetch_dec_release(atomic_t *v) { __atomic_release_fence(); @@ -589,7 +645,7 @@ atomic_fetch_dec_release(atomic_t *v) #endif #ifndef atomic_fetch_dec -static inline int +static __always_inline int atomic_fetch_dec(atomic_t *v) { int ret; @@ -603,6 +659,13 @@ atomic_fetch_dec(atomic_t *v) #endif /* atomic_fetch_dec_relaxed */ +#define arch_atomic_and atomic_and + +#define arch_atomic_fetch_and atomic_fetch_and +#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire +#define arch_atomic_fetch_and_release atomic_fetch_and_release +#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed + #ifndef atomic_fetch_and_relaxed #define atomic_fetch_and_acquire atomic_fetch_and #define atomic_fetch_and_release atomic_fetch_and @@ -610,7 +673,7 @@ atomic_fetch_dec(atomic_t *v) #else /* atomic_fetch_and_relaxed */ #ifndef atomic_fetch_and_acquire -static inline int +static __always_inline int atomic_fetch_and_acquire(int i, atomic_t *v) { int ret = atomic_fetch_and_relaxed(i, v); @@ -621,7 +684,7 @@ atomic_fetch_and_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_and_release -static inline int +static __always_inline int atomic_fetch_and_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -631,7 +694,7 @@ atomic_fetch_and_release(int i, 
atomic_t *v) #endif #ifndef atomic_fetch_and -static inline int +static __always_inline int atomic_fetch_and(int i, atomic_t *v) { int ret; @@ -645,8 +708,10 @@ atomic_fetch_and(int i, atomic_t *v) #endif /* atomic_fetch_and_relaxed */ +#define arch_atomic_andnot atomic_andnot + #ifndef atomic_andnot -static inline void +static __always_inline void atomic_andnot(int i, atomic_t *v) { atomic_and(~i, v); @@ -654,6 +719,11 @@ atomic_andnot(int i, atomic_t *v) #define atomic_andnot atomic_andnot #endif +#define arch_atomic_fetch_andnot atomic_fetch_andnot +#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire +#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release +#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed + #ifndef atomic_fetch_andnot_relaxed #ifdef atomic_fetch_andnot #define atomic_fetch_andnot_acquire atomic_fetch_andnot @@ -662,7 +732,7 @@ atomic_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot */ #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { return atomic_fetch_and(~i, v); @@ -671,7 +741,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { return atomic_fetch_and_acquire(~i, v); @@ -680,7 +750,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { return atomic_fetch_and_release(~i, v); @@ -689,7 +759,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_relaxed -static inline int +static __always_inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v) { return atomic_fetch_and_relaxed(~i, v); @@ -700,7 +770,7 @@ atomic_fetch_andnot_relaxed(int i, atomic_t *v) #else /* atomic_fetch_andnot_relaxed */ #ifndef atomic_fetch_andnot_acquire -static inline int +static __always_inline int atomic_fetch_andnot_acquire(int i, atomic_t *v) { int ret = atomic_fetch_andnot_relaxed(i, v); @@ -711,7 +781,7 @@ atomic_fetch_andnot_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot_release -static inline int +static __always_inline int atomic_fetch_andnot_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -721,7 +791,7 @@ atomic_fetch_andnot_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_andnot -static inline int +static __always_inline int atomic_fetch_andnot(int i, atomic_t *v) { int ret; @@ -735,6 +805,13 @@ atomic_fetch_andnot(int i, atomic_t *v) #endif /* atomic_fetch_andnot_relaxed */ +#define arch_atomic_or atomic_or + +#define arch_atomic_fetch_or atomic_fetch_or +#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire +#define arch_atomic_fetch_or_release atomic_fetch_or_release +#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed + #ifndef atomic_fetch_or_relaxed #define atomic_fetch_or_acquire atomic_fetch_or #define atomic_fetch_or_release atomic_fetch_or @@ -742,7 +819,7 @@ atomic_fetch_andnot(int i, atomic_t *v) #else /* atomic_fetch_or_relaxed */ #ifndef atomic_fetch_or_acquire -static inline int +static __always_inline int atomic_fetch_or_acquire(int i, atomic_t *v) { int ret = atomic_fetch_or_relaxed(i, v); @@ -753,7 +830,7 @@ atomic_fetch_or_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_or_release -static inline int +static __always_inline int atomic_fetch_or_release(int i, atomic_t *v) { 
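/*
 * The arch_atomic_* defines being added throughout this file alias the
 * plain atomic_* ops, so configurations whose architecture does not
 * provide an arch_ namespace still expose one for instrumented
 * wrappers to sit on top of. A hedged sketch of the layering this
 * enables (wrapper shape only; the real instrumented header differs in
 * detail):
 *
 *	static __always_inline int atomic_read(const atomic_t *v)
 *	{
 *		instrument_atomic_read(v, sizeof(*v));	// KASAN/KCSAN hook
 *		return arch_atomic_read(v);
 *	}
 */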
__atomic_release_fence(); @@ -763,7 +840,7 @@ atomic_fetch_or_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_or -static inline int +static __always_inline int atomic_fetch_or(int i, atomic_t *v) { int ret; @@ -777,6 +854,13 @@ atomic_fetch_or(int i, atomic_t *v) #endif /* atomic_fetch_or_relaxed */ +#define arch_atomic_xor atomic_xor + +#define arch_atomic_fetch_xor atomic_fetch_xor +#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire +#define arch_atomic_fetch_xor_release atomic_fetch_xor_release +#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed + #ifndef atomic_fetch_xor_relaxed #define atomic_fetch_xor_acquire atomic_fetch_xor #define atomic_fetch_xor_release atomic_fetch_xor @@ -784,7 +868,7 @@ atomic_fetch_or(int i, atomic_t *v) #else /* atomic_fetch_xor_relaxed */ #ifndef atomic_fetch_xor_acquire -static inline int +static __always_inline int atomic_fetch_xor_acquire(int i, atomic_t *v) { int ret = atomic_fetch_xor_relaxed(i, v); @@ -795,7 +879,7 @@ atomic_fetch_xor_acquire(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor_release -static inline int +static __always_inline int atomic_fetch_xor_release(int i, atomic_t *v) { __atomic_release_fence(); @@ -805,7 +889,7 @@ atomic_fetch_xor_release(int i, atomic_t *v) #endif #ifndef atomic_fetch_xor -static inline int +static __always_inline int atomic_fetch_xor(int i, atomic_t *v) { int ret; @@ -819,6 +903,11 @@ atomic_fetch_xor(int i, atomic_t *v) #endif /* atomic_fetch_xor_relaxed */ +#define arch_atomic_xchg atomic_xchg +#define arch_atomic_xchg_acquire atomic_xchg_acquire +#define arch_atomic_xchg_release atomic_xchg_release +#define arch_atomic_xchg_relaxed atomic_xchg_relaxed + #ifndef atomic_xchg_relaxed #define atomic_xchg_acquire atomic_xchg #define atomic_xchg_release atomic_xchg @@ -826,7 +915,7 @@ atomic_fetch_xor(int i, atomic_t *v) #else /* atomic_xchg_relaxed */ #ifndef atomic_xchg_acquire -static inline int +static __always_inline int atomic_xchg_acquire(atomic_t *v, int i) { int ret = atomic_xchg_relaxed(v, i); @@ -837,7 +926,7 @@ atomic_xchg_acquire(atomic_t *v, int i) #endif #ifndef atomic_xchg_release -static inline int +static __always_inline int atomic_xchg_release(atomic_t *v, int i) { __atomic_release_fence(); @@ -847,7 +936,7 @@ atomic_xchg_release(atomic_t *v, int i) #endif #ifndef atomic_xchg -static inline int +static __always_inline int atomic_xchg(atomic_t *v, int i) { int ret; @@ -861,6 +950,11 @@ atomic_xchg(atomic_t *v, int i) #endif /* atomic_xchg_relaxed */ +#define arch_atomic_cmpxchg atomic_cmpxchg +#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire +#define arch_atomic_cmpxchg_release atomic_cmpxchg_release +#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed + #ifndef atomic_cmpxchg_relaxed #define atomic_cmpxchg_acquire atomic_cmpxchg #define atomic_cmpxchg_release atomic_cmpxchg @@ -868,7 +962,7 @@ atomic_xchg(atomic_t *v, int i) #else /* atomic_cmpxchg_relaxed */ #ifndef atomic_cmpxchg_acquire -static inline int +static __always_inline int atomic_cmpxchg_acquire(atomic_t *v, int old, int new) { int ret = atomic_cmpxchg_relaxed(v, old, new); @@ -879,7 +973,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg_release -static inline int +static __always_inline int atomic_cmpxchg_release(atomic_t *v, int old, int new) { __atomic_release_fence(); @@ -889,7 +983,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new) #endif #ifndef atomic_cmpxchg -static inline int +static __always_inline int 
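/*
 * The blanket s/static inline/static __always_inline/ in this file is
 * not cosmetic: if the compiler ever spills one of these fallbacks out
 * of line, it lands in an ordinary, instrumentable text section, which
 * defeats callers that must stay self-contained (e.g. noinstr code).
 * __always_inline forces inlining regardless of size heuristics.
 */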
atomic_cmpxchg(atomic_t *v, int old, int new) { int ret; @@ -903,6 +997,11 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_cmpxchg_relaxed */ +#define arch_atomic_try_cmpxchg atomic_try_cmpxchg +#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire +#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release +#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed + #ifndef atomic_try_cmpxchg_relaxed #ifdef atomic_try_cmpxchg #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg @@ -911,7 +1010,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new) #endif /* atomic_try_cmpxchg */ #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { int r, o = *old; @@ -924,7 +1023,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { int r, o = *old; @@ -937,7 +1036,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { int r, o = *old; @@ -950,7 +1049,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) { int r, o = *old; @@ -965,7 +1064,7 @@ atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) #else /* atomic_try_cmpxchg_relaxed */ #ifndef atomic_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) { bool ret = atomic_try_cmpxchg_relaxed(v, old, new); @@ -976,7 +1075,7 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg_release -static inline bool +static __always_inline bool atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) { __atomic_release_fence(); @@ -986,7 +1085,7 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) #endif #ifndef atomic_try_cmpxchg -static inline bool +static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) { bool ret; @@ -1000,6 +1099,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) #endif /* atomic_try_cmpxchg_relaxed */ +#define arch_atomic_sub_and_test atomic_sub_and_test + #ifndef atomic_sub_and_test /** * atomic_sub_and_test - subtract value from variable and test result @@ -1010,7 +1111,7 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) { return atomic_sub_return(i, v) == 0; @@ -1018,6 +1119,8 @@ atomic_sub_and_test(int i, atomic_t *v) #define atomic_sub_and_test atomic_sub_and_test #endif +#define arch_atomic_dec_and_test atomic_dec_and_test + #ifndef atomic_dec_and_test /** * atomic_dec_and_test - decrement and test @@ -1027,7 +1130,7 @@ atomic_sub_and_test(int i, atomic_t *v) * returns true if the result is 0, or false for all other * cases. 
*/ -static inline bool +static __always_inline bool atomic_dec_and_test(atomic_t *v) { return atomic_dec_return(v) == 0; @@ -1035,6 +1138,8 @@ atomic_dec_and_test(atomic_t *v) #define atomic_dec_and_test atomic_dec_and_test #endif +#define arch_atomic_inc_and_test atomic_inc_and_test + #ifndef atomic_inc_and_test /** * atomic_inc_and_test - increment and test @@ -1044,7 +1149,7 @@ atomic_dec_and_test(atomic_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic_inc_and_test(atomic_t *v) { return atomic_inc_return(v) == 0; @@ -1052,6 +1157,8 @@ atomic_inc_and_test(atomic_t *v) #define atomic_inc_and_test atomic_inc_and_test #endif +#define arch_atomic_add_negative atomic_add_negative + #ifndef atomic_add_negative /** * atomic_add_negative - add and test if negative @@ -1062,7 +1169,7 @@ atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic_add_negative(int i, atomic_t *v) { return atomic_add_return(i, v) < 0; @@ -1070,6 +1177,8 @@ atomic_add_negative(int i, atomic_t *v) #define atomic_add_negative atomic_add_negative #endif +#define arch_atomic_fetch_add_unless atomic_fetch_add_unless + #ifndef atomic_fetch_add_unless /** * atomic_fetch_add_unless - add unless the number is already a given value @@ -1080,7 +1189,7 @@ atomic_add_negative(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline int +static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) { int c = atomic_read(v); @@ -1095,6 +1204,8 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) #define atomic_fetch_add_unless atomic_fetch_add_unless #endif +#define arch_atomic_add_unless atomic_add_unless + #ifndef atomic_add_unless /** * atomic_add_unless - add unless the number is already a given value @@ -1105,7 +1216,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic_add_unless(atomic_t *v, int a, int u) { return atomic_fetch_add_unless(v, a, u) != u; @@ -1113,6 +1224,8 @@ atomic_add_unless(atomic_t *v, int a, int u) #define atomic_add_unless atomic_add_unless #endif +#define arch_atomic_inc_not_zero atomic_inc_not_zero + #ifndef atomic_inc_not_zero /** * atomic_inc_not_zero - increment unless the number is zero @@ -1121,7 +1234,7 @@ atomic_add_unless(atomic_t *v, int a, int u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
*/ -static inline bool +static __always_inline bool atomic_inc_not_zero(atomic_t *v) { return atomic_add_unless(v, 1, 0); @@ -1129,8 +1242,10 @@ atomic_inc_not_zero(atomic_t *v) #define atomic_inc_not_zero atomic_inc_not_zero #endif +#define arch_atomic_inc_unless_negative atomic_inc_unless_negative + #ifndef atomic_inc_unless_negative -static inline bool +static __always_inline bool atomic_inc_unless_negative(atomic_t *v) { int c = atomic_read(v); @@ -1145,8 +1260,10 @@ atomic_inc_unless_negative(atomic_t *v) #define atomic_inc_unless_negative atomic_inc_unless_negative #endif +#define arch_atomic_dec_unless_positive atomic_dec_unless_positive + #ifndef atomic_dec_unless_positive -static inline bool +static __always_inline bool atomic_dec_unless_positive(atomic_t *v) { int c = atomic_read(v); @@ -1161,8 +1278,10 @@ atomic_dec_unless_positive(atomic_t *v) #define atomic_dec_unless_positive atomic_dec_unless_positive #endif +#define arch_atomic_dec_if_positive atomic_dec_if_positive + #ifndef atomic_dec_if_positive -static inline int +static __always_inline int atomic_dec_if_positive(atomic_t *v) { int dec, c = atomic_read(v); @@ -1178,15 +1297,15 @@ atomic_dec_if_positive(atomic_t *v) #define atomic_dec_if_positive atomic_dec_if_positive #endif -#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) - #ifdef CONFIG_GENERIC_ATOMIC64 #include <asm-generic/atomic64.h> #endif +#define arch_atomic64_read atomic64_read +#define arch_atomic64_read_acquire atomic64_read_acquire + #ifndef atomic64_read_acquire -static inline s64 +static __always_inline s64 atomic64_read_acquire(const atomic64_t *v) { return smp_load_acquire(&(v)->counter); @@ -1194,8 +1313,11 @@ atomic64_read_acquire(const atomic64_t *v) #define atomic64_read_acquire atomic64_read_acquire #endif +#define arch_atomic64_set atomic64_set +#define arch_atomic64_set_release atomic64_set_release + #ifndef atomic64_set_release -static inline void +static __always_inline void atomic64_set_release(atomic64_t *v, s64 i) { smp_store_release(&(v)->counter, i); @@ -1203,6 +1325,13 @@ atomic64_set_release(atomic64_t *v, s64 i) #define atomic64_set_release atomic64_set_release #endif +#define arch_atomic64_add atomic64_add + +#define arch_atomic64_add_return atomic64_add_return +#define arch_atomic64_add_return_acquire atomic64_add_return_acquire +#define arch_atomic64_add_return_release atomic64_add_return_release +#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed + #ifndef atomic64_add_return_relaxed #define atomic64_add_return_acquire atomic64_add_return #define atomic64_add_return_release atomic64_add_return @@ -1210,7 +1339,7 @@ atomic64_set_release(atomic64_t *v, s64 i) #else /* atomic64_add_return_relaxed */ #ifndef atomic64_add_return_acquire -static inline s64 +static __always_inline s64 atomic64_add_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_add_return_relaxed(i, v); @@ -1221,7 +1350,7 @@ atomic64_add_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return_release -static inline s64 +static __always_inline s64 atomic64_add_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1231,7 +1360,7 @@ atomic64_add_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_add_return -static inline s64 +static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v) { s64 ret; @@ -1245,6 +1374,11 @@ atomic64_add_return(s64 i, atomic64_t *v) #endif /* 
atomic64_add_return_relaxed */ +#define arch_atomic64_fetch_add atomic64_fetch_add +#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire +#define arch_atomic64_fetch_add_release atomic64_fetch_add_release +#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed + #ifndef atomic64_fetch_add_relaxed #define atomic64_fetch_add_acquire atomic64_fetch_add #define atomic64_fetch_add_release atomic64_fetch_add @@ -1252,7 +1386,7 @@ atomic64_add_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_add_relaxed */ #ifndef atomic64_fetch_add_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_add_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_add_relaxed(i, v); @@ -1263,7 +1397,7 @@ atomic64_fetch_add_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add_release -static inline s64 +static __always_inline s64 atomic64_fetch_add_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1273,7 +1407,7 @@ atomic64_fetch_add_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_add -static inline s64 +static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v) { s64 ret; @@ -1287,6 +1421,13 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #endif /* atomic64_fetch_add_relaxed */ +#define arch_atomic64_sub atomic64_sub + +#define arch_atomic64_sub_return atomic64_sub_return +#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire +#define arch_atomic64_sub_return_release atomic64_sub_return_release +#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed + #ifndef atomic64_sub_return_relaxed #define atomic64_sub_return_acquire atomic64_sub_return #define atomic64_sub_return_release atomic64_sub_return @@ -1294,7 +1435,7 @@ atomic64_fetch_add(s64 i, atomic64_t *v) #else /* atomic64_sub_return_relaxed */ #ifndef atomic64_sub_return_acquire -static inline s64 +static __always_inline s64 atomic64_sub_return_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_sub_return_relaxed(i, v); @@ -1305,7 +1446,7 @@ atomic64_sub_return_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return_release -static inline s64 +static __always_inline s64 atomic64_sub_return_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1315,7 +1456,7 @@ atomic64_sub_return_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_sub_return -static inline s64 +static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v) { s64 ret; @@ -1329,6 +1470,11 @@ atomic64_sub_return(s64 i, atomic64_t *v) #endif /* atomic64_sub_return_relaxed */ +#define arch_atomic64_fetch_sub atomic64_fetch_sub +#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire +#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release +#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed + #ifndef atomic64_fetch_sub_relaxed #define atomic64_fetch_sub_acquire atomic64_fetch_sub #define atomic64_fetch_sub_release atomic64_fetch_sub @@ -1336,7 +1482,7 @@ atomic64_sub_return(s64 i, atomic64_t *v) #else /* atomic64_fetch_sub_relaxed */ #ifndef atomic64_fetch_sub_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_sub_relaxed(i, v); @@ -1347,7 +1493,7 @@ atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_sub_release -static inline s64 +static __always_inline s64 atomic64_fetch_sub_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1357,7 +1503,7 @@ atomic64_fetch_sub_release(s64 i, atomic64_t *v) #endif #ifndef 
atomic64_fetch_sub -static inline s64 +static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v) { s64 ret; @@ -1371,8 +1517,10 @@ atomic64_fetch_sub(s64 i, atomic64_t *v) #endif /* atomic64_fetch_sub_relaxed */ +#define arch_atomic64_inc atomic64_inc + #ifndef atomic64_inc -static inline void +static __always_inline void atomic64_inc(atomic64_t *v) { atomic64_add(1, v); @@ -1380,6 +1528,11 @@ atomic64_inc(atomic64_t *v) #define atomic64_inc atomic64_inc #endif +#define arch_atomic64_inc_return atomic64_inc_return +#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire +#define arch_atomic64_inc_return_release atomic64_inc_return_release +#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed + #ifndef atomic64_inc_return_relaxed #ifdef atomic64_inc_return #define atomic64_inc_return_acquire atomic64_inc_return @@ -1388,7 +1541,7 @@ atomic64_inc(atomic64_t *v) #endif /* atomic64_inc_return */ #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { return atomic64_add_return(1, v); @@ -1397,7 +1550,7 @@ atomic64_inc_return(atomic64_t *v) #endif #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { return atomic64_add_return_acquire(1, v); @@ -1406,7 +1559,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { return atomic64_add_return_release(1, v); @@ -1415,7 +1568,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return_relaxed -static inline s64 +static __always_inline s64 atomic64_inc_return_relaxed(atomic64_t *v) { return atomic64_add_return_relaxed(1, v); @@ -1426,7 +1579,7 @@ atomic64_inc_return_relaxed(atomic64_t *v) #else /* atomic64_inc_return_relaxed */ #ifndef atomic64_inc_return_acquire -static inline s64 +static __always_inline s64 atomic64_inc_return_acquire(atomic64_t *v) { s64 ret = atomic64_inc_return_relaxed(v); @@ -1437,7 +1590,7 @@ atomic64_inc_return_acquire(atomic64_t *v) #endif #ifndef atomic64_inc_return_release -static inline s64 +static __always_inline s64 atomic64_inc_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1447,7 +1600,7 @@ atomic64_inc_return_release(atomic64_t *v) #endif #ifndef atomic64_inc_return -static inline s64 +static __always_inline s64 atomic64_inc_return(atomic64_t *v) { s64 ret; @@ -1461,6 +1614,11 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_inc_return_relaxed */ +#define arch_atomic64_fetch_inc atomic64_fetch_inc +#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire +#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release +#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed + #ifndef atomic64_fetch_inc_relaxed #ifdef atomic64_fetch_inc #define atomic64_fetch_inc_acquire atomic64_fetch_inc @@ -1469,7 +1627,7 @@ atomic64_inc_return(atomic64_t *v) #endif /* atomic64_fetch_inc */ #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { return atomic64_fetch_add(1, v); @@ -1478,7 +1636,7 @@ atomic64_fetch_inc(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { return atomic64_fetch_add_acquire(1, v); @@ -1487,7 +1645,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static 
inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { return atomic64_fetch_add_release(1, v); @@ -1496,7 +1654,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_inc_relaxed(atomic64_t *v) { return atomic64_fetch_add_relaxed(1, v); @@ -1507,7 +1665,7 @@ atomic64_fetch_inc_relaxed(atomic64_t *v) #else /* atomic64_fetch_inc_relaxed */ #ifndef atomic64_fetch_inc_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_inc_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_inc_relaxed(v); @@ -1518,7 +1676,7 @@ atomic64_fetch_inc_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_inc_release -static inline s64 +static __always_inline s64 atomic64_fetch_inc_release(atomic64_t *v) { __atomic_release_fence(); @@ -1528,7 +1686,7 @@ atomic64_fetch_inc_release(atomic64_t *v) #endif #ifndef atomic64_fetch_inc -static inline s64 +static __always_inline s64 atomic64_fetch_inc(atomic64_t *v) { s64 ret; @@ -1542,8 +1700,10 @@ atomic64_fetch_inc(atomic64_t *v) #endif /* atomic64_fetch_inc_relaxed */ +#define arch_atomic64_dec atomic64_dec + #ifndef atomic64_dec -static inline void +static __always_inline void atomic64_dec(atomic64_t *v) { atomic64_sub(1, v); @@ -1551,6 +1711,11 @@ atomic64_dec(atomic64_t *v) #define atomic64_dec atomic64_dec #endif +#define arch_atomic64_dec_return atomic64_dec_return +#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire +#define arch_atomic64_dec_return_release atomic64_dec_return_release +#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed + #ifndef atomic64_dec_return_relaxed #ifdef atomic64_dec_return #define atomic64_dec_return_acquire atomic64_dec_return @@ -1559,7 +1724,7 @@ atomic64_dec(atomic64_t *v) #endif /* atomic64_dec_return */ #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { return atomic64_sub_return(1, v); @@ -1568,7 +1733,7 @@ atomic64_dec_return(atomic64_t *v) #endif #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { return atomic64_sub_return_acquire(1, v); @@ -1577,7 +1742,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { return atomic64_sub_return_release(1, v); @@ -1586,7 +1751,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return_relaxed -static inline s64 +static __always_inline s64 atomic64_dec_return_relaxed(atomic64_t *v) { return atomic64_sub_return_relaxed(1, v); @@ -1597,7 +1762,7 @@ atomic64_dec_return_relaxed(atomic64_t *v) #else /* atomic64_dec_return_relaxed */ #ifndef atomic64_dec_return_acquire -static inline s64 +static __always_inline s64 atomic64_dec_return_acquire(atomic64_t *v) { s64 ret = atomic64_dec_return_relaxed(v); @@ -1608,7 +1773,7 @@ atomic64_dec_return_acquire(atomic64_t *v) #endif #ifndef atomic64_dec_return_release -static inline s64 +static __always_inline s64 atomic64_dec_return_release(atomic64_t *v) { __atomic_release_fence(); @@ -1618,7 +1783,7 @@ atomic64_dec_return_release(atomic64_t *v) #endif #ifndef atomic64_dec_return -static inline s64 +static __always_inline s64 atomic64_dec_return(atomic64_t *v) { s64 ret; @@ -1632,6 +1797,11 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_dec_return_relaxed */ +#define arch_atomic64_fetch_dec 
atomic64_fetch_dec +#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire +#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release +#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed + #ifndef atomic64_fetch_dec_relaxed #ifdef atomic64_fetch_dec #define atomic64_fetch_dec_acquire atomic64_fetch_dec @@ -1640,7 +1810,7 @@ atomic64_dec_return(atomic64_t *v) #endif /* atomic64_fetch_dec */ #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { return atomic64_fetch_sub(1, v); @@ -1649,7 +1819,7 @@ atomic64_fetch_dec(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { return atomic64_fetch_sub_acquire(1, v); @@ -1658,7 +1828,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { return atomic64_fetch_sub_release(1, v); @@ -1667,7 +1837,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_dec_relaxed(atomic64_t *v) { return atomic64_fetch_sub_relaxed(1, v); @@ -1678,7 +1848,7 @@ atomic64_fetch_dec_relaxed(atomic64_t *v) #else /* atomic64_fetch_dec_relaxed */ #ifndef atomic64_fetch_dec_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_dec_acquire(atomic64_t *v) { s64 ret = atomic64_fetch_dec_relaxed(v); @@ -1689,7 +1859,7 @@ atomic64_fetch_dec_acquire(atomic64_t *v) #endif #ifndef atomic64_fetch_dec_release -static inline s64 +static __always_inline s64 atomic64_fetch_dec_release(atomic64_t *v) { __atomic_release_fence(); @@ -1699,7 +1869,7 @@ atomic64_fetch_dec_release(atomic64_t *v) #endif #ifndef atomic64_fetch_dec -static inline s64 +static __always_inline s64 atomic64_fetch_dec(atomic64_t *v) { s64 ret; @@ -1713,6 +1883,13 @@ atomic64_fetch_dec(atomic64_t *v) #endif /* atomic64_fetch_dec_relaxed */ +#define arch_atomic64_and atomic64_and + +#define arch_atomic64_fetch_and atomic64_fetch_and +#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire +#define arch_atomic64_fetch_and_release atomic64_fetch_and_release +#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed + #ifndef atomic64_fetch_and_relaxed #define atomic64_fetch_and_acquire atomic64_fetch_and #define atomic64_fetch_and_release atomic64_fetch_and @@ -1720,7 +1897,7 @@ atomic64_fetch_dec(atomic64_t *v) #else /* atomic64_fetch_and_relaxed */ #ifndef atomic64_fetch_and_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_and_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_and_relaxed(i, v); @@ -1731,7 +1908,7 @@ atomic64_fetch_and_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and_release -static inline s64 +static __always_inline s64 atomic64_fetch_and_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1741,7 +1918,7 @@ atomic64_fetch_and_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_and -static inline s64 +static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v) { s64 ret; @@ -1755,8 +1932,10 @@ atomic64_fetch_and(s64 i, atomic64_t *v) #endif /* atomic64_fetch_and_relaxed */ +#define arch_atomic64_andnot atomic64_andnot + #ifndef atomic64_andnot -static inline void +static __always_inline void atomic64_andnot(s64 i, atomic64_t *v) { atomic64_and(~i, v); @@ -1764,6 +1943,11 @@ atomic64_andnot(s64 i, 
atomic64_t *v) #define atomic64_andnot atomic64_andnot #endif +#define arch_atomic64_fetch_andnot atomic64_fetch_andnot +#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire +#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release +#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed + #ifndef atomic64_fetch_andnot_relaxed #ifdef atomic64_fetch_andnot #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot @@ -1772,7 +1956,7 @@ atomic64_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot */ #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { return atomic64_fetch_and(~i, v); @@ -1781,7 +1965,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { return atomic64_fetch_and_acquire(~i, v); @@ -1790,7 +1974,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { return atomic64_fetch_and_release(~i, v); @@ -1799,7 +1983,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_relaxed -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) { return atomic64_fetch_and_relaxed(~i, v); @@ -1810,7 +1994,7 @@ atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) #else /* atomic64_fetch_andnot_relaxed */ #ifndef atomic64_fetch_andnot_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_andnot_relaxed(i, v); @@ -1821,7 +2005,7 @@ atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot_release -static inline s64 +static __always_inline s64 atomic64_fetch_andnot_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1831,7 +2015,7 @@ atomic64_fetch_andnot_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_andnot -static inline s64 +static __always_inline s64 atomic64_fetch_andnot(s64 i, atomic64_t *v) { s64 ret; @@ -1845,6 +2029,13 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #endif /* atomic64_fetch_andnot_relaxed */ +#define arch_atomic64_or atomic64_or + +#define arch_atomic64_fetch_or atomic64_fetch_or +#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire +#define arch_atomic64_fetch_or_release atomic64_fetch_or_release +#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed + #ifndef atomic64_fetch_or_relaxed #define atomic64_fetch_or_acquire atomic64_fetch_or #define atomic64_fetch_or_release atomic64_fetch_or @@ -1852,7 +2043,7 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v) #else /* atomic64_fetch_or_relaxed */ #ifndef atomic64_fetch_or_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_or_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_or_relaxed(i, v); @@ -1863,7 +2054,7 @@ atomic64_fetch_or_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or_release -static inline s64 +static __always_inline s64 atomic64_fetch_or_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1873,7 +2064,7 @@ atomic64_fetch_or_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_or -static inline s64 +static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v) { s64 ret; @@ -1887,6 +2078,13 @@ 
atomic64_fetch_or(s64 i, atomic64_t *v) #endif /* atomic64_fetch_or_relaxed */ +#define arch_atomic64_xor atomic64_xor + +#define arch_atomic64_fetch_xor atomic64_fetch_xor +#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire +#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release +#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed + #ifndef atomic64_fetch_xor_relaxed #define atomic64_fetch_xor_acquire atomic64_fetch_xor #define atomic64_fetch_xor_release atomic64_fetch_xor @@ -1894,7 +2092,7 @@ atomic64_fetch_or(s64 i, atomic64_t *v) #else /* atomic64_fetch_xor_relaxed */ #ifndef atomic64_fetch_xor_acquire -static inline s64 +static __always_inline s64 atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) { s64 ret = atomic64_fetch_xor_relaxed(i, v); @@ -1905,7 +2103,7 @@ atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor_release -static inline s64 +static __always_inline s64 atomic64_fetch_xor_release(s64 i, atomic64_t *v) { __atomic_release_fence(); @@ -1915,7 +2113,7 @@ atomic64_fetch_xor_release(s64 i, atomic64_t *v) #endif #ifndef atomic64_fetch_xor -static inline s64 +static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v) { s64 ret; @@ -1929,6 +2127,11 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #endif /* atomic64_fetch_xor_relaxed */ +#define arch_atomic64_xchg atomic64_xchg +#define arch_atomic64_xchg_acquire atomic64_xchg_acquire +#define arch_atomic64_xchg_release atomic64_xchg_release +#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed + #ifndef atomic64_xchg_relaxed #define atomic64_xchg_acquire atomic64_xchg #define atomic64_xchg_release atomic64_xchg @@ -1936,7 +2139,7 @@ atomic64_fetch_xor(s64 i, atomic64_t *v) #else /* atomic64_xchg_relaxed */ #ifndef atomic64_xchg_acquire -static inline s64 +static __always_inline s64 atomic64_xchg_acquire(atomic64_t *v, s64 i) { s64 ret = atomic64_xchg_relaxed(v, i); @@ -1947,7 +2150,7 @@ atomic64_xchg_acquire(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg_release -static inline s64 +static __always_inline s64 atomic64_xchg_release(atomic64_t *v, s64 i) { __atomic_release_fence(); @@ -1957,7 +2160,7 @@ atomic64_xchg_release(atomic64_t *v, s64 i) #endif #ifndef atomic64_xchg -static inline s64 +static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i) { s64 ret; @@ -1971,6 +2174,11 @@ atomic64_xchg(atomic64_t *v, s64 i) #endif /* atomic64_xchg_relaxed */ +#define arch_atomic64_cmpxchg atomic64_cmpxchg +#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire +#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release +#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed + #ifndef atomic64_cmpxchg_relaxed #define atomic64_cmpxchg_acquire atomic64_cmpxchg #define atomic64_cmpxchg_release atomic64_cmpxchg @@ -1978,7 +2186,7 @@ atomic64_xchg(atomic64_t *v, s64 i) #else /* atomic64_cmpxchg_relaxed */ #ifndef atomic64_cmpxchg_acquire -static inline s64 +static __always_inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) { s64 ret = atomic64_cmpxchg_relaxed(v, old, new); @@ -1989,7 +2197,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg_release -static inline s64 +static __always_inline s64 atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) { __atomic_release_fence(); @@ -1999,7 +2207,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) #endif #ifndef atomic64_cmpxchg -static inline s64 +static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 
old, s64 new) { s64 ret; @@ -2013,6 +2221,11 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_cmpxchg_relaxed */ +#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg +#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire +#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release +#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed + #ifndef atomic64_try_cmpxchg_relaxed #ifdef atomic64_try_cmpxchg #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg @@ -2021,7 +2234,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) #endif /* atomic64_try_cmpxchg */ #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2034,7 +2247,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2047,7 +2260,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2060,7 +2273,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_relaxed -static inline bool +static __always_inline bool atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) { s64 r, o = *old; @@ -2075,7 +2288,7 @@ atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) #else /* atomic64_try_cmpxchg_relaxed */ #ifndef atomic64_try_cmpxchg_acquire -static inline bool +static __always_inline bool atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) { bool ret = atomic64_try_cmpxchg_relaxed(v, old, new); @@ -2086,7 +2299,7 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg_release -static inline bool +static __always_inline bool atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) { __atomic_release_fence(); @@ -2096,7 +2309,7 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) #endif #ifndef atomic64_try_cmpxchg -static inline bool +static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) { bool ret; @@ -2110,6 +2323,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) #endif /* atomic64_try_cmpxchg_relaxed */ +#define arch_atomic64_sub_and_test atomic64_sub_and_test + #ifndef atomic64_sub_and_test /** * atomic64_sub_and_test - subtract value from variable and test result @@ -2120,7 +2335,7 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) * true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v) { return atomic64_sub_return(i, v) == 0; @@ -2128,6 +2343,8 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) #define atomic64_sub_and_test atomic64_sub_and_test #endif +#define arch_atomic64_dec_and_test atomic64_dec_and_test + #ifndef atomic64_dec_and_test /** * atomic64_dec_and_test - decrement and test @@ -2137,7 +2354,7 @@ atomic64_sub_and_test(s64 i, atomic64_t *v) * returns true if the result is 0, or false for all other * cases. 
*/ -static inline bool +static __always_inline bool atomic64_dec_and_test(atomic64_t *v) { return atomic64_dec_return(v) == 0; @@ -2145,6 +2362,8 @@ atomic64_dec_and_test(atomic64_t *v) #define atomic64_dec_and_test atomic64_dec_and_test #endif +#define arch_atomic64_inc_and_test atomic64_inc_and_test + #ifndef atomic64_inc_and_test /** * atomic64_inc_and_test - increment and test @@ -2154,7 +2373,7 @@ atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline bool +static __always_inline bool atomic64_inc_and_test(atomic64_t *v) { return atomic64_inc_return(v) == 0; @@ -2162,6 +2381,8 @@ atomic64_inc_and_test(atomic64_t *v) #define atomic64_inc_and_test atomic64_inc_and_test #endif +#define arch_atomic64_add_negative atomic64_add_negative + #ifndef atomic64_add_negative /** * atomic64_add_negative - add and test if negative @@ -2172,7 +2393,7 @@ atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool +static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v) { return atomic64_add_return(i, v) < 0; @@ -2180,6 +2401,8 @@ atomic64_add_negative(s64 i, atomic64_t *v) #define atomic64_add_negative atomic64_add_negative #endif +#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless + #ifndef atomic64_fetch_add_unless /** * atomic64_fetch_add_unless - add unless the number is already a given value @@ -2190,7 +2413,7 @@ atomic64_add_negative(s64 i, atomic64_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns original value of @v */ -static inline s64 +static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) { s64 c = atomic64_read(v); @@ -2205,6 +2428,8 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) #define atomic64_fetch_add_unless atomic64_fetch_add_unless #endif +#define arch_atomic64_add_unless atomic64_add_unless + #ifndef atomic64_add_unless /** * atomic64_add_unless - add unless the number is already a given value @@ -2215,7 +2440,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically adds @a to @v, if @v was not already @u. * Returns true if the addition was done. */ -static inline bool +static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u) { return atomic64_fetch_add_unless(v, a, u) != u; @@ -2223,6 +2448,8 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) #define atomic64_add_unless atomic64_add_unless #endif +#define arch_atomic64_inc_not_zero atomic64_inc_not_zero + #ifndef atomic64_inc_not_zero /** * atomic64_inc_not_zero - increment unless the number is zero @@ -2231,7 +2458,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u) * Atomically increments @v by 1, if @v is non-zero. * Returns true if the increment was done. 
*/ -static inline bool +static __always_inline bool atomic64_inc_not_zero(atomic64_t *v) { return atomic64_add_unless(v, 1, 0); @@ -2239,8 +2466,10 @@ atomic64_inc_not_zero(atomic64_t *v) #define atomic64_inc_not_zero atomic64_inc_not_zero #endif +#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative + #ifndef atomic64_inc_unless_negative -static inline bool +static __always_inline bool atomic64_inc_unless_negative(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2255,8 +2484,10 @@ atomic64_inc_unless_negative(atomic64_t *v) #define atomic64_inc_unless_negative atomic64_inc_unless_negative #endif +#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive + #ifndef atomic64_dec_unless_positive -static inline bool +static __always_inline bool atomic64_dec_unless_positive(atomic64_t *v) { s64 c = atomic64_read(v); @@ -2271,8 +2502,10 @@ atomic64_dec_unless_positive(atomic64_t *v) #define atomic64_dec_unless_positive atomic64_dec_unless_positive #endif +#define arch_atomic64_dec_if_positive atomic64_dec_if_positive + #ifndef atomic64_dec_if_positive -static inline s64 +static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v) { s64 dec, c = atomic64_read(v); @@ -2288,8 +2521,5 @@ atomic64_dec_if_positive(atomic64_t *v) #define atomic64_dec_if_positive atomic64_dec_if_positive #endif -#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) -#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) - #endif /* _LINUX_ATOMIC_FALLBACK_H */ -// 25de4a2804d70f57e994fe3b419148658bb5378a +// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 4c0d009a46f0..571a11008ab5 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h @@ -25,6 +25,12 @@ * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. */ +#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + +#define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) +#define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) + /* * The idea here is to build acquire/release variants by adding explicit * barriers on top of the relaxed variant. 
In the case where the relaxed @@ -71,7 +77,12 @@ __ret; \ }) +#ifdef ARCH_ATOMIC +#include <linux/atomic-arch-fallback.h> +#include <asm-generic/atomic-instrumented.h> +#else #include <linux/atomic-fallback.h> +#endif #include <asm-generic/atomic-long.h> diff --git a/include/linux/audit.h b/include/linux/audit.h index f9ceae57ca8d..b3d859831a31 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -12,6 +12,7 @@ #include <linux/sched.h> #include <linux/ptrace.h> #include <uapi/linux/audit.h> +#include <uapi/linux/netfilter/nf_tables.h> #define AUDIT_INO_UNSET ((unsigned long)-1) #define AUDIT_DEV_UNSET ((dev_t)-1) @@ -19,7 +20,7 @@ struct audit_sig_info { uid_t uid; pid_t pid; - char ctx[0]; + char ctx[]; }; struct audit_buffer; @@ -94,6 +95,29 @@ struct audit_ntp_data { struct audit_ntp_data {}; #endif +enum audit_nfcfgop { + AUDIT_XT_OP_REGISTER, + AUDIT_XT_OP_REPLACE, + AUDIT_XT_OP_UNREGISTER, + AUDIT_NFT_OP_TABLE_REGISTER, + AUDIT_NFT_OP_TABLE_UNREGISTER, + AUDIT_NFT_OP_CHAIN_REGISTER, + AUDIT_NFT_OP_CHAIN_UNREGISTER, + AUDIT_NFT_OP_RULE_REGISTER, + AUDIT_NFT_OP_RULE_UNREGISTER, + AUDIT_NFT_OP_SET_REGISTER, + AUDIT_NFT_OP_SET_UNREGISTER, + AUDIT_NFT_OP_SETELEM_REGISTER, + AUDIT_NFT_OP_SETELEM_UNREGISTER, + AUDIT_NFT_OP_GEN_REGISTER, + AUDIT_NFT_OP_OBJ_REGISTER, + AUDIT_NFT_OP_OBJ_UNREGISTER, + AUDIT_NFT_OP_OBJ_RESET, + AUDIT_NFT_OP_FLOWTABLE_REGISTER, + AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, + AUDIT_NFT_OP_INVALID, +}; + extern int is_audit_feature_set(int which); extern int __init audit_register_class(int class, unsigned *list); @@ -268,7 +292,7 @@ extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1, extern void __audit_syscall_exit(int ret_success, long ret_value); extern struct filename *__audit_reusename(const __user char *uptr); extern void __audit_getname(struct filename *name); - +extern void __audit_getcwd(void); extern void __audit_inode(struct filename *name, const struct dentry *dentry, unsigned int flags); extern void __audit_file(const struct file *); @@ -327,6 +351,11 @@ static inline void audit_getname(struct filename *name) if (unlikely(!audit_dummy_context())) __audit_getname(name); } +static inline void audit_getcwd(void) +{ + if (unlikely(audit_context())) + __audit_getcwd(); +} static inline void audit_inode(struct filename *name, const struct dentry *dentry, unsigned int aflags) { @@ -379,6 +408,8 @@ extern void __audit_log_kern_module(char *name); extern void __audit_fanotify(unsigned int response); extern void __audit_tk_injoffset(struct timespec64 offset); extern void __audit_ntp_log(const struct audit_ntp_data *ad); +extern void __audit_log_nfcfg(const char *name, u8 af, unsigned int nentries, + enum audit_nfcfgop op, gfp_t gfp); static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) { @@ -514,6 +545,14 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad) __audit_ntp_log(ad); } +static inline void audit_log_nfcfg(const char *name, u8 af, + unsigned int nentries, + enum audit_nfcfgop op, gfp_t gfp) +{ + if (audit_enabled) + __audit_log_nfcfg(name, af, nentries, op, gfp); +} + extern int audit_n_rules; extern int audit_signals; #else /* CONFIG_AUDITSYSCALL */ @@ -545,13 +584,7 @@ static inline struct filename *audit_reusename(const __user char *name) } static inline void audit_getname(struct filename *name) { } -static inline void __audit_inode(struct filename *name, - const struct dentry *dentry, - unsigned int flags) -{ } -static inline void __audit_inode_child(struct inode *parent, - const struct dentry 
*dentry, - const unsigned char type) +static inline void audit_getcwd(void) { } static inline void audit_inode(struct filename *name, const struct dentry *dentry, @@ -646,6 +679,12 @@ static inline void audit_ntp_log(const struct audit_ntp_data *ad) static inline void audit_ptrace(struct task_struct *t) { } + +static inline void audit_log_nfcfg(const char *name, u8 af, + unsigned int nentries, + enum audit_nfcfgop op, gfp_t gfp) +{ } + #define audit_n_rules 0 #define audit_signals 0 #endif /* CONFIG_AUDITSYSCALL */ @@ -655,9 +694,4 @@ static inline bool audit_loginuid_set(struct task_struct *tsk) return uid_valid(audit_get_loginuid(tsk)); } -static inline void audit_log_string(struct audit_buffer *ab, const char *buf) -{ - audit_log_n_string(ab, buf, strlen(buf)); -} - #endif diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index ca956b672ac0..40bad71865ea 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -476,6 +476,7 @@ struct virtchnl_rss_key { u16 vsi_id; u16 key_len; u8 key[1]; /* RSS hash key, packed bytes */ + u8 pad[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); @@ -484,6 +485,7 @@ struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; u8 lut[1]; /* RSS lookup table */ + u8 pad[1]; }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); @@ -572,6 +574,7 @@ struct virtchnl_filter { enum virtchnl_action action; u32 action_meta; u8 field_flags; + u8 pad[3]; }; VIRTCHNL_CHECK_STRUCT_LEN(272, virtchnl_filter); @@ -610,6 +613,7 @@ struct virtchnl_pf_event { /* link_speed provided in Mbps */ u32 link_speed; u8 link_status; + u8 pad[3]; } link_event_adv; } event_data; @@ -635,6 +639,7 @@ struct virtchnl_iwarp_qv_info { u16 ceq_idx; u16 aeq_idx; u8 itr_idx; + u8 pad[3]; }; VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_iwarp_qv_info); diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 7367150f962a..fff9367a6348 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -33,8 +33,6 @@ enum wb_congested_state { WB_sync_congested, /* The sync queue is getting full */ }; -typedef int (congested_fn)(void *, int); - enum wb_stat_item { WB_RECLAIMABLE, WB_WRITEBACK, @@ -88,26 +86,6 @@ struct wb_completion { struct wb_completion cmpl = WB_COMPLETION_INIT(bdi) /* - * For cgroup writeback, multiple wb's may map to the same blkcg. Those - * wb's can operate mostly independently but should share the congested - * state. To facilitate such sharing, the congested state is tracked using - * the following struct which is created on demand, indexed by blkcg ID on - * its bdi, and refcounted. - */ -struct bdi_writeback_congested { - unsigned long state; /* WB_[a]sync_congested flags */ - refcount_t refcnt; /* nr of attached wb's and blkg */ - -#ifdef CONFIG_CGROUP_WRITEBACK - struct backing_dev_info *__bdi; /* the associated bdi, set to NULL - * on bdi unregistration. For memcg-wb - * internal use only! */ - int blkcg_id; /* ID of the associated blkcg */ - struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */ -#endif -}; - -/* * Each wb (bdi_writeback) can perform writeback operations, is measured * and throttled, independently. Without cgroup writeback, each bdi * (bdi_writeback) is served by its embedded bdi->wb. 
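The backing-dev-defs.h hunks above drop the refcounted bdi_writeback_congested object and the congested_fn indirection; the hunks that follow fold the congestion state into a plain flags word inside struct bdi_writeback, so wb_congested() becomes a simple mask test. A minimal user-space sketch of the resulting model, with hypothetical names, not the kernel code:

#include <stdbool.h>

/* Congestion state is now just flag bits in a word owned by each
 * writeback structure: no shared, refcounted object to look up. */
enum {
	SKETCH_SYNC_CONGESTED  = 1 << 0,	/* mirrors WB_sync_congested */
	SKETCH_ASYNC_CONGESTED = 1 << 1,	/* mirrors WB_async_congested */
};

struct writeback_sketch {
	unsigned long congested;	/* WB_[a]sync_congested flags */
};

static void sketch_set_congested(struct writeback_sketch *wb, unsigned long bits)
{
	wb->congested |= bits;
}

static void sketch_clear_congested(struct writeback_sketch *wb, unsigned long bits)
{
	wb->congested &= ~bits;
}

/* Mirrors the reworked wb_congested(): a plain mask test, with no
 * congested_fn callback and no refcounted congested object. */
static bool sketch_is_congested(const struct writeback_sketch *wb,
				unsigned long bits)
{
	return wb->congested & bits;
}

The real clear_bdi_congested()/set_bdi_congested() become out-of-line functions in this patch and do more bookkeeping; the sketch only shows the simplified data model.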
@@ -140,7 +118,7 @@ struct bdi_writeback { struct percpu_counter stat[NR_WB_STAT_ITEMS]; - struct bdi_writeback_congested *congested; + unsigned long congested; /* WB_[a]sync_congested flags */ unsigned long bw_time_stamp; /* last time write bw is updated */ unsigned long dirtied_stamp; @@ -190,10 +168,6 @@ struct backing_dev_info { struct list_head bdi_list; unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ unsigned long io_pages; /* max allowed IO size */ - congested_fn *congested_fn; /* Function pointer if device is md/dm */ - void *congested_data; /* Pointer to aux data for congested func */ - - const char *name; struct kref refcnt; /* Reference counter for the structure */ unsigned int capabilities; /* Device capabilities */ @@ -210,11 +184,8 @@ struct backing_dev_info { struct list_head wb_list; /* list of all wbs */ #ifdef CONFIG_CGROUP_WRITEBACK struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ - struct rb_root cgwb_congested_tree; /* their congested states */ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ -#else - struct bdi_writeback_congested *wb_congested; #endif wait_queue_head_t wb_waitq; @@ -234,18 +205,8 @@ enum { BLK_RW_SYNC = 1, }; -void clear_wb_congested(struct bdi_writeback_congested *congested, int sync); -void set_wb_congested(struct bdi_writeback_congested *congested, int sync); - -static inline void clear_bdi_congested(struct backing_dev_info *bdi, int sync) -{ - clear_wb_congested(bdi->wb.congested, sync); -} - -static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) -{ - set_wb_congested(bdi->wb.congested, sync); -} +void clear_bdi_congested(struct backing_dev_info *bdi, int sync); +void set_bdi_congested(struct backing_dev_info *bdi, int sync); struct wb_lock_cookie { bool locked; diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index c9ad5c3b7b4b..0b06b2d26c9a 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -33,14 +33,10 @@ int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...); __printf(2, 0) int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args); -int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); +void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner); void bdi_unregister(struct backing_dev_info *bdi); -struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id); -static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask) -{ - return bdi_alloc_node(gfp_mask, NUMA_NO_NODE); -} +struct backing_dev_info *bdi_alloc(int node_id); void wb_start_background_writeback(struct bdi_writeback *wb); void wb_workfn(struct work_struct *work); @@ -173,11 +169,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) { - struct backing_dev_info *bdi = wb->bdi; - - if (bdi->congested_fn) - return bdi->congested_fn(bdi->congested_data, cong_bits); - return wb->congested->state & cong_bits; + return wb->congested & cong_bits; } long congestion_wait(int sync, long timeout); @@ -228,9 +220,6 @@ static inline int bdi_sched_wait(void *word) #ifdef CONFIG_CGROUP_WRITEBACK -struct bdi_writeback_congested * -wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp); -void wb_congested_put(struct bdi_writeback_congested *congested); struct bdi_writeback *wb_get_lookup(struct 
backing_dev_info *bdi, struct cgroup_subsys_state *memcg_css); struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, @@ -408,19 +397,6 @@ static inline bool inode_cgwb_enabled(struct inode *inode) return false; } -static inline struct bdi_writeback_congested * -wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) -{ - refcount_inc(&bdi->wb_congested->refcnt); - return bdi->wb_congested; -} - -static inline void wb_congested_put(struct bdi_writeback_congested *congested) -{ - if (refcount_dec_and_test(&congested->refcnt)) - kfree(congested); -} - static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) { return &bdi->wb; diff --git a/include/linux/backlight.h b/include/linux/backlight.h index c7d6b2e8c3b5..614653e07e3a 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -14,113 +14,336 @@ #include <linux/mutex.h> #include <linux/notifier.h> -/* Notes on locking: - * - * backlight_device->ops_lock is an internal backlight lock protecting the - * ops pointer and no code outside the core should need to touch it. - * - * Access to update_status() is serialised by the update_lock mutex since - * most drivers seem to need this and historically get it wrong. - * - * Most drivers don't need locking on their get_brightness() method. - * If yours does, you need to implement it in the driver. You can use the - * update_lock mutex if appropriate. +/** + * enum backlight_update_reason - what method was used to update backlight * - * Any other use of the locks below is probably wrong. + * A driver indicates the method (reason) used for updating the backlight + * when calling backlight_force_update(). */ - enum backlight_update_reason { + /** + * @BACKLIGHT_UPDATE_HOTKEY: The backlight was updated using a hot-key. + */ BACKLIGHT_UPDATE_HOTKEY, + + /** + * @BACKLIGHT_UPDATE_SYSFS: The backlight was updated using sysfs. + */ BACKLIGHT_UPDATE_SYSFS, }; +/** + * enum backlight_type - the type of backlight control + * + * The type of interface used to control the backlight. + */ enum backlight_type { + /** + * @BACKLIGHT_RAW: + * + * The backlight is controlled using hardware registers. + */ BACKLIGHT_RAW = 1, + + /** + * @BACKLIGHT_PLATFORM: + * + * The backlight is controlled using a platform-specific interface. + */ BACKLIGHT_PLATFORM, + + /** + * @BACKLIGHT_FIRMWARE: + * + * The backlight is controlled using a standard firmware interface. + */ BACKLIGHT_FIRMWARE, + + /** + * @BACKLIGHT_TYPE_MAX: Number of entries. + */ BACKLIGHT_TYPE_MAX, }; +/** + * enum backlight_notification - the type of notification + * + * The notifications that is used for notification sent to the receiver + * that registered notifications using backlight_register_notifier(). + */ enum backlight_notification { + /** + * @BACKLIGHT_REGISTERED: The backlight device is registered. + */ BACKLIGHT_REGISTERED, + + /** + * @BACKLIGHT_UNREGISTERED: The backlight revice is unregistered. + */ BACKLIGHT_UNREGISTERED, }; +/** enum backlight_scale - the type of scale used for brightness values + * + * The type of scale used for brightness values. + */ enum backlight_scale { + /** + * @BACKLIGHT_SCALE_UNKNOWN: The scale is unknown. + */ BACKLIGHT_SCALE_UNKNOWN = 0, + + /** + * @BACKLIGHT_SCALE_LINEAR: The scale is linear. + * + * The linear scale will increase brightness the same for each step. + */ BACKLIGHT_SCALE_LINEAR, + + /** + * @BACKLIGHT_SCALE_NON_LINEAR: The scale is not linear. 
+ * + * This is often used when the brightness values tries to adjust to + * the relative perception of the eye demanding a non-linear scale. + */ BACKLIGHT_SCALE_NON_LINEAR, }; struct backlight_device; struct fb_info; +/** + * struct backlight_ops - backlight operations + * + * The backlight operations are specified when the backlight device is registered. + */ struct backlight_ops { + /** + * @options: Configure how operations are called from the core. + * + * The options parameter is used to adjust the behaviour of the core. + * Set BL_CORE_SUSPENDRESUME to get the update_status() operation called + * upon suspend and resume. + */ unsigned int options; #define BL_CORE_SUSPENDRESUME (1 << 0) - /* Notify the backlight driver some property has changed */ + /** + * @update_status: Operation called when properties have changed. + * + * Notify the backlight driver some property has changed. + * The update_status operation is protected by the update_lock. + * + * The backlight driver is expected to use backlight_is_blank() + * to check if the display is blanked and set brightness accordingly. + * update_status() is called when any of the properties has changed. + * + * RETURNS: + * + * 0 on success, negative error code if any failure occurred. + */ int (*update_status)(struct backlight_device *); - /* Return the current backlight brightness (accounting for power, - fb_blank etc.) */ + + /** + * @get_brightness: Return the current backlight brightness. + * + * The driver may implement this as a readback from the HW. + * This operation is optional and if not present then the current + * brightness property value is used. + * + * RETURNS: + * + * A brightness value which is 0 or a positive number. + * On failure a negative error code is returned. + */ int (*get_brightness)(struct backlight_device *); - /* Check if given framebuffer device is the one bound to this backlight; - return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ - int (*check_fb)(struct backlight_device *, struct fb_info *); + + /** + * @check_fb: Check the framebuffer device. + * + * Check if given framebuffer device is the one bound to this backlight. + * This operation is optional and if not implemented it is assumed that the + * fbdev is always the one bound to the backlight. + * + * RETURNS: + * + * If info is NULL or the info matches the fbdev bound to the backlight return true. + * If info does not match the fbdev bound to the backlight return false. + */ + int (*check_fb)(struct backlight_device *bd, struct fb_info *info); }; -/* This structure defines all the properties of a backlight */ +/** + * struct backlight_properties - backlight properties + * + * This structure defines all the properties of a backlight. + */ struct backlight_properties { - /* Current User requested brightness (0 - max_brightness) */ + /** + * @brightness: The current brightness requested by the user. + * + * The backlight core makes sure the range is (0 to max_brightness) + * when the brightness is set via the sysfs attribute: + * /sys/class/backlight/<backlight>/brightness. + * + * This value can be set in the backlight_properties passed + * to devm_backlight_device_register() to set a default brightness + * value. + */ int brightness; - /* Maximal value for brightness (read-only) */ + + /** + * @max_brightness: The maximum brightness value. + * + * This value must be set in the backlight_properties passed to + * devm_backlight_device_register() and shall not be modified by the + * driver after registration. 
+ */ int max_brightness; - /* Current FB Power mode (0: full on, 1..3: power saving - modes; 4: full off), see FB_BLANK_XXX */ + + /** + * @power: The current power mode. + * + * User space can configure the power mode using the sysfs + * attribute: /sys/class/backlight/<backlight>/bl_power + * When the power property is updated update_status() is called. + * + * The possible values are: (0: full on, 1 to 3: power saving + * modes; 4: full off), see FB_BLANK_XXX. + * + * When the backlight device is enabled @power is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @power is set to FB_BLANK_POWERDOWN. + */ int power; - /* FB Blanking active? (values as for power) */ - /* Due to be removed, please use (state & BL_CORE_FBBLANK) */ + + /** + * @fb_blank: The power state from the FBIOBLANK ioctl. + * + * When the FBIOBLANK ioctl is called @fb_blank is set to the + * blank parameter and the update_status() operation is called. + * + * When the backlight device is enabled @fb_blank is set + * to FB_BLANK_UNBLANK. When the backlight device is disabled + * @fb_blank is set to FB_BLANK_POWERDOWN. + * + * Backlight drivers should avoid using this property. It has been + * replaced by state & BL_CORE_FBLANK (although most drivers should + * use backlight_is_blank() as the preferred means to get the blank + * state). + * + * fb_blank is deprecated and will be removed. + */ int fb_blank; - /* Backlight type */ + + /** + * @type: The type of backlight supported. + * + * The backlight type allows userspace to make appropriate + * policy decisions based on the backlight type. + * + * This value must be set in the backlight_properties + * passed to devm_backlight_device_register(). + */ enum backlight_type type; - /* Flags used to signal drivers of state changes */ + + /** + * @state: The state of the backlight core. + * + * The state is a bitmask. BL_CORE_FBBLANK is set when the display + * is expected to be blank. BL_CORE_SUSPENDED is set when the + * driver is suspended. + * + * backlight drivers are expected to use backlight_is_blank() + * in their update_status() operation rather than reading the + * state property. + * + * The state is maintained by the core and drivers may not modify it. + */ unsigned int state; - /* Type of the brightness scale (linear, non-linear, ...) */ - enum backlight_scale scale; #define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */ #define BL_CORE_FBBLANK (1 << 1) /* backlight is under an fb blank event */ + /** + * @scale: The type of the brightness scale. + */ + enum backlight_scale scale; }; +/** + * struct backlight_device - backlight device data + * + * This structure holds all data required by a backlight device. + */ struct backlight_device { - /* Backlight properties */ + /** + * @props: Backlight properties + */ struct backlight_properties props; - /* Serialise access to update_status method */ + /** + * @update_lock: The lock used when calling the update_status() operation. + * + * update_lock is an internal backlight lock that serialise access + * to the update_status() operation. The backlight core holds the update_lock + * when calling the update_status() operation. The update_lock shall not + * be used by backlight drivers. + */ struct mutex update_lock; - /* This protects the 'ops' field. If 'ops' is NULL, the driver that - registered this device has been unloaded, and if class_get_devdata() - points to something in the body of that driver, it is also invalid. 
*/ + /** + * @ops_lock: The lock used around everything related to backlight_ops. + * + * ops_lock is an internal backlight lock that protects the ops pointer + * and is used around all accesses to ops and when the operations are + * invoked. The ops_lock shall not be used by backlight drivers. + */ struct mutex ops_lock; + + /** + * @ops: Pointer to the backlight operations. + * + * If ops is NULL, the driver that registered this device has been unloaded, + * and if class_get_devdata() points to something in the body of that driver, + * it is also invalid. + */ const struct backlight_ops *ops; - /* The framebuffer notifier block */ + /** + * @fb_notif: The framebuffer notifier block + */ struct notifier_block fb_notif; - /* list entry of all registered backlight devices */ + /** + * @entry: List entry of all registered backlight devices + */ struct list_head entry; + /** + * @dev: Parent device. + */ struct device dev; - /* Multiple framebuffers may share one backlight device */ + /** + * @fb_bl_on: The state of individual fbdev's. + * + * Multiple fbdev's may share one backlight device. The fb_bl_on + * records the state of the individual fbdev. + */ bool fb_bl_on[FB_MAX]; + /** + * @use_count: The number of uses of fb_bl_on. + */ int use_count; }; +/** + * backlight_update_status - force an update of the backlight device status + * @bd: the backlight device + */ static inline int backlight_update_status(struct backlight_device *bd) { int ret = -ENOENT; @@ -166,48 +389,83 @@ static inline int backlight_disable(struct backlight_device *bd) } /** - * backlight_put - Drop backlight reference - * @bd: the backlight device to put + * backlight_is_blank - Return true if display is expected to be blank + * @bd: the backlight device + * + * Display is expected to be blank if any of these is true:: + * + * 1) if power in not UNBLANK + * 2) if fb_blank is not UNBLANK + * 3) if state indicate BLANK or SUSPENDED + * + * Returns true if display is expected to be blank, false otherwise. */ -static inline void backlight_put(struct backlight_device *bd) +static inline bool backlight_is_blank(const struct backlight_device *bd) { - if (bd) - put_device(&bd->dev); + return bd->props.power != FB_BLANK_UNBLANK || + bd->props.fb_blank != FB_BLANK_UNBLANK || + bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } -extern struct backlight_device *backlight_device_register(const char *name, - struct device *dev, void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern struct backlight_device *devm_backlight_device_register( - struct device *dev, const char *name, struct device *parent, - void *devdata, const struct backlight_ops *ops, - const struct backlight_properties *props); -extern void backlight_device_unregister(struct backlight_device *bd); -extern void devm_backlight_device_unregister(struct device *dev, - struct backlight_device *bd); -extern void backlight_force_update(struct backlight_device *bd, - enum backlight_update_reason reason); -extern int backlight_register_notifier(struct notifier_block *nb); -extern int backlight_unregister_notifier(struct notifier_block *nb); -extern struct backlight_device *backlight_device_get_by_type(enum backlight_type type); -extern int backlight_device_set_brightness(struct backlight_device *bd, unsigned long brightness); +/** + * backlight_get_brightness - Returns the current brightness value + * @bd: the backlight device + * + * Returns the current brightness value, taking in consideration the current + * state. 
If backlight_is_blank() returns true then return 0 as brightness + * otherwise return the current brightness property value. + * + * Backlight drivers are expected to use this function in their update_status() + * operation to get the brightness value. + */ +static inline int backlight_get_brightness(const struct backlight_device *bd) +{ + if (backlight_is_blank(bd)) + return 0; + else + return bd->props.brightness; +} + +struct backlight_device * +backlight_device_register(const char *name, struct device *dev, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +struct backlight_device * +devm_backlight_device_register(struct device *dev, const char *name, + struct device *parent, void *devdata, + const struct backlight_ops *ops, + const struct backlight_properties *props); +void backlight_device_unregister(struct backlight_device *bd); +void devm_backlight_device_unregister(struct device *dev, + struct backlight_device *bd); +void backlight_force_update(struct backlight_device *bd, + enum backlight_update_reason reason); +int backlight_register_notifier(struct notifier_block *nb); +int backlight_unregister_notifier(struct notifier_block *nb); +struct backlight_device *backlight_device_get_by_name(const char *name); +struct backlight_device *backlight_device_get_by_type(enum backlight_type type); +int backlight_device_set_brightness(struct backlight_device *bd, + unsigned long brightness); #define to_backlight_device(obj) container_of(obj, struct backlight_device, dev) +/** + * bl_get_data - access devdata + * @bl_dev: pointer to backlight device + * + * When a backlight device is registered the driver has the possibility + * to supply a void * devdata. bl_get_data() return a pointer to the + * devdata. + * + * RETURNS: + * + * pointer to devdata stored while registering the backlight device. 
+ */ static inline void * bl_get_data(struct backlight_device *bl_dev) { return dev_get_drvdata(&bl_dev->dev); } -struct generic_bl_info { - const char *name; - int max_intensity; - int default_intensity; - int limit_mask; - void (*set_bl_intensity)(int intensity); - void (*kick_battery)(void); -}; - #ifdef CONFIG_OF struct backlight_device *of_find_backlight_by_node(struct device_node *node); #else @@ -219,14 +477,8 @@ of_find_backlight_by_node(struct device_node *node) #endif #if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) -struct backlight_device *of_find_backlight(struct device *dev); struct backlight_device *devm_of_find_backlight(struct device *dev); #else -static inline struct backlight_device *of_find_backlight(struct device *dev) -{ - return NULL; -} - static inline struct backlight_device * devm_of_find_backlight(struct device *dev) { diff --git a/include/linux/bch.h b/include/linux/bch.h index aa765af85c38..85fdce83d4e2 100644 --- a/include/linux/bch.h +++ b/include/linux/bch.h @@ -33,6 +33,7 @@ * @cache: log-based polynomial representation buffer * @elp: error locator polynomial * @poly_2t: temporary polynomials of degree 2t + * @swap_bits: swap bits within data and syndrome bytes */ struct bch_control { unsigned int m; @@ -51,16 +52,18 @@ struct bch_control { int *cache; struct gf_poly *elp; struct gf_poly *poly_2t[4]; + bool swap_bits; }; -struct bch_control *init_bch(int m, int t, unsigned int prim_poly); +struct bch_control *bch_init(int m, int t, unsigned int prim_poly, + bool swap_bits); -void free_bch(struct bch_control *bch); +void bch_free(struct bch_control *bch); -void encode_bch(struct bch_control *bch, const uint8_t *data, +void bch_encode(struct bch_control *bch, const uint8_t *data, unsigned int len, uint8_t *ecc); -int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, +int bch_decode(struct bch_control *bch, const uint8_t *data, unsigned int len, const uint8_t *recv_ecc, const uint8_t *calc_ecc, const unsigned int *syn, unsigned int *errloc); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index a345d9fed3d8..0571701ab1c5 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -26,46 +26,39 @@ struct linux_binprm { unsigned long p; /* current top of mem */ unsigned long argmin; /* rlimit marker for copy_strings() */ unsigned int + /* Should an execfd be passed to userspace? */ + have_execfd:1, + + /* Use the creds of a script (see binfmt_misc) */ + execfd_creds:1, /* - * True after the bprm_set_creds hook has been called once - * (multiple calls can be made via prepare_binprm() for - * binfmt_script/misc). - */ - called_set_creds:1, - /* - * True if most recent call to the commoncaps bprm_set_creds - * hook (due to multiple prepare_binprm() calls from the - * binfmt_script/misc handlers) resulted in elevated - * privileges. - */ - cap_elevated:1, - /* - * Set by bprm_set_creds hook to indicate a privilege-gaining - * exec has happened. Used to sanitize execution environment - * and to set AT_SECURE auxv for glibc. + * Set by bprm_creds_for_exec hook to indicate a + * privilege-gaining exec has happened. Used to set + * AT_SECURE auxv for glibc. */ secureexec:1, /* - * Set by flush_old_exec, when exec_mmap has been called. - * This is past the point of no return, when the - * exec_update_mutex has been taken. + * Set when errors can no longer be returned to the + * original userspace. 
*/ - called_exec_mmap:1; + point_of_no_return:1; #ifdef __alpha__ unsigned int taso:1; #endif - unsigned int recursion_depth; /* only for search_binary_handler() */ - struct file * file; + struct file *executable; /* Executable to pass to the interpreter */ + struct file *interpreter; + struct file *file; struct cred *cred; /* new credentials */ int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ unsigned int per_clear; /* bits to clear in current->personality */ int argc, envc; - const char * filename; /* Name of binary as seen by procps */ - const char * interp; /* Name of the binary really executed. Most + const char *filename; /* Name of binary as seen by procps */ + const char *interp; /* Name of the binary really executed. Most of the time same as filename, but could be different for binfmt_{misc,script} */ + const char *fdpath; /* generated filename for execveat */ unsigned interp_flags; - unsigned interp_data; + int execfd; /* File descriptor of the executable */ unsigned long loader, exec; struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */ @@ -76,10 +69,6 @@ struct linux_binprm { #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 #define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) -/* fd of the binary should be passed to the interpreter */ -#define BINPRM_FLAGS_EXECFD_BIT 1 -#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) - /* filename of the binary will be inaccessible after exec */ #define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2 #define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT) @@ -123,10 +112,8 @@ static inline void insert_binfmt(struct linux_binfmt *fmt) extern void unregister_binfmt(struct linux_binfmt *); -extern int prepare_binprm(struct linux_binprm *); extern int __must_check remove_arg_zero(struct linux_binprm *); -extern int search_binary_handler(struct linux_binprm *); -extern int flush_old_exec(struct linux_binprm * bprm); +extern int begin_new_exec(struct linux_binprm * bprm); extern void setup_new_exec(struct linux_binprm * bprm); extern void finalize_exec(struct linux_binprm *bprm); extern void would_dump(struct linux_binprm *, struct file *); @@ -144,19 +131,11 @@ extern int setup_arg_pages(struct linux_binprm * bprm, extern int transfer_args_to_stack(struct linux_binprm *bprm, unsigned long *sp_location); extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); -extern int copy_strings_kernel(int argc, const char *const *argv, - struct linux_binprm *bprm); -extern void install_exec_creds(struct linux_binprm *bprm); +int copy_string_kernel(const char *arg, struct linux_binprm *bprm); extern void set_binfmt(struct linux_binfmt *new); extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t); -extern int do_execve(struct filename *, - const char __user * const __user *, - const char __user * const __user *); -extern int do_execveat(int, struct filename *, - const char __user * const __user *, - const char __user * const __user *, - int); -int do_execve_file(struct file *file, void *__argv, void *__envp); +int kernel_execve(const char *filename, + const char *const *argv, const char *const *envp); #endif /* _LINUX_BINFMTS_H */ diff --git a/include/linux/bio.h b/include/linux/bio.h index a0ee494a6329..c6d765382926 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -8,8 +8,6 @@ #include <linux/highmem.h> #include <linux/mempool.h> #include <linux/ioprio.h> - -#ifdef CONFIG_BLOCK /* struct bio, bio_vec and BIO_* flags are defined in 
blk_types.h */ #include <linux/blk_types.h> @@ -70,7 +68,7 @@ static inline bool bio_has_data(struct bio *bio) return false; } -static inline bool bio_no_advance_iter(struct bio *bio) +static inline bool bio_no_advance_iter(const struct bio *bio) { return bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE || @@ -138,8 +136,8 @@ static inline bool bio_next_segment(const struct bio *bio, #define bio_for_each_segment_all(bvl, bio, iter) \ for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) -static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, - unsigned bytes) +static inline void bio_advance_iter(const struct bio *bio, + struct bvec_iter *iter, unsigned int bytes) { iter->bi_sector += bytes >> 9; @@ -169,6 +167,14 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, #define bio_for_each_bvec(bvl, bio, iter) \ __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) +/* + * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the + * same reasons as bio_for_each_segment_all(). + */ +#define bio_for_each_bvec_all(bvl, bio, i) \ + for (i = 0, bvl = bio_first_bvec_all(bio); \ + i < (bio)->bi_vcnt; i++, bvl++) \ + #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) static inline unsigned bio_segments(struct bio *bio) @@ -417,6 +423,7 @@ static inline void bio_io_error(struct bio *bio) static inline void bio_wouldblock_error(struct bio *bio) { + bio_set_flag(bio, BIO_QUIET); bio->bi_status = BLK_STS_AGAIN; bio_endio(bio); } @@ -444,12 +451,6 @@ void bio_release_pages(struct bio *bio, bool mark_dirty); extern void bio_set_pages_dirty(struct bio *bio); extern void bio_check_pages_dirty(struct bio *bio); -void generic_start_io_acct(struct request_queue *q, int op, - unsigned long sectors, struct hd_struct *part); -void generic_end_io_acct(struct request_queue *q, int op, - struct hd_struct *part, - unsigned long start_time); - extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, struct bio *src, struct bvec_iter *src_iter); extern void bio_copy_data(struct bio *dst, struct bio *src); @@ -488,21 +489,12 @@ do { \ #define bio_dev(bio) \ disk_devt((bio)->bi_disk) -#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -void bio_associate_blkg_from_page(struct bio *bio, struct page *page); -#else -static inline void bio_associate_blkg_from_page(struct bio *bio, - struct page *page) { } -#endif - #ifdef CONFIG_BLK_CGROUP -void bio_disassociate_blkg(struct bio *bio); void bio_associate_blkg(struct bio *bio); void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css); void bio_clone_blkg_association(struct bio *dst, struct bio *src); #else /* CONFIG_BLK_CGROUP */ -static inline void bio_disassociate_blkg(struct bio *bio) { } static inline void bio_associate_blkg(struct bio *bio) { } static inline void bio_associate_blkg_from_css(struct bio *bio, struct cgroup_subsys_state *css) @@ -821,5 +813,4 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) bio->bi_opf |= REQ_NOWAIT; } -#endif /* CONFIG_BLOCK */ #endif /* __LINUX_BIO_H */ diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 48ea093ff04c..4e035aca6f7e 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -77,7 +77,7 @@ */ #define FIELD_FIT(_mask, _val) \ ({ \ - __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ }) 
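The one-line bitfield.h fix above deserves a note: FIELD_FIT() is meant to be a runtime "does this value fit in the field?" predicate, but passing _val into __BF_FIELD_CHECK() also subjected it to the compile-time range check, so a constant out-of-range value produced a build error instead of a false return. After the change, the mask is still validated at compile time while the value is only tested at run time. A self-contained sketch of the intended behaviour, using simplified stand-ins for GENMASK() and __bf_shf() rather than the kernel's definitions, and assuming a GCC/Clang builtin:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's GENMASK() and __bf_shf(). */
#define GENMASK_SKETCH(h, l) \
	((~0ull >> (63 - (h))) & ~((1ull << (l)) - 1))
#define BF_SHF_SKETCH(mask)	(__builtin_ffsll(mask) - 1)

/* The fixed FIELD_FIT() semantics: shift the value into field
 * position and check that nothing falls outside the mask. */
#define FIELD_FIT_SKETCH(mask, val) \
	(!(((uint64_t)(val) << BF_SHF_SKETCH(mask)) & ~(uint64_t)(mask)))

int main(void)
{
	uint64_t field = GENMASK_SKETCH(6, 4);	/* a 3-bit field */

	printf("%d\n", FIELD_FIT_SKETCH(field, 7));	/* 1: fits */
	printf("%d\n", FIELD_FIT_SKETCH(field, 8));	/* 0: too big */
	return 0;
}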
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 9acf654f0b19..99f2ac30b1d9 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -72,7 +72,7 @@ static inline int get_bitmask_order(unsigned int count) static __always_inline unsigned long hweight_long(unsigned long w) { - return sizeof(w) == 4 ? hweight32(w) : hweight64(w); + return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w); } /** diff --git a/include/linux/bits.h b/include/linux/bits.h index 4671fbf28842..7f475d59a097 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -18,8 +18,7 @@ * position @h. For example * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. */ -#if !defined(__ASSEMBLY__) && \ - (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000) +#if !defined(__ASSEMBLY__) #include <linux/build_bug.h> #define GENMASK_INPUT_CHECK(h, l) \ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \ diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 35f8ffe92b70..c8fc9792ac77 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -109,12 +109,6 @@ struct blkcg_gq { struct hlist_node blkcg_node; struct blkcg *blkcg; - /* - * Each blkg gets congested separately and the congestion state is - * propagated to the matching bdi_writeback_congested. - */ - struct bdi_writeback_congested *wb_congested; - /* all non-root blkcg_gq's are guaranteed to have access to parent */ struct blkcg_gq *parent; @@ -183,10 +177,6 @@ extern bool blkcg_debug_stats; struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg, struct request_queue *q, bool update_hint); -struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); -struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg, - struct request_queue *q); int blkcg_init_queue(struct request_queue *q); void blkcg_exit_queue(struct request_queue *q); @@ -481,32 +471,6 @@ static inline bool blkg_tryget(struct blkcg_gq *blkg) } /** - * blkg_tryget_closest - try and get a blkg ref on the closet blkg - * @blkg: blkg to get - * - * This needs to be called rcu protected. As the failure mode here is to walk - * up the blkg tree, this ensure that the blkg->parent pointers are always - * valid. This returns the blkg that it ended up taking a reference on or %NULL - * if no reference was taken. 
- */ -static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg) -{ - struct blkcg_gq *ret_blkg = NULL; - - WARN_ON_ONCE(!rcu_read_lock_held()); - - while (blkg) { - if (blkg_tryget(blkg)) { - ret_blkg = blkg; - break; - } - blkg = blkg->parent; - } - - return ret_blkg; -} - -/** * blkg_put - put a blkg reference * @blkg: blkg to put */ @@ -547,14 +511,6 @@ static inline void blkg_put(struct blkcg_gq *blkg) if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \ (p_blkg)->q, false))) -#ifdef CONFIG_BLK_DEV_THROTTLING -extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio); -#else -static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg, - struct bio *bio) { return false; } -#endif - bool __blkcg_punt_bio_submit(struct bio *bio); static inline bool blkcg_punt_bio_submit(struct bio *bio) @@ -570,65 +526,10 @@ static inline void blkcg_bio_issue_init(struct bio *bio) bio_issue_init(&bio->bi_issue, bio_sectors(bio)); } -static inline bool blkcg_bio_issue_check(struct request_queue *q, - struct bio *bio) -{ - struct blkcg_gq *blkg; - bool throtl = false; - - rcu_read_lock(); - - if (!bio->bi_blkg) { - char b[BDEVNAME_SIZE]; - - WARN_ONCE(1, - "no blkg associated for bio on block-device: %s\n", - bio_devname(bio, b)); - bio_associate_blkg(bio); - } - - blkg = bio->bi_blkg; - - throtl = blk_throtl_bio(q, blkg, bio); - - if (!throtl) { - struct blkg_iostat_set *bis; - int rwd, cpu; - - if (op_is_discard(bio->bi_opf)) - rwd = BLKG_IOSTAT_DISCARD; - else if (op_is_write(bio->bi_opf)) - rwd = BLKG_IOSTAT_WRITE; - else - rwd = BLKG_IOSTAT_READ; - - cpu = get_cpu(); - bis = per_cpu_ptr(blkg->iostat_cpu, cpu); - u64_stats_update_begin(&bis->sync); - - /* - * If the bio is flagged with BIO_QUEUE_ENTERED it means this - * is a split bio and we would have already accounted for the - * size of the bio. - */ - if (!bio_flagged(bio, BIO_QUEUE_ENTERED)) - bis->cur.bytes[rwd] += bio->bi_iter.bi_size; - bis->cur.ios[rwd]++; - - u64_stats_update_end(&bis->sync); - if (cgroup_subsys_on_dfl(io_cgrp_subsys)) - cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu); - put_cpu(); - } - - blkcg_bio_issue_init(bio); - - rcu_read_unlock(); - return !throtl; -} - static inline void blkcg_use_delay(struct blkcg_gq *blkg) { + if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0)) + return; if (atomic_add_return(1, &blkg->use_delay) == 1) atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); } @@ -637,6 +538,8 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) { int old = atomic_read(&blkg->use_delay); + if (WARN_ON_ONCE(old < 0)) + return 0; if (old == 0) return 0; @@ -661,22 +564,42 @@ static inline int blkcg_unuse_delay(struct blkcg_gq *blkg) return 1; } +/** + * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount + * @blkg: target blkg + * @delay: delay duration in nsecs + * + * When enabled with this function, the delay is not decayed and must be + * explicitly cleared with blkcg_clear_delay(). Must not be mixed with + * blkcg_[un]use_delay() and blkcg_add_delay() usages. + */ +static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay) +{ + int old = atomic_read(&blkg->use_delay); + + /* We only want 1 person setting the congestion count for this blkg. 
*/ + if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old) + atomic_inc(&blkg->blkcg->css.cgroup->congestion_count); + + atomic64_set(&blkg->delay_nsec, delay); +} + +/** + * blkcg_clear_delay - Disable allocator delay mechanism + * @blkg: target blkg + * + * Disable use_delay mechanism. See blkcg_set_delay(). + */ static inline void blkcg_clear_delay(struct blkcg_gq *blkg) { int old = atomic_read(&blkg->use_delay); - if (!old) - return; + /* We only want 1 person clearing the congestion count for this blkg. */ - while (old) { - int cur = atomic_cmpxchg(&blkg->use_delay, old, 0); - if (cur == old) { - atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); - break; - } - old = cur; - } + if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old) + atomic_dec(&blkg->blkcg->css.cgroup->congestion_count); } +void blk_cgroup_bio_start(struct bio *bio); void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta); void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay); void blkcg_maybe_throttle_current(void); @@ -730,8 +653,7 @@ static inline void blkg_put(struct blkcg_gq *blkg) { } static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; } static inline void blkcg_bio_issue_init(struct bio *bio) { } -static inline bool blkcg_bio_issue_check(struct request_queue *q, - struct bio *bio) { return true; } +static inline void blk_cgroup_bio_start(struct bio *bio) { } #define blk_queue_for_each_rl(rl, q) \ for ((rl) = &(q)->root_rl; (rl); (rl) = NULL) diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h new file mode 100644 index 000000000000..e82342907f2b --- /dev/null +++ b/include/linux/blk-crypto.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef __LINUX_BLK_CRYPTO_H +#define __LINUX_BLK_CRYPTO_H + +#include <linux/types.h> + +enum blk_crypto_mode_num { + BLK_ENCRYPTION_MODE_INVALID, + BLK_ENCRYPTION_MODE_AES_256_XTS, + BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, + BLK_ENCRYPTION_MODE_ADIANTUM, + BLK_ENCRYPTION_MODE_MAX, +}; + +#define BLK_CRYPTO_MAX_KEY_SIZE 64 +/** + * struct blk_crypto_config - an inline encryption key's crypto configuration + * @crypto_mode: encryption algorithm this key is for + * @data_unit_size: the data unit size for all encryption/decryptions with this + * key. This is the size in bytes of each individual plaintext and + * ciphertext. This is always a power of 2. It might be e.g. the + * filesystem block size or the disk sector size. + * @dun_bytes: the maximum number of bytes of DUN used when using this key + */ +struct blk_crypto_config { + enum blk_crypto_mode_num crypto_mode; + unsigned int data_unit_size; + unsigned int dun_bytes; +}; + +/** + * struct blk_crypto_key - an inline encryption key + * @crypto_cfg: the crypto configuration (like crypto_mode, key size) for this + * key + * @data_unit_size_bits: log2 of data_unit_size + * @size: size of this key in bytes (determined by @crypto_cfg.crypto_mode) + * @raw: the raw bytes of this key. Only the first @size bytes are used. + * + * A blk_crypto_key is immutable once created, and many bios can reference it at + * the same time. It must not be freed until all bios using it have completed + * and it has been evicted from all devices on which it may have been used. 
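To ground the new <linux/blk-crypto.h> interface: a hedged sketch of how an upper layer might initialize an inline encryption key, ensure it is usable on a queue, and attach it to a bio. The example_* helpers, the 8-byte DUN size, and the 4096-byte data unit size are illustrative assumptions; the function signatures come from the declarations in this header.

#include <linux/blk-crypto.h>
#include <linux/gfp.h>

static int example_setup_key(struct request_queue *q,
			     struct blk_crypto_key *key, const u8 *raw)
{
	int err;

	/* 8 DUN bytes, 4096-byte data units (e.g. the fs block size). */
	err = blk_crypto_init_key(key, raw, BLK_ENCRYPTION_MODE_AES_256_XTS,
				  8, 4096);
	if (err)
		return err;

	/* May set up the crypto API fallback if the hardware lacks support. */
	return blk_crypto_start_using_key(key, q);
}

static void example_encrypt_bio(struct bio *bio,
				const struct blk_crypto_key *key, u64 dun)
{
	u64 dun_array[BLK_CRYPTO_DUN_ARRAY_SIZE] = { dun };

	bio_crypt_set_ctx(bio, key, dun_array, GFP_NOIO);
}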
+ */ +struct blk_crypto_key { + struct blk_crypto_config crypto_cfg; + unsigned int data_unit_size_bits; + unsigned int size; + u8 raw[BLK_CRYPTO_MAX_KEY_SIZE]; +}; + +#define BLK_CRYPTO_MAX_IV_SIZE 32 +#define BLK_CRYPTO_DUN_ARRAY_SIZE (BLK_CRYPTO_MAX_IV_SIZE / sizeof(u64)) + +/** + * struct bio_crypt_ctx - an inline encryption context + * @bc_key: the key, algorithm, and data unit size to use + * @bc_dun: the data unit number (starting IV) to use + * + * A bio_crypt_ctx specifies that the contents of the bio will be encrypted (for + * write requests) or decrypted (for read requests) inline by the storage device + * or controller, or by the crypto API fallback. + */ +struct bio_crypt_ctx { + const struct blk_crypto_key *bc_key; + u64 bc_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; +}; + +#include <linux/blk_types.h> +#include <linux/blkdev.h> + +struct request; +struct request_queue; + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +static inline bool bio_has_crypt_ctx(struct bio *bio) +{ + return bio->bi_crypt_context; +} + +void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key, + const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], + gfp_t gfp_mask); + +bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc, + unsigned int bytes, + const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]); + +int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key, + enum blk_crypto_mode_num crypto_mode, + unsigned int dun_bytes, + unsigned int data_unit_size); + +int blk_crypto_start_using_key(const struct blk_crypto_key *key, + struct request_queue *q); + +int blk_crypto_evict_key(struct request_queue *q, + const struct blk_crypto_key *key); + +bool blk_crypto_config_supported(struct request_queue *q, + const struct blk_crypto_config *cfg); + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline bool bio_has_crypt_ctx(struct bio *bio) +{ + return false; +} + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ + +void __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask); +static inline void bio_crypt_clone(struct bio *dst, struct bio *src, + gfp_t gfp_mask) +{ + if (bio_has_crypt_ctx(src)) + __bio_crypt_clone(dst, src, gfp_mask); +} + +#endif /* __LINUX_BLK_CRYPTO_H */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b45148ba3291..9d2d5ad367a4 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -140,6 +140,8 @@ struct blk_mq_hw_ctx { */ atomic_t nr_active; + /** @cpuhp_online: List to store request if CPU is going to die */ + struct hlist_node cpuhp_online; /** @cpuhp_dead: List to store request if some CPU die. */ struct hlist_node cpuhp_dead; /** @kobj: Kernel object for sysfs. 
*/ @@ -265,27 +267,9 @@ struct blk_mq_queue_data { bool last; }; -typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *, - const struct blk_mq_queue_data *); -typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *); -typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *); -typedef void (put_budget_fn)(struct blk_mq_hw_ctx *); -typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool); -typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); -typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); -typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *, - unsigned int, unsigned int); -typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *, - unsigned int); - typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *, bool); typedef bool (busy_tag_iter_fn)(struct request *, void *, bool); -typedef int (poll_fn)(struct blk_mq_hw_ctx *); -typedef int (map_queues_fn)(struct blk_mq_tag_set *set); -typedef bool (busy_fn)(struct request_queue *); -typedef void (complete_fn)(struct request *); -typedef void (cleanup_rq_fn)(struct request *); /** * struct blk_mq_ops - Callback functions that implements block driver @@ -295,7 +279,8 @@ struct blk_mq_ops { /** * @queue_rq: Queue a new request from block IO. */ - queue_rq_fn *queue_rq; + blk_status_t (*queue_rq)(struct blk_mq_hw_ctx *, + const struct blk_mq_queue_data *); /** * @commit_rqs: If a driver uses bd->last to judge when to submit @@ -304,7 +289,7 @@ struct blk_mq_ops { * purpose of kicking the hardware (which the last request otherwise * would have done). */ - commit_rqs_fn *commit_rqs; + void (*commit_rqs)(struct blk_mq_hw_ctx *); /** * @get_budget: Reserve budget before queue request, once .queue_rq is @@ -312,37 +297,38 @@ struct blk_mq_ops { * reserved budget. Also we have to handle failure case * of .get_budget for avoiding I/O deadlock. */ - get_budget_fn *get_budget; + bool (*get_budget)(struct request_queue *); + /** * @put_budget: Release the reserved budget. */ - put_budget_fn *put_budget; + void (*put_budget)(struct request_queue *); /** * @timeout: Called on request timeout. */ - timeout_fn *timeout; + enum blk_eh_timer_return (*timeout)(struct request *, bool); /** * @poll: Called to poll for completion of a specific tag. */ - poll_fn *poll; + int (*poll)(struct blk_mq_hw_ctx *); /** * @complete: Mark the request as complete. */ - complete_fn *complete; + void (*complete)(struct request *); /** * @init_hctx: Called when the block layer side of a hardware queue has * been set up, allowing the driver to allocate/init matching * structures. */ - init_hctx_fn *init_hctx; + int (*init_hctx)(struct blk_mq_hw_ctx *, void *, unsigned int); /** * @exit_hctx: Ditto for exit/teardown. */ - exit_hctx_fn *exit_hctx; + void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); /** * @init_request: Called for every command allocated by the block layer @@ -351,11 +337,13 @@ struct blk_mq_ops { * Tag greater than or equal to queue_depth is for setting up * flush request. */ - init_request_fn *init_request; + int (*init_request)(struct blk_mq_tag_set *set, struct request *, + unsigned int, unsigned int); /** * @exit_request: Ditto for exit/teardown. */ - exit_request_fn *exit_request; + void (*exit_request)(struct blk_mq_tag_set *set, struct request *, + unsigned int); /** * @initialize_rq_fn: Called from inside blk_get_request(). 
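The hunks above fold the old queue_rq_fn-style typedefs directly into struct blk_mq_ops; existing drivers are unaffected beyond readability. A hedged sketch of a trivial ops table under the new spelling, with hypothetical example_* names:

#include <linux/blk-mq.h>

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	/* A real driver would hand bd->rq to its hardware here. */
	blk_mq_start_request(bd->rq);
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};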
@@ -366,18 +354,18 @@ struct blk_mq_ops { * @cleanup_rq: Called before freeing one request which isn't completed * yet, and usually for freeing the driver private data. */ - cleanup_rq_fn *cleanup_rq; + void (*cleanup_rq)(struct request *); /** * @busy: If set, returns whether or not this queue currently is busy. */ - busy_fn *busy; + bool (*busy)(struct request_queue *); /** * @map_queues: This allows drivers specify their own queue mapping by * overriding the setup-time function that builds the mq_map. */ - map_queues_fn *map_queues; + int (*map_queues)(struct blk_mq_tag_set *set); #ifdef CONFIG_BLK_DEBUG_FS /** @@ -391,6 +379,11 @@ struct blk_mq_ops { enum { BLK_MQ_F_SHOULD_MERGE = 1 << 0, BLK_MQ_F_TAG_SHARED = 1 << 1, + /* + * Set when this device requires underlying blk-mq device for + * completing IO: + */ + BLK_MQ_F_STACKING = 1 << 2, BLK_MQ_F_BLOCKING = 1 << 5, BLK_MQ_F_NO_SCHED = 1 << 6, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, @@ -400,6 +393,9 @@ enum { BLK_MQ_S_TAG_ACTIVE = 1, BLK_MQ_S_SCHED_RESTART = 2, + /* hw queue is inactive after all its CPUs become offline */ + BLK_MQ_S_INACTIVE = 3, + BLK_MQ_MAX_DEPTH = 10240, BLK_MQ_CPU_WORK_BATCH = 8, @@ -437,8 +433,6 @@ enum { BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0), /* allocate from reserved pool */ BLK_MQ_REQ_RESERVED = (__force blk_mq_req_flags_t)(1 << 1), - /* allocate internal/sched tag */ - BLK_MQ_REQ_INTERNAL = (__force blk_mq_req_flags_t)(1 << 2), /* set RQF_PREEMPT */ BLK_MQ_REQ_PREEMPT = (__force blk_mq_req_flags_t)(1 << 3), }; @@ -493,7 +487,8 @@ void __blk_mq_end_request(struct request *rq, blk_status_t error); void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); void blk_mq_kick_requeue_list(struct request_queue *q); void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); -bool blk_mq_complete_request(struct request *rq); +void blk_mq_complete_request(struct request *rq); +bool blk_mq_complete_request_remote(struct request *rq); bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, struct bio *bio, unsigned int nr_segs); bool blk_mq_queue_stopped(struct request_queue *q); @@ -508,6 +503,7 @@ void blk_mq_unquiesce_queue(struct request_queue *q); void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_run_hw_queues(struct request_queue *q, bool async); +void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs); void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset, busy_tag_iter_fn *fn, void *priv); void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset); @@ -525,6 +521,15 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q); unsigned int blk_mq_rq_cpu(struct request *rq); +bool __blk_should_fake_timeout(struct request_queue *q); +static inline bool blk_should_fake_timeout(struct request_queue *q) +{ + if (IS_ENABLED(CONFIG_FAIL_IO_TIMEOUT) && + test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) + return __blk_should_fake_timeout(q); + return false; +} + /** * blk_mq_rq_from_pdu - cast a PDU to a request * @pdu: the PDU (Protocol Data Unit) to be casted @@ -577,4 +582,6 @@ static inline void blk_mq_cleanup_rq(struct request *rq) rq->q->mq_ops->cleanup_rq(rq); } +blk_qc_t blk_mq_submit_bio(struct bio *bio); + #endif diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 31eb92876be7..b3fc5d3dd8ea 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -14,10 
+14,38 @@ struct bio_set; struct bio; struct bio_integrity_payload; struct page; -struct block_device; struct io_context; struct cgroup_subsys_state; typedef void (bio_end_io_t) (struct bio *); +struct bio_crypt_ctx; + +struct block_device { + dev_t bd_dev; /* not a kdev_t - it's a search key */ + int bd_openers; + struct inode * bd_inode; /* will die */ + struct super_block * bd_super; + struct mutex bd_mutex; /* open/close mutex */ + void * bd_claiming; + void * bd_holder; + int bd_holders; + bool bd_write_holder; +#ifdef CONFIG_SYSFS + struct list_head bd_holder_disks; +#endif + struct block_device * bd_contains; + u8 bd_partno; + struct hd_struct * bd_part; + /* number of times partitions within this device have been opened. */ + unsigned bd_part_count; + int bd_invalidated; + struct gendisk * bd_disk; + struct backing_dev_info *bd_bdi; + + /* The counter of freeze processes */ + int bd_fsfreeze_count; + /* Mutex for freeze */ + struct mutex bd_fsfreeze_mutex; +} __randomize_layout; /* * Block error status values. See block/blk-core:blk_errors for the details. @@ -63,6 +91,18 @@ typedef u8 __bitwise blk_status_t; */ #define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) +/* + * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone + * related resources are unavailable, but the driver can guarantee the queue + * will be rerun in the future once the resources become available again. + * + * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references + * a zone specific resource and IO to a different zone on the same device could + * still be served. Examples of that are zones that are write-locked, but a read + * to the same zone could be served. + */ +#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14) + /** * blk_path_error - returns true if error may be path related * @error: status the request was completed with @@ -173,6 +213,11 @@ struct bio { u64 bi_iocost_cost; #endif #endif + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct bio_crypt_ctx *bi_crypt_context; +#endif + union { #if defined(CONFIG_BLK_DEV_INTEGRITY) struct bio_integrity_payload *bi_integrity; /* data integrity */ @@ -220,7 +265,7 @@ enum { * throttling rules. Don't do it again. */ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion * of this bio. 
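BLK_STS_ZONE_RESOURCE above signals that only one zone's resources are exhausted, unlike BLK_STS_DEV_RESOURCE, which throttles the whole device. A hedged sketch of how a zoned driver's ->queue_rq() might use it; example_zone_is_locked() is a hypothetical stand-in for driver-private zone state:

#include <linux/blk-mq.h>

static bool example_zone_is_locked(struct request *rq)
{
	return false;	/* hypothetical per-zone state lookup */
}

static blk_status_t example_zoned_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	/* Only this zone is busy; I/O to other zones can still proceed. */
	if (example_zone_is_locked(rq))
		return BLK_STS_ZONE_RESOURCE;

	blk_mq_start_request(rq);
	/* ... issue to hardware ... */
	return BLK_STS_OK;
}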
*/ - BIO_QUEUE_ENTERED, /* can use blk_queue_enter_live() */ + BIO_CGROUP_ACCT, /* has been accounted to a cgroup */ BIO_TRACKED, /* set if bio goes through the rq_qos path */ BIO_FLAG_LAST }; @@ -282,12 +327,8 @@ enum req_opf { REQ_OP_DISCARD = 3, /* securely erase sectors */ REQ_OP_SECURE_ERASE = 5, - /* reset a zone write pointer */ - REQ_OP_ZONE_RESET = 6, /* write the same sector many times */ REQ_OP_WRITE_SAME = 7, - /* reset all the zones present on the device */ - REQ_OP_ZONE_RESET_ALL = 8, /* write the zero filled sector many times */ REQ_OP_WRITE_ZEROES = 9, /* Open a zone */ @@ -296,6 +337,12 @@ enum req_opf { REQ_OP_ZONE_CLOSE = 11, /* Transition a zone to full */ REQ_OP_ZONE_FINISH = 12, + /* write data at the current zone write pointer */ + REQ_OP_ZONE_APPEND = 13, + /* reset a zone write pointer */ + REQ_OP_ZONE_RESET = 15, + /* reset all the zones present on the device */ + REQ_OP_ZONE_RESET_ALL = 17, /* SCSI passthrough using struct scsi_request */ REQ_OP_SCSI_IN = 32, @@ -323,7 +370,6 @@ enum req_flag_bits { __REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_BACKGROUND, /* background IO */ __REQ_NOWAIT, /* Don't wait if request will block */ - __REQ_NOWAIT_INLINE, /* Return would-block error inline */ /* * When a shared kthread needs to issue a bio for a cgroup, doing * so synchronously can lead to priority inversions as the kthread @@ -358,7 +404,6 @@ enum req_flag_bits { #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) #define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND) #define REQ_NOWAIT (1ULL << __REQ_NOWAIT) -#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE) #define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT) #define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP) @@ -452,13 +497,12 @@ static inline int op_stat_group(unsigned int op) typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U -#define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { - return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; + return cookie != BLK_QC_T_NONE; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 32868fbedc9e..868e11face00 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -4,9 +4,6 @@ #include <linux/sched.h> #include <linux/sched/clock.h> - -#ifdef CONFIG_BLOCK - #include <linux/major.h> #include <linux/genhd.h> #include <linux/list.h> @@ -43,6 +40,7 @@ struct pr_ops; struct rq_qos; struct blk_queue_stats; struct blk_stat_callback; +struct blk_keyslot_manager; #define BLKDEV_MIN_RQ 4 #define BLKDEV_MAX_RQ 128 /* Default maximum */ @@ -82,8 +80,6 @@ typedef __u32 __bitwise req_flags_t; /* set for "ide_preempt" requests and also for requests for which the SCSI "quiesce" state must be ignored. */ #define RQF_PREEMPT ((__force req_flags_t)(1 << 8)) -/* contains copies of user pages */ -#define RQF_COPY_USER ((__force req_flags_t)(1 << 9)) /* vaguely specified driver internal error.
Ignored by the block layer */ #define RQF_FAILED ((__force req_flags_t)(1 << 10)) /* don't warn about errors */ @@ -223,11 +219,14 @@ struct request { unsigned short nr_integrity_segments; #endif +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct bio_crypt_ctx *crypt_ctx; + struct blk_ksm_keyslot *crypt_keyslot; +#endif + unsigned short write_hint; unsigned short ioprio; - unsigned int extra_len; /* length of alignment and padding */ - enum mq_rq_state state; refcount_t ref; @@ -287,10 +286,7 @@ static inline unsigned short req_get_ioprio(struct request *req) struct blk_queue_ctx; -typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); - struct bio_vec; -typedef int (dma_drain_needed_fn)(struct request *); enum blk_eh_timer_return { BLK_EH_DONE, /* drivers has completed the command */ @@ -310,11 +306,14 @@ enum blk_queue_state { /* * Zoned block device models (zoned limit). + * + * Note: This needs to be ordered from the least to the most severe + * restrictions for the inheritance in blk_stack_limits() to work. */ enum blk_zoned_model { - BLK_ZONED_NONE, /* Regular block device */ - BLK_ZONED_HA, /* Host-aware zoned block device */ - BLK_ZONED_HM, /* Host-managed zoned block device */ + BLK_ZONED_NONE = 0, /* Regular block device */ + BLK_ZONED_HA, /* Host-aware zoned block device */ + BLK_ZONED_HM, /* Host-managed zoned block device */ }; struct queue_limits { @@ -336,6 +335,7 @@ struct queue_limits { unsigned int max_hw_discard_sectors; unsigned int max_write_same_sectors; unsigned int max_write_zeroes_sectors; + unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; @@ -352,6 +352,8 @@ struct queue_limits { typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, void *data); +void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model); + #ifdef CONFIG_BLK_DEV_ZONED #define BLK_ALL_ZONES ((unsigned int)-1) @@ -361,7 +363,8 @@ unsigned int blkdev_nr_zones(struct gendisk *disk); extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, sector_t sectors, sector_t nr_sectors, gfp_t gfp_mask); -extern int blk_revalidate_disk_zones(struct gendisk *disk); +int blk_revalidate_disk_zones(struct gendisk *disk, + void (*update_driver_data)(struct gendisk *disk)); extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); @@ -398,9 +401,6 @@ struct request_queue { struct blk_queue_stats *stats; struct rq_qos *rq_qos; - make_request_fn *make_request_fn; - dma_drain_needed_fn *dma_drain_needed; - const struct blk_mq_ops *mq_ops; /* sw queues */ @@ -469,11 +469,14 @@ struct request_queue { */ unsigned long nr_requests; /* Max # of requests */ - unsigned int dma_drain_size; - void *dma_drain_buffer; unsigned int dma_pad_mask; unsigned int dma_alignment; +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + /* Inline crypto capabilities */ + struct blk_keyslot_manager *ksm; +#endif + unsigned int rq_timeout; int poll_nsec; @@ -515,6 +518,8 @@ struct request_queue { unsigned int nr_zones; unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; + unsigned int max_open_zones; + unsigned int max_active_zones; #endif /* CONFIG_BLK_DEV_ZONED */ /* @@ -523,9 +528,9 @@ struct request_queue { unsigned int sg_timeout; unsigned int sg_reserved_size; int node; + struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE struct blk_trace __rcu *blk_trace; - struct mutex blk_trace_mutex; #endif /* * for flush operations @@ -569,8 +574,9 @@ struct 
request_queue { struct list_head tag_set_list; struct bio_set bio_split; -#ifdef CONFIG_BLK_DEBUG_FS struct dentry *debugfs_dir; + +#ifdef CONFIG_BLK_DEBUG_FS struct dentry *sched_debugfs_dir; struct dentry *rqos_debugfs_dir; #endif @@ -579,12 +585,11 @@ struct request_queue { size_t cmd_size; - struct work_struct release_work; - #define BLK_MAX_WRITE_HINTS 5 u64 write_hints[BLK_MAX_WRITE_HINTS]; }; +/* Keep blk_queue_flag_name[] in sync with the definitions below */ #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ #define QUEUE_FLAG_DYING 1 /* queue being torn down */ #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ @@ -724,11 +729,51 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q, return true; return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap); } + +static inline void blk_queue_max_open_zones(struct request_queue *q, + unsigned int max_open_zones) +{ + q->max_open_zones = max_open_zones; +} + +static inline unsigned int queue_max_open_zones(const struct request_queue *q) +{ + return q->max_open_zones; +} + +static inline void blk_queue_max_active_zones(struct request_queue *q, + unsigned int max_active_zones) +{ + q->max_active_zones = max_active_zones; +} + +static inline unsigned int queue_max_active_zones(const struct request_queue *q) +{ + return q->max_active_zones; +} #else /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int blk_queue_nr_zones(struct request_queue *q) { return 0; } +static inline bool blk_queue_zone_is_seq(struct request_queue *q, + sector_t sector) +{ + return false; +} +static inline unsigned int blk_queue_zone_no(struct request_queue *q, + sector_t sector) +{ + return 0; +} +static inline unsigned int queue_max_open_zones(const struct request_queue *q) +{ + return 0; +} +static inline unsigned int queue_max_active_zones(const struct request_queue *q) +{ + return 0; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline bool rq_is_sync(struct request *rq) @@ -747,6 +792,9 @@ static inline bool rq_mergeable(struct request *rq) if (req_op(rq) == REQ_OP_WRITE_ZEROES) return false; + if (req_op(rq) == REQ_OP_ZONE_APPEND) + return false; + if (rq->cmd_flags & REQ_NOMERGE_FLAGS) return false; if (rq->rq_flags & RQF_NOMERGE_FLAGS) @@ -842,8 +890,7 @@ static inline void rq_flush_dcache_pages(struct request *rq) extern int blk_register_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk); -extern blk_qc_t generic_make_request(struct bio *bio); -extern blk_qc_t direct_make_request(struct bio *bio); +blk_qc_t submit_bio_noacct(struct bio *bio); extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_put_request(struct request *); extern struct request *blk_get_request(struct request_queue *, unsigned int op, @@ -857,7 +904,7 @@ extern void blk_rq_unprep_clone(struct request *rq); extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); extern int blk_rq_append_bio(struct request *rq, struct bio **bio); -extern void blk_queue_split(struct request_queue *, struct bio **); +extern void blk_queue_split(struct bio **); extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, unsigned int, void __user *); @@ -1060,7 +1107,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq); extern bool blk_update_request(struct request *rq, blk_status_t error, unsigned int nr_bytes); -extern void __blk_complete_request(struct request *); extern void 
blk_abort_request(struct request *); /* @@ -1081,6 +1127,8 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q, extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, unsigned int max_write_same_sectors); extern void blk_queue_logical_block_size(struct request_queue *, unsigned int); +extern void blk_queue_max_zone_append_sectors(struct request_queue *q, + unsigned int max_zone_append_sectors); extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); @@ -1093,15 +1141,9 @@ extern void blk_set_default_limits(struct queue_limits *lim); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, sector_t offset); -extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, - sector_t offset); extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, sector_t offset); -extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); -extern int blk_queue_dma_drain(struct request_queue *q, - dma_drain_needed_fn *dma_drain_needed, - void *buf, unsigned int size); extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); extern void blk_queue_dma_alignment(struct request_queue *, int); @@ -1138,15 +1180,23 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq) return max_t(unsigned short, rq->nr_phys_segments, 1); } -extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +int __blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist, struct scatterlist **last_sg); +static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct scatterlist *last_sg = NULL; + + return __blk_rq_map_sg(q, rq, sglist, &last_sg); +} extern void blk_dump_rq_flags(struct request *, char *); -extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); -struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id); +struct request_queue *blk_alloc_queue(int node_id); extern void blk_put_queue(struct request_queue *); extern void blk_set_queue_dying(struct request_queue *); +#ifdef CONFIG_BLOCK /* * blk_plug permits building a queue of related requests by holding the I/O * fragments for a short period. 
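The blkdev.h hunks above and below rework the bio submission path: generic_make_request() becomes submit_bio_noacct(), blk_queue_split() loses its request_queue argument, and blk_alloc_queue() takes only a node id now that the entry point moves into struct block_device_operations (see the ->submit_bio member later in this diff). A hedged, heavily simplified sketch of a stacking driver's bio path; struct example_dev and the remapping are hypothetical:

#include <linux/blkdev.h>

struct example_dev {
	struct gendisk *lower_disk;	/* hypothetical backing device */
};

static blk_qc_t example_submit_bio(struct bio *bio)
{
	struct example_dev *dev = bio->bi_disk->private_data;

	blk_queue_split(&bio);		/* note: no request_queue argument */

	/* Remap to the backing device and resubmit. */
	bio->bi_disk = dev->lower_disk;
	return submit_bio_noacct(bio);
}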
This allows merging of sequential requests @@ -1164,6 +1214,7 @@ struct blk_plug { struct list_head cb_list; /* md requires an unplug callback */ unsigned short rq_count; bool multiple_queues; + bool nowait; }; #define BLK_MAX_REQUEST_COUNT 16 #define BLK_PLUG_FLUSH_SIZE (128 * 1024) @@ -1206,7 +1257,47 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) !list_empty(&plug->cb_list)); } -extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); +int blkdev_issue_flush(struct block_device *, gfp_t); +long nr_blockdev_pages(void); +#else /* CONFIG_BLOCK */ +struct blk_plug { +}; + +static inline void blk_start_plug(struct blk_plug *plug) +{ +} + +static inline void blk_finish_plug(struct blk_plug *plug) +{ +} + +static inline void blk_flush_plug(struct task_struct *task) +{ +} + +static inline void blk_schedule_flush_plug(struct task_struct *task) +{ +} + + +static inline bool blk_needs_flush_plug(struct task_struct *tsk) +{ + return false; +} + +static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask) +{ + return 0; +} + +static inline long nr_blockdev_pages(void) +{ + return 0; +} +#endif /* CONFIG_BLOCK */ + +extern void blk_io_schedule(void); + extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct page *page); @@ -1293,6 +1384,11 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } +static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) +{ + return q->limits.max_zone_append_sectors; +} + static inline unsigned queue_logical_block_size(const struct request_queue *q) { int retval = 512; @@ -1458,6 +1554,24 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev) return 0; } +static inline unsigned int bdev_max_open_zones(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return queue_max_open_zones(q); + return 0; +} + +static inline unsigned int bdev_max_active_zones(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + return queue_max_active_zones(q); + return 0; +} + static inline int queue_dma_alignment(const struct request_queue *q) { return q ? 
q->dma_alignment : 511; @@ -1483,7 +1597,7 @@ static inline unsigned int blksize_bits(unsigned int size) static inline unsigned int block_size(struct block_device *bdev) { - return bdev->bd_block_size; + return 1 << bdev->bd_inode->i_blkbits; } int kblockd_schedule_work(struct work_struct *work); @@ -1551,6 +1665,12 @@ struct blk_integrity *bdev_get_integrity(struct block_device *bdev) return blk_get_integrity(bdev->bd_disk); } +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return q->integrity.profile; +} + static inline bool blk_integrity_rq(struct request *rq) { return rq->cmd_flags & REQ_INTEGRITY; @@ -1631,6 +1751,11 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { return NULL; } +static inline bool +blk_integrity_queue_supports_integrity(struct request_queue *q) +{ + return false; +} static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) { return 0; @@ -1682,7 +1807,27 @@ static inline struct bio_vec *rq_integrity_vec(struct request *rq) #endif /* CONFIG_BLK_DEV_INTEGRITY */ +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q); + +void blk_ksm_unregister(struct request_queue *q); + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm, + struct request_queue *q) +{ + return true; +} + +static inline void blk_ksm_unregister(struct request_queue *q) { } + +#endif /* CONFIG_BLK_INLINE_ENCRYPTION */ + + struct block_device_operations { + blk_qc_t (*submit_bio) (struct bio *bio); int (*open) (struct block_device *, fmode_t); void (*release) (struct gendisk *, fmode_t); int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int); @@ -1690,8 +1835,6 @@ struct block_device_operations { int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); unsigned int (*check_events) (struct gendisk *disk, unsigned int clearing); - /* ->media_changed() is DEPRECATED, use ->check_events() instead */ - int (*media_changed) (struct gendisk *); void (*unlock_native_capacity) (struct gendisk *); int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); @@ -1719,6 +1862,7 @@ extern int bdev_write_page(struct block_device *, sector_t, struct page *, #ifdef CONFIG_BLK_DEV_ZONED bool blk_req_needs_zone_write_lock(struct request *rq); +bool blk_req_zone_write_trylock(struct request *rq); void __blk_req_zone_write_lock(struct request *rq); void __blk_req_zone_write_unlock(struct request *rq); @@ -1770,64 +1914,91 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq) } #endif /* CONFIG_BLK_DEV_ZONED */ -#else /* CONFIG_BLOCK */ - -struct block_device; - -/* - * stubs for when the block layer is configured out - */ -#define buffer_heads_over_limit 0 - -static inline long nr_blockdev_pages(void) +static inline void blk_wake_io_task(struct task_struct *waiter) { - return 0; + /* + * If we're polling, the task itself is doing the completions. For + * that case, we don't need to signal a wakeup, it's enough to just + * mark us as RUNNING. 
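blk_ksm_register() above is how inline-encryption-capable hardware advertises its keyslot manager, and the !CONFIG_BLK_INLINE_ENCRYPTION stub returns true so callers stay free of ifdefs. A hedged sketch with a hypothetical example_* name:

#include <linux/blkdev.h>
#include <linux/errno.h>

static int example_probe_inline_crypto(struct request_queue *q,
				       struct blk_keyslot_manager *ksm)
{
	/* Registration can be refused, e.g. if the queue already has
	 * an integrity profile, so the result must be checked. */
	if (!blk_ksm_register(ksm, q))
		return -ENODEV;
	return 0;
}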
+ */ + if (waiter == current) + __set_current_state(TASK_RUNNING); + else + wake_up_process(waiter); } -struct blk_plug { -}; +unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors, + unsigned int op); +void disk_end_io_acct(struct gendisk *disk, unsigned int op, + unsigned long start_time); -static inline void blk_start_plug(struct blk_plug *plug) +/** + * bio_start_io_acct - start I/O accounting for bio based drivers + * @bio: bio to start account for + * + * Returns the start time that should be passed back to bio_end_io_acct(). + */ +static inline unsigned long bio_start_io_acct(struct bio *bio) { + return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio)); } -static inline void blk_finish_plug(struct blk_plug *plug) +/** + * bio_end_io_acct - end I/O accounting for bio based drivers + * @bio: bio to end account for + * @start: start time returned by bio_start_io_acct() + */ +static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) { + return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time); } -static inline void blk_flush_plug(struct task_struct *task) -{ -} +int bdev_read_only(struct block_device *bdev); +int set_blocksize(struct block_device *bdev, int size); -static inline void blk_schedule_flush_plug(struct task_struct *task) -{ -} +const char *bdevname(struct block_device *bdev, char *buffer); +struct block_device *lookup_bdev(const char *); +void blkdev_show(struct seq_file *seqf, off_t offset); -static inline bool blk_needs_flush_plug(struct task_struct *tsk) +#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ +#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ +#ifdef CONFIG_BLOCK +#define BLKDEV_MAJOR_MAX 512 +#else +#define BLKDEV_MAJOR_MAX 0 +#endif + +int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); +struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, + void *holder); +struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder); +int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, + void *holder); +void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, + void *holder); +void blkdev_put(struct block_device *bdev, fmode_t mode); + +struct block_device *I_BDEV(struct inode *inode); +struct block_device *bdget(dev_t); +struct block_device *bdgrab(struct block_device *bdev); +void bdput(struct block_device *); + +#ifdef CONFIG_BLOCK +void invalidate_bdev(struct block_device *bdev); +int sync_blockdev(struct block_device *bdev); +#else +static inline void invalidate_bdev(struct block_device *bdev) { - return false; } - -static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, - sector_t *error_sector) +static inline int sync_blockdev(struct block_device *bdev) { return 0; } +#endif +int fsync_bdev(struct block_device *bdev); -#endif /* CONFIG_BLOCK */ - -static inline void blk_wake_io_task(struct task_struct *waiter) -{ - /* - * If we're polling, the task itself is doing the completions. For - * that case, we don't need to signal a wakeup, it's enough to just - * mark us as RUNNING. 
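disk_start_io_acct()/disk_end_io_acct() and the bio_start_io_acct()/bio_end_io_acct() wrappers above replace the generic_*_io_acct() helpers removed earlier in this diff. A hedged sketch of a bio-based driver that completes I/O synchronously; the example_* name is hypothetical:

#include <linux/bio.h>
#include <linux/blkdev.h>

static blk_qc_t example_sync_submit_bio(struct bio *bio)
{
	unsigned long start = bio_start_io_acct(bio);

	/* ... service the bio synchronously ... */

	bio_end_io_acct(bio, start);
	bio_endio(bio);
	return BLK_QC_T_NONE;
}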
- */ - if (waiter == current) - __set_current_state(TASK_RUNNING); - else - wake_up_process(waiter); -} +struct super_block *freeze_bdev(struct block_device *bdev); +int thaw_bdev(struct block_device *bdev, struct super_block *sb); -#endif +#endif /* _LINUX_BLKDEV_H */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index c11b413d5b1a..64f367044e25 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -46,7 +46,8 @@ struct bpf_cgroup_storage { }; struct bpf_cgroup_storage_map *map; struct bpf_cgroup_storage_key key; - struct list_head list; + struct list_head list_map; + struct list_head list_cg; struct rb_node node; struct rcu_head rcu; }; @@ -57,8 +58,6 @@ struct bpf_cgroup_link { enum bpf_attach_type type; }; -extern const struct bpf_link_ops bpf_cgroup_link_lops; - struct bpf_prog_list { struct list_head node; struct bpf_prog *prog; @@ -80,6 +79,9 @@ struct cgroup_bpf { struct list_head progs[MAX_BPF_ATTACH_TYPE]; u32 flags[MAX_BPF_ATTACH_TYPE]; + /* list of cgroup shared storages */ + struct list_head storages; + /* temp storage for effective prog array used by prog_attach/detach */ struct bpf_prog_array *inactive; @@ -100,8 +102,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, struct bpf_cgroup_link *link, enum bpf_attach_type type); -int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link, - struct bpf_prog *new_prog); int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -112,8 +112,6 @@ int cgroup_bpf_attach(struct cgroup *cgrp, u32 flags); int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, enum bpf_attach_type type); -int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog, - struct bpf_prog *new_prog); int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, union bpf_attr __user *uattr); @@ -138,8 +136,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, - void __user *buf, size_t *pcount, - loff_t *ppos, void **new_buf, + void **buf, size_t *pcount, loff_t *ppos, enum bpf_attach_type type); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, @@ -168,6 +165,9 @@ static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); } +struct bpf_cgroup_storage * +cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, + void *key, bool locked); struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, enum bpf_cgroup_storage_type stype); void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); @@ -176,7 +176,6 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, enum bpf_attach_type type); void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map); -void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map); int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, @@ -217,6 +216,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ + 
BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) + #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) @@ -302,12 +304,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, }) -#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ +#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ ({ \ int __ret = 0; \ if (cgroup_bpf_enabled) \ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ - buf, count, pos, nbuf, \ + buf, count, pos, \ BPF_CGROUP_SYSCTL); \ __ret; \ }) @@ -354,7 +356,6 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr, #else struct bpf_prog; -struct bpf_link; struct cgroup_bpf {}; static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} @@ -378,13 +379,6 @@ static inline int cgroup_bpf_link_attach(const union bpf_attr *attr, return -EINVAL; } -static inline int cgroup_bpf_replace(struct bpf_link *link, - struct bpf_prog *old_prog, - struct bpf_prog *new_prog) -{ - return -EINVAL; -} - static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) { @@ -395,8 +389,6 @@ static inline void bpf_cgroup_storage_set( struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } -static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, - struct bpf_map *map) {} static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } static inline void bpf_cgroup_storage_free( @@ -411,10 +403,12 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, } #define cgroup_bpf_enabled (0) +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) @@ -429,7 +423,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ optlen, max_optlen, retval) ({ retval; }) diff --git a/include/linux/bpf-netns.h b/include/linux/bpf-netns.h new file mode 100644 index 000000000000..722f799c1a2e --- /dev/null +++ b/include/linux/bpf-netns.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BPF_NETNS_H +#define _BPF_NETNS_H + +#include <linux/mutex.h> +#include <uapi/linux/bpf.h> + +enum netns_bpf_attach_type { + NETNS_BPF_INVALID = -1, + NETNS_BPF_FLOW_DISSECTOR = 0, + NETNS_BPF_SK_LOOKUP, + MAX_NETNS_BPF_ATTACH_TYPE +}; + +static inline enum netns_bpf_attach_type 
+to_netns_bpf_attach_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + case BPF_FLOW_DISSECTOR: + return NETNS_BPF_FLOW_DISSECTOR; + case BPF_SK_LOOKUP: + return NETNS_BPF_SK_LOOKUP; + default: + return NETNS_BPF_INVALID; + } +} + +/* Protects updates to netns_bpf */ +extern struct mutex netns_bpf_mutex; + +union bpf_attr; +struct bpf_prog; + +#ifdef CONFIG_NET +int netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr); +int netns_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog); +int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); +int netns_bpf_link_create(const union bpf_attr *attr, + struct bpf_prog *prog); +#else +static inline int netns_bpf_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_prog_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EOPNOTSUPP; +} + +static inline int netns_bpf_link_create(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} +#endif + +#endif /* _BPF_NETNS_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index fd2b2322412d..55f694b63164 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -19,6 +19,7 @@ #include <linux/mutex.h> #include <linux/module.h> #include <linux/kallsyms.h> +#include <linux/capability.h> struct bpf_verifier_env; struct bpf_verifier_log; @@ -31,10 +32,22 @@ struct seq_file; struct btf; struct btf_type; struct exception_table_entry; +struct seq_operations; +struct bpf_iter_aux_info; extern struct idr btf_idr; extern spinlock_t btf_idr_lock; +typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, + struct bpf_iter_aux_info *aux); +typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); +struct bpf_iter_seq_info { + const struct seq_operations *seq_ops; + bpf_iter_init_seq_priv_t init_seq_private; + bpf_iter_fini_seq_priv_t fini_seq_private; + u32 seq_priv_size; +}; + /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ @@ -88,6 +101,15 @@ struct bpf_map_ops { int (*map_direct_value_meta)(const struct bpf_map *map, u64 imm, u32 *off); int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); + __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, + struct poll_table_struct *pts); + + /* BTF name and id of struct allocated by map_alloc */ + const char * const map_btf_name; + int *map_btf_id; + + /* bpf_iter info used to open a seq_file */ + const struct bpf_iter_seq_info *iter_seq_info; }; struct bpf_map_memory { @@ -118,7 +140,7 @@ struct bpf_map { struct bpf_map_memory memory; char name[BPF_OBJ_NAME_LEN]; u32 btf_vmlinux_value_type_id; - bool unpriv_array; + bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ /* 22 bytes hole */ @@ -241,7 +263,11 @@ enum bpf_arg_type { ARG_PTR_TO_INT, /* pointer to int */ ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ + ARG_PTR_TO_SOCKET_OR_NULL, /* pointer to bpf_sock (fullsock) or NULL */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ + ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ + ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */ + ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of 
allocated bytes requested */ }; /* type of values returned from helper functions */ @@ -253,6 +279,8 @@ enum bpf_return_type { RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ + RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */ + RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */ }; /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs @@ -275,6 +303,12 @@ struct bpf_func_proto { enum bpf_arg_type arg_type[5]; }; int *btf_id; /* BTF ids of arguments */ + bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is + * valid. Often used if more + * than one btf id is permitted + * for this argument. + */ + int *ret_btf_id; /* return value btf_id */ }; /* bpf_context is intentionally undefined structure. Pointer to bpf_context is @@ -319,6 +353,13 @@ enum bpf_reg_type { PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ PTR_TO_BTF_ID, /* reg points to kernel struct */ + PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */ + PTR_TO_MEM, /* reg points to valid memory region */ + PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */ + PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */ + PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */ + PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */ + PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */ }; /* The information passed from prog-specific *_is_valid_access @@ -641,6 +682,13 @@ struct bpf_jit_poke_descriptor { u16 reason; }; +/* reg_type info for ctx arguments */ +struct bpf_ctx_arg_aux { + u32 offset; + enum bpf_reg_type reg_type; + u32 btf_id; +}; + struct bpf_prog_aux { atomic64_t refcnt; u32 used_map_cnt; @@ -652,6 +700,10 @@ struct bpf_prog_aux { u32 func_cnt; /* used by non-func prog as the number of func progs */ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ u32 attach_btf_id; /* in-kernel BTF type id to attach to */ + u32 ctx_arg_info_size; + u32 max_rdonly_access; + u32 max_rdwr_access; + const struct bpf_ctx_arg_aux *ctx_arg_info; struct bpf_prog *linked_prog; bool verifier_zext; /* Zero extensions has been inserted by verifier. 
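The new ALLOC_MEM argument and return types above let the verifier track memory handed out by a helper. A hedged sketch of a helper prototype wired with them, modeled loosely on the ring-buffer helpers this series enables; example_reserve is hypothetical and its body is a placeholder:

#include <linux/bpf.h>
#include <linux/filter.h>

BPF_CALL_3(example_reserve, struct bpf_map *, map, u64, size, u64, flags)
{
	return 0;	/* placeholder: would return the reserved pointer */
}

static const struct bpf_func_proto example_reserve_proto = {
	.func		= example_reserve,
	.gpl_only	= true,
	.ret_type	= RET_PTR_TO_ALLOC_MEM_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_CONST_ALLOC_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};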
*/ bool offload_requested; @@ -729,6 +781,33 @@ struct bpf_array_aux { struct work_struct work; }; +struct bpf_link { + atomic64_t refcnt; + u32 id; + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; + struct work_struct work; +}; + +struct bpf_link_ops { + void (*release)(struct bpf_link *link); + void (*dealloc)(struct bpf_link *link); + int (*detach)(struct bpf_link *link); + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog); + void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); + int (*fill_link_info)(const struct bpf_link *link, + struct bpf_link_info *info); +}; + +struct bpf_link_primer { + struct bpf_link *link; + struct file *file; + int fd; + u32 id; +}; + struct bpf_struct_ops_value; struct btf_type; struct btf_member; @@ -898,6 +977,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, struct bpf_prog *old_prog); +int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); +int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, + struct bpf_prog *prog); int bpf_prog_array_copy_info(struct bpf_prog_array *array, u32 *prog_ids, u32 request_cnt, u32 *prog_cnt); @@ -987,6 +1069,7 @@ _out: \ #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); +extern struct mutex bpf_stats_enabled_mutex; /* * Block execution of BPF programs attached to instrumentation (perf, @@ -1020,15 +1103,18 @@ static inline void bpf_enable_instrumentation(void) extern const struct file_operations bpf_map_fops; extern const struct file_operations bpf_prog_fops; +extern const struct file_operations bpf_iter_fops; #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ extern const struct bpf_prog_ops _name ## _prog_ops; \ extern const struct bpf_verifier_ops _name ## _verifier_ops; #define BPF_MAP_TYPE(_id, _ops) \ extern const struct bpf_map_ops _ops; +#define BPF_LINK_TYPE(_id, _name) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE +#undef BPF_LINK_TYPE extern const struct bpf_prog_ops bpf_offload_prog_ops; extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; @@ -1077,29 +1163,39 @@ int generic_map_update_batch(struct bpf_map *map, int generic_map_delete_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr); +struct bpf_map *bpf_map_get_curr_or_next(u32 *id); +struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); extern int sysctl_unprivileged_bpf_disabled; -int bpf_map_new_fd(struct bpf_map *map, int flags); -int bpf_prog_new_fd(struct bpf_prog *prog); +static inline bool bpf_allow_ptr_leaks(void) +{ + return perfmon_capable(); +} -struct bpf_link { - atomic64_t refcnt; - const struct bpf_link_ops *ops; - struct bpf_prog *prog; - struct work_struct work; -}; +static inline bool bpf_allow_ptr_to_map_access(void) +{ + return perfmon_capable(); +} -struct bpf_link_ops { - void (*release)(struct bpf_link *link); - void (*dealloc)(struct bpf_link *link); +static inline bool bpf_bypass_spec_v1(void) +{ + return perfmon_capable(); +} -}; +static inline bool bpf_bypass_spec_v4(void) +{ + return perfmon_capable(); +} -void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops, - struct bpf_prog *prog); -void bpf_link_cleanup(struct bpf_link *link, struct file *link_file, - int link_fd); +int bpf_map_new_fd(struct bpf_map *map, int flags); +int bpf_prog_new_fd(struct bpf_prog *prog); + +void bpf_link_init(struct 
bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog); +int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); +int bpf_link_settle(struct bpf_link_primer *primer); +void bpf_link_cleanup(struct bpf_link_primer *primer); void bpf_link_inc(struct bpf_link *link); void bpf_link_put(struct bpf_link *link); int bpf_link_new_fd(struct bpf_link *link); @@ -1109,6 +1205,52 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd); int bpf_obj_pin_user(u32 ufd, const char __user *pathname); int bpf_obj_get_user(const char __user *pathname, int flags); +#define BPF_ITER_FUNC_PREFIX "bpf_iter_" +#define DEFINE_BPF_ITER_FUNC(target, args...) \ + extern int bpf_iter_ ## target(args); \ + int __init bpf_iter_ ## target(args) { return 0; } + +struct bpf_iter_aux_info { + struct bpf_map *map; +}; + +typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, + union bpf_iter_link_info *linfo, + struct bpf_iter_aux_info *aux); +typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); + +#define BPF_ITER_CTX_ARG_MAX 2 +struct bpf_iter_reg { + const char *target; + bpf_iter_attach_target_t attach_target; + bpf_iter_detach_target_t detach_target; + u32 ctx_arg_info_size; + struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; + const struct bpf_iter_seq_info *seq_info; +}; + +struct bpf_iter_meta { + __bpf_md_ptr(struct seq_file *, seq); + u64 session_id; + u64 seq_num; +}; + +struct bpf_iter__bpf_map_elem { + __bpf_md_ptr(struct bpf_iter_meta *, meta); + __bpf_md_ptr(struct bpf_map *, map); + __bpf_md_ptr(void *, key); + __bpf_md_ptr(void *, value); +}; + +int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); +void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); +bool bpf_iter_prog_supported(struct bpf_prog *prog); +int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int bpf_iter_new_fd(struct bpf_link *link); +bool bpf_link_is_iter(struct bpf_link *link); +struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); +int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); + int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, @@ -1163,11 +1305,13 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, struct bpf_prog *xdp_prog); +bool dev_map_can_have_prog(struct bpf_map *map); struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); void __cpu_map_flush(void); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); +bool cpu_map_prog_allowed(struct bpf_map *map); /* Return map's numa specified by userspace */ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) @@ -1215,6 +1359,7 @@ int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog, struct bpf_prog *bpf_prog_by_id(u32 id); +const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -1259,6 +1404,35 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) { } +static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, + struct bpf_prog *prog) 
+{ +} + +static inline int bpf_link_prime(struct bpf_link *link, + struct bpf_link_primer *primer) +{ + return -EOPNOTSUPP; +} + +static inline int bpf_link_settle(struct bpf_link_primer *primer) +{ + return -EOPNOTSUPP; +} + +static inline void bpf_link_cleanup(struct bpf_link_primer *primer) +{ +} + +static inline void bpf_link_inc(struct bpf_link *link) +{ +} + +static inline void bpf_link_put(struct bpf_link *link) +{ +} + static inline int bpf_obj_get_user(const char __user *pathname, int flags) { return -EOPNOTSUPP; @@ -1275,6 +1449,10 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map { return NULL; } +static inline bool dev_map_can_have_prog(struct bpf_map *map) +{ + return false; +} static inline void __dev_flush(void) { @@ -1323,6 +1501,11 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, return 0; } +static inline bool cpu_map_prog_allowed(struct bpf_map *map) +{ + return false; +} + static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) { @@ -1365,6 +1548,12 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id) { return ERR_PTR(-ENOTSUPP); } + +static inline const struct bpf_func_proto * +bpf_base_func_proto(enum bpf_func_id func_id) +{ + return NULL; +} #endif /* CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, @@ -1444,13 +1633,16 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map) #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ #if defined(CONFIG_BPF_STREAM_PARSER) -int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); +int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, + struct bpf_prog *old, u32 which); int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); void sock_map_unhash(struct sock *sk); void sock_map_close(struct sock *sk, long timeout); #else static inline int sock_map_prog_update(struct bpf_map *map, - struct bpf_prog *prog, u32 which) + struct bpf_prog *prog, + struct bpf_prog *old, u32 which) { return -EOPNOTSUPP; } @@ -1460,6 +1652,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr, { return -EINVAL; } + +static inline int sock_map_prog_detach(const union bpf_attr *attr, + enum bpf_prog_type ptype) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_STREAM_PARSER */ #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) @@ -1502,11 +1700,15 @@ extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; extern const struct bpf_func_proto bpf_get_numa_node_id_proto; extern const struct bpf_func_proto bpf_tail_call_proto; extern const struct bpf_func_proto bpf_ktime_get_ns_proto; +extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_get_task_stack_proto; +extern const struct bpf_func_proto bpf_get_stackid_proto_pe; +extern const struct bpf_func_proto bpf_get_stack_proto_pe; extern const struct bpf_func_proto bpf_sock_map_update_proto; extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; @@ -1523,13 +1725,28 @@ extern const 
struct bpf_func_proto bpf_strtoul_proto; extern const struct bpf_func_proto bpf_tcp_sock_proto; extern const struct bpf_func_proto bpf_jiffies64_proto; extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; +extern const struct bpf_func_proto bpf_event_output_data_proto; +extern const struct bpf_func_proto bpf_ringbuf_output_proto; +extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; +extern const struct bpf_func_proto bpf_ringbuf_submit_proto; +extern const struct bpf_func_proto bpf_ringbuf_discard_proto; +extern const struct bpf_func_proto bpf_ringbuf_query_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; +extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; const struct bpf_func_proto *bpf_tracing_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); +const struct bpf_func_proto *tracing_prog_func_proto( + enum bpf_func_id func_id, const struct bpf_prog *prog); + /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); #if defined(CONFIG_NET) bool bpf_sock_common_is_valid_access(int off, int size, diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index ba0c2d56f8a3..a52a5688418e 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -64,6 +64,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2, #ifdef CONFIG_INET BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport, struct sk_reuseport_md, struct sk_reuseport_kern) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup, + struct bpf_sk_lookup, struct bpf_sk_lookup_kern) #endif #if defined(CONFIG_BPF_JIT) BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops, @@ -118,3 +120,14 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) #if defined(CONFIG_BPF_JIT) BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops) #endif +BPF_MAP_TYPE(BPF_MAP_TYPE_RINGBUF, ringbuf_map_ops) + +BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint) +BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing) +#ifdef CONFIG_CGROUP_BPF +BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup) +#endif +BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) +#ifdef CONFIG_NET +BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) +#endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6abd5a778fcd..53c7bd568c5d 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -54,6 +54,8 @@ struct bpf_reg_state { u32 btf_id; /* for PTR_TO_BTF_ID */ + u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */ + /* Max size from any of the above. */ unsigned long raw; }; @@ -63,6 +65,8 @@ struct bpf_reg_state { * offset, so they can share range knowledge. * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we * came from, when one is tested for != NULL. + * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation + * for the purpose of tracking that it's freed. * For PTR_TO_SOCKET this is used to share which pointers retain the * same reference to the socket, to determine proper reference freeing. 
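The bpf_link primer declarations earlier in this hunk (bpf_link_init(), bpf_link_prime(), bpf_link_settle(), bpf_link_cleanup()) imply a fixed call sequence. A minimal sketch of a link implementation using them; example_link_ops and example_do_attach() are hypothetical names, not part of this patch:

static int example_link_attach(struct bpf_prog *prog)
{
	struct bpf_link_primer primer;
	struct bpf_link *link;
	int err;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	bpf_link_init(link, BPF_LINK_TYPE_TRACING, &example_link_ops, prog);

	err = bpf_link_prime(link, &primer);	/* reserve fd + file */
	if (err) {
		kfree(link);			/* not yet user-visible */
		return err;
	}
	err = example_do_attach(link);		/* hypothetical attach step */
	if (err) {
		bpf_link_cleanup(&primer);	/* roll back the primed fd */
		return err;
	}
	return bpf_link_settle(&primer);	/* publish; returns the fd */
}

Prime reserves the user-visible fd, settle commits it, and cleanup (rather than a bare kfree()) rolls back once the link has been primed.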
*/ @@ -375,6 +379,10 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; + bool allow_ptr_to_map_access; + bool bpf_capable; + bool bypass_spec_v1; + bool bypass_spec_v4; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ const struct bpf_line_info *prev_linfo; diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index d815622cd31e..2ae3c8e1d83c 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h @@ -3,22 +3,23 @@ #define _LINUX_BPFILTER_H #include <uapi/linux/bpfilter.h> -#include <linux/umh.h> +#include <linux/usermode_driver.h> +#include <linux/sockptr.h> struct sock; -int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, +int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen); int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen); +void bpfilter_umh_cleanup(struct umd_info *info); + struct bpfilter_umh_ops { - struct umh_info info; + struct umd_info info; /* since ip_getsockopt() can run in parallel, serialize access to umh */ struct mutex lock; - int (*sockopt)(struct sock *sk, int optname, - char __user *optval, + int (*sockopt)(struct sock *sk, int optname, sockptr_t optval, unsigned int optlen, bool is_set); int (*start)(void); - bool stop; }; extern struct bpfilter_umh_ops bpfilter_ops; #endif diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index f4b77018c625..6ad4c000661a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -15,7 +15,9 @@ #define PHY_ID_BCMAC131 0x0143bc70 #define PHY_ID_BCM5481 0x0143bca0 #define PHY_ID_BCM5395 0x0143bcf0 +#define PHY_ID_BCM53125 0x03625f20 #define PHY_ID_BCM54810 0x03625d00 +#define PHY_ID_BCM54811 0x03625cc0 #define PHY_ID_BCM5482 0x0143bcb0 #define PHY_ID_BCM5411 0x00206070 #define PHY_ID_BCM5421 0x002060e0 @@ -24,6 +26,7 @@ #define PHY_ID_BCM5461 0x002060c0 #define PHY_ID_BCM54612E 0x03625e60 #define PHY_ID_BCM54616S 0x03625d10 +#define PHY_ID_BCM54140 0xae025009 #define PHY_ID_BCM57780 0x03625d90 #define PHY_ID_BCM89610 0x03625cd0 @@ -114,6 +117,14 @@ #define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10) #define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0) +#define MII_BCM54XX_RDB_ADDR 0x1e +#define MII_BCM54XX_RDB_DATA 0x1f + +/* legacy access control via rdb/expansion register */ +#define BCM54XX_RDB_REG0087 0x0087 +#define BCM54XX_EXP_REG7E (MII_BCM54XX_EXP_SEL_ER + 0x7E) +#define BCM54XX_ACCESS_MODE_LEGACY_EN BIT(15) + /* * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. 
(PHY REG 0x18) */ @@ -290,4 +301,51 @@ #define MII_BRCM_CORE_EXPB0 0xB0 #define MII_BRCM_CORE_EXPB1 0xB1 +/* Enhanced Cable Diagnostics */ +#define BCM54XX_RDB_ECD_CTRL 0x2a0 +#define BCM54XX_EXP_ECD_CTRL (MII_BCM54XX_EXP_SEL_ER + 0xc0) + +#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT3 1 /* CAT3 or worse */ +#define BCM54XX_ECD_CTRL_CABLE_TYPE_CAT5 0 /* CAT5 or better */ +#define BCM54XX_ECD_CTRL_CABLE_TYPE_MASK BIT(0) /* cable type */ +#define BCM54XX_ECD_CTRL_INVALID BIT(3) /* invalid result */ +#define BCM54XX_ECD_CTRL_UNIT_CM 0 /* centimeters */ +#define BCM54XX_ECD_CTRL_UNIT_M 1 /* meters */ +#define BCM54XX_ECD_CTRL_UNIT_MASK BIT(10) /* cable length unit */ +#define BCM54XX_ECD_CTRL_IN_PROGRESS BIT(11) /* test in progress */ +#define BCM54XX_ECD_CTRL_BREAK_LINK BIT(12) /* unconnect link + * during test + */ +#define BCM54XX_ECD_CTRL_CROSS_SHORT_DIS BIT(13) /* disable inter-pair + * short check + */ +#define BCM54XX_ECD_CTRL_RUN BIT(15) /* run immediate */ + +#define BCM54XX_RDB_ECD_FAULT_TYPE 0x2a1 +#define BCM54XX_EXP_ECD_FAULT_TYPE (MII_BCM54XX_EXP_SEL_ER + 0xc1) +#define BCM54XX_ECD_FAULT_TYPE_INVALID 0x0 +#define BCM54XX_ECD_FAULT_TYPE_OK 0x1 +#define BCM54XX_ECD_FAULT_TYPE_OPEN 0x2 +#define BCM54XX_ECD_FAULT_TYPE_SAME_SHORT 0x3 /* short same pair */ +#define BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT 0x4 /* short different pairs */ +#define BCM54XX_ECD_FAULT_TYPE_BUSY 0x9 +#define BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK GENMASK(3, 0) +#define BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK GENMASK(7, 4) +#define BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK GENMASK(11, 8) +#define BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK GENMASK(15, 12) +#define BCM54XX_ECD_PAIR_A_LENGTH_RESULTS 0x2a2 +#define BCM54XX_ECD_PAIR_B_LENGTH_RESULTS 0x2a3 +#define BCM54XX_ECD_PAIR_C_LENGTH_RESULTS 0x2a4 +#define BCM54XX_ECD_PAIR_D_LENGTH_RESULTS 0x2a5 + +#define BCM54XX_RDB_ECD_PAIR_A_LENGTH_RESULTS 0x2a2 +#define BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc2) +#define BCM54XX_RDB_ECD_PAIR_B_LENGTH_RESULTS 0x2a3 +#define BCM54XX_EXP_ECD_PAIR_B_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc3) +#define BCM54XX_RDB_ECD_PAIR_C_LENGTH_RESULTS 0x2a4 +#define BCM54XX_EXP_ECD_PAIR_C_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc4) +#define BCM54XX_RDB_ECD_PAIR_D_LENGTH_RESULTS 0x2a5 +#define BCM54XX_EXP_ECD_PAIR_D_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc5) +#define BCM54XX_ECD_LENGTH_RESULTS_INVALID 0xffff + #endif /* _LINUX_BRCMPHY_H */ diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h index 8ed53d7524ea..e66b711d091e 100644 --- a/include/linux/bsearch.h +++ b/include/linux/bsearch.h @@ -4,7 +4,29 @@ #include <linux/types.h> -void *bsearch(const void *key, const void *base, size_t num, size_t size, - cmp_func_t cmp); +static __always_inline +void *__inline_bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp) +{ + const char *pivot; + int result; + + while (num > 0) { + pivot = base + (num >> 1) * size; + result = cmp(key, pivot); + + if (result == 0) + return (void *)pivot; + + if (result > 0) { + base = pivot + size; + num--; + } + num >>= 1; + } + + return NULL; +} + +extern void *bsearch(const void *key, const void *base, size_t num, size_t size, cmp_func_t cmp); #endif /* _LINUX_BSEARCH_H */ diff --git a/include/linux/btf.h b/include/linux/btf.h index 5c1ea99b480f..8b81fbb4497c 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -82,6 +82,11 @@ static inline bool btf_type_is_int(const struct btf_type *t) return BTF_INFO_KIND(t->info) == BTF_KIND_INT; } +static inline bool 
btf_type_is_small_int(const struct btf_type *t) +{ + return btf_type_is_int(t) && t->size <= sizeof(u64); +} + static inline bool btf_type_is_enum(const struct btf_type *t) { return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM; diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h new file mode 100644 index 000000000000..4867d549e3c1 --- /dev/null +++ b/include/linux/btf_ids.h @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_BTF_IDS_H +#define _LINUX_BTF_IDS_H + +#ifdef CONFIG_DEBUG_INFO_BTF + +#include <linux/compiler.h> /* for __PASTE */ + +/* + * Following macros help to define lists of BTF IDs placed + * in .BTF_ids section. They are initially filled with zeros + * (during compilation) and resolved later during the + * linking phase by resolve_btfids tool. + * + * Any change in list layout must be reflected in resolve_btfids + * tool logic. + */ + +#define BTF_IDS_SECTION ".BTF_ids" + +#define ____BTF_ID(symbol) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".local " #symbol " ; \n" \ +".type " #symbol ", STT_OBJECT; \n" \ +".size " #symbol ", 4; \n" \ +#symbol ": \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#define __BTF_ID(symbol) \ + ____BTF_ID(symbol) + +#define __ID(prefix) \ + __PASTE(prefix, __COUNTER__) + +/* + * The BTF_ID defines unique symbol for each ID pointing + * to 4 zero bytes. + */ +#define BTF_ID(prefix, name) \ + __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__)) + +/* + * The BTF_ID_LIST macro defines pure (unsorted) list + * of BTF IDs, with following layout: + * + * BTF_ID_LIST(list1) + * BTF_ID(type1, name1) + * BTF_ID(type2, name2) + * + * list1: + * __BTF_ID__type1__name1__1: + * .zero 4 + * __BTF_ID__type2__name2__2: + * .zero 4 + * + */ +#define __BTF_ID_LIST(name, scope) \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +"." #scope " " #name "; \n" \ +#name ":; \n" \ +".popsection; \n"); \ + +#define BTF_ID_LIST(name) \ +__BTF_ID_LIST(name, local) \ +extern u32 name[]; + +#define BTF_ID_LIST_GLOBAL(name) \ +__BTF_ID_LIST(name, globl) + +/* + * The BTF_ID_UNUSED macro defines 4 zero bytes. + * It's used when we want to define 'unused' entry + * in BTF_ID_LIST, like: + * + * BTF_ID_LIST(bpf_skb_output_btf_ids) + * BTF_ID(struct, sk_buff) + * BTF_ID_UNUSED + * BTF_ID(struct, task_struct) + */ + +#define BTF_ID_UNUSED \ +asm( \ +".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \ +".zero 4 \n" \ +".popsection; \n"); + +#else + +#define BTF_ID_LIST(name) static u32 name[5]; +#define BTF_ID(prefix, name) +#define BTF_ID_UNUSED +#define BTF_ID_LIST_GLOBAL(name) u32 name[1]; + +#endif /* CONFIG_DEBUG_INFO_BTF */ + +#ifdef CONFIG_NET +/* Define a list of socket types which can be the argument for + * skc_to_*_sock() helpers. All these sockets should have + * sock_common as the first argument in its memory layout. 
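As a concrete illustration of the BTF_ID machinery defined above, a hedged sketch of declaring and consuming a list; the list name and struct choices are illustrative, and the entries stay zero until resolve_btfids patches them at link time:

BTF_ID_LIST(example_btf_ids)
BTF_ID(struct, task_struct)
BTF_ID(struct, file)

/* after linking, example_btf_ids[0] holds the BTF type ID of
 * struct task_struct and example_btf_ids[1] that of struct file;
 * with CONFIG_DEBUG_INFO_BTF=n the list decays to a zeroed array,
 * so callers must treat a zero ID as "no BTF information" */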
+ */ +#define BTF_SOCK_TYPE_xxx \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + +enum { +#define BTF_SOCK_TYPE(name, str) name, +BTF_SOCK_TYPE_xxx +#undef BTF_SOCK_TYPE +MAX_BTF_SOCK_TYPE, +}; + +extern u32 btf_sock_ids[]; +#endif + +#endif diff --git a/include/linux/btree.h b/include/linux/btree.h index 68f858c831b1..243ee544397a 100644 --- a/include/linux/btree.h +++ b/include/linux/btree.h @@ -10,7 +10,7 @@ * * A B+Tree is a data structure for looking up arbitrary (currently allowing * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure - * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not + * is described at https://en.wikipedia.org/wiki/B-tree, we currently do not * use binary search to find the key on lookups. * * Each B+Tree consists of a head, that contains bookkeeping information and diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 15b765a181b8..6b47f94378c5 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -272,14 +272,6 @@ void buffer_init(void); * inline definitions */ -static inline void attach_page_buffers(struct page *page, - struct buffer_head *head) -{ - get_page(page); - SetPagePrivate(page); - set_page_private(page, (unsigned long)head); -} - static inline void get_bh(struct buffer_head *bh) { atomic_inc(&bh->b_count); @@ -414,6 +406,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; } static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } +#define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ #endif /* _LINUX_BUFFER_HEAD_H */ diff --git a/include/linux/bvec.h b/include/linux/bvec.h index a81c13ac1972..dd74503f7e5e 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -12,8 +12,17 @@ #include <linux/errno.h> #include <linux/mm.h> -/* - * was unsigned short, but we might as well be ready for > 64kB I/O pages +/** + * struct bio_vec - a contiguous range of physical memory addresses + * @bv_page: First page associated with the address range. + * @bv_len: Number of bytes in the address range. + * @bv_offset: Start of the address range relative to the start of @bv_page. + * + * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len: + * + * nth_page(@bv_page, n) == @bv_page + n + * + * This holds because page_is_mergeable() checks the above property. 
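The nth_page() property documented above can be restated as a tiny helper; bvec_nth_page() is a hypothetical illustration, not part of this patch:

static inline struct page *bvec_nth_page(const struct bio_vec *bv,
					 unsigned int n)
{
	/* valid because page_is_mergeable() only merges physically
	 * contiguous pages into a single bvec, so nth_page() reduces
	 * to plain pointer arithmetic here */
	return bv->bv_page + n;
}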
*/ struct bio_vec { struct page *bv_page; @@ -108,11 +117,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, return true; } +static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter) +{ + iter->bi_bvec_done = 0; + iter->bi_idx++; +} + #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ - bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) + (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \ + (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter))) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ diff --git a/include/linux/cache.h b/include/linux/cache.h index 750621e41d1c..1aa8009f6d06 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -15,8 +15,14 @@ /* * __read_mostly is used to keep rarely changing variables out of frequently - * updated cachelines. If an architecture doesn't support it, ignore the - * hint. + * updated cachelines. Its use should be reserved for data that is used + * frequently in hot paths. Performance traces can help decide when to use + * this. You want __read_mostly data to be tightly packed, so that in the + * best case multiple frequently read variables for a hot path will be next + * to each other in order to reduce the number of cachelines needed to + * execute a critical path. We should be mindful and selective of its use. + * ie: if you're going to use it please supply a *good* justification in your + * commit log */ #ifndef __read_mostly #define __read_mostly diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index a954def26c0d..900b9f4e0605 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -34,7 +34,7 @@ struct can_skb_priv { int ifindex; int skbcnt; - struct can_frame cf[0]; + struct can_frame cf[]; }; static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb) diff --git a/include/linux/capability.h b/include/linux/capability.h index ecce0f43c73a..1e7fe311cabe 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -251,6 +251,21 @@ extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap); extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns); +static inline bool perfmon_capable(void) +{ + return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN); +} + +static inline bool bpf_capable(void) +{ + return capable(CAP_BPF) || capable(CAP_SYS_ADMIN); +} + +static inline bool checkpoint_restore_ns_capable(struct user_namespace *ns) +{ + return ns_capable(ns, CAP_CHECKPOINT_RESTORE) || + ns_capable(ns, CAP_SYS_ADMIN); +} /* audit system wants to get cap info from files as well */ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); diff --git a/include/linux/cb710.h b/include/linux/cb710.h index 60de3fedd3a7..405657a9a0d5 100644 --- a/include/linux/cb710.h +++ b/include/linux/cb710.h @@ -36,7 +36,7 @@ struct cb710_chip { unsigned slot_mask; unsigned slots; spinlock_t irq_lock; - struct cb710_slot slot[0]; + struct cb710_slot slot[]; }; /* NOTE: cb710_chip.slots is modified only during device init/exit and diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index 528271c60018..f48d0a31deae 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -73,7 +73,6 
@@ struct cdrom_device_ops { int (*drive_status) (struct cdrom_device_info *, int); unsigned int (*check_events) (struct cdrom_device_info *cdi, unsigned int clearing, int slot); - int (*media_changed) (struct cdrom_device_info *, int); int (*tray_move) (struct cdrom_device_info *, int); int (*lock_door) (struct cdrom_device_info *, int); int (*select_speed) (struct cdrom_device_info *, int); @@ -94,6 +93,11 @@ struct cdrom_device_ops { struct packet_command *); }; +int cdrom_multisession(struct cdrom_device_info *cdi, + struct cdrom_multisession *info); +int cdrom_read_tocentry(struct cdrom_device_info *cdi, + struct cdrom_tocentry *entry); + /* the general block_device operations structure: */ extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode); @@ -102,9 +106,8 @@ extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); extern unsigned int cdrom_check_events(struct cdrom_device_info *cdi, unsigned int clearing); -extern int cdrom_media_changed(struct cdrom_device_info *); -extern int register_cdrom(struct cdrom_device_info *cdi); +extern int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi); extern void unregister_cdrom(struct cdrom_device_info *cdi); typedef struct { diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 39e6f4c57580..999636d53cf2 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -11,14 +11,14 @@ #define CEPH_FEATURE_INCARNATION_2 (1ull<<57) // CEPH_FEATURE_SERVER_JEWEL #define DEFINE_CEPH_FEATURE(bit, incarnation, name) \ - static const uint64_t CEPH_FEATURE_##name = (1ULL<<bit); \ - static const uint64_t CEPH_FEATUREMASK_##name = \ + static const uint64_t __maybe_unused CEPH_FEATURE_##name = (1ULL<<bit); \ + static const uint64_t __maybe_unused CEPH_FEATUREMASK_##name = \ (1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation); /* this bit is ignored but still advertised by release *when* */ #define DEFINE_CEPH_FEATURE_DEPRECATED(bit, incarnation, name, when) \ - static const uint64_t DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \ - static const uint64_t DEPRECATED_CEPH_FEATUREMASK_##name = \ + static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATURE_##name = (1ULL<<bit); \ + static const uint64_t __maybe_unused DEPRECATED_CEPH_FEATUREMASK_##name = \ (1ULL<<bit | CEPH_FEATURE_INCARNATION_##incarnation); /* @@ -58,7 +58,7 @@ * because 10.2.z (jewel) did not care if its peers advertised this * feature bit. * - * - In the second phase we stop advertising the the bit and call it + * - In the second phase we stop advertising the bit and call it * RETIRED. This can normally be done in the *next* major release * following the one in which we marked the feature DEPRECATED. 
In * the above example, for 12.0.z (luminous) we can say: diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index ebf5ba62b772..455e9b9e2adf 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -130,6 +130,7 @@ struct ceph_dir_layout { #define CEPH_MSG_CLIENT_REQUEST 24 #define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 #define CEPH_MSG_CLIENT_REPLY 26 +#define CEPH_MSG_CLIENT_METRICS 29 #define CEPH_MSG_CLIENT_CAPS 0x310 #define CEPH_MSG_CLIENT_LEASE 0x311 #define CEPH_MSG_CLIENT_SNAP 0x312 diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 525b7c3f1c81..c8645f0b797d 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -52,6 +52,7 @@ struct ceph_options { unsigned long osd_idle_ttl; /* jiffies */ unsigned long osd_keepalive_timeout; /* jiffies */ unsigned long osd_request_timeout; /* jiffies */ + u32 read_from_replica; /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */ /* * any type that can't be simply compared or doesn't need @@ -64,6 +65,7 @@ struct ceph_options { int num_mon; char *name; struct ceph_crypto_key *key; + struct rb_root crush_locs; }; /* @@ -73,6 +75,7 @@ struct ceph_options { #define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) #define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0 /* no timeout */ +#define CEPH_READ_FROM_REPLICA_DEFAULT 0 /* read from primary */ #define CEPH_MONC_HUNT_INTERVAL msecs_to_jiffies(3 * 1000) #define CEPH_MONC_PING_INTERVAL msecs_to_jiffies(10 * 1000) @@ -188,7 +191,7 @@ static inline int calc_pages_for(u64 off, u64 len) #define RB_CMP3WAY(a, b) ((a) < (b) ? -1 : (a) > (b)) #define DEFINE_RB_INSDEL_FUNCS2(name, type, keyfld, cmpexp, keyexp, nodefld) \ -static void insert_##name(struct rb_root *root, type *t) \ +static bool __insert_##name(struct rb_root *root, type *t) \ { \ struct rb_node **n = &root->rb_node; \ struct rb_node *parent = NULL; \ @@ -206,11 +209,17 @@ static void insert_##name(struct rb_root *root, type *t) \ else if (cmp > 0) \ n = &(*n)->rb_right; \ else \ - BUG(); \ + return false; \ } \ \ rb_link_node(&t->nodefld, parent, n); \ rb_insert_color(&t->nodefld, root); \ + return true; \ +} \ +static void __maybe_unused insert_##name(struct rb_root *root, type *t) \ +{ \ + if (!__insert_##name(root, t)) \ + BUG(); \ } \ static void erase_##name(struct rb_root *root, type *t) \ { \ @@ -273,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; extern struct kmem_cache *ceph_dir_file_cachep; extern struct kmem_cache *ceph_mds_request_cachep; +extern mempool_t *ceph_wb_pagevec_pool; /* ceph_common.c */ extern bool libceph_compatible(void *data); diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index dbb8a6959a73..ce4ffeb384d7 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -19,7 +19,7 @@ struct ceph_monmap { struct ceph_fsid fsid; u32 epoch; u32 num_mon; - struct ceph_entity_inst mon_inst[0]; + struct ceph_entity_inst mon_inst[]; }; struct ceph_mon_client; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 9d9f745b98a1..83fa08a06507 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -8,6 +8,7 @@ #include <linux/mempool.h> #include <linux/rbtree.h> #include <linux/refcount.h> +#include <linux/ktime.h> #include <linux/ceph/types.h> #include <linux/ceph/osdmap.h> @@ -135,6 +136,7 @@ struct ceph_osd_req_op { 
struct { u64 expected_object_size; u64 expected_write_size; + u32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ } alloc_hint; struct { u64 snapid; @@ -164,6 +166,7 @@ struct ceph_osd_request_target { bool recovery_deletes; unsigned int flags; /* CEPH_OSD_FLAG_* */ + bool used_replica; bool paused; u32 epoch; @@ -213,6 +216,8 @@ struct ceph_osd_request { /* internal */ unsigned long r_stamp; /* jiffies, send or check time */ unsigned long r_start_stamp; /* jiffies */ + ktime_t r_start_latency; /* ktime_t */ + ktime_t r_end_latency; /* ktime_t */ int r_attempts; u32 r_map_dne_bound; @@ -399,7 +404,7 @@ void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc); &__oreq->r_ops[__whch].typ.fld; \ }) -extern void osd_req_op_init(struct ceph_osd_request *osd_req, +struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u32 flags); extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, @@ -468,7 +473,8 @@ extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, unsigned int which, u64 expected_object_size, - u64 expected_write_size); + u64 expected_write_size, + u32 flags); extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 5e601975745f..3f4498fef6ad 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -302,9 +302,26 @@ bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, const struct ceph_pg *raw_pgid); +struct crush_loc { + char *cl_type_name; + char *cl_name; +}; + +struct crush_loc_node { + struct rb_node cl_node; + struct crush_loc cl_loc; /* pointers into cl_data */ + char cl_data[]; +}; + +int ceph_parse_crush_location(char *crush_location, struct rb_root *locs); +int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2); +void ceph_clear_crush_locs(struct rb_root *locs); + +int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id, + struct rb_root *locs); + extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id); - extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h index 88ed3c5c04c5..3a518fd0eaad 100644 --- a/include/linux/ceph/rados.h +++ b/include/linux/ceph/rados.h @@ -465,6 +465,19 @@ enum { const char *ceph_osd_watch_op_name(int o); enum { + CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE = 1, + CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE = 2, + CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ = 4, + CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ = 8, + CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY = 16, + CEPH_OSD_ALLOC_HINT_FLAG_IMMUTABLE = 32, + CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED = 64, + CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED = 128, + CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE = 256, + CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE = 512, +}; + +enum { CEPH_OSD_BACKOFF_OP_BLOCK = 1, CEPH_OSD_BACKOFF_OP_ACK_BLOCK = 2, CEPH_OSD_BACKOFF_OP_UNBLOCK = 3, @@ -517,6 +530,7 @@ struct ceph_osd_op { struct { __le64 expected_object_size; __le64 expected_write_size; + __le32 flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */ } __attribute__ ((packed)) alloc_hint; struct { __le64 snapid; diff 
--git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 52661155f85f..fee0b5547cd0 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -790,7 +790,9 @@ struct sock_cgroup_data { union { #ifdef __LITTLE_ENDIAN struct { - u8 is_data; + u8 is_data : 1; + u8 no_refcnt : 1; + u8 unused : 6; u8 padding; u16 prioidx; u32 classid; @@ -800,7 +802,9 @@ struct sock_cgroup_data { u32 classid; u16 prioidx; u8 padding; - u8 is_data; + u8 unused : 6; + u8 no_refcnt : 1; + u8 is_data : 1; } __packed; #endif u64 val; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 4598e4da6b1b..618838c48313 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock; void cgroup_sk_alloc_disable(void); void cgroup_sk_alloc(struct sock_cgroup_data *skcd); +void cgroup_sk_clone(struct sock_cgroup_data *skcd); void cgroup_sk_free(struct sock_cgroup_data *skcd); static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) @@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) */ v = READ_ONCE(skcd->val); - if (v & 1) + if (v & 3) return &cgrp_dfl_root.cgrp; return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp; @@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd) #else /* CONFIG_CGROUP_DATA */ static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {} +static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {} static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {} #endif /* CONFIG_CGROUP_DATA */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index bd1ee9039558..03a5de5f99f4 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -189,7 +189,7 @@ struct clk_duty { * and >= numerator) Return 0 on success, otherwise -EERROR. * * @init: Perform platform-specific initialization magic. - * This is not not used by any of the basic clock types. + * This is not used by any of the basic clock types. * This callback exist for HW which needs to perform some * initialisation magic for CCF to get an accurate view of the * clock. 
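A hedged sketch of a driver supplying the optional @init hook described in this comment; every name is invented for illustration, and the int return assumes the int-returning .init of this kernel era (adjust if the callback is void in your tree):

static int example_clk_init(struct clk_hw *hw)
{
	/* latch hardware state so CCF starts with an accurate view
	 * of the clock, as the documentation above describes */
	return 0;
}

static const struct clk_ops example_clk_ops = {
	.init = example_clk_init,
};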
It may also be used dynamic resource allocation is @@ -1096,7 +1096,6 @@ int clk_hw_get_parent_index(struct clk_hw *hw); int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent); unsigned int __clk_get_enable_count(struct clk *clk); unsigned long clk_hw_get_rate(const struct clk_hw *hw); -unsigned long __clk_get_flags(struct clk *clk); unsigned long clk_hw_get_flags(const struct clk_hw *hw); #define clk_hw_can_set_rate_parent(hw) \ (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index 49a53a137610..a4f82e836a7c 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -59,6 +59,7 @@ #define AT91_PMC_PLL_UPDT 0x1C /* PMC PLL update register [for SAM9X60] */ #define AT91_PMC_PLL_UPDT_UPDATE (1 << 8) /* Update PLL settings */ #define AT91_PMC_PLL_UPDT_ID (1 << 0) /* PLL ID */ +#define AT91_PMC_PLL_UPDT_ID_MSK (0xf) /* PLL ID mask */ #define AT91_PMC_PLL_UPDT_STUPTIM (0xff << 16) /* Startup time */ #define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */ @@ -136,6 +137,8 @@ #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) +#define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */ + #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ #define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ #define AT91_PMC_USBS_PLLA (0 << 0) @@ -174,6 +177,7 @@ #define AT91_PMC_MOSCRCS (1 << 17) /* Main On-Chip RC [some SAM9] */ #define AT91_PMC_CFDEV (1 << 18) /* Clock Failure Detector Event [some SAM9] */ #define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */ +#define AT91_PMC_MCKXRDY (1 << 26) /* Master Clock x [x=1..4] Ready Status */ #define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */ #define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */ diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 2b1b35240074..3f01d43f0598 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -131,6 +131,9 @@ extern void tegra210_set_sata_pll_seq_sw(bool state); extern void tegra210_put_utmipll_in_iddq(void); extern void tegra210_put_utmipll_out_iddq(void); extern int tegra210_clk_handle_mbist_war(unsigned int id); +extern void tegra210_clk_emc_dll_enable(bool flag); +extern void tegra210_clk_emc_dll_update_setting(u32 emc_dll_src_value); +extern void tegra210_clk_emc_update_setting(u32 emc_src_value); struct clk; @@ -143,4 +146,28 @@ void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb, void *cb_arg); int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same); +struct tegra210_clk_emc_config { + unsigned long rate; + bool same_freq; + u32 value; + + unsigned long parent_rate; + u8 parent; +}; + +struct tegra210_clk_emc_provider { + struct module *owner; + struct device *dev; + + struct tegra210_clk_emc_config *configs; + unsigned int num_configs; + + int (*set_rate)(struct device *dev, + const struct tegra210_clk_emc_config *config); +}; + +int tegra210_clk_emc_attach(struct clk *clk, + struct tegra210_clk_emc_provider *provider); +void tegra210_clk_emc_detach(struct clk *clk); + #endif /* __LINUX_CLK_TEGRA_H_ */ diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h deleted file mode 100644 index 4b0a69863656..000000000000 --- a/include/linux/clock_cooling.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/include/linux/clock_cooling.h - * - * Copyright (C) 2014 Eduardo Valentin 
<edubezval@gmail.com> - * - * Copyright (C) 2013 Texas Instruments Inc. - * Contact: Eduardo Valentin <eduardo.valentin@ti.com> - * - * Highly based on cpufreq_cooling.c. - * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com) - * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org> - */ - -#ifndef __CPU_COOLING_H__ -#define __CPU_COOLING_H__ - -#include <linux/of.h> -#include <linux/thermal.h> -#include <linux/cpumask.h> - -#ifdef CONFIG_CLOCK_THERMAL -/** - * clock_cooling_register - function to create clock cooling device. - * @dev: struct device pointer to the device used as clock cooling device. - * @clock_name: string containing the clock used as cooling mechanism. - */ -struct thermal_cooling_device * -clock_cooling_register(struct device *dev, const char *clock_name); - -/** - * clock_cooling_unregister - function to remove clock cooling device. - * @cdev: thermal cooling device pointer. - */ -void clock_cooling_unregister(struct thermal_cooling_device *cdev); - -unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, - unsigned long freq); -#else /* !CONFIG_CLOCK_THERMAL */ -static inline struct thermal_cooling_device * -clock_cooling_register(struct device *dev, const char *clock_name) -{ - return NULL; -} -static inline -void clock_cooling_unregister(struct thermal_cooling_device *cdev) -{ -} -static inline -unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev, - unsigned long freq) -{ - return THERMAL_CSTATE_INVALID; -} -#endif /* CONFIG_CLOCK_THERMAL */ - -#endif /* __CPU_COOLING_H__ */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 4b898cdbdf05..25a521d299c1 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -85,11 +85,13 @@ static inline unsigned long compact_gap(unsigned int order) #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; +extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos); + void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; +extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order); extern int fragmentation_index(struct zone *zone, unsigned int order); extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, @@ -97,7 +99,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int classzone_idx); + unsigned int alloc_flags, int highest_zoneidx); extern void defer_compaction(struct zone *zone, int order); extern bool compaction_deferred(struct zone *zone, int order); @@ -182,7 +184,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, extern int kcompactd_run(int nid); extern void kcompactd_stop(int nid); -extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); +extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); #else static inline void reset_isolation_suitable(pg_data_t *pgdat) @@ -190,7 +192,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) } static inline enum compact_result compaction_suitable(struct zone *zone, int order, - int alloc_flags, int classzone_idx) + int alloc_flags, int 
highest_zoneidx) { return COMPACT_SKIPPED; } @@ -232,7 +234,8 @@ static inline void kcompactd_stop(int nid) { } -static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) +static inline void wakeup_kcompactd(pg_data_t *pgdat, + int order, int highest_zoneidx) { } diff --git a/include/linux/compat.h b/include/linux/compat.h index 0480ba4db592..b354ce58966e 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -402,8 +402,15 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, unsigned long bitmap_size); long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, unsigned long bitmap_size); -int copy_siginfo_from_user32(kernel_siginfo_t *to, const struct compat_siginfo __user *from); -int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginfo_t *from); +void copy_siginfo_to_external32(struct compat_siginfo *to, + const struct kernel_siginfo *from); +int copy_siginfo_from_user32(kernel_siginfo_t *to, + const struct compat_siginfo __user *from); +int __copy_siginfo_to_user32(struct compat_siginfo __user *to, + const kernel_siginfo_t *from); +#ifndef copy_siginfo_to_user32 +#define copy_siginfo_to_user32 __copy_siginfo_to_user32 +#endif int get_compat_sigevent(struct sigevent *event, const struct compat_sigevent __user *u_event); @@ -422,11 +429,11 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, compat_sigset_t v; switch (_NSIG_WORDS) { case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; - /* fall through */ + fallthrough; case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2]; - /* fall through */ + fallthrough; case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1]; - /* fall through */ + fallthrough; case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0]; } return copy_to_user(compat, &v, size) ? 
-EFAULT : 0; @@ -730,10 +737,6 @@ asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg); asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len, unsigned flags, struct sockaddr __user *addr, int __user *addrlen); -asmlinkage long compat_sys_setsockopt(int fd, int level, int optname, - char __user *optval, unsigned int optlen); -asmlinkage long compat_sys_getsockopt(int fd, int level, int optname, - char __user *optval, int __user *optlen); asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags); asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, @@ -848,7 +851,6 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32); asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len, unsigned flags); -asmlinkage long compat_sys_sysctl(struct compat_sysctl_args __user *args); /* obsolete: fs/readdir.c */ asmlinkage long compat_sys_old_readdir(unsigned int fd, diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 333a6695a918..cee0c728d39a 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -5,8 +5,6 @@ /* Compiler specific definitions for Clang compiler */ -#define uninitialized_var(x) x = *(&(x)) - /* same as gcc, this was present in clang-2.6 so we can assume it works * with any version that can compile the kernel */ @@ -16,7 +14,7 @@ #define KASAN_ABI_VERSION 5 #if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer) -/* emulate gcc's __SANITIZE_ADDRESS__ flag */ +/* Emulate GCC's __SANITIZE_ADDRESS__ flag */ #define __SANITIZE_ADDRESS__ #define __no_sanitize_address \ __attribute__((no_sanitize("address", "hwaddress"))) @@ -24,8 +22,25 @@ #define __no_sanitize_address #endif +#if __has_feature(thread_sanitizer) +/* emulate gcc's __SANITIZE_THREAD__ flag */ +#define __SANITIZE_THREAD__ +#define __no_sanitize_thread \ + __attribute__((no_sanitize("thread"))) +#else +#define __no_sanitize_thread +#endif + +#if __has_feature(undefined_behavior_sanitizer) +/* GCC does not have __SANITIZE_UNDEFINED__ */ +#define __no_sanitize_undefined \ + __attribute__((no_sanitize("undefined"))) +#else +#define __no_sanitize_undefined +#endif + /* - * Not all versions of clang implement the the type-generic versions + * Not all versions of clang implement the type-generic versions * of the builtin overflow checkers. Fortunately, clang implements * __has_builtin allowing us to avoid awkward version * checks. Unfortunately, we don't know which version of gcc clang @@ -42,3 +57,7 @@ * compilers, like ICC. */ #define barrier() __asm__ __volatile__("" : : : "memory") + +#if __has_feature(shadow_call_stack) +# define __noscs __attribute__((__no_sanitize__("shadow-call-stack"))) +#endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index d7ee4c6bad48..7a3769040d7d 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -10,7 +10,8 @@ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) -#if GCC_VERSION < 40600 +/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */ +#if GCC_VERSION < 40900 # error Sorry, your compiler is too old - please upgrade it. 
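/* A worked example for the cutoff above: GCC_VERSION packs the version
 * decimally, so gcc 4.9.2 gives 4 * 10000 + 9 * 100 + 2 = 40902 and
 * passes, while gcc 4.8.5 gives 40805 and now hits the #error that
 * previously only fired below 40600 (gcc 4.6). */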
#endif @@ -58,12 +59,6 @@ (typeof(ptr)) (__ptr + (off)); \ }) -/* - * A trick to suppress uninitialized variable warning without generating any - * code - */ -#define uninitialized_var(x) x = x - #ifdef CONFIG_RETPOLINE #define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif @@ -126,9 +121,7 @@ #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ -#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ -#endif #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ #if GCC_VERSION >= 70000 @@ -145,6 +138,18 @@ #define __no_sanitize_address #endif +#if defined(__SANITIZE_THREAD__) && __has_attribute(__no_sanitize_thread__) +#define __no_sanitize_thread __attribute__((no_sanitize_thread)) +#else +#define __no_sanitize_thread +#endif + +#if __has_attribute(__no_sanitize_undefined__) +#define __no_sanitize_undefined __attribute__((no_sanitize_undefined)) +#else +#define __no_sanitize_undefined +#endif + #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 448c91bf543b..6810d80acb0b 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -177,116 +177,24 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) #endif -#include <uapi/linux/types.h> - -#define __READ_ONCE_SIZE \ -({ \ - switch (size) { \ - case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ - case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ - case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ - case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ - default: \ - barrier(); \ - __builtin_memcpy((void *)res, (const void *)p, size); \ - barrier(); \ - } \ -}) - -static __always_inline -void __read_once_size(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -#ifdef CONFIG_KASAN -/* - * We can't declare function 'inline' because __no_sanitize_address confilcts - * with inlining. Attempt to inline it may cause a build failure. - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 - * '__maybe_unused' allows us to avoid defined-but-not-used warnings. - */ -# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused -#else -# define __no_kasan_or_inline __always_inline -#endif - -static __no_kasan_or_inline -void __read_once_size_nocheck(const volatile void *p, void *res, int size) -{ - __READ_ONCE_SIZE; -} - -static __always_inline void __write_once_size(volatile void *p, void *res, int size) -{ - switch (size) { - case 1: *(volatile __u8 *)p = *(__u8 *)res; break; - case 2: *(volatile __u16 *)p = *(__u16 *)res; break; - case 4: *(volatile __u32 *)p = *(__u32 *)res; break; - case 8: *(volatile __u64 *)p = *(__u64 *)res; break; - default: - barrier(); - __builtin_memcpy((void *)p, (const void *)res, size); - barrier(); - } -} - -/* - * Prevent the compiler from merging or refetching reads or writes. The - * compiler is also forbidden from reordering successive instances of - * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some - * particular ordering. One way to make the compiler aware of ordering is to - * put the two invocations of READ_ONCE or WRITE_ONCE in different C - * statements. +/** + * data_race - mark an expression as containing intentional data races * - * These two macros will also work on aggregate data types like structs or - * unions. 
If the size of the accessed data type exceeds the word size of - * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will - * fall back to memcpy(). There's at least two memcpy()s: one for the - * __builtin_memcpy() and then one for the macro doing the copy of variable - * - '__u' allocated on the stack. + * This data_race() macro is useful for situations in which data races + * should be forgiven. One example is diagnostic code that accesses + * shared variables but is not a part of the core synchronization design. * - * Their two major use cases are: (1) Mediating communication between - * process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not fold, spindle, or otherwise - * mutilate accesses that either do not require ordering or that interact - * with an explicit memory barrier or atomic instruction that provides the - * required ordering. + * This macro *does not* affect normal code generation, but is a hint + * to tooling that data races here are to be ignored. */ -#include <asm/barrier.h> -#include <linux/kasan-checks.h> - -#define __READ_ONCE(x, check) \ +#define data_race(expr) \ ({ \ - union { typeof(x) __val; char __c[1]; } __u; \ - if (check) \ - __read_once_size(&(x), __u.__c, sizeof(x)); \ - else \ - __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ - smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ - __u.__val; \ -}) -#define READ_ONCE(x) __READ_ONCE(x, 1) - -/* - * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need - * to hide memory access from KASAN. - */ -#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) - -static __no_kasan_or_inline -unsigned long read_word_at_a_time(const void *addr) -{ - kasan_check_read(addr, 1); - return *(unsigned long *)addr; -} - -#define WRITE_ONCE(x, val) \ -({ \ - union { typeof(x) __val; char __c[1]; } __u = \ - { .__val = (__force typeof(x)) (val) }; \ - __write_once_size(&(x), __u.__c, sizeof(x)); \ - __u.__val; \ + __unqual_scalar_typeof(({ expr; })) __v = ({ \ + __kcsan_disable_current(); \ + expr; \ + }); \ + __kcsan_enable_current(); \ + __v; \ }) #endif /* __KERNEL__ */ @@ -312,47 +220,6 @@ static inline void *offset_to_ptr(const int *off) #endif /* __ASSEMBLY__ */ -/* Compile time object size, -1 for unknown */ -#ifndef __compiletime_object_size -# define __compiletime_object_size(obj) -1 -#endif -#ifndef __compiletime_warning -# define __compiletime_warning(message) -#endif -#ifndef __compiletime_error -# define __compiletime_error(message) -#endif - -#ifdef __OPTIMIZE__ -# define __compiletime_assert(condition, msg, prefix, suffix) \ - do { \ - extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (!(condition)) \ - prefix ## suffix(); \ - } while (0) -#else -# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) -#endif - -#define _compiletime_assert(condition, msg, prefix, suffix) \ - __compiletime_assert(condition, msg, prefix, suffix) - -/** - * compiletime_assert - break build and emit msg if condition is false - * @condition: a compile-time constant condition to check - * @msg: a message to emit if condition is false - * - * In tradition of POSIX assert, this macro will break the build if the - * supplied condition is *false*, emitting the supplied error message if the - * compiler has support to do so. 
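A short usage sketch for the data_race() macro added above; total_events is a made-up shared counter, and the point is only that the access is declared an intentional race that tooling such as KCSAN should ignore:

	/* diagnostic snapshot taken outside the locking design */
	unsigned long snapshot = data_race(total_events);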
- */ -#define compiletime_assert(condition, msg) \ - _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) - -#define compiletime_assert_atomic_type(t) \ - compiletime_assert(__native_word(t), \ - "Need native word sized stores/loads for atomicity.") - /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) @@ -362,4 +229,6 @@ static inline void *offset_to_ptr(const int *off) */ #define prevent_tail_call_optimization() mb() +#include <asm/rwonce.h> + #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index cdf016596659..ea7b756b1c8f 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -22,14 +22,8 @@ /* * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. - * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute + * In the meantime, to support gcc < 5, we implement __has_attribute * by hand. - * - * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ - * depending on the compiler used to build it; however, these attributes have - * no semantic effects for sparse, so it does not matter. Also note that, - * in order to avoid sparse's warnings, even the unsupported ones must be - * defined to 0. */ #ifndef __has_attribute # define __has_attribute(x) __GCC4_has_attribute_##x @@ -37,9 +31,11 @@ # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 +# define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) +# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9) # define __GCC4_has_attribute___fallthrough__ 0 #endif @@ -176,6 +172,18 @@ #define __mode(x) __attribute__((__mode__(x))) /* + * Optional: only supported since gcc >= 7 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/x86-Function-Attributes.html#index-no_005fcaller_005fsaved_005fregisters-function-attribute_002c-x86 + * clang: https://clang.llvm.org/docs/AttributeReference.html#no-caller-saved-registers + */ +#if __has_attribute(__no_caller_saved_registers__) +# define __no_caller_saved_registers __attribute__((__no_caller_saved_registers__)) +#else +# define __no_caller_saved_registers +#endif + +/* * Optional: not supported by clang * * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e970f97a7fcb..6e390d58a9f8 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -5,48 +5,54 @@ #ifndef __ASSEMBLY__ #ifdef __CHECKER__ -# define __user __attribute__((noderef, address_space(1))) +/* address spaces */ # define __kernel __attribute__((address_space(0))) -# define __safe __attribute__((safe)) -# define __force __attribute__((force)) -# define __nocast __attribute__((nocast)) -# define __iomem __attribute__((noderef, address_space(2))) +# define __user __attribute__((noderef, address_space(__user))) +# define __iomem __attribute__((noderef, address_space(__iomem))) +# define __percpu __attribute__((noderef, address_space(__percpu))) +# define __rcu __attribute__((noderef, address_space(__rcu))) +static inline void 
__chk_user_ptr(const volatile void __user *ptr) { } +static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } +/* context/locking */ # define __must_hold(x) __attribute__((context(x,1,1))) # define __acquires(x) __attribute__((context(x,0,1))) # define __releases(x) __attribute__((context(x,1,0))) # define __acquire(x) __context__(x,1) # define __release(x) __context__(x,-1) # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) -# define __rcu __attribute__((noderef, address_space(4))) +/* other */ +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __safe __attribute__((safe)) # define __private __attribute__((noderef)) -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); # define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) #else /* __CHECKER__ */ +/* address spaces */ +# define __kernel # ifdef STRUCTLEAK_PLUGIN -# define __user __attribute__((user)) +# define __user __attribute__((user)) # else # define __user # endif -# define __kernel -# define __safe -# define __force -# define __nocast # define __iomem -# define __chk_user_ptr(x) (void)0 -# define __chk_io_ptr(x) (void)0 -# define __builtin_warning(x, y...) (1) +# define __percpu +# define __rcu +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +/* context/locking */ # define __must_hold(x) # define __acquires(x) # define __releases(x) -# define __acquire(x) (void)0 -# define __release(x) (void)0 +# define __acquire(x) (void)0 +# define __release(x) (void)0 # define __cond_lock(x,c) (c) -# define __percpu -# define __rcu +/* other */ +# define __force +# define __nocast +# define __safe # define __private # define ACCESS_PRIVATE(p, member) ((p)->member) +# define __builtin_warning(x, y...) (1) #endif /* __CHECKER__ */ /* Indirect macros required for expanded argument pasting, eg. __LINE__. */ @@ -167,6 +173,40 @@ struct ftrace_likely_data { */ #define noinline_for_stack noinline +/* + * Sanitizer helper attributes: Because using __always_inline and + * __no_sanitize_* conflict, provide helper attributes that will either expand + * to __no_sanitize_* in compilation units where instrumentation is enabled + * (__SANITIZE_*__), or __always_inline in compilation units without + * instrumentation (__SANITIZE_*__ undefined). + */ +#ifdef __SANITIZE_ADDRESS__ +/* + * We can't declare function 'inline' because __no_sanitize_address conflicts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 
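A rough sketch of how the regrouped sparse annotations are consumed (illustrative; get_flag is a hypothetical function):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Under __CHECKER__, sparse verifies that __user pointers are never
 * dereferenced directly, only passed to accessors such as get_user(). */
static int get_flag(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return val;
}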
+ */ +# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused +# define __no_sanitize_or_inline __no_kasan_or_inline +#else +# define __no_kasan_or_inline __always_inline +#endif + +#define __no_kcsan __no_sanitize_thread +#ifdef __SANITIZE_THREAD__ +# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused +#endif + +#ifndef __no_sanitize_or_inline +#define __no_sanitize_or_inline __always_inline +#endif + +/* Section for code which can't be instrumented at all */ +#define noinstr \ + noinline notrace __attribute((__section__(".noinstr.text"))) \ + __no_kcsan __no_sanitize_address + #endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ @@ -193,6 +233,10 @@ struct ftrace_likely_data { # define randomized_struct_fields_end #endif +#ifndef __noscs +# define __noscs +#endif + #ifndef asm_volatile_goto #define asm_volatile_goto(x...) asm goto(x) #endif @@ -210,11 +254,74 @@ struct ftrace_likely_data { /* Are two types/vars the same type (ignoring qualifiers)? */ #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +/* + * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving + * non-scalar types unchanged. + */ +/* + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' + * is not type-compatible with 'signed char', and we define a separate case. + */ +#define __scalar_type_to_expr_cases(type) \ + unsigned type: (unsigned type)0, \ + signed type: (signed type)0 + +#define __unqual_scalar_typeof(x) typeof( \ + _Generic((x), \ + char: (char)0, \ + __scalar_type_to_expr_cases(char), \ + __scalar_type_to_expr_cases(short), \ + __scalar_type_to_expr_cases(int), \ + __scalar_type_to_expr_cases(long), \ + __scalar_type_to_expr_cases(long long), \ + default: (x))) + /* Is this type a native word size -- useful for atomic operations */ #define __native_word(t) \ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +/* Compile time object size, -1 for unknown */ +#ifndef __compiletime_object_size +# define __compiletime_object_size(obj) -1 +#endif +#ifndef __compiletime_warning +# define __compiletime_warning(message) +#endif +#ifndef __compiletime_error +# define __compiletime_error(message) +#endif + +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (!(condition)) \ + prefix ## suffix(); \ + } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) + +/** + * compiletime_assert - break build and emit msg if condition is false + * @condition: a compile-time constant condition to check + * @msg: a message to emit if condition is false + * + * In tradition of POSIX assert, this macro will break the build if the + * supplied condition is *false*, emitting the supplied error message if the + * compiler has support to do so. + */ +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + /* Helpers for emitting diagnostics in pragmas. 
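Two of the helpers gathered above, in one small sketch (illustrative only): __unqual_scalar_typeof() strips qualifiers from scalar expressions, and compiletime_assert() turns a false constant condition into a build error.

static void check_helpers(void)
{
	volatile int vcount = 0;

	/* The volatile qualifier is dropped: snapshot is a plain int,
	 * which is what lets READ_ONCE() return a non-volatile value. */
	__unqual_scalar_typeof(vcount) snapshot = vcount;

	/* Expands to nothing when true; fails the build when false. */
	compiletime_assert(__native_word(snapshot),
			   "need a native word sized snapshot");
	(void)snapshot;
}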
*/ #ifndef __diag #define __diag(string) diff --git a/include/linux/configfs.h b/include/linux/configfs.h index fa9490a8874c..2e8c69b43c64 100644 --- a/include/linux/configfs.h +++ b/include/linux/configfs.h @@ -13,7 +13,7 @@ * * configfs Copyright (C) 2005 Oracle. All rights reserved. * - * Please read Documentation/filesystems/configfs/configfs.txt before using + * Please read Documentation/filesystems/configfs.rst before using * the configfs interface, ESPECIALLY the parts about reference counts and * item destructors. */ diff --git a/include/linux/console.h b/include/linux/console.h index 7a140f4e5d0c..0670d3491e0e 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -24,17 +24,13 @@ struct module; struct tty_struct; struct notifier_block; -/* - * this is what the terminal answers to a ESC-Z or csi0c query. - */ -#define VT100ID "\033[?1;2c" -#define VT102ID "\033[?6c" - enum con_scroll { SM_UP, SM_DOWN, }; +enum vc_intensity; + /** * struct consw - callbacks for consoles * @@ -74,8 +70,9 @@ struct consw { void (*con_scrolldelta)(struct vc_data *vc, int lines); int (*con_set_origin)(struct vc_data *vc); void (*con_save_screen)(struct vc_data *vc); - u8 (*con_build_attr)(struct vc_data *vc, u8 color, u8 intensity, - u8 blink, u8 underline, u8 reverse, u8 italic); + u8 (*con_build_attr)(struct vc_data *vc, u8 color, + enum vc_intensity intensity, + bool blink, bool underline, bool reverse, bool italic); void (*con_invert_region)(struct vc_data *vc, u16 *p, int count); u16 *(*con_screen_pos)(struct vc_data *vc, int offset); unsigned long (*con_getxy)(struct vc_data *vc, unsigned long position, @@ -134,7 +131,7 @@ static inline int con_debug_leave(void) */ #define CON_PRINTBUFFER (1) -#define CON_CONSDEV (2) /* Last on the command line */ +#define CON_CONSDEV (2) /* Preferred console, /dev/console */ #define CON_ENABLED (4) #define CON_BOOT (8) #define CON_ANYTIME (16) /* Safe to call when cpu is offline */ diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 24d4c16e3ae0..153734816b49 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h @@ -21,6 +21,43 @@ struct uni_pagedir; struct uni_screen; #define NPAR 16 +#define VC_TABSTOPS_COUNT 256U + +enum vc_intensity { + VCI_HALF_BRIGHT, + VCI_NORMAL, + VCI_BOLD, + VCI_MASK = 0x3, +}; + +/** + * struct vc_state -- state of a VC + * @x: cursor's x-position + * @y: cursor's y-position + * @color: foreground & background colors + * @Gx_charset: what's G0/G1 slot set to (like GRAF_MAP, LAT1_MAP) + * @charset: what character set to use (0=G0 or 1=G1) + * @intensity: see enum vc_intensity for values + * @reverse: reversed foreground/background colors + * + * These members are defined separately from struct vc_data as we save & + * restore them at times. + */ +struct vc_state { + unsigned int x, y; + + unsigned char color; + + unsigned char Gx_charset[2]; + unsigned int charset : 1; + + /* attribute flags */ + enum vc_intensity intensity; + bool italic; + bool underline; + bool blink; + bool reverse; +}; /* * Example: vc_data of a console that was scrolled 3 lines down. 
@@ -57,6 +94,8 @@ struct uni_screen; struct vc_data { struct tty_port port; /* Upper level data */ + struct vc_state state, saved_state; + unsigned short vc_num; /* Console number */ unsigned int vc_cols; /* [#] Console size */ unsigned int vc_rows; @@ -73,8 +112,6 @@ struct vc_data { /* attributes for all characters on screen */ unsigned char vc_attr; /* Current attributes */ unsigned char vc_def_color; /* Default colors */ - unsigned char vc_color; /* Foreground & background */ - unsigned char vc_s_color; /* Saved foreground & background */ unsigned char vc_ulcolor; /* Color for underline mode */ unsigned char vc_itcolor; unsigned char vc_halfcolor; /* Color for half intensity mode */ @@ -82,8 +119,6 @@ struct vc_data { unsigned int vc_cursor_type; unsigned short vc_complement_mask; /* [#] Xor mask for mouse pointer */ unsigned short vc_s_complement_mask; /* Saved mouse pointer mask */ - unsigned int vc_x, vc_y; /* Cursor position */ - unsigned int vc_saved_x, vc_saved_y; unsigned long vc_pos; /* Cursor address */ /* fonts */ unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ @@ -98,8 +133,6 @@ struct vc_data { int vt_newvt; wait_queue_head_t paste_wait; /* mode flags */ - unsigned int vc_charset : 1; /* Character set G0 / G1 */ - unsigned int vc_s_charset : 1; /* Saved character set */ unsigned int vc_disp_ctrl : 1; /* Display chars < 32? */ unsigned int vc_toggle_meta : 1; /* Toggle high bit? */ unsigned int vc_decscnm : 1; /* Screen Mode */ @@ -107,17 +140,6 @@ struct vc_data { unsigned int vc_decawm : 1; /* Autowrap Mode */ unsigned int vc_deccm : 1; /* Cursor Visible */ unsigned int vc_decim : 1; /* Insert Mode */ - /* attribute flags */ - unsigned int vc_intensity : 2; /* 0=half-bright, 1=normal, 2=bold */ - unsigned int vc_italic:1; - unsigned int vc_underline : 1; - unsigned int vc_blink : 1; - unsigned int vc_reverse : 1; - unsigned int vc_s_intensity : 2; /* saved rendition */ - unsigned int vc_s_italic:1; - unsigned int vc_s_underline : 1; - unsigned int vc_s_blink : 1; - unsigned int vc_s_reverse : 1; /* misc */ unsigned int vc_priv : 3; unsigned int vc_need_wrap : 1; @@ -126,13 +148,9 @@ struct vc_data { unsigned char vc_utf : 1; /* Unicode UTF-8 encoding */ unsigned char vc_utf_count; int vc_utf_char; - unsigned int vc_tab_stop[8]; /* Tab stops. 256 columns. */ + DECLARE_BITMAP(vc_tab_stop, VC_TABSTOPS_COUNT); /* Tab stops. 256 columns. 
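With the per-console attributes split into struct vc_state and the tab stops held in a bitmap, save/restore and tab handling reduce to sketches like these (illustrative, not code from the patch):

#include <linux/bitops.h>
#include <linux/console_struct.h>

/* ESC 7 / ESC 8 style save and restore: one struct assignment copies
 * cursor position, colors, charsets and attribute flags at once. */
static void save_cur(struct vc_data *vc)
{
	vc->saved_state = vc->state;
}

static void restore_cur(struct vc_data *vc)
{
	vc->state = vc->saved_state;
}

/* Tab stops are now ordinary bitmap operations. */
static bool is_tab_stop(const struct vc_data *vc, unsigned int col)
{
	return col < VC_TABSTOPS_COUNT && test_bit(col, vc->vc_tab_stop);
}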
*/ unsigned char vc_palette[16*3]; /* Colour palette for VGA+ */ unsigned short * vc_translate; - unsigned char vc_G0_charset; - unsigned char vc_G1_charset; - unsigned char vc_saved_G0; - unsigned char vc_saved_G1; unsigned int vc_resize_user; /* resize request from user */ unsigned int vc_bell_pitch; /* Console bell pitch */ unsigned int vc_bell_duration; /* Console bell duration */ @@ -149,24 +167,29 @@ struct vc { struct work_struct SAK_work; /* might add scrmem, kbd at some time, - to have everything in one place - the disadvantage - would be that vc_cons etc can no longer be static */ + to have everything in one place */ }; extern struct vc vc_cons [MAX_NR_CONSOLES]; extern void vc_SAK(struct work_struct *work); -#define CUR_DEF 0 -#define CUR_NONE 1 -#define CUR_UNDERLINE 2 -#define CUR_LOWER_THIRD 3 -#define CUR_LOWER_HALF 4 -#define CUR_TWO_THIRDS 5 -#define CUR_BLOCK 6 -#define CUR_HWMASK 0x0f -#define CUR_SWMASK 0xfff0 - -#define CUR_DEFAULT CUR_UNDERLINE +#define CUR_MAKE(size, change, set) ((size) | ((change) << 8) | \ + ((set) << 16)) +#define CUR_SIZE(c) ((c) & 0x00000f) +# define CUR_DEF 0 +# define CUR_NONE 1 +# define CUR_UNDERLINE 2 +# define CUR_LOWER_THIRD 3 +# define CUR_LOWER_HALF 4 +# define CUR_TWO_THIRDS 5 +# define CUR_BLOCK 6 +#define CUR_SW 0x000010 +#define CUR_ALWAYS_BG 0x000020 +#define CUR_INVERT_FG_BG 0x000040 +#define CUR_FG 0x000700 +#define CUR_BG 0x007000 +#define CUR_CHANGE(c) ((c) & 0x00ff00) +#define CUR_SET(c) (((c) & 0xff0000) >> 8) bool con_is_visible(const struct vc_data *vc); diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 8150f5ac176c..d53cd331c4dd 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -5,6 +5,8 @@ #include <linux/sched.h> #include <linux/vtime.h> #include <linux/context_tracking_state.h> +#include <linux/instrumentation.h> + #include <asm/ptrace.h> @@ -33,13 +35,13 @@ static inline void user_exit(void) } /* Called with interrupts disabled. */ -static inline void user_enter_irqoff(void) +static __always_inline void user_enter_irqoff(void) { if (context_tracking_enabled()) __context_tracking_enter(CONTEXT_USER); } -static inline void user_exit_irqoff(void) +static __always_inline void user_exit_irqoff(void) { if (context_tracking_enabled()) __context_tracking_exit(CONTEXT_USER); @@ -75,7 +77,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) * is enabled. If context tracking is disabled, returns * CONTEXT_DISABLED. This should be used primarily for debugging. */ -static inline enum ctx_state ct_state(void) +static __always_inline enum ctx_state ct_state(void) { return context_tracking_enabled() ? this_cpu_read(context_tracking.state) : CONTEXT_DISABLED; @@ -101,12 +103,14 @@ static inline void context_tracking_init(void) { } #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN /* must be called with irqs disabled */ -static inline void guest_enter_irqoff(void) +static __always_inline void guest_enter_irqoff(void) { + instrumentation_begin(); if (vtime_accounting_enabled_this_cpu()) vtime_guest_enter(current); else current->flags |= PF_VCPU; + instrumentation_end(); if (context_tracking_enabled()) __context_tracking_enter(CONTEXT_GUEST); @@ -118,39 +122,48 @@ static inline void guest_enter_irqoff(void) * one time slice). Lets treat guest mode as quiescent state, just like * we do with user-mode execution. 
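The reworked cursor defines pack three fields into a single word. An illustration of the encoding (the field meanings are inferred from the masks above, an assumption rather than patch text):

/*
 * CUR_MAKE(size, change, set):
 *   bits  0-3   cursor size (CUR_DEF .. CUR_BLOCK)
 *   bits  8-15  attribute bits to toggle ("change")
 *   bits 16-23  attribute bits to set ("set")
 */
static unsigned int cur = CUR_MAKE(CUR_UNDERLINE, 0x10, 0x0f);

/* CUR_SIZE(cur) == CUR_UNDERLINE, CUR_CHANGE(cur) == 0x1000,
 * CUR_SET(cur) == 0x0f00. */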
*/ - if (!context_tracking_enabled_this_cpu()) + if (!context_tracking_enabled_this_cpu()) { + instrumentation_begin(); rcu_virt_note_context_switch(smp_processor_id()); + instrumentation_end(); + } } -static inline void guest_exit_irqoff(void) +static __always_inline void guest_exit_irqoff(void) { if (context_tracking_enabled()) __context_tracking_exit(CONTEXT_GUEST); + instrumentation_begin(); if (vtime_accounting_enabled_this_cpu()) vtime_guest_exit(current); else current->flags &= ~PF_VCPU; + instrumentation_end(); } #else -static inline void guest_enter_irqoff(void) +static __always_inline void guest_enter_irqoff(void) { /* * This is running in ioctl context so its safe * to assume that it's the stime pending cputime * to flush. */ + instrumentation_begin(); vtime_account_kernel(current); current->flags |= PF_VCPU; rcu_virt_note_context_switch(smp_processor_id()); + instrumentation_end(); } -static inline void guest_exit_irqoff(void) +static __always_inline void guest_exit_irqoff(void) { + instrumentation_begin(); /* Flush the guest cputime we spent on the guest */ vtime_account_kernel(current); current->flags &= ~PF_VCPU; + instrumentation_end(); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */ diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index e7fe6678b7ad..65a60d3313b0 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -26,12 +26,12 @@ struct context_tracking { extern struct static_key_false context_tracking_key; DECLARE_PER_CPU(struct context_tracking, context_tracking); -static inline bool context_tracking_enabled(void) +static __always_inline bool context_tracking_enabled(void) { return static_branch_unlikely(&context_tracking_key); } -static inline bool context_tracking_enabled_cpu(int cpu) +static __always_inline bool context_tracking_enabled_cpu(int cpu) { return context_tracking_enabled() && per_cpu(context_tracking.active, cpu); } @@ -41,7 +41,7 @@ static inline bool context_tracking_enabled_this_cpu(void) return context_tracking_enabled() && __this_cpu_read(context_tracking.active); } -static inline bool context_tracking_in_user(void) +static __always_inline bool context_tracking_in_user(void) { return __this_cpu_read(context_tracking.state) == CONTEXT_USER; } diff --git a/include/linux/coredump.h b/include/linux/coredump.h index abf4b4e65dbb..7a899e83835d 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -22,4 +22,8 @@ extern void do_coredump(const kernel_siginfo_t *siginfo); static inline void do_coredump(const kernel_siginfo_t *siginfo) {} #endif +extern int core_uses_pid; +extern char core_pattern[]; +extern unsigned int core_pipe_limit; + #endif /* _LINUX_COREDUMP_H */ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 193cc9dbf448..58fffdecdbfd 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -48,6 +48,7 @@ enum coresight_dev_subtype_sink { CORESIGHT_DEV_SUBTYPE_SINK_NONE, CORESIGHT_DEV_SUBTYPE_SINK_PORT, CORESIGHT_DEV_SUBTYPE_SINK_BUFFER, + CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM, }; enum coresight_dev_subtype_link { @@ -100,10 +101,12 @@ union coresight_dev_subtype { }; /** - * struct coresight_platform_data - data harvested from the DT specification - * @nr_inport: number of input ports for this component. - * @nr_outport: number of output ports for this component. 
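A sketch of the calling pattern the context-tracking helpers above expect, loosely modeled on a hypervisor vcpu run loop (illustrative; no such loop is part of this patch):

#include <linux/context_tracking.h>
#include <linux/irqflags.h>

static void run_vcpu_once(void)
{
	local_irq_disable();
	guest_enter_irqoff();	/* noinstr-safe: instrumentable work is
				 * fenced by instrumentation_begin()/end() */

	/* ... enter and run the guest ... */

	guest_exit_irqoff();
	local_irq_enable();
}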
- * @conns: Array of nr_outport connections from this component + * struct coresight_platform_data - data harvested from the firmware + * specification. + * + * @nr_inport: Number of elements for the input connections. + * @nr_outport: Number of elements for the output connections. + * @conns: Sparse array of nr_outport connections from this component. */ struct coresight_platform_data { int nr_inport; int nr_outport; @@ -140,12 +143,28 @@ struct coresight_desc { * @child_fwnode: remote component's fwnode handle. * @child_dev: a @coresight_device representation of the component connected to @outport. + * @link: Representation of the connection as a sysfs link. */ struct coresight_connection { int outport; int child_port; struct fwnode_handle *child_fwnode; struct coresight_device *child_dev; + struct coresight_sysfs_link *link; +}; + +/** + * struct coresight_sysfs_link - representation of a connection in sysfs. + * @orig: Originating (master) coresight device for the link. + * @orig_name: Name to use for the link orig->target. + * @target: Target (slave) coresight device for the link. + * @target_name: Name to use for the link target->orig. + */ +struct coresight_sysfs_link { + struct coresight_device *orig; + const char *orig_name; + struct coresight_device *target; + const char *target_name; }; /** @@ -161,10 +180,15 @@ struct coresight_connection { * @enable: 'true' if component is currently part of an active path. * @activated: 'true' only if a _sink_ has been activated. A sink can be * activated but not yet enabled. Enabling for a _sink_ - * appens when a source has been selected for that it. + * happens when a source has been selected and a path is enabled + * from source to that sink. * @ea: Device attribute for sink representation under PMU directory. + * @def_sink: cached reference to default sink found for this device. * @ect_dev: Associated cross trigger device. Not part of the trace data * path or connections. + * @nr_links: number of sysfs links created to other components from this + * device. These will appear in the "connections" group. + * @has_conns_grp: Have added a "connections" group for sysfs links.
*/ struct coresight_device { struct coresight_platform_data *pdata; @@ -178,8 +202,12 @@ struct coresight_device { /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ struct dev_ext_attribute *ea; + struct coresight_device *def_sink; /* cross trigger handling */ struct coresight_device *ect_dev; + /* sysfs links between components */ + int nr_links; + bool has_conns_grp; }; /* diff --git a/include/linux/cpu.h b/include/linux/cpu.h index beaed2dc269e..8aa84c052fdf 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -64,6 +64,7 @@ extern ssize_t cpu_show_tsx_async_abort(struct device *dev, char *buf); extern ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, @@ -144,18 +145,8 @@ static inline void get_online_cpus(void) { cpus_read_lock(); } static inline void put_online_cpus(void) { cpus_read_unlock(); } #ifdef CONFIG_PM_SLEEP_SMP -int __freeze_secondary_cpus(int primary, bool suspend); -static inline int freeze_secondary_cpus(int primary) -{ - return __freeze_secondary_cpus(primary, true); -} - -static inline int disable_nonboot_cpus(void) -{ - return __freeze_secondary_cpus(0, false); -} - -void enable_nonboot_cpus(void); +extern int freeze_secondary_cpus(int primary); +extern void thaw_secondary_cpus(void); static inline int suspend_disable_secondary_cpus(void) { @@ -168,12 +159,11 @@ static inline int suspend_disable_secondary_cpus(void) } static inline void suspend_enable_secondary_cpus(void) { - return enable_nonboot_cpus(); + return thaw_secondary_cpus(); } #else /* !CONFIG_PM_SLEEP_SMP */ -static inline int disable_nonboot_cpus(void) { return 0; } -static inline void enable_nonboot_cpus(void) {} +static inline void thaw_secondary_cpus(void) {} static inline int suspend_disable_secondary_cpus(void) { return 0; } static inline void suspend_enable_secondary_cpus(void) { } #endif /* !CONFIG_PM_SLEEP_SMP */ diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h index 65501d8f9778..a3bdc8a98f2c 100644 --- a/include/linux/cpu_cooling.h +++ b/include/linux/cpu_cooling.h @@ -63,18 +63,10 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) struct cpuidle_driver; #ifdef CONFIG_CPU_IDLE_THERMAL -int cpuidle_cooling_register(struct cpuidle_driver *drv); -int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv); +void cpuidle_cooling_register(struct cpuidle_driver *drv); #else /* CONFIG_CPU_IDLE_THERMAL */ -static inline int cpuidle_cooling_register(struct cpuidle_driver *drv) +static inline void cpuidle_cooling_register(struct cpuidle_driver *drv) { - return 0; -} -static inline int cpuidle_of_cooling_register(struct device_node *np, - struct cpuidle_driver *drv) -{ - return 0; } #endif /* CONFIG_CPU_IDLE_THERMAL */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index f7240251a949..a911e5d06845 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -127,7 +127,7 @@ struct cpufreq_policy { /* Cached frequency lookup from cpufreq_driver_resolve_freq. 
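A sketch of the renamed PM-sleep helpers in use (illustrative; mirrors the suspend_disable/enable wrappers shown above):

static int single_cpu_work(void)
{
	int error = freeze_secondary_cpus(0);	/* keep CPU 0 online */

	if (error)
		return error;

	/* ... work that must run with only the boot CPU up ... */

	thaw_secondary_cpus();			/* was enable_nonboot_cpus() */
	return 0;
}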
*/ unsigned int cached_target_freq; - int cached_resolved_idx; + unsigned int cached_resolved_idx; /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ @@ -330,7 +330,7 @@ struct cpufreq_driver { * * get_intermediate should return a stable intermediate frequency * platform wants to switch to and target_intermediate() should set CPU - * to to that frequency, before jumping to the frequency corresponding + * to that frequency, before jumping to the frequency corresponding * to 'index'. Core will take care of sending notifications and driver * doesn't have to handle them in target_intermediate() or * target_index(). @@ -367,7 +367,7 @@ struct cpufreq_driver { /* platform specific boost support code */ bool boost_enabled; - int (*set_boost)(int state); + int (*set_boost)(struct cpufreq_policy *policy, int state); }; /* flags */ @@ -576,6 +576,22 @@ unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy); int cpufreq_register_governor(struct cpufreq_governor *governor); void cpufreq_unregister_governor(struct cpufreq_governor *governor); +int cpufreq_start_governor(struct cpufreq_policy *policy); +void cpufreq_stop_governor(struct cpufreq_policy *policy); + +#define cpufreq_governor_init(__governor) \ +static int __init __governor##_init(void) \ +{ \ + return cpufreq_register_governor(&__governor); \ +} \ +core_initcall(__governor##_init) + +#define cpufreq_governor_exit(__governor) \ +static void __exit __governor##_exit(void) \ +{ \ + return cpufreq_unregister_governor(&__governor); \ +} \ +module_exit(__governor##_exit) struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); @@ -940,8 +956,8 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, case CPUFREQ_RELATION_C: return cpufreq_table_find_index_c(policy, target_freq); default: - pr_err("%s: Invalid relation: %d\n", __func__, relation); - return -EINVAL; + WARN_ON_ONCE(1); + return 0; } } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 77d70b633531..bf9181cef444 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -102,6 +102,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, + CPUHP_AP_IRQ_RISCV_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_MICROCODE_LOADER, @@ -131,6 +132,7 @@ enum cpuhp_state { CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_ARC_TIMER_STARTING, CPUHP_AP_RISCV_TIMER_STARTING, + CPUHP_AP_CLINT_TIMER_STARTING, CPUHP_AP_CSKY_TIMER_STARTING, CPUHP_AP_HYPERV_TIMER_STARTING, CPUHP_AP_KVM_STARTING, @@ -140,8 +142,8 @@ enum cpuhp_state { /* Must be the last timer callback */ CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, - CPUHP_AP_ARM_KVMPV_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, + CPUHP_AP_ARM_CORESIGHT_CTI_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, @@ -152,6 +154,7 @@ enum cpuhp_state { CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_IRQ_AFFINITY_ONLINE, + CPUHP_AP_BLK_MQ_ONLINE, CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS, CPUHP_AP_X86_INTEL_EPB_ONLINE, CPUHP_AP_PERF_ONLINE, @@ -178,6 +181,7 @@ enum cpuhp_state { CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE, + CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE, 
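The new cpufreq_governor_init()/cpufreq_governor_exit() helpers generate the module boilerplate that governors used to spell out by hand; a hypothetical governor would use them as follows:

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	/* ... governor callbacks ... */
};

cpufreq_governor_init(example_governor);
cpufreq_governor_exit(example_governor);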
CPUHP_AP_WATCHDOG_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_RCUTREE_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index ec2ef63771f0..6175c77bf25e 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -65,19 +65,24 @@ struct cpuidle_state { * CPUs execute ->enter_s2idle with the local tick or entire timekeeping * suspended, so it must not re-enable interrupts at any point (even * temporarily) or attempt to change states of clock event devices. + * + * This callback may point to the same function as ->enter if all of + * the above requirements are met by it. */ - void (*enter_s2idle) (struct cpuidle_device *dev, - struct cpuidle_driver *drv, - int index); + int (*enter_s2idle)(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index); }; /* Idle State Flags */ -#define CPUIDLE_FLAG_NONE (0x00) -#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ -#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ -#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ -#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ -#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ +#define CPUIDLE_FLAG_NONE (0x00) +#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ +#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ +#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ +#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */ +#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */ +#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */ +#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */ struct cpuidle_device_kobj; struct cpuidle_state_kobj; diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 525510a9f965..6594dbc34a37 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -38,6 +38,8 @@ phys_addr_t paddr_vmcoreinfo_note(void); #define VMCOREINFO_OSRELEASE(value) \ vmcoreinfo_append_str("OSRELEASE=%s\n", value) +#define VMCOREINFO_BUILD_ID(value) \ + vmcoreinfo_append_str("BUILD-ID=%s\n", value) #define VMCOREINFO_PAGESIZE(value) \ vmcoreinfo_append_str("PAGESIZE=%ld\n", value) #define VMCOREINFO_SYMBOL(name) \ @@ -64,6 +66,10 @@ extern unsigned char *vmcoreinfo_data; extern size_t vmcoreinfo_size; extern u32 *vmcoreinfo_note; +/* raw contents of kernel .notes section */ +extern const void __start_notes __weak; +extern const void __stop_notes __weak; + Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len); void final_note(Elf_Word *buf); diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 4664fc1871de..a5192b718dbe 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -5,9 +5,10 @@ #include <linux/kexec.h> #include <linux/proc_fs.h> #include <linux/elf.h> +#include <linux/pgtable.h> #include <uapi/linux/vmcore.h> -#include <asm/pgtable.h> /* for pgprot_t */ +#include <linux/pgtable.h> /* for pgprot_t */ #ifdef CONFIG_CRASH_DUMP #define ELFCORE_ADDR_MAX (-1ULL) @@ -97,8 +98,6 @@ extern void unregister_oldmem_pfn_is_ram(void); static inline bool is_kdump_kernel(void) { return 0; } #endif /* CONFIG_CRASH_DUMP */ -extern unsigned long saved_max_pfn; - /* Device Dump information to be filled by drivers */ struct vmcoredd_data { char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */ diff --git a/include/linux/crush/crush.h 
b/include/linux/crush/crush.h index 54741295c70b..2f811baf78d2 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -17,7 +17,7 @@ * The algorithm was originally described in detail in this paper * (although the algorithm has evolved somewhat since then): * - * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf + * https://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf * * LGPL2 */ @@ -87,7 +87,7 @@ struct crush_rule_mask { struct crush_rule { __u32 len; struct crush_rule_mask mask; - struct crush_rule_step steps[0]; + struct crush_rule_step steps[]; }; #define crush_rule_size(len) (sizeof(struct crush_rule) + \ @@ -301,6 +301,12 @@ struct crush_map { __u32 *choose_tries; #else + /* device/bucket type id -> type name (CrushWrapper::type_map) */ + struct rb_root type_names; + + /* device/bucket id -> name (CrushWrapper::name_map) */ + struct rb_root names; + /* CrushWrapper::choose_args */ struct rb_root choose_args; #endif @@ -342,4 +348,10 @@ struct crush_work { struct crush_work_bucket **work; /* Per-bucket working store */ }; +#ifdef __KERNEL__ +/* osdmap.c */ +void clear_crush_names(struct rb_root *root); +void clear_choose_args(struct crush_map *c); +#endif + #endif diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 763863dbc079..ef90e07c9635 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -16,9 +16,8 @@ #include <linux/kernel.h> #include <linux/list.h> #include <linux/bug.h> +#include <linux/refcount.h> #include <linux/slab.h> -#include <linux/string.h> -#include <linux/uaccess.h> #include <linux/completion.h> /* @@ -61,8 +60,8 @@ #define CRYPTO_ALG_ASYNC 0x00000080 /* - * Set this bit if and only if the algorithm requires another algorithm of - * the same type to handle corner cases. + * Set if the algorithm (or an algorithm which it uses) requires another + * algorithm of the same type to handle corner cases. */ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 @@ -102,6 +101,38 @@ #define CRYPTO_NOLOAD 0x00008000 /* + * The algorithm may allocate memory during request processing, i.e. during + * encryption, decryption, or hashing. Users can request an algorithm with this + * flag unset if they can't handle memory allocation failures. + * + * This flag is currently only implemented for algorithms of type "skcipher", + * "aead", "ahash", "shash", and "cipher". Algorithms of other types might not + * have this flag set even if they allocate memory. + * + * In some edge cases, algorithms can allocate memory regardless of this flag. + * To avoid these cases, users must obey the following usage constraints: + * skcipher: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - If the data were to be divided into chunks of size + * crypto_skcipher_walksize() (with any remainder going at the end), no + * chunk can cross a page boundary or a scatterlist element boundary. + * aead: + * - The IV buffer and all scatterlist elements must be aligned to the + * algorithm's alignmask. + * - The first scatterlist element must contain all the associated data, + * and its pages must be !PageHighMem. + * - If the plaintext/ciphertext were to be divided into chunks of size + * crypto_aead_walksize() (with the remainder going at the end), no chunk + * can cross a page boundary or a scatterlist element boundary. + * ahash: + * - The result buffer must be aligned to the algorithm's alignmask. + * - crypto_ahash_finup() must not be used unless the algorithm implements + * ->finup() natively. 
+ */ +#define CRYPTO_ALG_ALLOCATES_MEMORY 0x00010000 + +/* * Transform masks and values (for crt_flags). */ #define CRYPTO_TFM_NEED_KEY 0x00000001 @@ -595,6 +626,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask); struct crypto_tfm { u32 crt_flags; + + int node; void (*exit)(struct crypto_tfm *tfm); diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h deleted file mode 100644 index f6ba4c3e60d7..000000000000 --- a/include/linux/cryptohash.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __CRYPTOHASH_H -#define __CRYPTOHASH_H - -#include <uapi/linux/types.h> - -#define SHA_DIGEST_WORDS 5 -#define SHA_MESSAGE_BYTES (512 /*bits*/ / 8) -#define SHA_WORKSPACE_WORDS 16 - -void sha_init(__u32 *buf); -void sha_transform(__u32 *digest, const char *data, __u32 *W); - -#endif diff --git a/include/linux/dasd_mod.h b/include/linux/dasd_mod.h new file mode 100644 index 000000000000..14e6cf8c6267 --- /dev/null +++ b/include/linux/dasd_mod.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DASD_MOD_H +#define DASD_MOD_H + +#include <asm/dasd.h> + +struct gendisk; + +extern int dasd_biodasdinfo(struct gendisk *disk, dasd_information2_t *info); + +#endif diff --git a/include/linux/dax.h b/include/linux/dax.h index d7af5d243f24..43b39ab9de1a 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -5,7 +5,6 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/radix-tree.h> -#include <asm/pgtable.h> /* Flag for synchronous flush */ #define DAXDEV_F_SYNC (1UL << 0) @@ -59,6 +58,8 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev) { __set_dax_synchronous(dax_dev); } +bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, + int blocksize, sector_t start, sector_t len); /* * Check if given mapping is supported by the file / underlying device. 
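Per the documentation of the new flag, a user that cannot tolerate allocation failures keeps CRYPTO_ALG_ALLOCATES_MEMORY clear in the type bits and sets it in the mask; a sketch (the algorithm name is an arbitrary example):

#include <crypto/skcipher.h>

static struct crypto_skcipher *get_nonalloc_tfm(void)
{
	/* type = 0, mask = CRYPTO_ALG_ALLOCATES_MEMORY: only algorithms
	 * with the flag clear, i.e. ones that do not allocate while
	 * processing requests, will match. */
	return crypto_alloc_skcipher("cbc(aes)", 0,
				     CRYPTO_ALG_ALLOCATES_MEMORY);
}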
*/ @@ -105,6 +106,12 @@ static inline bool dax_synchronous(struct dax_device *dax_dev) static inline void set_dax_synchronous(struct dax_device *dax_dev) { } +static inline bool dax_supported(struct dax_device *dax_dev, + struct block_device *bdev, int blocksize, sector_t start, + sector_t len) +{ + return false; +} static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct dax_device *dax_dev) { @@ -190,14 +197,23 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) } #endif +#if IS_ENABLED(CONFIG_DAX) int dax_read_lock(void); void dax_read_unlock(int id); +#else +static inline int dax_read_lock(void) +{ + return 0; +} + +static inline void dax_read_unlock(int id) +{ +} +#endif /* CONFIG_DAX */ bool dax_alive(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn); -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c1488cc84fd9..65d975bf9390 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat; struct dentry { /* RCU lookup touched fields */ unsigned int d_flags; /* protected by d_lock */ - seqcount_t d_seq; /* per dentry seqlock */ + seqcount_spinlock_t d_seq; /* per dentry seqlock */ struct hlist_bl_node d_hash; /* lookup hash list */ struct dentry *d_parent; /* parent directory */ struct qstr d_name; @@ -177,6 +177,8 @@ struct dentry_operations { #define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. 
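The stubbed dax_read_lock()/dax_read_unlock() keep the usual access pattern compiling when CONFIG_DAX=n; the pattern itself looks roughly like this (illustrative):

#include <linux/dax.h>

static long peek_dax(struct dax_device *dax_dev, pgoff_t pgoff)
{
	void *kaddr;
	pfn_t pfn;
	long rc;
	int id;

	id = dax_read_lock();	/* pins the dax_device against teardown */
	rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	return rc;
}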
*/ +#define DCACHE_DONTCACHE 0x00000080 /* Purge from memory on final dput() */ + #define DCACHE_CANT_MOUNT 0x00000100 #define DCACHE_GENOCIDE 0x00000200 #define DCACHE_SHRINK_LIST 0x00000400 diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 257ab3c92cb8..e7e45f0cc7da 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -12,7 +12,7 @@ extern int debug_locks __read_mostly; extern int debug_locks_silent __read_mostly; -static inline int __debug_locks_off(void) +static __always_inline int __debug_locks_off(void) { return xchg(&debug_locks, 0); } diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 63cb3606dea7..851dd1f9a8a5 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -38,6 +38,11 @@ struct debugfs_regset32 { struct device *dev; /* Optional device for Runtime PM */ }; +struct debugfs_u32_array { + u32 *array; + u32 n_elements; +}; + extern struct dentry *arch_debugfs_dir; #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ @@ -136,7 +141,8 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, int nregs, void __iomem *base, char *prefix); void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, u32 elements); + struct dentry *parent, + struct debugfs_u32_array *array); struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name, struct dentry *parent, @@ -316,8 +322,8 @@ static inline bool debugfs_initialized(void) } static inline void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, - u32 elements) + struct dentry *parent, + struct debugfs_u32_array *array) { } diff --git a/include/linux/decompress/unzstd.h b/include/linux/decompress/unzstd.h new file mode 100644 index 000000000000..56d539ae880f --- /dev/null +++ b/include/linux/decompress/unzstd.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef LINUX_DECOMPRESS_UNZSTD_H +#define LINUX_DECOMPRESS_UNZSTD_H + +int unzstd(unsigned char *inbuf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), + unsigned char *output, + long *pos, + void (*error_fn)(char *x)); +#endif diff --git a/include/linux/delay.h b/include/linux/delay.h index 8e6828094c1e..1d0e2ce6b6d9 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -16,7 +16,7 @@ * 3. CPU clock rate changes. * * Please see this thread: - * http://lists.openwall.net/linux-kernel/2011/01/09/56 + * https://lists.openwall.net/linux-kernel/2011/01/09/56 */ #include <linux/kernel.h> @@ -65,4 +65,15 @@ static inline void ssleep(unsigned int seconds) msleep(seconds * 1000); } +/* see Documentation/timers/timers-howto.rst for the thresholds */ +static inline void fsleep(unsigned long usecs) +{ + if (usecs <= 10) + udelay(usecs); + else if (usecs <= 20000) + usleep_range(usecs, 2 * usecs); + else + msleep(DIV_ROUND_UP(usecs, 1000)); +} + #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index 5aad06b4ca7b..3028b644b4fb 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -109,7 +109,8 @@ void _dev_info(const struct device *dev, const char *fmt, ...) #define dev_info(dev, fmt, ...) \ _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define dev_dbg(dev, fmt, ...) 
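Given the thresholds in fsleep(), the mapping onto the underlying primitives works out as follows (annotations derived from the code above):

#include <linux/delay.h>

static void settle_hw(void)
{
	fsleep(5);	/* <= 10 us:  udelay(5) */
	fsleep(1000);	/* <= 20 ms:  usleep_range(1000, 2000) */
	fsleep(50000);	/* >  20 ms:  msleep(50) */
}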
\ dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__) #elif defined(DEBUG) @@ -181,7 +182,8 @@ do { \ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__) #define dev_info_ratelimited(dev, fmt, ...) \ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define dev_dbg_ratelimited(dev, fmt, ...) \ do { \ diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 57e871a559a9..12782fbb4c25 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -31,6 +31,13 @@ #define DEVFREQ_PRECHANGE (0) #define DEVFREQ_POSTCHANGE (1) +/* DEVFREQ work timers */ +enum devfreq_timer { + DEVFREQ_TIMER_DEFERRABLE = 0, + DEVFREQ_TIMER_DELAYED, + DEVFREQ_TIMER_NUM, +}; + struct devfreq; struct devfreq_governor; @@ -70,6 +77,7 @@ struct devfreq_dev_status { * @initial_freq: The operating frequency when devfreq_add_device() is * called. * @polling_ms: The polling interval in ms. 0 disables polling. + * @timer: Timer type is either deferrable or delayed timer. * @target: The device should set its operating frequency at * freq or lowest-upper-than-freq value. If freq is * higher than any operable frequency, set maximum. @@ -96,6 +104,7 @@ struct devfreq_dev_status { struct devfreq_dev_profile { unsigned long initial_freq; unsigned int polling_ms; + enum devfreq_timer timer; int (*target)(struct device *dev, unsigned long *freq, u32 flags); int (*get_dev_status)(struct device *dev, diff --git a/include/linux/devfreq_cooling.h b/include/linux/devfreq_cooling.h index 79a6e37a1d6f..9df2dfca68dd 100644 --- a/include/linux/devfreq_cooling.h +++ b/include/linux/devfreq_cooling.h @@ -1,17 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * devfreq_cooling: Thermal cooling device implementation for devices using * devfreq * * Copyright (C) 2014-2015 ARM Limited * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
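A driver opts into the new devfreq work-timer selection through its profile; a hypothetical profile using the delayed timer (field values are arbitrary):

static struct devfreq_dev_profile example_profile = {
	.initial_freq	= 400000000,	/* Hz */
	.polling_ms	= 100,
	/* DEVFREQ_TIMER_DEFERRABLE (0, the default) skips polling while
	 * the CPU is idle; DEVFREQ_TIMER_DELAYED polls unconditionally. */
	.timer		= DEVFREQ_TIMER_DELAYED,
	/* .target, .get_dev_status, ... as before */
};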
*/ #ifndef __DEVFREQ_COOLING_H__ diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index af48d9da3916..93096e524e43 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -322,16 +322,12 @@ struct dm_target { bool discards_supported:1; }; -/* Each target can link one of these into the table */ -struct dm_target_callbacks { - struct list_head list; - int (*congested_fn) (struct dm_target_callbacks *, int); -}; - void *dm_per_bio_data(struct bio *bio, size_t data_size); struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); unsigned dm_bio_get_target_bio_nr(const struct bio *bio); +u64 dm_start_time_ns_from_clone(struct bio *bio); + int dm_register_target(struct target_type *t); void dm_unregister_target(struct target_type *t); @@ -424,6 +420,7 @@ const char *dm_device_name(struct mapped_device *md); int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); struct gendisk *dm_disk(struct mapped_device *md); int dm_suspended(struct dm_target *ti); +int dm_post_suspending(struct dm_target *ti); int dm_noflush_suspending(struct dm_target *ti); void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); union map_info *dm_get_rq_mapinfo(struct request *rq); @@ -476,11 +473,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, sector_t start, sector_t len, char *params); /* - * Target_ctr should call this if it needs to add any callbacks. - */ -void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb); - -/* * Target can use this to set the table's type. * Can only ever be called from a target's ctr. * Useful for "hybrid" target (supports both bio-based @@ -557,13 +549,8 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size); #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) #define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) -#ifdef CONFIG_DM_DEBUG -#define DMDEBUG(fmt, ...) printk(KERN_DEBUG DM_FMT(fmt), ##__VA_ARGS__) +#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__) #define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) -#else -#define DMDEBUG(fmt, ...) no_printk(fmt, ##__VA_ARGS__) -#define DMDEBUG_LIMIT(fmt, ...) no_printk(fmt, ##__VA_ARGS__) -#endif #define DMEMIT(x...) sz += ((sz >= maxlen) ? 
\ 0 : scnprintf(result + sz, maxlen - sz, x)) diff --git a/include/linux/device.h b/include/linux/device.h index ac8e37cd716a..9e6ea8931a52 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -13,6 +13,7 @@ #define _DEVICE_H_ #include <linux/dev_printk.h> +#include <linux/energy_model.h> #include <linux/ioport.h> #include <linux/kobject.h> #include <linux/klist.h> @@ -128,8 +129,12 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, __ATTR_PREALLOC(_name, _mode, _show, _store) #define DEVICE_ATTR_RW(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RW(_name) +#define DEVICE_ATTR_ADMIN_RW(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RW_MODE(_name, 0600) #define DEVICE_ATTR_RO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_RO(_name) +#define DEVICE_ATTR_ADMIN_RO(_name) \ + struct device_attribute dev_attr_##_name = __ATTR_RO_MODE(_name, 0400) #define DEVICE_ATTR_WO(_name) \ struct device_attribute dev_attr_##_name = __ATTR_WO(_name) #define DEVICE_ULONG_ATTR(_name, _mode, _var) \ @@ -145,68 +150,66 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, struct device_attribute dev_attr_##_name = \ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) -extern int device_create_file(struct device *device, - const struct device_attribute *entry); -extern void device_remove_file(struct device *dev, - const struct device_attribute *attr); -extern bool device_remove_file_self(struct device *dev, - const struct device_attribute *attr); -extern int __must_check device_create_bin_file(struct device *dev, +int device_create_file(struct device *device, + const struct device_attribute *entry); +void device_remove_file(struct device *dev, + const struct device_attribute *attr); +bool device_remove_file_self(struct device *dev, + const struct device_attribute *attr); +int __must_check device_create_bin_file(struct device *dev, const struct bin_attribute *attr); -extern void device_remove_bin_file(struct device *dev, - const struct bin_attribute *attr); +void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr); /* device resource management */ typedef void (*dr_release_t)(struct device *dev, void *res); typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); #ifdef CONFIG_DEBUG_DEVRES -extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid, const char *name) __malloc; +void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, + int nid, const char *name) __malloc; #define devres_alloc(release, size, gfp) \ __devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release) #define devres_alloc_node(release, size, gfp, nid) \ __devres_alloc_node(release, size, gfp, nid, #release) #else -extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, - int nid) __malloc; +void *devres_alloc_node(dr_release_t release, size_t size, + gfp_t gfp, int nid) __malloc; static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp) { return devres_alloc_node(release, size, gfp, NUMA_NO_NODE); } #endif -extern void devres_for_each_res(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data, - void (*fn)(struct device *, void *, void *), - void *data); -extern void devres_free(void *res); -extern void devres_add(struct device *dev, void *res); -extern void *devres_find(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -extern void 
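The new DEVICE_ATTR_ADMIN_RW()/DEVICE_ATTR_ADMIN_RO() variants hard-code root-only modes; a hypothetical attribute (names are illustrative):

static ssize_t secret_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
}

static ssize_t secret_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	return count;
}

/* Like DEVICE_ATTR_RW(secret), but mode 0600 instead of 0644. */
static DEVICE_ATTR_ADMIN_RW(secret);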
*devres_get(struct device *dev, void *new_res, - dr_match_t match, void *match_data); -extern void *devres_remove(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -extern int devres_destroy(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); -extern int devres_release(struct device *dev, dr_release_t release, - dr_match_t match, void *match_data); +void devres_for_each_res(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data, + void (*fn)(struct device *, void *, void *), + void *data); +void devres_free(void *res); +void devres_add(struct device *dev, void *res); +void *devres_find(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +void *devres_get(struct device *dev, void *new_res, + dr_match_t match, void *match_data); +void *devres_remove(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +int devres_destroy(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); +int devres_release(struct device *dev, dr_release_t release, + dr_match_t match, void *match_data); /* devres group */ -extern void * __must_check devres_open_group(struct device *dev, void *id, - gfp_t gfp); -extern void devres_close_group(struct device *dev, void *id); -extern void devres_remove_group(struct device *dev, void *id); -extern int devres_release_group(struct device *dev, void *id); +void * __must_check devres_open_group(struct device *dev, void *id, gfp_t gfp); +void devres_close_group(struct device *dev, void *id); +void devres_remove_group(struct device *dev, void *id); +int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ -extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; -extern __printf(3, 0) -char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, - va_list ap) __malloc; -extern __printf(3, 4) -char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc; +void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; +__printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, + const char *fmt, va_list ap) __malloc; +__printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, + const char *fmt, ...) 
__malloc; static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); @@ -226,16 +229,14 @@ static inline void *devm_kcalloc(struct device *dev, { return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO); } -extern void devm_kfree(struct device *dev, const void *p); -extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; -extern const char *devm_kstrdup_const(struct device *dev, - const char *s, gfp_t gfp); -extern void *devm_kmemdup(struct device *dev, const void *src, size_t len, - gfp_t gfp); +void devm_kfree(struct device *dev, const void *p); +char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; +const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); +void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); -extern unsigned long devm_get_free_pages(struct device *dev, - gfp_t gfp_mask, unsigned int order); -extern void devm_free_pages(struct device *dev, unsigned long addr); +unsigned long devm_get_free_pages(struct device *dev, + gfp_t gfp_mask, unsigned int order); +void devm_free_pages(struct device *dev, unsigned long addr); void __iomem *devm_ioremap_resource(struct device *dev, const struct resource *res); @@ -387,34 +388,6 @@ enum device_link_state { #define DL_FLAG_SYNC_STATE_ONLY BIT(7) /** - * struct device_link - Device link representation. - * @supplier: The device on the supplier end of the link. - * @s_node: Hook to the supplier device's list of links to consumers. - * @consumer: The device on the consumer end of the link. - * @c_node: Hook to the consumer device's list of links to suppliers. - * @status: The state of the link (with respect to the presence of drivers). - * @flags: Link flags. - * @rpm_active: Whether or not the consumer device is runtime-PM-active. - * @kref: Count repeated addition of the same link. - * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. - * @supplier_preactivated: Supplier has been made active before consumer probe. - */ -struct device_link { - struct device *supplier; - struct list_head s_node; - struct device *consumer; - struct list_head c_node; - enum device_link_state status; - u32 flags; - refcount_t rpm_active; - struct kref kref; -#ifdef CONFIG_SRCU - struct rcu_head rcu_head; -#endif - bool supplier_preactivated; /* Owned by consumer probe. */ -}; - -/** * enum dl_dev_state - Device driver presence tracking information. * @DL_DEV_NO_DRIVER: There is no driver attached to the device. * @DL_DEV_PROBING: A driver is probing. @@ -433,7 +406,8 @@ enum dl_dev_state { * @suppliers: List of links to supplier devices. * @consumers: List of links to consumer devices. * @needs_suppliers: Hook to global list of devices waiting for suppliers. - * @defer_sync: Hook to global list of devices that have deferred sync_state. + * @defer_hook: Hook to global list of devices that have deferred sync_state or + * deferred fw_devlink. * @need_for_probe: If needs_suppliers is on a list, this indicates if the * suppliers are needed for probe or not. * @status: Driver status information. 
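The devres helpers above (now declared without the redundant extern) tie probe-time allocations to device lifetime; a typical sketch (struct example is hypothetical):

struct example {
	const char *label;
};

static int example_probe(struct device *dev)
{
	struct example *p;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->label = devm_kasprintf(dev, GFP_KERNEL, "ex-%d", 0);
	if (!p->label)
		return -ENOMEM;

	/* Both allocations are released automatically on unbind. */
	return 0;
}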
@@ -442,7 +416,7 @@ struct dev_links_info { struct list_head suppliers; struct list_head consumers; struct list_head needs_suppliers; - struct list_head defer_sync; + struct list_head defer_hook; bool need_for_probe; enum dl_dev_state status; }; @@ -480,6 +454,7 @@ struct dev_links_info { * @pm_domain: Provide callbacks that are executed during system suspend, * hibernation, system resume and during runtime PM transitions * along with subsystem-level and driver-level callbacks. + * @em_pd: device's energy model performance domain * @pins: For device pin management. * See Documentation/driver-api/pinctl.rst for details. * @msi_list: Hosts MSI descriptors @@ -523,6 +498,11 @@ struct dev_links_info { * sync_state() callback. * @dma_coherent: this particular device is dma coherent, even if the * architecture supports non-coherent devices. + * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the + * streaming DMA operations (->map_* / ->unmap_* / ->sync_*), + * and optionally (if the coherent mask is large enough) also + * for dma allocations. This flag is managed by the dma ops + * instance from ->dma_supported. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information @@ -559,6 +539,10 @@ struct device { struct dev_pm_info power; struct dev_pm_domain *pm_domain; +#ifdef CONFIG_ENERGY_MODEL + struct em_perf_domain *em_pd; +#endif + #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN struct irq_domain *msi_domain; #endif @@ -568,8 +552,9 @@ struct device { #ifdef CONFIG_GENERIC_MSI_IRQ struct list_head msi_list; #endif - +#ifdef CONFIG_DMA_OPS const struct dma_map_ops *dma_ops; +#endif u64 *dma_mask; /* dma mask (if dma'able device) */ u64 coherent_dma_mask;/* Like dma_mask, but for alloc_coherent mappings as @@ -622,6 +607,39 @@ struct device { defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) bool dma_coherent:1; #endif +#ifdef CONFIG_DMA_OPS_BYPASS + bool dma_ops_bypass : 1; +#endif +}; + +/** + * struct device_link - Device link representation. + * @supplier: The device on the supplier end of the link. + * @s_node: Hook to the supplier device's list of links to consumers. + * @consumer: The device on the consumer end of the link. + * @c_node: Hook to the consumer device's list of links to suppliers. + * @link_dev: device used to expose link details in sysfs + * @status: The state of the link (with respect to the presence of drivers). + * @flags: Link flags. + * @rpm_active: Whether or not the consumer device is runtime-PM-active. + * @kref: Count repeated addition of the same link. + * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. + * @supplier_preactivated: Supplier has been made active before consumer probe. + */ +struct device_link { + struct device *supplier; + struct list_head s_node; + struct device *consumer; + struct list_head c_node; + struct device link_dev; + enum device_link_state status; + u32 flags; + refcount_t rpm_active; + struct kref kref; +#ifdef CONFIG_SRCU + struct rcu_head rcu_head; +#endif + bool supplier_preactivated; /* Owned by consumer probe.
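Entries of the relocated struct device_link are created with device_link_add(); that API is not part of this hunk, so the sketch below is an assumption based on mainline device.h (names are illustrative):

static void link_devices(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/* device_link_add() is declared elsewhere in device.h, and
	 * DL_FLAG_STATELESS is a DL_FLAG_* bit not shown in this hunk. */
	link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	if (!link)
		pr_warn("failed to link devices\n");
}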
*/ }; static inline struct device *kobj_to_dev(struct kobject *kobj) @@ -651,8 +669,7 @@ static inline const char *dev_name(const struct device *dev) return kobject_name(&dev->kobj); } -extern __printf(2, 3) -int dev_set_name(struct device *dev, const char *name, ...); +__printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...); #ifdef CONFIG_NUMA static inline int dev_to_node(struct device *dev) @@ -809,39 +826,39 @@ static inline bool dev_has_sync_state(struct device *dev) /* * High level routines for use by the bus drivers */ -extern int __must_check device_register(struct device *dev); -extern void device_unregister(struct device *dev); -extern void device_initialize(struct device *dev); -extern int __must_check device_add(struct device *dev); -extern void device_del(struct device *dev); -extern int device_for_each_child(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); -extern int device_for_each_child_reverse(struct device *dev, void *data, - int (*fn)(struct device *dev, void *data)); -extern struct device *device_find_child(struct device *dev, void *data, - int (*match)(struct device *dev, void *data)); -extern struct device *device_find_child_by_name(struct device *parent, - const char *name); -extern int device_rename(struct device *dev, const char *new_name); -extern int device_move(struct device *dev, struct device *new_parent, - enum dpm_order dpm_order); -extern int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); -extern const char *device_get_devnode(struct device *dev, - umode_t *mode, kuid_t *uid, kgid_t *gid, - const char **tmp); +int __must_check device_register(struct device *dev); +void device_unregister(struct device *dev); +void device_initialize(struct device *dev); +int __must_check device_add(struct device *dev); +void device_del(struct device *dev); +int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +int device_for_each_child_reverse(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); +struct device *device_find_child(struct device *dev, void *data, + int (*match)(struct device *dev, void *data)); +struct device *device_find_child_by_name(struct device *parent, + const char *name); +int device_rename(struct device *dev, const char *new_name); +int device_move(struct device *dev, struct device *new_parent, + enum dpm_order dpm_order); +int device_change_owner(struct device *dev, kuid_t kuid, kgid_t kgid); +const char *device_get_devnode(struct device *dev, umode_t *mode, kuid_t *uid, + kgid_t *gid, const char **tmp); +int device_is_dependent(struct device *dev, void *target); static inline bool device_supports_offline(struct device *dev) { return dev->bus && dev->bus->offline && dev->bus->online; } -extern void lock_device_hotplug(void); -extern void unlock_device_hotplug(void); -extern int lock_device_hotplug_sysfs(void); -extern int device_offline(struct device *dev); -extern int device_online(struct device *dev); -extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); -extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +void lock_device_hotplug(void); +void unlock_device_hotplug(void); +int lock_device_hotplug_sysfs(void); +int device_offline(struct device *dev); +int device_online(struct device *dev); +void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); +void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode); void 
device_set_of_node_from_dev(struct device *dev, const struct device *dev2); static inline int dev_num_vf(struct device *dev) @@ -854,14 +871,13 @@ static inline int dev_num_vf(struct device *dev) /* * Root device objects for grouping under /sys/devices */ -extern struct device *__root_device_register(const char *name, - struct module *owner); +struct device *__root_device_register(const char *name, struct module *owner); /* This is a macro to avoid include problems with THIS_MODULE */ #define root_device_register(name) \ __root_device_register(name, THIS_MODULE) -extern void root_device_unregister(struct device *root); +void root_device_unregister(struct device *root); static inline void *dev_get_platdata(const struct device *dev) { @@ -872,37 +888,31 @@ static inline void *dev_get_platdata(const struct device *dev) * Manual binding of a device to driver. See drivers/base/bus.c * for information on use. */ -extern int __must_check device_bind_driver(struct device *dev); -extern void device_release_driver(struct device *dev); -extern int __must_check device_attach(struct device *dev); -extern int __must_check driver_attach(struct device_driver *drv); -extern void device_initial_probe(struct device *dev); -extern int __must_check device_reprobe(struct device *dev); +int __must_check device_bind_driver(struct device *dev); +void device_release_driver(struct device *dev); +int __must_check device_attach(struct device *dev); +int __must_check driver_attach(struct device_driver *drv); +void device_initial_probe(struct device *dev); +int __must_check device_reprobe(struct device *dev); -extern bool device_is_bound(struct device *dev); +bool device_is_bound(struct device *dev); /* * Easy functions for dynamically creating devices on the fly */ -extern __printf(5, 0) -struct device *device_create_vargs(struct class *cls, struct device *parent, - dev_t devt, void *drvdata, - const char *fmt, va_list vargs); -extern __printf(5, 6) -struct device *device_create(struct class *cls, struct device *parent, - dev_t devt, void *drvdata, - const char *fmt, ...); -extern __printf(6, 7) -struct device *device_create_with_groups(struct class *cls, - struct device *parent, dev_t devt, void *drvdata, - const struct attribute_group **groups, - const char *fmt, ...); -extern void device_destroy(struct class *cls, dev_t devt); - -extern int __must_check device_add_groups(struct device *dev, - const struct attribute_group **groups); -extern void device_remove_groups(struct device *dev, - const struct attribute_group **groups); +__printf(5, 6) struct device * +device_create(struct class *cls, struct device *parent, dev_t devt, + void *drvdata, const char *fmt, ...); +__printf(6, 7) struct device * +device_create_with_groups(struct class *cls, struct device *parent, dev_t devt, + void *drvdata, const struct attribute_group **groups, + const char *fmt, ...); +void device_destroy(struct class *cls, dev_t devt); + +int __must_check device_add_groups(struct device *dev, + const struct attribute_group **groups); +void device_remove_groups(struct device *dev, + const struct attribute_group **groups); static inline int __must_check device_add_group(struct device *dev, const struct attribute_group *grp) @@ -920,14 +930,14 @@ static inline void device_remove_group(struct device *dev, return device_remove_groups(dev, groups); } -extern int __must_check devm_device_add_groups(struct device *dev, +int __must_check devm_device_add_groups(struct device *dev, const struct attribute_group **groups); -extern void 
devm_device_remove_groups(struct device *dev, - const struct attribute_group **groups); -extern int __must_check devm_device_add_group(struct device *dev, - const struct attribute_group *grp); -extern void devm_device_remove_group(struct device *dev, - const struct attribute_group *grp); +void devm_device_remove_groups(struct device *dev, + const struct attribute_group **groups); +int __must_check devm_device_add_group(struct device *dev, + const struct attribute_group *grp); +void devm_device_remove_group(struct device *dev, + const struct attribute_group *grp); /* * Platform "fixup" functions - allow the platform to have their say @@ -944,21 +954,21 @@ extern int (*platform_notify_remove)(struct device *dev); * get_device - atomically increment the reference count for the device. * */ -extern struct device *get_device(struct device *dev); -extern void put_device(struct device *dev); -extern bool kill_device(struct device *dev); +struct device *get_device(struct device *dev); +void put_device(struct device *dev); +bool kill_device(struct device *dev); #ifdef CONFIG_DEVTMPFS -extern int devtmpfs_mount(void); +int devtmpfs_mount(void); #else static inline int devtmpfs_mount(void) { return 0; } #endif /* drivers/base/power/shutdown.c */ -extern void device_shutdown(void); +void device_shutdown(void); /* debugging and troubleshooting/diagnostic helpers. */ -extern const char *dev_driver_string(const struct device *dev); +const char *dev_driver_string(const struct device *dev); /* Device links interface. */ struct device_link *device_link_add(struct device *consumer, @@ -968,6 +978,9 @@ void device_link_remove(void *consumer, struct device *supplier); void device_links_supplier_sync_state_pause(void); void device_links_supplier_sync_state_resume(void); +extern __printf(3, 4) +int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); + /* Create alias, so I can be autoloaded. */ #define MODULE_ALIAS_CHARDEV(major,minor) \ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h index 9a72214496e5..d02f32b7514e 100644 --- a/include/linux/device_cgroup.h +++ b/include/linux/device_cgroup.h @@ -44,6 +44,9 @@ static inline int devcgroup_inode_mknod(int mode, dev_t dev) if (!S_ISBLK(mode) && !S_ISCHR(mode)) return 0; + if (S_ISCHR(mode) && dev == WHITEOUT_DEV) + return 0; + if (S_ISBLK(mode)) type = DEVCG_DEV_BLOCK; else diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h index 3c8b7d274bd9..29d255fdd5d6 100644 --- a/include/linux/dm-bufio.h +++ b/include/linux/dm-bufio.h @@ -119,6 +119,11 @@ int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c); int dm_bufio_issue_flush(struct dm_bufio_client *c); /* + * Send a discard request to the underlying device. + */ +int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count); + +/* * Like dm_bufio_release but also move the buffer to the new * block. dm_bufio_write_dirty_buffers is needed to commit the new block. */ @@ -132,6 +137,13 @@ void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block); void dm_bufio_forget(struct dm_bufio_client *c, sector_t block); /* + * Free the given range of buffers. + * This is just a hint, if the buffer is in use or dirty, this function + * does nothing. + */ +void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks); + +/* * Set the minimum number of buffers before cleanup happens. 
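Of the interface additions above, the new device.h helper dev_err_probe() deserves a usage note. Its conventional semantics, as introduced here, are: log the message at error level, stay quiet apart from a debug message when the error is -EPROBE_DEFER, and hand the error code back to the caller. A hedged sketch with a hypothetical driver and supply name, not taken from this patch set:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

static int foo_probe(struct platform_device *pdev)
{
	struct regulator *vcc;

	vcc = devm_regulator_get(&pdev->dev, "vcc");
	if (IS_ERR(vcc))
		/* One statement replaces the usual if/dev_err/return dance. */
		return dev_err_probe(&pdev->dev, PTR_ERR(vcc),
				     "failed to get vcc supply\n");

	return 0;
}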
*/ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 57bcef6f988a..a2ca294eaebe 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -311,6 +311,7 @@ struct dma_buf { void *vmap_ptr; const char *exp_name; const char *name; + spinlock_t name_lock; /* spinlock to protect name access */ struct module *owner; struct list_head list_node; void *priv; @@ -334,6 +335,14 @@ struct dma_buf { */ struct dma_buf_attach_ops { /** + * @allow_peer2peer: + * + * If this is set to true the importer must be able to handle peer + * resources without struct pages. + */ + bool allow_peer2peer; + + /** * @move_notify: [optional] notification that the DMA-buf is moving * * If this callback is provided the framework can avoid pinning the @@ -361,6 +370,7 @@ struct dma_buf_attach_ops { * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf. * @sgt: cached mapping. * @dir: direction of cached mapping. + * @peer2peer: true if the importer can handle peer resources without pages. * @priv: exporter specific attachment data. * @importer_ops: importer operations for this attachment, if provided * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held. @@ -381,6 +391,7 @@ struct dma_buf_attachment { struct list_head node; struct sg_table *sgt; enum dma_data_direction dir; + bool peer2peer; const struct dma_buf_attach_ops *importer_ops; void *importer_priv; void *priv; diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index 4208f94d93f7..7b3b04ba60f3 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h @@ -67,8 +67,6 @@ extern void debug_dma_sync_sg_for_device(struct device *dev, extern void debug_dma_dump_mappings(struct device *dev); -extern void debug_dma_assert_idle(struct page *page); - #else /* CONFIG_DMA_API_DEBUG */ static inline void dma_debug_add_bus(struct bus_type *bus) @@ -157,10 +155,6 @@ static inline void debug_dma_dump_mappings(struct device *dev) { } -static inline void debug_dma_assert_idle(struct page *page) -{ -} - #endif /* CONFIG_DMA_API_DEBUG */ #endif /* __DMA_DEBUG_H */ diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 24b8684aa21d..6e87225600ae 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -1,10 +1,16 @@ /* SPDX-License-Identifier: GPL-2.0 */ +/* + * Internals of the DMA direct mapping implementation. Only for use by the + * DMA mapping code and IOMMU drivers. 
+ */ #ifndef _LINUX_DMA_DIRECT_H #define _LINUX_DMA_DIRECT_H 1 #include <linux/dma-mapping.h> +#include <linux/dma-noncoherent.h> #include <linux/memblock.h> /* for min_low_pfn */ #include <linux/mem_encrypt.h> +#include <linux/swiotlb.h> extern unsigned int zone_dma_bits; @@ -75,8 +81,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs); -struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, - gfp_t gfp, unsigned long attrs); int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); @@ -85,4 +89,103 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); int dma_direct_supported(struct device *dev, u64 mask); +bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); +int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs); +dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +size_t dma_direct_max_mapping_size(struct device *dev); + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ + defined(CONFIG_SWIOTLB) +void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir); +#else +static inline void dma_direct_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ + defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ + defined(CONFIG_SWIOTLB) +void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs); +void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir); +#else +static inline void dma_direct_unmap_sg(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir, + unsigned long attrs) +{ +} +static inline void dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ +} +#endif + +static inline void dma_direct_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + phys_addr_t paddr = dma_to_phys(dev, addr); + + if (unlikely(is_swiotlb_buffer(paddr))) + swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(paddr, size, dir); +} + +static inline void dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + phys_addr_t paddr = dma_to_phys(dev, addr); + + if (!dev_is_dma_coherent(dev)) { + arch_sync_dma_for_cpu(paddr, size, dir); + arch_sync_dma_for_cpu_all(); + } + + if (unlikely(is_swiotlb_buffer(paddr))) + swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU); +} + +static inline dma_addr_t dma_direct_map_page(struct device *dev, + struct page *page, unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys = page_to_phys(page) + offset; + dma_addr_t dma_addr = phys_to_dma(dev, phys); + + if 
(unlikely(swiotlb_force == SWIOTLB_FORCE)) + return swiotlb_map(dev, phys, size, dir, attrs); + + if (unlikely(!dma_capable(dev, dma_addr, size, true))) { + if (swiotlb_force != SWIOTLB_NO_FORCE) + return swiotlb_map(dev, phys, size, dir, attrs); + + dev_WARN_ONCE(dev, 1, + "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", + &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); + return DMA_MAPPING_ERROR; + } + + if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + arch_sync_dma_for_device(phys, size, dir); + return dma_addr; +} + +static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + phys_addr_t phys = dma_to_phys(dev, addr); + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) + dma_direct_sync_single_for_cpu(dev, addr, size, dir); + + if (unlikely(is_swiotlb_buffer(phys))) + swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs); +} #endif /* _LINUX_DMA_DIRECT_H */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 3347c54f3a87..09e23adb351d 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -357,6 +357,19 @@ dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep) } while (1); } +#ifdef CONFIG_LOCKDEP +bool dma_fence_begin_signalling(void); +void dma_fence_end_signalling(bool cookie); +void __dma_fence_might_wait(void); +#else +static inline bool dma_fence_begin_signalling(void) +{ + return true; +} +static inline void dma_fence_end_signalling(bool cookie) {} +static inline void __dma_fence_might_wait(void) {} +#endif + int dma_fence_signal(struct dma_fence *fence); int dma_fence_signal_locked(struct dma_fence *fence); signed long dma_fence_default_wait(struct dma_fence *fence, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 330ad58fbf4d..52635e91143b 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -14,7 +14,7 @@ /** * List of possible attributes associated with a DMA mapping. The semantics - * of each attribute should be defined in Documentation/DMA-attributes.txt. + * of each attribute should be defined in Documentation/core-api/dma-attributes.rst. */ /* @@ -188,76 +188,10 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_DMA_DECLARE_COHERENT */ -static inline bool dma_is_direct(const struct dma_map_ops *ops) -{ - return likely(!ops); -} - -/* - * All the dma_direct_* declarations are here just for the indirect call bypass, - * and must not be used directly drivers! 
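The dma_fence_begin_signalling()/dma_fence_end_signalling() pair added to dma-fence.h above is a lockdep annotation for fence-signalling critical sections: code between the two calls must not wait on memory reclaim or on other fences. A usage sketch (hypothetical completion path, not from this patch set):

#include <linux/dma-fence.h>

static void foo_job_complete(struct dma_fence *fence)
{
	bool cookie;

	cookie = dma_fence_begin_signalling();
	/* Critical section: no blocking allocations, no fence waits. */
	dma_fence_signal(fence);
	dma_fence_end_signalling(cookie);
}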
- */ -dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs); -int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, - enum dma_data_direction dir, unsigned long attrs); -dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, - size_t size, enum dma_data_direction dir, unsigned long attrs); - -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ - defined(CONFIG_SWIOTLB) -void dma_direct_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir); -void dma_direct_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir); -#else -static inline void dma_direct_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_direct_sync_sg_for_device(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ -} -#endif - -#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \ - defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \ - defined(CONFIG_SWIOTLB) -void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs); -void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, - int nents, enum dma_data_direction dir, unsigned long attrs); -void dma_direct_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir); -void dma_direct_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir); -#else -static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ -} -static inline void dma_direct_unmap_sg(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir, - unsigned long attrs) -{ -} -static inline void dma_direct_sync_single_for_cpu(struct device *dev, - dma_addr_t addr, size_t size, enum dma_data_direction dir) -{ -} -static inline void dma_direct_sync_sg_for_cpu(struct device *dev, - struct scatterlist *sgl, int nents, enum dma_data_direction dir) -{ -} -#endif - -size_t dma_direct_max_mapping_size(struct device *dev); - #ifdef CONFIG_HAS_DMA #include <asm/dma-mapping.h> +#ifdef CONFIG_DMA_OPS static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { if (dev->dma_ops) @@ -270,165 +204,16 @@ static inline void set_dma_ops(struct device *dev, { dev->dma_ops = dma_ops; } - -static inline dma_addr_t dma_map_page_attrs(struct device *dev, - struct page *page, size_t offset, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr; - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); - else - addr = ops->map_page(dev, page, offset, size, dir, attrs); - debug_dma_map_page(dev, page, offset, size, dir, addr); - - return addr; -} - -static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_unmap_page(dev, addr, size, dir, attrs); - else if (ops->unmap_page) - ops->unmap_page(dev, addr, size, dir, attrs); - 
debug_dma_unmap_page(dev, addr, size, dir); -} - -/* - * dma_maps_sg_attrs returns 0 on error and > 0 on success. - * It should never return a value < 0. - */ -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - int ents; - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); - else - ents = ops->map_sg(dev, sg, nents, dir, attrs); - BUG_ON(ents < 0); - debug_dma_map_sg(dev, sg, nents, ents, dir); - - return ents; -} - -static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - debug_dma_unmap_sg(dev, sg, nents, dir); - if (dma_is_direct(ops)) - dma_direct_unmap_sg(dev, sg, nents, dir, attrs); - else if (ops->unmap_sg) - ops->unmap_sg(dev, sg, nents, dir, attrs); -} - -static inline dma_addr_t dma_map_resource(struct device *dev, - phys_addr_t phys_addr, - size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - dma_addr_t addr = DMA_MAPPING_ERROR; - - BUG_ON(!valid_dma_direction(dir)); - - /* Don't allow RAM to be mapped */ - if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) - return DMA_MAPPING_ERROR; - - if (dma_is_direct(ops)) - addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); - else if (ops->map_resource) - addr = ops->map_resource(dev, phys_addr, size, dir, attrs); - - debug_dma_map_resource(dev, phys_addr, size, dir, addr); - return addr; -} - -static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (!dma_is_direct(ops) && ops->unmap_resource) - ops->unmap_resource(dev, addr, size, dir, attrs); - debug_dma_unmap_resource(dev, addr, size, dir); -} - -static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, - size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_single_for_cpu(dev, addr, size, dir); - else if (ops->sync_single_for_cpu) - ops->sync_single_for_cpu(dev, addr, size, dir); - debug_dma_sync_single_for_cpu(dev, addr, size, dir); -} - -static inline void dma_sync_single_for_device(struct device *dev, - dma_addr_t addr, size_t size, - enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_single_for_device(dev, addr, size, dir); - else if (ops->sync_single_for_device) - ops->sync_single_for_device(dev, addr, size, dir); - debug_dma_sync_single_for_device(dev, addr, size, dir); -} - -static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) +#else /* CONFIG_DMA_OPS */ +static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir); - else if (ops->sync_sg_for_cpu) - ops->sync_sg_for_cpu(dev, sg, nelems, dir); - 
debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir); + return NULL; } - -static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nelems, enum dma_data_direction dir) +static inline void set_dma_ops(struct device *dev, + const struct dma_map_ops *dma_ops) { - const struct dma_map_ops *ops = get_dma_ops(dev); - - BUG_ON(!valid_dma_direction(dir)); - if (dma_is_direct(ops)) - dma_direct_sync_sg_for_device(dev, sg, nelems, dir); - else if (ops->sync_sg_for_device) - ops->sync_sg_for_device(dev, sg, nelems, dir); - debug_dma_sync_sg_for_device(dev, sg, nelems, dir); - } +#endif /* CONFIG_DMA_OPS */ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { @@ -439,6 +224,28 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return 0; } +dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, + size_t offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs); +int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs); +void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs); +dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir, unsigned long attrs); +void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, + enum dma_data_direction dir); +void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir); +void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); +void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir); void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs); void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, @@ -461,6 +268,7 @@ int dma_set_mask(struct device *dev, u64 mask); int dma_set_coherent_mask(struct device *dev, u64 mask); u64 dma_get_required_mask(struct device *dev); size_t dma_max_mapping_size(struct device *dev); +bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); unsigned long dma_get_merge_boundary(struct device *dev); #else /* CONFIG_HAS_DMA */ static inline dma_addr_t dma_map_page_attrs(struct device *dev, @@ -571,6 +379,10 @@ static inline size_t dma_max_mapping_size(struct device *dev) { return 0; } +static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) +{ + return false; +} static inline unsigned long dma_get_merge_boundary(struct device *dev) { return 0; @@ -609,6 +421,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev, return dma_sync_single_for_device(dev, addr + offset, size, dir); } +/** + * dma_map_sgtable - Map the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * @attrs: Optional DMA attributes for the map operation + * + * Maps a buffer described by a scatterlist stored in the given sg_table + * object for the @dir DMA operation by the @dev device. 
After success the + * ownership for the buffer is transferred to the DMA domain. One has to + * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the + * ownership of the buffer back to the CPU domain before touching the + * buffer by the CPU. + * + * Returns 0 on success or -EINVAL on error during mapping the buffer. + */ +static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + int nents; + + nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); + if (nents <= 0) + return -EINVAL; + sgt->nents = nents; + return 0; +} + +/** + * dma_unmap_sgtable - Unmap the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * @attrs: Optional DMA attributes for the unmap operation + * + * Unmaps a buffer described by a scatterlist stored in the given sg_table + * object for the @dir DMA operation by the @dev device. After this function + * the ownership of the buffer is transferred back to the CPU domain. + */ +static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); +} + +/** + * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * + * Performs the needed cache synchronization and moves the ownership of the + * buffer back to the CPU domain, so it is safe to perform any access to it + * by the CPU. Before doing any further DMA operations, one has to transfer + * the ownership of the buffer back to the DMA domain by calling the + * dma_sync_sgtable_for_device(). + */ +static inline void dma_sync_sgtable_for_cpu(struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir); +} + +/** + * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA + * @dev: The device for which to perform the DMA operation + * @sgt: The sg_table object describing the buffer + * @dir: DMA direction + * + * Performs the needed cache synchronization and moves the ownership of the + * buffer back to the DMA domain, so it is safe to perform the DMA operation. + * Once finished, one has to call dma_sync_sgtable_for_cpu() or + * dma_unmap_sgtable(). 
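Together, dma_map_sgtable(), dma_unmap_sgtable() and the two sync helpers documented here wrap the older *_sg_attrs() calls and keep the nents/orig_nents bookkeeping inside the sg_table. A minimal round trip, assuming a hypothetical driver with an already allocated and populated sg_table:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int foo_dma_roundtrip(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;	/* -EINVAL if the mapping failed */

	/* Reclaim the buffer for CPU writes, then hand it back. */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_TO_DEVICE);
	/* ... CPU updates the buffer here ... */
	dma_sync_sgtable_for_device(dev, sgt, DMA_TO_DEVICE);

	/* ... start the transfer and wait for it to finish ... */

	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}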
+ */ +static inline void dma_sync_sgtable_for_device(struct device *dev, + struct sg_table *sgt, enum dma_data_direction dir) +{ + dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir); +} + #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) @@ -630,9 +522,10 @@ void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot, const void *caller); void dma_common_free_remap(void *cpu_addr, size_t size); -bool dma_in_atomic_pool(void *start, size_t size); -void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags); -bool dma_free_from_pool(void *start, size_t size); +struct page *dma_alloc_from_pool(struct device *dev, size_t size, + void **cpu_addr, gfp_t flags, + bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t)); +bool dma_free_from_pool(struct device *dev, void *start, size_t size); int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h index b59f1b6be3e9..ca09a4e07d2d 100644 --- a/include/linux/dma-noncoherent.h +++ b/include/linux/dma-noncoherent.h @@ -3,7 +3,7 @@ #define _LINUX_DMA_NONCOHERENT_H 1 #include <linux/dma-mapping.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H #include <asm/dma-coherence.h> diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index ee50d10f052b..d44a77e8a7e3 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -46,8 +46,6 @@ #include <linux/rcupdate.h> extern struct ww_class reservation_ww_class; -extern struct lock_class_key reservation_seqcount_class; -extern const char reservation_seqcount_string[]; /** * struct dma_resv_list - a list of shared fences @@ -71,7 +69,7 @@ struct dma_resv_list { */ struct dma_resv { struct ww_mutex lock; - seqcount_t seq; + seqcount_ww_mutex_t seq; struct dma_fence __rcu *fence_excl; struct dma_resv_list __rcu *fence; diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h index 61d5cc0ad601..1962f75fa2d3 100644 --- a/include/linux/dma/k3-psil.h +++ b/include/linux/dma/k3-psil.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_PSIL_H_ diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h index caadbab1632a..5eb34ad973a7 100644 --- a/include/linux/dma/k3-udma-glue.h +++ b/include/linux/dma/k3-udma-glue.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef K3_UDMA_GLUE_H_ diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h index 579356ae447e..5896441ee604 100644 --- a/include/linux/dma/ti-cppi5.h +++ b/include/linux/dma/ti-cppi5.h @@ -2,7 +2,7 @@ /* * CPPI5 descriptors interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __TI_CPPI5_H__ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e1c03339918f..6fbd5c99e30c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -39,6 +39,7 @@ enum 
dma_status { DMA_IN_PROGRESS, DMA_PAUSED, DMA_ERROR, + DMA_OUT_OF_ORDER, }; /** @@ -61,6 +62,9 @@ enum dma_transaction_type { DMA_SLAVE, DMA_CYCLIC, DMA_INTERLEAVE, + DMA_COMPLETION_NO_ORDER, + DMA_REPEAT, + DMA_LOAD_EOT, /* last transaction type for creation of the capabilities mask */ DMA_TX_TYPE_END, }; @@ -153,7 +157,7 @@ struct dma_interleaved_template { bool dst_sgl; size_t numf; size_t frame_size; - struct data_chunk sgl[0]; + struct data_chunk sgl[]; }; /** @@ -162,7 +166,7 @@ struct dma_interleaved_template { * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of * this transaction * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client - acknowledges receipt, i.e. has has a chance to establish any dependency + acknowledges receipt, i.e. has a chance to establish any dependency * chains * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P @@ -176,6 +180,16 @@ struct dma_interleaved_template { * @DMA_PREP_CMD: tell the driver that the data passed to DMA API is command * data and the descriptor should be in different format from normal * data descriptors. + * @DMA_PREP_REPEAT: tell the driver that the transaction shall be automatically + * repeated when it ends until a transaction is issued on the same channel + * with the DMA_PREP_LOAD_EOT flag set. This flag is only applicable to + * interleaved transactions and is ignored for all other transaction types. + * @DMA_PREP_LOAD_EOT: tell the driver that the transaction shall replace any + * active repeated (as indicated by DMA_PREP_REPEAT) transaction when the + * repeated transaction ends. Not setting this flag when the previously queued + * transaction is marked with DMA_PREP_REPEAT will cause the new transaction + * to never be processed and stay in the issued queue forever. The flag is + * ignored if the previous transaction is not a repeated transaction. */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), @@ -186,6 +200,8 @@ enum dma_ctrl_flags { DMA_PREP_FENCE = (1 << 5), DMA_CTRL_REUSE = (1 << 6), DMA_PREP_CMD = (1 << 7), + DMA_PREP_REPEAT = (1 << 8), + DMA_PREP_LOAD_EOT = (1 << 9), }; /** @@ -465,7 +481,11 @@ enum dma_residue_granularity { * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT(<TYPE>) and same * should be checked by controller as well + * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer + * @max_sg_burst: max number of SG list entries executed in a single burst + * DMA transaction with no software intervention for reinitialization. + * Zero value means unlimited number of entries. * @cmd_pause: true, if pause is supported (i.e.
for reading residue or * for resume later) * @cmd_resume: true, if resume is supported @@ -478,7 +498,9 @@ struct dma_slave_caps { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 min_burst; u32 max_burst; + u32 max_sg_burst; bool cmd_pause; bool cmd_resume; bool cmd_terminate; @@ -535,7 +557,7 @@ struct dmaengine_unmap_data { struct device *dev; struct kref kref; size_t len; - dma_addr_t addr[0]; + dma_addr_t addr[]; }; struct dma_async_tx_descriptor; @@ -769,7 +791,11 @@ struct dma_filter { * Since the enum dma_transfer_direction is not defined as bit flag for * each type, the dma controller should set BIT(<TYPE>) and same * should be checked by controller as well + * @min_burst: min burst capability per-transfer * @max_burst: max burst capability per-transfer + * @max_sg_burst: max number of SG list entries executed in a single burst + * DMA transaction with no software intervention for reinitialization. + * Zero value means unlimited number of entries. * @residue_granularity: granularity of the transfer residue reported * by tx_status * @device_alloc_chan_resources: allocate resources and return the @@ -789,6 +815,8 @@ struct dma_filter { * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: Transfer expression in a generic way. * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address + * @device_caps: May be used to override the generic DMA slave capabilities + * with per-channel specific ones * @device_config: Pushes a new configuration to a channel, return 0 or an error * code * @device_pause: Pauses any transfer happening on a channel. Returns @@ -839,7 +867,9 @@ struct dma_device { u32 src_addr_widths; u32 dst_addr_widths; u32 directions; + u32 min_burst; u32 max_burst; + u32 max_sg_burst; bool descriptor_reuse; enum dma_residue_granularity residue_granularity; @@ -887,6 +917,8 @@ struct dma_device { struct dma_chan *chan, dma_addr_t dst, u64 data, unsigned long flags); + void (*device_caps)(struct dma_chan *chan, + struct dma_slave_caps *caps); int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); int (*device_pause)(struct dma_chan *chan); @@ -980,6 +1012,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma( { if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma) return NULL; + if (flags & DMA_PREP_REPEAT && + !test_bit(DMA_REPEAT, chan->device->cap_mask.bits)) + return NULL; return chan->device->device_prep_interleaved_dma(chan, xt, flags); } diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7bf029df737..65565820328a 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -48,6 +48,7 @@ struct dmar_drhd_unit { u16 segment; /* PCI domain */ u8 ignored:1; /* ignore drhd */ u8 include_all:1; + u8 gfx_dedicated:1; /* graphic dedicated */ struct intel_iommu *iommu; }; diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index c620d9139c28..311aa04e7520 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -12,11 +12,31 @@ struct sk_buff; struct net_device; struct packet_type; +struct dsa_8021q_crosschip_link { + struct list_head list; + int port; + struct dsa_switch *other_ds; + int other_port; + refcount_t refcount; +}; + +#define DSA_8021Q_N_SUBVLAN 8 + #if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, bool enabled); +int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int
other_port, + struct list_head *crosschip_links); + +int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int other_port, + struct list_head *crosschip_links); + struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); @@ -24,10 +44,16 @@ u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); +u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan); + int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); +u16 dsa_8021q_rx_subvlan(u16 vid); + +bool vid_is_dsa_8021q(u16 vid); + #else int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, @@ -36,6 +62,22 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index, return 0; } +int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int other_port, + struct list_head *crosschip_links) +{ + return 0; +} + +int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port, + struct dsa_switch *other_ds, + int other_port, + struct list_head *crosschip_links) +{ + return 0; +} + struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci) { @@ -52,6 +94,11 @@ u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port) return 0; } +u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan) +{ + return 0; +} + int dsa_8021q_rx_switch_id(u16 vid) { return 0; @@ -62,6 +109,16 @@ int dsa_8021q_rx_source_port(u16 vid) return 0; } +u16 dsa_8021q_rx_subvlan(u16 vid) +{ + return 0; +} + +bool vid_is_dsa_8021q(u16 vid) +{ + return false; +} + #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */ #endif /* _NET_DSA_8021Q_H */ diff --git a/include/linux/dsa/loop.h b/include/linux/dsa/loop.h new file mode 100644 index 000000000000..5a3470bcc8a7 --- /dev/null +++ b/include/linux/dsa/loop.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef DSA_LOOP_H +#define DSA_LOOP_H + +#include <linux/types.h> +#include <linux/ethtool.h> +#include <net/dsa.h> + +struct dsa_loop_vlan { + u16 members; + u16 untagged; +}; + +struct dsa_loop_mib_entry { + char name[ETH_GSTRING_LEN]; + unsigned long val; +}; + +enum dsa_loop_mib_counters { + DSA_LOOP_PHY_READ_OK, + DSA_LOOP_PHY_READ_ERR, + DSA_LOOP_PHY_WRITE_OK, + DSA_LOOP_PHY_WRITE_ERR, + __DSA_LOOP_CNT_MAX, +}; + +struct dsa_loop_port { + struct dsa_loop_mib_entry mib[__DSA_LOOP_CNT_MAX]; + u16 pvid; + int mtu; +}; + +struct dsa_loop_priv { + struct mii_bus *bus; + unsigned int port_base; + struct dsa_loop_vlan vlans[VLAN_N_VID]; + struct net_device *netdev; + struct dsa_loop_port ports[DSA_MAX_PORTS]; +}; + +#endif /* DSA_LOOP_H */ diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index fa5735c353cd..dd93735ae228 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -9,6 +9,7 @@ #include <linux/skbuff.h> #include <linux/etherdevice.h> +#include <linux/dsa/8021q.h> #include <net/dsa.h> #define ETH_P_SJA1105 ETH_P_DSA_8021Q @@ -53,12 +54,14 @@ struct sja1105_skb_cb { ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb)) struct sja1105_port { + u16 subvlan_map[DSA_8021Q_N_SUBVLAN]; struct kthread_worker *xmit_worker; struct kthread_work xmit_work; struct sk_buff_head xmit_queue; struct sja1105_tagger_data *data; struct dsa_port *dp; bool hwts_tx_en; + u16 xmit_tpid; }; #endif /* _NET_DSA_SJA1105_H */ diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 
4cf02ecd67de..8aa0c7c2608c 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -48,7 +48,11 @@ struct _ddebug { -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG_CORE) + +/* exported for module authors to exercise >control */ +int dynamic_debug_exec_queries(const char *query, const char *modname); + int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname); extern int ddebug_remove_module(const char *mod_name); @@ -80,7 +84,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ static struct _ddebug __aligned(8) \ - __attribute__((section("__verbose"))) name = { \ + __section(__dyndbg) name = { \ .modname = KBUILD_MODNAME, \ .function = __func__, \ .filename = __FILE__, \ @@ -105,7 +109,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, static_branch_unlikely(&descriptor.key.dd_key_false) #endif -#else /* !HAVE_JUMP_LABEL */ +#else /* !CONFIG_JUMP_LABEL */ #define _DPRINTK_KEY_INIT @@ -117,7 +121,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) #endif -#endif +#endif /* CONFIG_JUMP_LABEL */ #define __dynamic_func_call(id, fmt, func, ...) do { \ DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ @@ -133,7 +137,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, /* * "Factory macro" for generating a call to func, guarded by a - * DYNAMIC_DEBUG_BRANCH. The dynamic debug decriptor will be + * DYNAMIC_DEBUG_BRANCH. The dynamic debug descriptor will be * initialized using the fmt argument. The function will be called with * the address of the descriptor as first argument, followed by all * the varargs. Note that fmt is repeated in invocations of this @@ -172,10 +176,11 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, KERN_DEBUG, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii) -#else +#else /* !CONFIG_DYNAMIC_DEBUG_CORE */ #include <linux/string.h> #include <linux/errno.h> +#include <linux/printk.h> static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n, const char *modname) @@ -210,6 +215,13 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val, print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii); \ } while (0) -#endif + +static inline int dynamic_debug_exec_queries(const char *query, const char *modname) +{ + pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n"); + return 0; +} + +#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */ #endif diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 99fc06f0afc1..407c2f281b64 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -38,6 +38,8 @@ #ifdef __KERNEL__ +#include <asm/bug.h> + struct dql { /* Fields accessed in enqueue path (dql_queued) */ unsigned int num_queued; /* Total ever queued */ diff --git a/include/linux/edac.h b/include/linux/edac.h index 0f20b986b0ab..15e8f3d8a895 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -31,14 +31,6 @@ struct device; extern int edac_op_state; struct bus_type *edac_get_sysfs_subsys(void); -int edac_get_report_status(void); -void edac_set_report_status(int new); - -enum { - EDAC_REPORTING_ENABLED, - EDAC_REPORTING_DISABLED, - EDAC_REPORTING_FORCE -}; static inline void opstate_init(void) { @@ -603,27 +595,6 @@ struct mem_ctl_info { : NULL) /** - * edac_get_dimm_by_index - Get DIMM info at @index from a memory - * controller - * - * 
@mci: MC descriptor struct mem_ctl_info - * @index: index in the memory controller's DIMM array - * - * Returns a struct dimm_info * or NULL on failure. - */ -static inline struct dimm_info * -edac_get_dimm_by_index(struct mem_ctl_info *mci, int index) -{ - if (index < 0 || index >= mci->tot_dimms) - return NULL; - - if (WARN_ON_ONCE(mci->dimms[index]->idx != index)) - return NULL; - - return mci->dimms[index]; -} - -/** * edac_get_dimm - Get DIMM info from a memory controller given by * [layer0,layer1,layer2] position * @@ -658,6 +629,12 @@ static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci, if (mci->n_layers > 2) index = index * mci->layers[2].size + layer2; - return edac_get_dimm_by_index(mci, index); + if (index < 0 || index >= mci->tot_dimms) + return NULL; + + if (WARN_ON_ONCE(mci->dimms[index]->idx != index)) + return NULL; + + return mci->dimms[index]; } #endif /* _LINUX_EDAC_H_ */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 9430d01c0c3d..73db1ae04cef 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -39,6 +39,7 @@ #define EFI_WRITE_PROTECTED ( 8 | (1UL << (BITS_PER_LONG-1))) #define EFI_OUT_OF_RESOURCES ( 9 | (1UL << (BITS_PER_LONG-1))) #define EFI_NOT_FOUND (14 | (1UL << (BITS_PER_LONG-1))) +#define EFI_TIMEOUT (18 | (1UL << (BITS_PER_LONG-1))) #define EFI_ABORTED (21 | (1UL << (BITS_PER_LONG-1))) #define EFI_SECURITY_VIOLATION (26 | (1UL << (BITS_PER_LONG-1))) @@ -349,6 +350,7 @@ void efi_native_runtime_setup(void); * associated with ConOut */ #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) +#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989, 0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2) #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) @@ -379,8 +381,8 @@ typedef union { typedef struct { efi_guid_t guid; - const char *name; unsigned long *ptr; + const char name[16]; } efi_config_table_type_t; #define EFI_SYSTEM_TABLE_SIGNATURE ((u64)0x5453595320494249ULL) @@ -426,6 +428,7 @@ typedef struct { u32 tables; } efi_system_table_32_t; +typedef union efi_simple_text_input_protocol efi_simple_text_input_protocol_t; typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t; typedef union { @@ -434,7 +437,7 @@ typedef union { unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */ u32 fw_revision; unsigned long con_in_handle; - unsigned long con_in; + efi_simple_text_input_protocol_t *con_in; unsigned long con_out_handle; efi_simple_text_output_protocol_t *con_out; unsigned long stderr_handle; @@ -603,7 +606,11 @@ extern void *efi_get_pal_addr (void); extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); +#ifdef CONFIG_EFI extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#else +static inline void efi_enter_virtual_mode (void) {} +#endif #ifdef CONFIG_X86 extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, @@ -991,6 +998,7 @@ int efivars_register(struct efivars *efivars, int efivars_unregister(struct 
efivars *efivars); struct kobject *efivars_kobject(void); +int efivar_supports_writes(void); int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *), void *data, bool duplicates, struct list_head *head); @@ -1234,14 +1242,11 @@ struct linux_efi_memreserve { struct { phys_addr_t base; phys_addr_t size; - } entry[0]; + } entry[]; }; -#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ - (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) - #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ - / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + / sizeof_field(struct linux_efi_memreserve, entry[0]) ) void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size); diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h index 57eac5241303..a97a12bb2c9e 100644 --- a/include/linux/efi_embedded_fw.h +++ b/include/linux/efi_embedded_fw.h @@ -8,8 +8,8 @@ #define EFI_EMBEDDED_FW_PREFIX_LEN 8 /* - * This struct and efi_embedded_fw_list are private to the efi-embedded fw - * implementation they are in this header for use by lib/test_firmware.c only! + * This struct is private to the efi-embedded fw implementation. + * It is in this header for use by lib/test_firmware.c only! */ struct efi_embedded_fw { struct list_head list; @@ -18,8 +18,6 @@ struct efi_embedded_fw { size_t length; }; -extern struct list_head efi_embedded_fw_list; - /** * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw * code to search for embedded firmwares. diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 901bda352dcb..bacc40a0bdf3 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h @@ -39,7 +39,7 @@ struct elevator_mq_ops { void (*request_merged)(struct request_queue *, struct request *, enum elv_merge); void (*requests_merged)(struct request_queue *, struct request *, struct request *); void (*limit_depth)(unsigned int, struct blk_mq_alloc_data *); - void (*prepare_request)(struct request *, struct bio *bio); + void (*prepare_request)(struct request *); void (*finish_request)(struct request *); void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool); struct request *(*dispatch_request)(struct blk_mq_hw_ctx *); diff --git a/include/linux/elf.h b/include/linux/elf.h index e3649b3e970e..5d5b0321da0b 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -2,6 +2,7 @@ #ifndef _LINUX_ELF_H #define _LINUX_ELF_H +#include <linux/types.h> #include <asm/elf.h> #include <uapi/linux/elf.h> @@ -21,6 +22,9 @@ SET_PERSONALITY(ex) #endif +#define ELF32_GNU_PROPERTY_ALIGN 4 +#define ELF64_GNU_PROPERTY_ALIGN 8 + #if ELF_CLASS == ELFCLASS32 extern Elf32_Dyn _DYNAMIC []; @@ -31,6 +35,7 @@ extern Elf32_Dyn _DYNAMIC []; #define elf_addr_t Elf32_Off #define Elf_Half Elf32_Half #define Elf_Word Elf32_Word +#define ELF_GNU_PROPERTY_ALIGN ELF32_GNU_PROPERTY_ALIGN #else @@ -42,6 +47,7 @@ extern Elf64_Dyn _DYNAMIC []; #define elf_addr_t Elf64_Off #define Elf_Half Elf64_Half #define Elf_Word Elf64_Word +#define ELF_GNU_PROPERTY_ALIGN ELF64_GNU_PROPERTY_ALIGN #endif @@ -56,4 +62,41 @@ static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { extern int elf_coredump_extra_notes_size(void); extern int elf_coredump_extra_notes_write(struct coredump_params *cprm); #endif + +/* + * NT_GNU_PROPERTY_TYPE_0 header: + * Keep this internal until/unless there is an agreed UAPI definition.
+ * pr_type values (GNU_PROPERTY_*) are public and defined in the UAPI header. + */ +struct gnu_property { + u32 pr_type; + u32 pr_datasz; +}; + +struct arch_elf_state; + +#ifndef CONFIG_ARCH_USE_GNU_PROPERTY +static inline int arch_parse_elf_property(u32 type, const void *data, + size_t datasz, bool compat, + struct arch_elf_state *arch) +{ + return 0; +} +#else +extern int arch_parse_elf_property(u32 type, const void *data, size_t datasz, + bool compat, struct arch_elf_state *arch); +#endif + +#ifdef CONFIG_ARCH_HAVE_ELF_PROT +int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state, + bool has_interp, bool is_interp); +#else +static inline int arch_elf_adjust_prot(int prot, + const struct arch_elf_state *state, + bool has_interp, bool is_interp) +{ + return prot; +} +#endif + #endif /* _LINUX_ELF_H */ diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h index 7a37f4ce9fd2..10485f0c9740 100644 --- a/include/linux/elfcore-compat.h +++ b/include/linux/elfcore-compat.h @@ -32,10 +32,6 @@ struct compat_elf_prstatus struct old_timeval32 pr_cutime; struct old_timeval32 pr_cstime; compat_elf_gregset_t pr_reg; -#ifdef CONFIG_BINFMT_ELF_FDPIC - compat_ulong_t pr_exec_fdpic_loadmap; - compat_ulong_t pr_interp_fdpic_loadmap; -#endif compat_int_t pr_fpvalid; }; diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h index 4cad0e784b28..46c3d691f677 100644 --- a/include/linux/elfcore.h +++ b/include/linux/elfcore.h @@ -5,12 +5,65 @@ #include <linux/user.h> #include <linux/bug.h> #include <linux/sched/task_stack.h> - -#include <asm/elf.h> -#include <uapi/linux/elfcore.h> +#include <linux/types.h> +#include <linux/signal.h> +#include <linux/time.h> +#include <linux/ptrace.h> +#include <linux/fs.h> +#include <linux/elf.h> struct coredump_params; +struct elf_siginfo +{ + int si_signo; /* signal number */ + int si_code; /* extra code */ + int si_errno; /* errno */ +}; + +/* + * Definitions to generate Intel SVR4-like core files. + * These mostly have the same names as the SVR4 types with "elf_" + * tacked on the front to prevent clashes with linux definitions, + * and the typedef forms have been avoided. This is mostly like + * the SVR4 structure, but more Linuxy, with things that Linux does + * not support and which gdb doesn't really use excluded. + */ +struct elf_prstatus +{ + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned long pr_sigpend; /* Set of pending signals */ + unsigned long pr_sighold; /* Set of held signals */ + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; /* User time */ + struct __kernel_old_timeval pr_stime; /* System time */ + struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ + struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ + elf_gregset_t pr_reg; /* GP registers */ + int pr_fpvalid; /* True if math co-processor being used. 
*/ +}; + +#define ELF_PRARGSZ (80) /* Number of chars for args */ + +struct elf_prpsinfo +{ + char pr_state; /* numeric process state */ + char pr_sname; /* char for pr_state */ + char pr_zomb; /* zombie */ + char pr_nice; /* nice val */ + unsigned long pr_flag; /* flags */ + __kernel_uid_t pr_uid; + __kernel_gid_t pr_gid; + pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; + /* Lots missing */ + char pr_fname[16]; /* filename of executable */ + char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ +}; + static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) { #ifdef ELF_CORE_COPY_REGS @@ -51,13 +104,6 @@ static inline int elf_core_copy_task_fpregs(struct task_struct *t, struct pt_reg #endif } -#ifdef ELF_CORE_COPY_XFPREGS -static inline int elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) -{ - return ELF_CORE_COPY_XFPREGS(t, xfpu); -} -#endif - /* * These functions parameterize elf_core_dump in fs/binfmt_elf.c to write out * extra segments containing the gate DSO contents. Dumping its diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h index 594d4e78654f..69b136e4dd2b 100644 --- a/include/linux/elfnote.h +++ b/include/linux/elfnote.h @@ -54,7 +54,7 @@ .popsection ; #define ELFNOTE(name, type, desc) \ - ELFNOTE_START(name, type, "") \ + ELFNOTE_START(name, type, "a") \ desc ; \ ELFNOTE_END diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index ade6486a3382..b67a51c574b9 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -2,6 +2,7 @@ #ifndef _LINUX_ENERGY_MODEL_H #define _LINUX_ENERGY_MODEL_H #include <linux/cpumask.h> +#include <linux/device.h> #include <linux/jump_label.h> #include <linux/kobject.h> #include <linux/rcupdate.h> @@ -10,13 +11,15 @@ #include <linux/types.h> /** - * em_cap_state - Capacity state of a performance domain - * @frequency: The CPU frequency in KHz, for consistency with CPUFreq - * @power: The power consumed by 1 CPU at this level, in milli-watts + * em_perf_state - Performance state of a performance domain + * @frequency: The frequency in KHz, for consistency with CPUFreq + * @power: The power consumed at this level, in milli-watts (by 1 CPU or + by a registered device). It can be a total power: static and + dynamic. * @cost: The cost coefficient associated with this level, used during * energy calculation. Equal to: power * max_frequency / frequency */ -struct em_cap_state { +struct em_perf_state { unsigned long frequency; unsigned long power; unsigned long cost; @@ -24,102 +27,119 @@ struct em_cap_state { /** * em_perf_domain - Performance domain - * @table: List of capacity states, in ascending order - * @nr_cap_states: Number of capacity states - * @cpus: Cpumask covering the CPUs of the domain + * @table: List of performance states, in ascending order + * @nr_perf_states: Number of performance states + * @cpus: Cpumask covering the CPUs of the domain. It's here + * for performance reasons to avoid potential cache + * misses during energy calculations in the scheduler + * and simplifies allocating/freeing that memory region. * - * A "performance domain" represents a group of CPUs whose performance is - * scaled together. All CPUs of a performance domain must have the same - * micro-architecture. Performance domains often have a 1-to-1 mapping with - * CPUFreq policies. + * In case of CPU device, a "performance domain" represents a group of CPUs + * whose performance is scaled together. 
All CPUs of a performance domain + * must have the same micro-architecture. Performance domains often have + * a 1-to-1 mapping with CPUFreq policies. In case of other devices the @cpus + * field is unused. */ struct em_perf_domain { - struct em_cap_state *table; - int nr_cap_states; + struct em_perf_state *table; + int nr_perf_states; unsigned long cpus[]; }; +#define em_span_cpus(em) (to_cpumask((em)->cpus)) + #ifdef CONFIG_ENERGY_MODEL -#define EM_CPU_MAX_POWER 0xFFFF +#define EM_MAX_POWER 0xFFFF struct em_data_callback { /** - * active_power() - Provide power at the next capacity state of a CPU - * @power : Active power at the capacity state in mW (modified) - * @freq : Frequency at the capacity state in kHz (modified) - * @cpu : CPU for which we do this operation + * active_power() - Provide power at the next performance state of + * a device + * @power : Active power at the performance state in mW + * (modified) + * @freq : Frequency at the performance state in kHz + * (modified) + * @dev : Device for which we do this operation (can be a CPU) * - * active_power() must find the lowest capacity state of 'cpu' above + * active_power() must find the lowest performance state of 'dev' above * 'freq' and update 'power' and 'freq' to the matching active power * and frequency. * - * The power is the one of a single CPU in the domain, expressed in - * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER] - * range. + * In case of CPUs, the power is the one of a single CPU in the domain, + * expressed in milli-watts. It is expected to fit in the + * [0, EM_MAX_POWER] range. * * Return 0 on success. */ - int (*active_power)(unsigned long *power, unsigned long *freq, int cpu); + int (*active_power)(unsigned long *power, unsigned long *freq, + struct device *dev); }; #define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } struct em_perf_domain *em_cpu_get(int cpu); -int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, - struct em_data_callback *cb); +struct em_perf_domain *em_pd_get(struct device *dev); +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span); +void em_dev_unregister_perf_domain(struct device *dev); /** - * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain + * em_cpu_energy() - Estimates the energy consumed by the CPUs of a + performance domain * @pd : performance domain for which energy has to be estimated * @max_util : highest utilization among CPUs of the domain * @sum_util : sum of the utilization of all CPUs in the domain * + * This function must be used only for CPU devices. There is no validation + * that the EM is of CPU type or that its cpumask has been allocated; it is + * called from the scheduler code quite frequently, which is why there are + * no such checks. + * * Return: the sum of the energy consumed by the CPUs of the domain assuming * a capacity state satisfying the max utilization of the domain. */ -static inline unsigned long em_pd_energy(struct em_perf_domain *pd, +static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { unsigned long freq, scale_cpu; - struct em_cap_state *cs; + struct em_perf_state *ps; int i, cpu; /* - * In order to predict the capacity state, map the utilization of the - * most utilized CPU of the performance domain to a requested frequency, - * like schedutil.
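Concretely, the cost/energy arithmetic documented around em_perf_state and em_cpu_energy() works out as in this minimal sketch (all values invented for illustration):

static unsigned long example_em_estimate(void)
{
	unsigned long power = 300;		/* mW at this state */
	unsigned long freq = 1000000;		/* kHz */
	unsigned long max_freq = 1500000;	/* kHz, fastest state */
	unsigned long cost = power * max_freq / freq;	/* = 450 */
	unsigned long scale_cpu = 1024, sum_util = 512;

	/* pd_nrg = cost * \Sum cpu_util / scale_cpu = 450 * 512 / 1024 = 225 */
	return cost * sum_util / scale_cpu;
}
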
+ * In order to predict the performance state, map the utilization of + * the most utilized CPU of the performance domain to a requested + * frequency, like schedutil. */ cpu = cpumask_first(to_cpumask(pd->cpus)); scale_cpu = arch_scale_cpu_capacity(cpu); - cs = &pd->table[pd->nr_cap_states - 1]; - freq = map_util_freq(max_util, cs->frequency, scale_cpu); + ps = &pd->table[pd->nr_perf_states - 1]; + freq = map_util_freq(max_util, ps->frequency, scale_cpu); /* - * Find the lowest capacity state of the Energy Model above the + * Find the lowest performance state of the Energy Model above the * requested frequency. */ - for (i = 0; i < pd->nr_cap_states; i++) { - cs = &pd->table[i]; - if (cs->frequency >= freq) + for (i = 0; i < pd->nr_perf_states; i++) { + ps = &pd->table[i]; + if (ps->frequency >= freq) break; } /* - * The capacity of a CPU in the domain at that capacity state (cs) + * The capacity of a CPU in the domain at the performance state (ps) * can be computed as: * - * cs->freq * scale_cpu - * cs->cap = -------------------- (1) + * ps->freq * scale_cpu + * ps->cap = -------------------- (1) * cpu_max_freq * * So, ignoring the costs of idle states (which are not available in - * the EM), the energy consumed by this CPU at that capacity state is - * estimated as: + * the EM), the energy consumed by this CPU at that performance state + * is estimated as: * - * cs->power * cpu_util + * ps->power * cpu_util * cpu_nrg = -------------------- (2) - * cs->cap + * ps->cap * - * since 'cpu_util / cs->cap' represents its percentage of busy time. + * since 'cpu_util / ps->cap' represents its percentage of busy time. * * NOTE: Although the result of this computation actually is in * units of power, it can be manipulated as an energy value @@ -129,55 +149,64 @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd, * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product * of two terms: * - * cs->power * cpu_max_freq cpu_util + * ps->power * cpu_max_freq cpu_util * cpu_nrg = ------------------------ * --------- (3) - * cs->freq scale_cpu + * ps->freq scale_cpu * - * The first term is static, and is stored in the em_cap_state struct - * as 'cs->cost'. + * The first term is static, and is stored in the em_perf_state struct + * as 'ps->cost'. * * Since all CPUs of the domain have the same micro-architecture, they - * share the same 'cs->cost', and the same CPU capacity. Hence, the + * share the same 'ps->cost', and the same CPU capacity. Hence, the * total energy of the domain (which is the simple sum of the energy of * all of its CPUs) can be factorized as: * - * cs->cost * \Sum cpu_util + * ps->cost * \Sum cpu_util * pd_nrg = ------------------------ (4) * scale_cpu */ - return cs->cost * sum_util / scale_cpu; + return ps->cost * sum_util / scale_cpu; } /** - * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain + * em_pd_nr_perf_states() - Get the number of performance states of a perf. 
+ * domain * @pd : performance domain for which this must be done * - * Return: the number of capacity states in the performance domain table + * Return: the number of performance states in the performance domain table */ -static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) { - return pd->nr_cap_states; + return pd->nr_perf_states; } #else struct em_data_callback {}; #define EM_DATA_CB(_active_power_cb) { } -static inline int em_register_perf_domain(cpumask_t *span, - unsigned int nr_states, struct em_data_callback *cb) +static inline +int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, + struct em_data_callback *cb, cpumask_t *span) { return -EINVAL; } +static inline void em_dev_unregister_perf_domain(struct device *dev) +{ +} static inline struct em_perf_domain *em_cpu_get(int cpu) { return NULL; } -static inline unsigned long em_pd_energy(struct em_perf_domain *pd, +static inline struct em_perf_domain *em_pd_get(struct device *dev) +{ + return NULL; +} +static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, unsigned long max_util, unsigned long sum_util) { return 0; } -static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +static inline int em_pd_nr_perf_states(struct em_perf_domain *pd) { return 0; } diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h new file mode 100644 index 000000000000..159c7476b11b --- /dev/null +++ b/include/linux/entry-common.h @@ -0,0 +1,405 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ENTRYCOMMON_H +#define __LINUX_ENTRYCOMMON_H + +#include <linux/tracehook.h> +#include <linux/syscalls.h> +#include <linux/seccomp.h> +#include <linux/sched.h> + +#include <asm/entry-common.h> + +/* + * Define dummy _TIF work flags if not defined by the architecture or for + * disabled functionality. + */ +#ifndef _TIF_SYSCALL_EMU +# define _TIF_SYSCALL_EMU (0) +#endif + +#ifndef _TIF_SYSCALL_TRACEPOINT +# define _TIF_SYSCALL_TRACEPOINT (0) +#endif + +#ifndef _TIF_SECCOMP +# define _TIF_SECCOMP (0) +#endif + +#ifndef _TIF_SYSCALL_AUDIT +# define _TIF_SYSCALL_AUDIT (0) +#endif + +#ifndef _TIF_PATCH_PENDING +# define _TIF_PATCH_PENDING (0) +#endif + +#ifndef _TIF_UPROBE +# define _TIF_UPROBE (0) +#endif + +/* + * TIF flags handled in syscall_enter_from_usermode() + */ +#ifndef ARCH_SYSCALL_ENTER_WORK +# define ARCH_SYSCALL_ENTER_WORK (0) +#endif + +#define SYSCALL_ENTER_WORK \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_EMU | \ + ARCH_SYSCALL_ENTER_WORK) + +/* + * TIF flags handled in syscall_exit_to_user_mode() + */ +#ifndef ARCH_SYSCALL_EXIT_WORK +# define ARCH_SYSCALL_EXIT_WORK (0) +#endif + +#define SYSCALL_EXIT_WORK \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK) + +/* + * TIF flags handled in exit_to_user_mode_loop() + */ +#ifndef ARCH_EXIT_TO_USER_MODE_WORK +# define ARCH_EXIT_TO_USER_MODE_WORK (0) +#endif + +#define EXIT_TO_USER_MODE_WORK \ + (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | \ + ARCH_EXIT_TO_USER_MODE_WORK) + +/** + * arch_check_user_regs - Architecture specific sanity check for user mode regs + * @regs: Pointer to currents pt_regs + * + * Defaults to an empty implementation. Can be replaced by architecture + * specific code. + * + * Invoked from syscall_enter_from_user_mode() in the non-instrumentable + * section. 
Use __always_inline so the compiler cannot push it out of line + * and make it instrumentable. + */ +static __always_inline void arch_check_user_regs(struct pt_regs *regs); + +#ifndef arch_check_user_regs +static __always_inline void arch_check_user_regs(struct pt_regs *regs) {} +#endif + +/** + * arch_syscall_enter_tracehook - Wrapper around tracehook_report_syscall_entry() + * @regs: Pointer to current's pt_regs + * + * Returns: 0 on success or an error code to skip the syscall. + * + * Defaults to tracehook_report_syscall_entry(). Can be replaced by + * architecture specific code. + * + * Invoked from syscall_enter_from_user_mode() + */ +static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs); + +#ifndef arch_syscall_enter_tracehook +static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs) +{ + return tracehook_report_syscall_entry(regs); +} +#endif + +/** + * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts + * @regs: Pointer to current's pt_regs + * + * Invoked from architecture specific syscall entry code with interrupts + * disabled. The calling code has to be non-instrumentable. When the + * function returns all state is correct, interrupts are enabled and the + * subsequent functions can be instrumented. + * + * This handles lockdep, RCU (context tracking) and tracing state. + * + * This is invoked when there is extra architecture specific functionality + * to be done between establishing state and handling user mode entry work. + */ +void syscall_enter_from_user_mode_prepare(struct pt_regs *regs); + +/** + * syscall_enter_from_user_mode_work - Check and handle work before invoking + * a syscall + * @regs: Pointer to current's pt_regs + * @syscall: The syscall number + * + * Invoked from architecture specific syscall entry code with interrupts + * enabled after invoking syscall_enter_from_user_mode_prepare() and extra + * architecture specific work. + * + * Returns: The original or a modified syscall number + * + * If the returned syscall number is -1 then the syscall should be + * skipped. In this case the caller may invoke syscall_set_error() or + * syscall_set_return_value() first. If neither of those are called and -1 + * is returned, then the syscall will fail with ENOSYS. + * + * It handles the following work items: + * + * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(), + * __secure_computing(), trace_sys_enter() + * 2) Invocation of audit_syscall_entry() + */ +long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall); + +/** + * syscall_enter_from_user_mode - Establish state and check and handle work + * before invoking a syscall + * @regs: Pointer to current's pt_regs + * @syscall: The syscall number + * + * Invoked from architecture specific syscall entry code with interrupts + * disabled. The calling code has to be non-instrumentable. When the + * function returns all state is correct, interrupts are enabled and the + * subsequent functions can be instrumented. + * + * This is a combination of syscall_enter_from_user_mode_prepare() and + * syscall_enter_from_user_mode_work(). + * + * Returns: The original or a modified syscall number. See + * syscall_enter_from_user_mode_work() for further explanation.
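A sketch of how a hypothetical architecture's C entry point would use this function (the register field and dispatch helper are invented for illustration):

__visible noinstr void arch_syscall_entry(struct pt_regs *regs)
{
	long nr = syscall_enter_from_user_mode(regs, regs->syscallno);

	if (nr != -1)
		arch_invoke_syscall(regs, nr);	/* hypothetical dispatcher */
	/* nr == -1: syscall was skipped, return value already set */
	syscall_exit_to_user_mode(regs);
}
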
+ */ +long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall); + +/** + * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable() + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Defaults to local_irq_enable(). Can be supplied by architecture specific + * code. + */ +static inline void local_irq_enable_exit_to_user(unsigned long ti_work); + +#ifndef local_irq_enable_exit_to_user +static inline void local_irq_enable_exit_to_user(unsigned long ti_work) +{ + local_irq_enable(); +} +#endif + +/** + * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable() + * + * Defaults to local_irq_disable(). Can be supplied by architecture specific + * code. + */ +static inline void local_irq_disable_exit_to_user(void); + +#ifndef local_irq_disable_exit_to_user +static inline void local_irq_disable_exit_to_user(void) +{ + local_irq_disable(); +} +#endif + +/** + * arch_exit_to_user_mode_work - Architecture specific TIF work for exit + * to user mode. + * @regs: Pointer to current's pt_regs + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Invoked from exit_to_user_mode_loop() with interrupts enabled + * + * Defaults to NOOP. Can be supplied by architecture specific code. + */ +static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work); + +#ifndef arch_exit_to_user_mode_work +static inline void arch_exit_to_user_mode_work(struct pt_regs *regs, + unsigned long ti_work) +{ +} +#endif + +/** + * arch_exit_to_user_mode_prepare - Architecture specific preparation for + * exit to user mode. + * @regs: Pointer to current's pt_regs + * @ti_work: Cached TIF flags gathered with interrupts disabled + * + * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the last + * function before return. Defaults to NOOP. + */ +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work); + +#ifndef arch_exit_to_user_mode_prepare +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ +} +#endif + +/** + * arch_exit_to_user_mode - Architecture specific final work before + * exit to user mode. + * + * Invoked from exit_to_user_mode() with interrupts disabled as the last + * function before return. Defaults to NOOP. + * + * This needs to be __always_inline because it is non-instrumentable code + * invoked after context tracking switched to user mode. + * + * An architecture implementation must not do anything complex, no locking + * etc. The main purpose is for speculation mitigations. + */ +static __always_inline void arch_exit_to_user_mode(void); + +#ifndef arch_exit_to_user_mode +static __always_inline void arch_exit_to_user_mode(void) { } +#endif + +/** + * arch_do_signal - Architecture specific signal delivery function + * @regs: Pointer to current's pt_regs + * + * Invoked from exit_to_user_mode_loop(). + */ +void arch_do_signal(struct pt_regs *regs); + +/** + * arch_syscall_exit_tracehook - Wrapper around tracehook_report_syscall_exit() + * @regs: Pointer to current's pt_regs + * @step: Indicator for single step + * + * Defaults to tracehook_report_syscall_exit(). Can be replaced by + * architecture specific code.
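One way an architecture might supply the speculation-mitigation hook just described, in its asm/entry-common.h (a hypothetical override; defining the name as a macro makes the generic empty fallback stand down):

static __always_inline void arch_exit_to_user_mode(void)
{
	/* e.g. issue a branch-predictor/speculation barrier here */
	barrier();	/* placeholder for the real mitigation */
}
#define arch_exit_to_user_mode arch_exit_to_user_mode
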
+ * + * Invoked from syscall_exit_to_user_mode() + */ +static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step); + +#ifndef arch_syscall_exit_tracehook +static inline void arch_syscall_exit_tracehook(struct pt_regs *regs, bool step) +{ + tracehook_report_syscall_exit(regs, step); +} +#endif + +/** + * syscall_exit_to_user_mode - Handle work before returning to user mode + * @regs: Pointer to current's pt_regs + * + * Invoked with interrupts enabled and fully valid regs. Returns with all + * work handled, interrupts disabled such that the caller can immediately + * switch to user mode. Called from architecture specific syscall and ret + * from fork code. + * + * The call order is: + * 1) One-time syscall exit work: + * - rseq syscall exit + * - audit + * - syscall tracing + * - tracehook (single stepping) + * + * 2) Preparatory work + * - Exit to user mode loop (common TIF handling). Invokes + * arch_exit_to_user_mode_work() for architecture specific TIF work + * - Architecture specific one time work arch_exit_to_user_mode_prepare() + * - Address limit and lockdep checks + * + * 3) Final transition (lockdep, tracing, context tracking, RCU). Invokes + * arch_exit_to_user_mode() to handle e.g. speculation mitigations + */ +void syscall_exit_to_user_mode(struct pt_regs *regs); + +/** + * irqentry_enter_from_user_mode - Establish state before invoking the irq handler + * @regs: Pointer to current's pt_regs + * + * Invoked from architecture specific entry code with interrupts disabled. + * Can only be called when the interrupt entry came from user mode. The + * calling code must be non-instrumentable. When the function returns all + * state is correct and the subsequent functions can be instrumented. + * + * The function establishes state (lockdep, RCU (context tracking), tracing) + */ +void irqentry_enter_from_user_mode(struct pt_regs *regs); + +/** + * irqentry_exit_to_user_mode - Interrupt exit work + * @regs: Pointer to current's pt_regs + * + * Invoked with interrupts disabled and fully valid regs. Returns with all + * work handled, interrupts disabled such that the caller can immediately + * switch to user mode. Called from architecture specific interrupt + * handling code. + * + * The call order is #2 and #3 as described in syscall_exit_to_user_mode(). + * Interrupt exit is not invoking #1 which is the syscall specific one time + * work. + */ +void irqentry_exit_to_user_mode(struct pt_regs *regs); + +#ifndef irqentry_state +typedef struct irqentry_state { + bool exit_rcu; +} irqentry_state_t; +#endif + +/** + * irqentry_enter - Handle state tracking on ordinary interrupt entries + * @regs: Pointer to pt_regs of interrupted context + * + * Invokes: + * - lockdep irqflag state tracking as low level ASM entry disabled + * interrupts. + * + * - Context tracking if the exception hit user mode. + * + * - The hardirq tracer to keep the state consistent as low level ASM + * entry disabled interrupts. + * + * As a precondition, this requires that the entry came from user mode, + * idle, or a kernel context in which RCU is watching. + * + * For kernel mode entries RCU handling is done conditionally. If RCU is + * watching then the only RCU requirement is to check whether the tick has + * to be restarted. If RCU is not watching then rcu_irq_enter() has to be + * invoked on entry and rcu_irq_exit() on exit.
+ * + * Avoiding the rcu_irq_enter/exit() calls is an optimization but also + * solves the problem of kernel mode pagefaults which can schedule, which + * is not possible after invoking rcu_irq_enter() without undoing it. + * + * For user mode entries irqentry_enter_from_user_mode() is invoked to + * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit + * would not be possible. + * + * Returns: An opaque object that must be passed to irqentry_exit() + */ +irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs); + +/** + * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt + * + * Conditional reschedule with additional sanity checks. + */ +void irqentry_exit_cond_resched(void); + +/** + * irqentry_exit - Handle return from exception that used irqentry_enter() + * @regs: Pointer to pt_regs (exception entry regs) + * @state: Return value from matching call to irqentry_enter() + * + * Depending on the return target (kernel/user) this runs the necessary + * preemption and work checks if possible and required and returns to + * the caller with interrupts disabled and no further work pending. + * + * This is the last action before returning to the low level ASM code which + * just needs to return to the appropriate context. + * + * Counterpart to irqentry_enter(). + */ +void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state); + +#endif diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h new file mode 100644 index 000000000000..0cef17afb41a --- /dev/null +++ b/include/linux/entry-kvm.h @@ -0,0 +1,80 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_ENTRYKVM_H +#define __LINUX_ENTRYKVM_H + +#include <linux/entry-common.h> + +/* Transfer to guest mode work */ +#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK + +#ifndef ARCH_XFER_TO_GUEST_MODE_WORK +# define ARCH_XFER_TO_GUEST_MODE_WORK (0) +#endif + +#define XFER_TO_GUEST_MODE_WORK \ + (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ + _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK) + +struct kvm_vcpu; + +/** + * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest + * mode work handling function. + * @vcpu: Pointer to current's VCPU data + * @ti_work: Cached TIF flags gathered in xfer_to_guest_mode_handle_work() + * + * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be + * replaced by architecture specific code. + */ +static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu, + unsigned long ti_work); + +#ifndef arch_xfer_to_guest_mode_handle_work +static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu, + unsigned long ti_work) +{ + return 0; +} +#endif + +/** + * xfer_to_guest_mode_handle_work - Check and handle pending work which needs + * to be handled before going to guest mode + * @vcpu: Pointer to current's VCPU data + * + * Returns: 0 or an error code + */ +int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu); + +/** + * __xfer_to_guest_mode_work_pending - Check if work is pending + * + * Returns: True if work pending, False otherwise. + * + * Bare variant of xfer_to_guest_mode_work_pending(). Can be called from + * interrupt enabled code for racy quick checks with care.
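A sketch of the intended pairing of these helpers in a hypothetical vcpu run loop (error handling trimmed):

static int example_vcpu_run(struct kvm_vcpu *vcpu)
{
	for (;;) {
		int r = xfer_to_guest_mode_handle_work(vcpu);

		if (r)
			return r;	/* e.g. -EINTR, back to userspace */

		local_irq_disable();
		if (xfer_to_guest_mode_work_pending()) {
			/* work arrived after the check: handle it first */
			local_irq_enable();
			continue;
		}
		/* ... enter the guest here ... */
		local_irq_enable();
	}
}
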
+ */ +static inline bool __xfer_to_guest_mode_work_pending(void) +{ + unsigned long ti_work = READ_ONCE(current_thread_info()->flags); + + return !!(ti_work & XFER_TO_GUEST_MODE_WORK); +} + +/** + * xfer_to_guest_mode_work_pending - Check if work is pending which needs to be + * handled before returning to guest mode + * + * Returns: True if work pending, False otherwise. + * + * Has to be invoked with interrupts disabled before the transition to + * guest mode. + */ +static inline bool xfer_to_guest_mode_work_pending(void) +{ + lockdep_assert_irqs_disabled(); + return __xfer_to_guest_mode_work_pending(); +} +#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ + +#endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 8801f1f986e5..2e5debc0373c 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -20,6 +20,7 @@ #include <linux/if_ether.h> #include <linux/netdevice.h> #include <linux/random.h> +#include <linux/crc32.h> #include <asm/unaligned.h> #include <asm/bitsperlong.h> @@ -266,6 +267,17 @@ static inline void eth_hw_addr_random(struct net_device *dev) } /** + * eth_hw_addr_crc - Calculate CRC from netdev_hw_addr + * @ha: pointer to hardware address + * + * Calculate CRC from a hardware address as basis for filter hashes. + */ +static inline u32 eth_hw_addr_crc(struct netdev_hw_addr *ha) +{ + return ether_crc(ETH_ALEN, ha->addr); +} + +/** * ether_addr_copy - Copy an Ethernet address * @dst: Pointer to a six-byte array Ethernet address destination * @src: Pointer to a six-byte array Ethernet address source diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index a23b26eab479..969a80211df6 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -86,6 +86,22 @@ struct net_device; u32 ethtool_op_get_link(struct net_device *dev); int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); + +/** + * struct ethtool_link_ext_state_info - link extended state and substate. + */ +struct ethtool_link_ext_state_info { + enum ethtool_link_ext_state link_ext_state; + union { + enum ethtool_link_ext_substate_autoneg autoneg; + enum ethtool_link_ext_substate_link_training link_training; + enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch; + enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity; + enum ethtool_link_ext_substate_cable_issue cable_issue; + u8 __link_ext_substate; + }; +}; + /** * ethtool_rxfh_indir_default - get default value for RX flow hash indirection * @index: Index in RX flow hash indirection table @@ -245,6 +261,11 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * @get_link: Report whether physical link is up. Will only be called if * the netdev is up. Should usually be set to ethtool_op_get_link(), * which uses netif_carrier_ok(). + * @get_link_ext_state: Report link extended state. Should set link_ext_state and + * link_ext_substate (link_ext_substate of 0 means link_ext_substate is unknown, + * do not attach ext_substate attribute to netlink message). If link_ext_state + * and link_ext_substate are unknown, return -ENODATA. If not implemented, + * link_ext_state and link_ext_substate will not be sent to userspace. * @get_eeprom: Read data from the device EEPROM. * Should fill in the magic field. Don't need to check len for zero * or wraparound. 
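A common driver-side use of eth_hw_addr_crc() above is deriving a multicast hash-table index; many MACs take the top six bits of the CRC for a 64-bin filter (illustrative sketch, not tied to any particular driver):

static u32 example_mc_hash_bin(struct netdev_hw_addr *ha)
{
	return eth_hw_addr_crc(ha) >> 26;	/* bin 0..63 */
}
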
Fill in the data argument with the eeprom values @@ -384,6 +405,8 @@ struct ethtool_ops { void (*set_msglevel)(struct net_device *, u32); int (*nway_reset)(struct net_device *); u32 (*get_link)(struct net_device *); + int (*get_link_ext_state)(struct net_device *, + struct ethtool_link_ext_state_info *); int (*get_eeprom_len)(struct net_device *); int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); @@ -479,5 +502,37 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd, u32 *dev_speed, u8 *dev_duplex); +struct netlink_ext_ack; +struct phy_device; +struct phy_tdr_config; + +/** + * struct ethtool_phy_ops - Optional PHY device options + * @get_sset_count: Get number of strings that @get_strings will write. + * @get_strings: Return a set of strings that describe the requested objects. + * @get_stats: Return extended statistics about the PHY device. + * @start_cable_test: Start a cable test + * @start_cable_test_tdr: Start a Time Domain Reflectometry cable test + * + * All operations are optional (i.e. the function pointer may be set to %NULL) + * and callers must take this into account. Callers must hold the RTNL lock. + */ +struct ethtool_phy_ops { + int (*get_sset_count)(struct phy_device *dev); + int (*get_strings)(struct phy_device *dev, u8 *data); + int (*get_stats)(struct phy_device *dev, + struct ethtool_stats *stats, u64 *data); + int (*start_cable_test)(struct phy_device *phydev, + struct netlink_ext_ack *extack); + int (*start_cable_test_tdr)(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config); +}; + +/** + * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton + * @ops: Ethtool PHY operations to set + */ +void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops); #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h index d01b77887f82..1e7bf78cb382 100644 --- a/include/linux/ethtool_netlink.h +++ b/include/linux/ethtool_netlink.h @@ -14,4 +14,58 @@ enum ethtool_multicast_groups { ETHNL_MCGRP_MONITOR, }; +struct phy_device; + +#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK) +int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd); +void ethnl_cable_test_free(struct phy_device *phydev); +void ethnl_cable_test_finished(struct phy_device *phydev); +int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result); +int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm); +int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV); +int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV); +int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last, + u32 step); +#else +static inline int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd) +{ + return -EOPNOTSUPP; +} + +static inline void ethnl_cable_test_free(struct phy_device *phydev) +{ +} + +static inline void ethnl_cable_test_finished(struct phy_device *phydev) +{ +} +static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, + u8 result) +{ + return -EOPNOTSUPP; +} + +static inline int ethnl_cable_test_fault_length(struct phy_device *phydev, + u8 pair, u32 cm) +{ + return -EOPNOTSUPP; +} + +static inline int ethnl_cable_test_amplitude(struct phy_device *phydev, + u8 pair, s16 mV) +{ + return -EOPNOTSUPP; +} + +static inline int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV) +{ + return -EOPNOTSUPP; +} + +static inline int
ethnl_cable_test_step(struct phy_device *phydev, u32 first, + u32 last, u32 step) +{ + return -EOPNOTSUPP; +} +#endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */ #endif /* _LINUX_ETHTOOL_NETLINK_H_ */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index d896b8657085..3ceb72b67a7a 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -178,7 +178,7 @@ struct fid { * get_name: * @get_name should find a name for the given @child in the given @parent * directory. The name should be stored in the @name (with the - * understanding that it is already pointing to a a %NAME_MAX+1 sized + * understanding that it is already pointing to a %NAME_MAX+1 sized * buffer. get_name() should return %0 on success, a negative error code * or error. @get_name will be called without @parent->i_mutex held. * diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index b79fa9bb7359..3e9c56ee651f 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -18,8 +18,10 @@ #define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FAN_CLASS_CONTENT | \ FAN_CLASS_PRE_CONTENT) -#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \ - FAN_REPORT_TID | FAN_REPORT_FID | \ +#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) + +#define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | FANOTIFY_FID_BITS | \ + FAN_REPORT_TID | \ FAN_CLOEXEC | FAN_NONBLOCK | \ FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS) diff --git a/include/linux/fb.h b/include/linux/fb.h index 3b4b2f0c6994..850f79e9a7cb 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -400,8 +400,6 @@ struct fb_tile_ops { #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */ #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */ -#define FBINFO_MISC_USEREVENT 0x10000 /* event request - from userspace */ #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */ /* A driver may set this flag to indicate that it does want a set_par to be @@ -506,8 +504,9 @@ struct fb_info { }; static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { - struct apertures_struct *a = kzalloc(sizeof(struct apertures_struct) - + max_num * sizeof(struct aperture), GFP_KERNEL); + struct apertures_struct *a; + + a = kzalloc(struct_size(a, ranges, max_num), GFP_KERNEL); if (!a) return NULL; a->count = max_num; diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index f07c55ea0c22..a32bf47c593e 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -22,6 +22,7 @@ * as this is the granularity returned by copy_fdset(). 
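The struct_size() form used in alloc_apertures() above is worth imitating: the open-coded sizeof(...) + max_num * sizeof(...) multiplication can overflow silently, whereas struct_size() saturates on overflow so the allocation simply fails. A minimal sketch of the same flexible-array pattern (hypothetical types):

struct example_vec {
	unsigned int count;
	int items[];			/* flexible array member */
};

static struct example_vec *example_vec_alloc(unsigned int n)
{
	struct example_vec *v = kzalloc(struct_size(v, items, n), GFP_KERNEL);

	if (v)
		v->count = n;
	return v;
}
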
*/ #define NR_OPEN_DEFAULT BITS_PER_LONG +#define NR_OPEN_MAX ~0U struct fdtable { unsigned int max_fds; @@ -109,7 +110,7 @@ struct files_struct *get_files_struct(struct task_struct *); void put_files_struct(struct files_struct *fs); void reset_files_struct(struct files_struct *); int unshare_files(struct files_struct **); -struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy; +struct files_struct *dup_fd(struct files_struct *, unsigned, int *) __latent_entropy; void do_close_on_exec(struct files_struct *); int iterate_fd(struct files_struct *, unsigned, int (*)(const void *, struct file *, unsigned), @@ -121,7 +122,10 @@ extern void __fd_install(struct files_struct *files, unsigned int fd, struct file *file); extern int __close_fd(struct files_struct *files, unsigned int fd); +extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); extern int __close_fd_get_file(unsigned int fd, struct file **res); +extern int unshare_fd(unsigned long unshare_flags, unsigned int max_fds, + struct files_struct **new_fdp); extern struct kmem_cache *files_cachep; diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h new file mode 100644 index 000000000000..4e624c466583 --- /dev/null +++ b/include/linux/fiemap.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FIEMAP_H +#define _LINUX_FIEMAP_H 1 + +#include <uapi/linux/fiemap.h> +#include <linux/fs.h> + +struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ + struct fiemap_extent __user *fi_extents_start; /* Start of + fiemap_extent array */ +}; + +int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, + u64 start, u64 *len, u32 supported_flags); +int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, + u64 phys, u64 len, u32 flags); + +int generic_block_fiemap(struct inode *inode, + struct fiemap_extent_info *fieinfo, u64 start, u64 len, + get_block_t *get_block); + +#endif /* _LINUX_FIEMAP_H 1 */ diff --git a/include/linux/file.h b/include/linux/file.h index 142d102f285e..225982792fa2 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -9,6 +9,7 @@ #include <linux/compiler.h> #include <linux/types.h> #include <linux/posix_types.h> +#include <linux/errno.h> struct file; @@ -91,7 +92,27 @@ extern void put_unused_fd(unsigned int fd); extern void fd_install(unsigned int fd, struct file *file); +extern int __receive_fd(int fd, struct file *file, int __user *ufd, + unsigned int o_flags); +static inline int receive_fd_user(struct file *file, int __user *ufd, + unsigned int o_flags) +{ + if (ufd == NULL) + return -EFAULT; + return __receive_fd(-1, file, ufd, o_flags); +} +static inline int receive_fd(struct file *file, unsigned int o_flags) +{ + return __receive_fd(-1, file, NULL, o_flags); +} +static inline int receive_fd_replace(int fd, struct file *file, unsigned int o_flags) +{ + return __receive_fd(fd, file, NULL, o_flags); +} + extern void flush_delayed_fput(void); extern void __fput_sync(struct file *); +extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max; + #endif /* __LINUX_FILE_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 9b5aa5c483cc..ebfb7cfb65f1 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -16,11 +16,12 @@ #include <linux/workqueue.h> #include <linux/sched.h> #include <linux/capability.h> -#include 
<linux/cryptohash.h> #include <linux/set_memory.h> #include <linux/kallsyms.h> #include <linux/if_vlan.h> #include <linux/vmalloc.h> +#include <linux/sockptr.h> +#include <crypto/sha.h> #include <net/sch_generic.h> @@ -502,13 +503,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) offsetof(TYPE, MEMBER); \ }) -#ifdef CONFIG_COMPAT /* A struct sock_filter is architecture independent. */ struct compat_sock_fprog { u16 len; compat_uptr_t filter; /* struct sock_filter * */ }; -#endif struct sock_fprog_kern { u16 len; @@ -534,7 +533,8 @@ struct bpf_prog { is_func:1, /* program is a bpf function */ kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? */ - enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ + enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ + call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -545,10 +545,8 @@ struct bpf_prog { unsigned int (*bpf_func)(const void *ctx, const struct bpf_insn *insn); /* Instructions for interpreter */ - union { - struct sock_filter insns[0]; - struct bpf_insn insnsi[0]; - }; + struct sock_filter insns[0]; + struct bpf_insn insnsi[]; }; struct sk_filter { @@ -746,7 +744,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) { return round_up(bpf_prog_insn_size(prog) + - sizeof(__be64) + 1, SHA_MESSAGE_BYTES); + sizeof(__be64) + 1, SHA1_BLOCK_SIZE); } static inline unsigned int bpf_prog_size(unsigned int proglen) @@ -863,8 +861,6 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog, bpf_aux_classic_check_t trans, bool save_orig); void bpf_prog_destroy(struct bpf_prog *fp); -const struct bpf_func_proto * -bpf_base_func_proto(enum bpf_func_id func_id); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_attach_bpf(u32 ufd, struct sock *sk); @@ -888,12 +884,12 @@ void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_helper_changes_pkt_data(void *func); -static inline bool bpf_dump_raw_ok(void) +static inline bool bpf_dump_raw_ok(const struct cred *cred) { /* Reconstruction of call-sites is dependent on kallsyms, * thus make dump the same restriction. */ - return kallsyms_show_value() == 1; + return kallsyms_show_value(cred); } struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, @@ -1204,7 +1200,7 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) BPF_ANCILLARY(RANDOM); BPF_ANCILLARY(VLAN_TPID); } - /* Fallthrough. */ + fallthrough; default: return ftest->code; } @@ -1282,4 +1278,153 @@ struct bpf_sockopt_kern { s32 retval; }; +int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len); + +struct bpf_sk_lookup_kern { + u16 family; + u16 protocol; + struct { + __be32 saddr; + __be32 daddr; + } v4; + struct { + const struct in6_addr *saddr; + const struct in6_addr *daddr; + } v6; + __be16 sport; + u16 dport; + struct sock *selected_sk; + bool no_reuseport; +}; + +extern struct static_key_false bpf_sk_lookup_enabled; + +/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup. 
+ * + * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and + * SK_DROP. Their meaning is as follows: + * + * SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result + * SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup + * SK_DROP : terminate lookup with -ECONNREFUSED + * + * This macro aggregates return values and selected sockets from + * multiple BPF programs according to following rules in order: + * + * 1. If any program returned SK_PASS and a non-NULL ctx.selected_sk, + * macro result is SK_PASS and last ctx.selected_sk is used. + * 2. If any program returned SK_DROP return value, + * macro result is SK_DROP. + * 3. Otherwise result is SK_PASS and ctx.selected_sk is NULL. + * + * Caller must ensure that the prog array is non-NULL, and that the + * array as well as the programs it contains remain valid. + */ +#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func) \ + ({ \ + struct bpf_sk_lookup_kern *_ctx = &(ctx); \ + struct bpf_prog_array_item *_item; \ + struct sock *_selected_sk = NULL; \ + bool _no_reuseport = false; \ + struct bpf_prog *_prog; \ + bool _all_pass = true; \ + u32 _ret; \ + \ + migrate_disable(); \ + _item = &(array)->items[0]; \ + while ((_prog = READ_ONCE(_item->prog))) { \ + /* restore most recent selection */ \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + \ + _ret = func(_prog, _ctx); \ + if (_ret == SK_PASS && _ctx->selected_sk) { \ + /* remember last non-NULL socket */ \ + _selected_sk = _ctx->selected_sk; \ + _no_reuseport = _ctx->no_reuseport; \ + } else if (_ret == SK_DROP && _all_pass) { \ + _all_pass = false; \ + } \ + _item++; \ + } \ + _ctx->selected_sk = _selected_sk; \ + _ctx->no_reuseport = _no_reuseport; \ + migrate_enable(); \ + _all_pass || _selected_sk ? 
SK_PASS : SK_DROP; \ + }) + +static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, + const __be32 saddr, const __be16 sport, + const __be32 daddr, const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET, + .protocol = protocol, + .v4.saddr = saddr, + .v4.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} + +#if IS_ENABLED(CONFIG_IPV6) +static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, + const struct in6_addr *saddr, + const __be16 sport, + const struct in6_addr *daddr, + const u16 dport, + struct sock **psk) +{ + struct bpf_prog_array *run_array; + struct sock *selected_sk = NULL; + bool no_reuseport = false; + + rcu_read_lock(); + run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]); + if (run_array) { + struct bpf_sk_lookup_kern ctx = { + .family = AF_INET6, + .protocol = protocol, + .v6.saddr = saddr, + .v6.daddr = daddr, + .sport = sport, + .dport = dport, + }; + u32 act; + + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + if (act == SK_PASS) { + selected_sk = ctx.selected_sk; + no_reuseport = ctx.no_reuseport; + } else { + selected_sk = ERR_PTR(-ECONNREFUSED); + } + } + rcu_read_unlock(); + *psk = selected_sk; + return no_reuseport; +} +#endif /* IS_ENABLED(CONFIG_IPV6) */ + #endif /* __LINUX_FILTER_H__ */ diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 4bbd0afd91b7..cb3e2c06ed8a 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -12,7 +12,6 @@ struct firmware { size_t size; const u8 *data; - struct page **pages; /* firmware loader private fields */ void *priv; diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 17ba4e405129..22c76571a294 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -11,13 +11,14 @@ #define _SC_SCI_H #include <linux/firmware/imx/ipc.h> -#include <linux/firmware/imx/types.h> #include <linux/firmware/imx/svc/misc.h> #include <linux/firmware/imx/svc/pm.h> +#include <linux/firmware/imx/svc/rm.h> int imx_scu_enable_general_irq_channel(struct device *dev); int imx_scu_irq_register_notifier(struct notifier_block *nb); int imx_scu_irq_unregister_notifier(struct notifier_block *nb); int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable); +int imx_scu_soc_init(struct device *dev); #endif /* _SC_SCI_H */ diff --git a/include/linux/firmware/imx/svc/rm.h b/include/linux/firmware/imx/svc/rm.h new file mode 100644 index 000000000000..456b6a59d29b --- /dev/null +++ b/include/linux/firmware/imx/svc/rm.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2020 NXP + * + * Header file containing the public API for the System Controller (SC) + * Resource Management (RM) function. This includes functions for + * partitioning resources, pads, and memory regions. 
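For context on the BPF_SK_LOOKUP runners above, the program side might look like the following minimal sketch (map name and shape are illustrative, following the SK_PASS/SK_DROP contract documented with the macro):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} example_socks SEC(".maps");

SEC("sk_lookup")
int example_select(struct bpf_sk_lookup *ctx)
{
	__u32 key = 0;
	struct bpf_sock *sk;
	long err;

	sk = bpf_map_lookup_elem(&example_socks, &key);
	if (!sk)
		return SK_PASS;		/* fall back to htable lookup */

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}
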
+ * + * RM_SVC (SVC) Resource Management Service + * + * Module for the Resource Management (RM) service. + */ + +#ifndef _SC_RM_API_H +#define _SC_RM_API_H + +#include <linux/firmware/imx/sci.h> + +/* + * This type is used to indicate RPC RM function calls. + */ +enum imx_sc_rm_func { + IMX_SC_RM_FUNC_UNKNOWN = 0, + IMX_SC_RM_FUNC_PARTITION_ALLOC = 1, + IMX_SC_RM_FUNC_SET_CONFIDENTIAL = 31, + IMX_SC_RM_FUNC_PARTITION_FREE = 2, + IMX_SC_RM_FUNC_GET_DID = 26, + IMX_SC_RM_FUNC_PARTITION_STATIC = 3, + IMX_SC_RM_FUNC_PARTITION_LOCK = 4, + IMX_SC_RM_FUNC_GET_PARTITION = 5, + IMX_SC_RM_FUNC_SET_PARENT = 6, + IMX_SC_RM_FUNC_MOVE_ALL = 7, + IMX_SC_RM_FUNC_ASSIGN_RESOURCE = 8, + IMX_SC_RM_FUNC_SET_RESOURCE_MOVABLE = 9, + IMX_SC_RM_FUNC_SET_SUBSYS_RSRC_MOVABLE = 28, + IMX_SC_RM_FUNC_SET_MASTER_ATTRIBUTES = 10, + IMX_SC_RM_FUNC_SET_MASTER_SID = 11, + IMX_SC_RM_FUNC_SET_PERIPHERAL_PERMISSIONS = 12, + IMX_SC_RM_FUNC_IS_RESOURCE_OWNED = 13, + IMX_SC_RM_FUNC_GET_RESOURCE_OWNER = 33, + IMX_SC_RM_FUNC_IS_RESOURCE_MASTER = 14, + IMX_SC_RM_FUNC_IS_RESOURCE_PERIPHERAL = 15, + IMX_SC_RM_FUNC_GET_RESOURCE_INFO = 16, + IMX_SC_RM_FUNC_MEMREG_ALLOC = 17, + IMX_SC_RM_FUNC_MEMREG_SPLIT = 29, + IMX_SC_RM_FUNC_MEMREG_FRAG = 32, + IMX_SC_RM_FUNC_MEMREG_FREE = 18, + IMX_SC_RM_FUNC_FIND_MEMREG = 30, + IMX_SC_RM_FUNC_ASSIGN_MEMREG = 19, + IMX_SC_RM_FUNC_SET_MEMREG_PERMISSIONS = 20, + IMX_SC_RM_FUNC_IS_MEMREG_OWNED = 21, + IMX_SC_RM_FUNC_GET_MEMREG_INFO = 22, + IMX_SC_RM_FUNC_ASSIGN_PAD = 23, + IMX_SC_RM_FUNC_SET_PAD_MOVABLE = 24, + IMX_SC_RM_FUNC_IS_PAD_OWNED = 25, + IMX_SC_RM_FUNC_DUMP = 27, +}; + +#if IS_ENABLED(CONFIG_IMX_SCU) +bool imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource); +#else +static inline bool +imx_sc_rm_is_resource_owned(struct imx_sc_ipc *ipc, u16 resource) +{ + return true; +} +#endif +#endif diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h deleted file mode 100644 index 80821100e85f..000000000000 --- a/include/linux/firmware/imx/types.h +++ /dev/null @@ -1,65 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (C) 2016 Freescale Semiconductor, Inc. - * Copyright 2017~2018 NXP - * - * Header file containing types used across multiple service APIs. - */ - -#ifndef _SC_TYPES_H -#define _SC_TYPES_H - -/* - * This type is used to indicate a control. 
- */ -enum imx_sc_ctrl { - IMX_SC_C_TEMP = 0, - IMX_SC_C_TEMP_HI = 1, - IMX_SC_C_TEMP_LOW = 2, - IMX_SC_C_PXL_LINK_MST1_ADDR = 3, - IMX_SC_C_PXL_LINK_MST2_ADDR = 4, - IMX_SC_C_PXL_LINK_MST_ENB = 5, - IMX_SC_C_PXL_LINK_MST1_ENB = 6, - IMX_SC_C_PXL_LINK_MST2_ENB = 7, - IMX_SC_C_PXL_LINK_SLV1_ADDR = 8, - IMX_SC_C_PXL_LINK_SLV2_ADDR = 9, - IMX_SC_C_PXL_LINK_MST_VLD = 10, - IMX_SC_C_PXL_LINK_MST1_VLD = 11, - IMX_SC_C_PXL_LINK_MST2_VLD = 12, - IMX_SC_C_SINGLE_MODE = 13, - IMX_SC_C_ID = 14, - IMX_SC_C_PXL_CLK_POLARITY = 15, - IMX_SC_C_LINESTATE = 16, - IMX_SC_C_PCIE_G_RST = 17, - IMX_SC_C_PCIE_BUTTON_RST = 18, - IMX_SC_C_PCIE_PERST = 19, - IMX_SC_C_PHY_RESET = 20, - IMX_SC_C_PXL_LINK_RATE_CORRECTION = 21, - IMX_SC_C_PANIC = 22, - IMX_SC_C_PRIORITY_GROUP = 23, - IMX_SC_C_TXCLK = 24, - IMX_SC_C_CLKDIV = 25, - IMX_SC_C_DISABLE_50 = 26, - IMX_SC_C_DISABLE_125 = 27, - IMX_SC_C_SEL_125 = 28, - IMX_SC_C_MODE = 29, - IMX_SC_C_SYNC_CTRL0 = 30, - IMX_SC_C_KACHUNK_CNT = 31, - IMX_SC_C_KACHUNK_SEL = 32, - IMX_SC_C_SYNC_CTRL1 = 33, - IMX_SC_C_DPI_RESET = 34, - IMX_SC_C_MIPI_RESET = 35, - IMX_SC_C_DUAL_MODE = 36, - IMX_SC_C_VOLTAGE = 37, - IMX_SC_C_PXL_LINK_SEL = 38, - IMX_SC_C_OFS_SEL = 39, - IMX_SC_C_OFS_AUDIO = 40, - IMX_SC_C_OFS_PERIPH = 41, - IMX_SC_C_OFS_IRQ = 42, - IMX_SC_C_RST0 = 43, - IMX_SC_C_RST1 = 44, - IMX_SC_C_SEL0 = 45, - IMX_SC_C_LAST -}; - -#endif /* _SC_TYPES_H */ diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h index 013ae4819deb..c3e5ab014caf 100644 --- a/include/linux/firmware/intel/stratix10-smc.h +++ b/include/linux/firmware/intel/stratix10-smc.h @@ -54,32 +54,25 @@ * Secure monitor software doesn't recognize the request. * * INTEL_SIP_SMC_STATUS_OK: - * FPGA configuration completed successfully, - * In case of FPGA configuration write operation, it means secure monitor - * software can accept the next chunk of FPGA configuration data. + * Secure monitor software accepts the service client's request. * - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY: - * In case of FPGA configuration write operation, it means secure monitor - * software is still processing previous data & can't accept the next chunk - * of data. Service driver needs to issue - * INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE call to query the - * completed block(s). + * INTEL_SIP_SMC_STATUS_BUSY: + * Secure monitor software is still processing the service client's request. + * - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR: - * There is error during the FPGA configuration process. + * INTEL_SIP_SMC_STATUS_REJECTED: + * Secure monitor software rejects the service client's request. * - * INTEL_SIP_SMC_REG_ERROR: - * There is error during a read or write operation of the protected registers. + * INTEL_SIP_SMC_STATUS_ERROR: + * An error occurred while processing the service request. * * INTEL_SIP_SMC_RSU_ERROR: - * There is error during a remote status update. + * An error occurred while processing the remote status update request. */ #define INTEL_SIP_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF #define INTEL_SIP_SMC_STATUS_OK 0x0 -#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY 0x1 -#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_REJECTED 0x2 -#define INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR 0x4 -#define INTEL_SIP_SMC_REG_ERROR 0x5 +#define INTEL_SIP_SMC_STATUS_BUSY 0x1 +#define INTEL_SIP_SMC_STATUS_REJECTED 0x2 +#define INTEL_SIP_SMC_STATUS_ERROR 0x4 #define INTEL_SIP_SMC_RSU_ERROR 0x7 /** @@ -95,7 +88,7 @@ * a2-7: not used.
* * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, or INTEL_SIP_SMC_STATUS_ERROR. * a1-3: not used. */ #define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_START 1 @@ -115,8 +108,8 @@ * a3-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or + * INTEL_SIP_SMC_STATUS_ERROR. * a1: 64bit physical address of 1st completed memory block if any completed * block, otherwise zero value. * a2: 64bit physical address of 2nd completed memory block if any completed @@ -133,15 +126,15 @@ * * Sync call used by service driver at EL1 to track the completed write * transactions. This request is called after INTEL_SIP_SMC_FPGA_CONFIG_WRITE - * call returns INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY. + * call returns INTEL_SIP_SMC_STATUS_BUSY. * * Call register usage: * a0: INTEL_SIP_SMC_FPGA_CONFIG_COMPLETED_WRITE. * a1-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or + * INTEL_SIP_SMC_STATUS_ERROR. * a1: 64bit physical address of 1st completed memory block. * a2: 64bit physical address of 2nd completed memory block if * any completed block, otherwise zero value. @@ -164,8 +157,8 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) * a1-7: not used. * * Return status: - * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_FPGA_CONFIG_STATUS_BUSY or - * INTEL_SIP_SMC_FPGA_CONFIG_STATUS_ERROR. + * a0: INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_BUSY or + * INTEL_SIP_SMC_STATUS_ERROR. * a1-3: not used.
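A hypothetical EL1 caller sketch, using the generic ARM SMCCC helper and the register layout documented above:

static int example_fpga_config_isdone(void)
{
	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_FPGA_CONFIG_ISDONE,
		      0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == INTEL_SIP_SMC_STATUS_OK)
		return 0;
	return res.a0 == INTEL_SIP_SMC_STATUS_BUSY ? -EBUSY : -EIO;
}
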
*/ #define INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_LOOPBACK 6 @@ -368,3 +361,46 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE) #define INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER 15 #define INTEL_SIP_SMC_RSU_RETRY_COUNTER \ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_RETRY_COUNTER) + +/** + * Request INTEL_SIP_SMC_RSU_DCMF_VERSION + * + * Sync call used by service driver at EL1 to query DCMF (Decision + * Configuration Management Firmware) version from FW + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_DCMF_VERSION + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 dcmf1 | dcmf0 + * a2 dcmf3 | dcmf2 + * + * Or + * + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION 16 +#define INTEL_SIP_SMC_RSU_DCMF_VERSION \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_DCMF_VERSION) + +/** + * Request INTEL_SIP_SMC_RSU_MAX_RETRY + * + * Sync call used by service driver at EL1 to query max retry value from FW + * + * Call register usage: + * a0 INTEL_SIP_SMC_RSU_MAX_RETRY + * a1-7 not used + * + * Return status + * a0 INTEL_SIP_SMC_STATUS_OK + * a1 max retry value + * + * Or + * a0 INTEL_SIP_SMC_RSU_ERROR + */ +#define INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY 18 +#define INTEL_SIP_SMC_RSU_MAX_RETRY \ + INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_MAX_RETRY) diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index 59bc6e2af693..a93d85932eb9 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -18,62 +18,53 @@ /** * Status of the sent command, in bit number * - * SVC_COMMAND_STATUS_RECONFIG_REQUEST_OK: - * Secure firmware accepts the request of FPGA reconfiguration. + * SVC_STATUS_OK: + * Secure firmware accepts the request issued by one of service clients. * - * SVC_STATUS_RECONFIG_BUFFER_SUBMITTED: - * Service client successfully submits FPGA configuration - * data buffer to secure firmware. + * SVC_STATUS_BUFFER_SUBMITTED: + * Service client successfully submits data buffer to secure firmware. * - * SVC_COMMAND_STATUS_RECONFIG_BUFFER_DONE: + * SVC_STATUS_BUFFER_DONE: * Secure firmware completes data process, ready to accept the * next WRITE transaction. * - * SVC_COMMAND_STATUS_RECONFIG_COMPLETED: - * Secure firmware completes FPGA configuration successfully, FPGA should - * be in user mode. + * SVC_STATUS_COMPLETED: + * Secure firmware completes service request successfully. In case of + * FPGA configuration, FPGA should be in user mode. + * - * SVC_COMMAND_STATUS_RECONFIG_BUSY: - * FPGA configuration is still in process. + * SVC_STATUS_BUSY: + * Service request is still in process. * - * SVC_COMMAND_STATUS_RECONFIG_ERROR: - * Error encountered during FPGA configuration. + * SVC_STATUS_ERROR: + * Error encountered while processing the service request. * - * SVC_STATUS_RSU_OK: - * Secure firmware accepts the request of remote status update (RSU). - * - * SVC_STATUS_RSU_ERROR: - * Error encountered during remote system update. - * - * SVC_STATUS_RSU_NO_SUPPORT: - * Secure firmware doesn't support RSU retry or notify feature. + * SVC_STATUS_NO_SUPPORT: + * Secure firmware doesn't support requested features such as RSU retry + * or RSU notify.
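A sketch of a service client consuming these codes in its receive callback (assuming the stratix10_svc_client callback signature and cb_data layout from this header):

static void example_svc_receive_cb(struct stratix10_svc_client *client,
				   struct stratix10_svc_cb_data *data)
{
	switch (data->status) {
	case SVC_STATUS_BUFFER_DONE:
		/* firmware consumed the chunk: submit the next one */
		break;
	case SVC_STATUS_COMPLETED:
		/* request finished, e.g. FPGA now in user mode */
		break;
	case SVC_STATUS_ERROR:
		/* abort the transaction and report the failure */
		break;
	}
}
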
*/ -#define SVC_STATUS_RECONFIG_REQUEST_OK 0 -#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1 -#define SVC_STATUS_RECONFIG_BUFFER_DONE 2 -#define SVC_STATUS_RECONFIG_COMPLETED 3 -#define SVC_STATUS_RECONFIG_BUSY 4 -#define SVC_STATUS_RECONFIG_ERROR 5 -#define SVC_STATUS_RSU_OK 6 -#define SVC_STATUS_RSU_ERROR 7 -#define SVC_STATUS_RSU_NO_SUPPORT 8 +#define SVC_STATUS_OK 0 +#define SVC_STATUS_BUFFER_SUBMITTED 1 +#define SVC_STATUS_BUFFER_DONE 2 +#define SVC_STATUS_COMPLETED 3 +#define SVC_STATUS_BUSY 4 +#define SVC_STATUS_ERROR 5 +#define SVC_STATUS_NO_SUPPORT 6 /** * Flag bit for COMMAND_RECONFIG * * COMMAND_RECONFIG_FLAG_PARTIAL: - * Set to FPGA configuration type (full or partial), the default - * is full reconfig. + * Set to FPGA configuration type (full or partial). */ -#define COMMAND_RECONFIG_FLAG_PARTIAL 0 +#define COMMAND_RECONFIG_FLAG_PARTIAL 1 /** * Timeout settings for service clients: * timeout value used in Stratix10 FPGA manager driver. * timeout value used in RSU driver */ -#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 100 -#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 240 +#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300 +#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720 #define SVC_RSU_REQUEST_TIMEOUT_MS 300 struct stratix10_svc_chan; @@ -84,32 +75,35 @@ struct stratix10_svc_chan; * * @COMMAND_NOOP: do 'dummy' request for integration/debug/trouble-shooting * * @COMMAND_RECONFIG: ask for FPGA configuration preparation, return status - * is SVC_STATUS_RECONFIG_REQUEST_OK + * is SVC_STATUS_OK * * @COMMAND_RECONFIG_DATA_SUBMIT: submit buffer(s) of bit-stream data for the - * FPGA configuration, return status is SVC_STATUS_RECONFIG_BUFFER_SUBMITTED, - * or SVC_STATUS_RECONFIG_ERROR + * FPGA configuration, return status is SVC_STATUS_BUFFER_SUBMITTED or SVC_STATUS_ERROR * * @COMMAND_RECONFIG_DATA_CLAIM: check the status of the configuration, return - * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or - * SVC_STATUS_RECONFIG_ERROR + * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR * * @COMMAND_RECONFIG_STATUS: check the status of the configuration, return - * status is SVC_STATUS_RECONFIG_COMPLETED, or SVC_STATUS_RECONFIG_BUSY, or - * SVC_STATUS_RECONFIG_ERROR + * status is SVC_STATUS_COMPLETED, or SVC_STATUS_BUSY, or SVC_STATUS_ERROR * * @COMMAND_RSU_STATUS: request remote system update boot log, return status * is log data or SVC_STATUS_ERROR * * @COMMAND_RSU_UPDATE: set the offset of the bitstream to boot after reboot, - * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR * * @COMMAND_RSU_NOTIFY: report the status of hard processor system - * software to firmware, return status is SVC_STATUS_RSU_OK or - * SVC_STATUS_RSU_ERROR + * software to firmware, return status is SVC_STATUS_OK or + * SVC_STATUS_ERROR * * @COMMAND_RSU_RETRY: query firmware for the current image's retry counter, - * return status is SVC_STATUS_RSU_OK or SVC_STATUS_RSU_ERROR + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_RSU_MAX_RETRY: query firmware for the max retry value, + * return status is SVC_STATUS_OK or SVC_STATUS_ERROR + * + * @COMMAND_RSU_DCMF_VERSION: query firmware for the DCMF version, return status + * is SVC_STATUS_OK or SVC_STATUS_ERROR */ enum stratix10_svc_command_code { COMMAND_NOOP = 0, @@ -121,6 +115,8 @@ enum stratix10_svc_command_code { COMMAND_RSU_UPDATE, COMMAND_RSU_NOTIFY, COMMAND_RSU_RETRY, + COMMAND_RSU_MAX_RETRY, + COMMAND_RSU_DCMF_VERSION, }; /** diff --git
a/include/linux/firmware/trusted_foundations.h b/include/linux/firmware/trusted_foundations.h index 2549a2db56aa..be5984bda592 100644 --- a/include/linux/firmware/trusted_foundations.h +++ b/include/linux/firmware/trusted_foundations.h @@ -32,6 +32,7 @@ #define TF_PM_MODE_LP1_NO_MC_CLK 2 #define TF_PM_MODE_LP2 3 #define TF_PM_MODE_LP2_NOFLUSH_L2 4 +#define TF_PM_MODE_NONE 5 struct trusted_foundations_platform_data { unsigned int version_major; diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 8efa5ac22d7e..5968df82b991 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -42,6 +42,8 @@ #define ZYNQMP_PM_MAX_QOS 100U +#define GSS_NUM_REGS (4) + /* Node capabilities */ #define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U #define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U @@ -62,6 +64,7 @@ enum pm_api_id { PM_GET_API_VERSION = 1, + PM_SYSTEM_SHUTDOWN = 12, PM_REQUEST_NODE = 13, PM_RELEASE_NODE, PM_SET_REQUIREMENT, @@ -107,6 +110,12 @@ enum pm_ioctl_id { IOCTL_GET_PLL_FRAC_MODE, IOCTL_SET_PLL_FRAC_DATA, IOCTL_GET_PLL_FRAC_DATA, + IOCTL_WRITE_GGS = 12, + IOCTL_READ_GGS = 13, + IOCTL_WRITE_PGGS = 14, + IOCTL_READ_PGGS = 15, + /* Set healthy bit value */ + IOCTL_SET_BOOT_HEALTH_STATUS = 17, }; enum pm_query_id { @@ -279,6 +288,18 @@ enum dll_reset_type { PM_DLL_RESET_PULSE, }; +enum zynqmp_pm_shutdown_type { + ZYNQMP_PM_SHUTDOWN_TYPE_SHUTDOWN, + ZYNQMP_PM_SHUTDOWN_TYPE_RESET, + ZYNQMP_PM_SHUTDOWN_TYPE_SETSCOPE_ONLY, +}; + +enum zynqmp_pm_shutdown_subtype { + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SUBSYSTEM, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY, + ZYNQMP_PM_SHUTDOWN_SUBTYPE_SYSTEM, +}; + /** * struct zynqmp_pm_query_data - PM query data * @qid: query ID @@ -293,49 +314,199 @@ struct zynqmp_pm_query_data { u32 arg3; }; -struct zynqmp_eemi_ops { - int (*get_api_version)(u32 *version); - int (*get_chipid)(u32 *idcode, u32 *version); - int (*fpga_load)(const u64 address, const u32 size, const u32 flags); - int (*fpga_get_status)(u32 *value); - int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); - int (*clock_enable)(u32 clock_id); - int (*clock_disable)(u32 clock_id); - int (*clock_getstate)(u32 clock_id, u32 *state); - int (*clock_setdivider)(u32 clock_id, u32 divider); - int (*clock_getdivider)(u32 clock_id, u32 *divider); - int (*clock_setrate)(u32 clock_id, u64 rate); - int (*clock_getrate)(u32 clock_id, u64 *rate); - int (*clock_setparent)(u32 clock_id, u32 parent_id); - int (*clock_getparent)(u32 clock_id, u32 *parent_id); - int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out); - int (*reset_assert)(const enum zynqmp_pm_reset reset, - const enum zynqmp_pm_reset_action assert_flag); - int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status); - int (*init_finalize)(void); - int (*set_suspend_mode)(u32 mode); - int (*request_node)(const u32 node, - const u32 capabilities, - const u32 qos, - const enum zynqmp_pm_request_ack ack); - int (*release_node)(const u32 node); - int (*set_requirement)(const u32 node, - const u32 capabilities, - const u32 qos, - const enum zynqmp_pm_request_ack ack); - int (*aes)(const u64 address, u32 *out); -}; int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 *ret_payload); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) -const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); +int zynqmp_pm_get_api_version(u32 *version); +int zynqmp_pm_get_chipid(u32 *idcode, u32 *version); +int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out); 
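Given the zynqmp_pm_invoke_fn() prototype above, the new PM_SYSTEM_SHUTDOWN API with its type/subtype enums plausibly reduces to a thin wrapper like the sketch below; this is consistent with the declarations in this header but is not necessarily the exact driver source, and the example caller is illustrative.

/* Sketch: forward shutdown type and subtype straight to the EEMI firmware. */
int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
{
	return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype,
				   0, 0, NULL);
}

/* Illustrative caller: request a PS-only reset. */
static int example_ps_only_reset(void)
{
	return zynqmp_pm_system_shutdown(ZYNQMP_PM_SHUTDOWN_TYPE_RESET,
					 ZYNQMP_PM_SHUTDOWN_SUBTYPE_PS_ONLY);
}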
+int zynqmp_pm_clock_enable(u32 clock_id); +int zynqmp_pm_clock_disable(u32 clock_id); +int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state); +int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider); +int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider); +int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate); +int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate); +int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id); +int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id); +int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode); +int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode); +int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data); +int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data); +int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value); +int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type); +int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, + const enum zynqmp_pm_reset_action assert_flag); +int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status); +int zynqmp_pm_init_finalize(void); +int zynqmp_pm_set_suspend_mode(u32 mode); +int zynqmp_pm_request_node(const u32 node, const u32 capabilities, + const u32 qos, const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_release_node(const u32 node); +int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_aes_engine(const u64 address, u32 *out); +int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags); +int zynqmp_pm_fpga_get_status(u32 *value); +int zynqmp_pm_write_ggs(u32 index, u32 value); +int zynqmp_pm_read_ggs(u32 index, u32 *value); +int zynqmp_pm_write_pggs(u32 index, u32 value); +int zynqmp_pm_read_pggs(u32 index, u32 *value); +int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype); +int zynqmp_pm_set_boot_health_status(u32 value); #else static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void) { return ERR_PTR(-ENODEV); } +static inline int zynqmp_pm_get_api_version(u32 *version) +{ + return -ENODEV; +} +static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) +{ + return -ENODEV; +} +static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, + u32 *out) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_enable(u32 clock_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_disable(u32 clock_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode) +{ + return -ENODEV; +} +static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data) +{ + return -ENODEV; +} +static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 
type, u32 value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type) +{ + return -ENODEV; +} +static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, + const enum zynqmp_pm_reset_action assert_flag) +{ + return -ENODEV; +} +static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, + u32 *status) +{ + return -ENODEV; +} +static inline int zynqmp_pm_init_finalize(void) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_suspend_mode(u32 mode) +{ + return -ENODEV; +} +static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} +static inline int zynqmp_pm_release_node(const u32 node) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_requirement(const u32 node, + const u32 capabilities, + const u32 qos, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} +static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out) +{ + return -ENODEV; +} +static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size, + const u32 flags) +{ + return -ENODEV; +} +static inline int zynqmp_pm_fpga_get_status(u32 *value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_write_ggs(u32 index, u32 value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_read_ggs(u32 index, u32 *value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_write_pggs(u32 index, u32 value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_read_pggs(u32 index, u32 *value) +{ + return -ENODEV; +} +static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype) +{ + return -ENODEV; +} +static inline int zynqmp_pm_set_boot_health_status(u32 value) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/font.h b/include/linux/font.h index 51b91c8b69d5..59faa80f586d 100644 --- a/include/linux/font.h +++ b/include/linux/font.h @@ -59,4 +59,17 @@ extern const struct font_desc *get_default_font(int xres, int yres, /* Max. length for the name of a predefined font */ #define MAX_FONT_NAME 32 +/* Extra word getters */ +#define REFCOUNT(fd) (((int *)(fd))[-1]) +#define FNTSIZE(fd) (((int *)(fd))[-2]) +#define FNTCHARCNT(fd) (((int *)(fd))[-3]) +#define FNTSUM(fd) (((int *)(fd))[-4]) + +#define FONT_EXTRA_WORDS 4 + +struct font_data { + unsigned int extra[FONT_EXTRA_WORDS]; + const unsigned char data[]; +} __packed; + #endif /* _VIDEO_FONT_H */ diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h index 7fc95d5c95bb..141ac3f251e6 100644 --- a/include/linux/fpga/adi-axi-common.h +++ b/include/linux/fpga/adi-axi-common.h @@ -11,9 +11,13 @@ #ifndef ADI_AXI_COMMON_H_ #define ADI_AXI_COMMON_H_ -#define ADI_AXI_REG_VERSION 0x0000 +#define ADI_AXI_REG_VERSION 0x0000 #define ADI_AXI_PCORE_VER(major, minor, patch) \ (((major) << 16) | ((minor) << 8) | (patch)) +#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff) +#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff) +#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff) + #endif /* ADI_AXI_COMMON_H_ */ diff --git a/include/linux/frame.h b/include/linux/frame.h index 02d3ca2d9598..303cda600e56 100644 --- a/include/linux/frame.h +++ b/include/linux/frame.h @@ -15,9 +15,20 @@ static void __used __section(.discard.func_stack_frame_non_standard) \ *__func_stack_frame_non_standard_##func = func +/* + * This macro indicates that the following intra-function call is valid. 
+ * Any non-annotated intra-function call will cause objtool to issue a warning. + */ +#define ANNOTATE_INTRA_FUNCTION_CALL \ + 999: \ + .pushsection .discard.intra_function_calls; \ + .long 999b; \ + .popsection; + #else /* !CONFIG_STACK_VALIDATION */ #define STACK_FRAME_NON_STANDARD(func) +#define ANNOTATE_INTRA_FUNCTION_CALL #endif /* CONFIG_STACK_VALIDATION */ diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 21f5aa0b217f..27828145ca09 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h @@ -207,6 +207,17 @@ static inline long freezable_schedule_timeout_interruptible(long timeout) return __retval; } +/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ +static inline long freezable_schedule_timeout_interruptible_unsafe(long timeout) +{ + long __retval; + + freezer_do_not_count(); + __retval = schedule_timeout_interruptible(timeout); + freezer_count_unsafe(); + return __retval; +} + /* Like schedule_timeout_killable(), but should not block the freezer. */ static inline long freezable_schedule_timeout_killable(long timeout) { @@ -285,6 +296,9 @@ static inline void set_freezable(void) {} #define freezable_schedule_timeout_interruptible(timeout) \ schedule_timeout_interruptible(timeout) +#define freezable_schedule_timeout_interruptible_unsafe(timeout) \ + schedule_timeout_interruptible(timeout) + #define freezable_schedule_timeout_killable(timeout) \ schedule_timeout_killable(timeout) diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index 6d775984905b..b07d88c92bb2 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -10,7 +10,7 @@ /* * Return code to denote that requested number of * frontswap pages are unused(moved to page cache). - * Used in in shmem_unuse and try_to_unuse. + * Used in shmem_unuse and try_to_unuse. */ #define FRONTSWAP_PAGES_UNUSED 2 diff --git a/include/linux/fs.h b/include/linux/fs.h index 45cc10cdf6dd..7519ae003a08 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -24,7 +24,6 @@ #include <linux/capability.h> #include <linux/semaphore.h> #include <linux/fcntl.h> -#include <linux/fiemap.h> #include <linux/rculist_bl.h> #include <linux/atomic.h> #include <linux/shrinker.h> @@ -48,6 +47,7 @@ struct backing_dev_info; struct bdi_writeback; struct bio; struct export_operations; +struct fiemap_extent_info; struct hd_geometry; struct iovec; struct kiocb; @@ -175,6 +175,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File does not contribute to nr_files count */ #define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) +/* File supports async buffered reads */ +#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) + /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector * that indicates that they should check the contents of the iovec are @@ -292,6 +295,7 @@ enum positive_aop_returns { struct page; struct address_space; struct writeback_control; +struct readahead_control; /* * Write life time hint values. 
@@ -314,6 +318,9 @@ enum rw_hint { #define IOCB_SYNC (1 << 5) #define IOCB_WRITE (1 << 6) #define IOCB_NOWAIT (1 << 7) +/* iocb->ki_waitq is valid */ +#define IOCB_WAITQ (1 << 8) +#define IOCB_NOIO (1 << 9) struct kiocb { struct file *ki_filp; @@ -327,7 +334,10 @@ struct kiocb { int ki_flags; u16 ki_hint; u16 ki_ioprio; /* See linux/ioprio.h */ - unsigned int ki_cookie; /* for ->iopoll */ + union { + unsigned int ki_cookie; /* for ->iopoll */ + struct wait_page_queue *ki_waitq; /* for async buffered IO */ + }; randomized_struct_fields_end }; @@ -375,6 +385,7 @@ struct address_space_operations { */ int (*readpages)(struct file *filp, struct address_space *mapping, struct list_head *pages, unsigned nr_pages); + void (*readahead)(struct readahead_control *); int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, @@ -468,45 +479,6 @@ struct address_space { * must be enforced here for CRIS, to let the least significant bit * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. */ -struct request_queue; - -struct block_device { - dev_t bd_dev; /* not a kdev_t - it's a search key */ - int bd_openers; - struct inode * bd_inode; /* will die */ - struct super_block * bd_super; - struct mutex bd_mutex; /* open/close mutex */ - void * bd_claiming; - void * bd_holder; - int bd_holders; - bool bd_write_holder; -#ifdef CONFIG_SYSFS - struct list_head bd_holder_disks; -#endif - struct block_device * bd_contains; - unsigned bd_block_size; - u8 bd_partno; - struct hd_struct * bd_part; - /* number of times partitions within this device have been opened. */ - unsigned bd_part_count; - int bd_invalidated; - struct gendisk * bd_disk; - struct request_queue * bd_queue; - struct backing_dev_info *bd_bdi; - struct list_head bd_list; - /* - * Private data. You must have bd_claim'ed the block_device - * to use this. NOTE: bd_claim allows an owner to claim - * the same device multiple times, the owner must take special - * care to not mess up bd_private for that case. - */ - unsigned long bd_private; - - /* The counter of freeze processes */ - int bd_fsfreeze_count; - /* Mutex for freeze */ - struct mutex bd_fsfreeze_mutex; -} __randomize_layout; /* XArray tags, for tagging dirty and writeback pages in the pagecache. */ #define PAGECACHE_TAG_DIRTY XA_MARK_0 @@ -546,6 +518,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping) up_read(&mapping->i_mmap_rwsem); } +static inline void i_mmap_assert_locked(struct address_space *mapping) +{ + lockdep_assert_held(&mapping->i_mmap_rwsem); +} + +static inline void i_mmap_assert_write_locked(struct address_space *mapping) +{ + lockdep_assert_held_write(&mapping->i_mmap_rwsem); +} + /* * Might pages of this file be mapped into userspace? */ @@ -556,7 +538,7 @@ static inline int mapping_mapped(struct address_space *mapping) /* * Might pages of this file have been modified in userspace? - * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff + * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap * marks vma as VM_SHARED if it is shared, and the file was opened for * writing i.e. vma may be mprotected writable even if now readonly. 
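Because ki_waitq now shares storage with ki_cookie in the kiocb union above, IOCB_WAITQ in ki_flags acts as the discriminant for which member is live. A hypothetical accessor (not part of fs.h) makes the rule explicit:

/* Hypothetical helper: ki_waitq is only meaningful when IOCB_WAITQ is set. */
static inline struct wait_page_queue *example_kiocb_waitq(struct kiocb *kiocb)
{
	if (!(kiocb->ki_flags & IOCB_WAITQ))
		return NULL;	/* the union currently holds ki_cookie */
	return kiocb->ki_waitq;
}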
* @@ -905,8 +887,6 @@ static inline unsigned imajor(const struct inode *inode) return MAJOR(inode->i_rdev); } -extern struct block_device *I_BDEV(struct inode *inode); - struct fown_struct { rwlock_t lock; /* protects pid, uid, euid fields */ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ @@ -976,6 +956,7 @@ struct file { #endif /* #ifdef CONFIG_EPOLL */ struct address_space *f_mapping; errseq_t f_wb_err; + errseq_t f_sb_err; /* for syncfs */ } __randomize_layout __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ @@ -1045,6 +1026,7 @@ struct lock_manager_operations { bool (*lm_break)(struct file_lock *); int (*lm_change)(struct file_lock *, int, struct list_head *); void (*lm_setup)(struct file_lock *, void **); + bool (*lm_breaker_owns_lease)(struct file_lock *); }; struct lock_manager { @@ -1376,6 +1358,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_NODIRATIME 2048 /* Do not update directory access times */ #define SB_SILENT 32768 #define SB_POSIXACL (1<<16) /* VFS does not apply the umask */ +#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */ #define SB_KERNMOUNT (1<<22) /* this is a kern_mount call */ #define SB_I_VERSION (1<<23) /* Update inode I_version field */ #define SB_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ @@ -1409,6 +1392,8 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 +#define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ + /* Possible states of 'frozen' field */ enum { SB_UNFROZEN = 0, /* FS is unfrozen */ @@ -1520,6 +1505,9 @@ struct super_block { /* Being remounted read-only */ int s_readonly_remount; + /* per-sb errseq_t for reporting writeback errors via syncfs */ + errseq_t s_wb_err; + /* AIO completions deferred from interrupt context */ struct workqueue_struct *s_dio_done_wq; struct hlist_head s_pins; @@ -1673,10 +1661,10 @@ static inline int sb_start_write_trylock(struct super_block *sb) * * Since page fault freeze protection behaves as a lock, users have to preserve * ordering of freeze protection and other filesystem locks. It is advised to - * put sb_start_pagefault() close to mmap_sem in lock ordering. Page fault + * put sb_start_pagefault() close to mmap_lock in lock ordering. 
Page fault * handling code implies lock dependency: * - * mmap_sem + * mmap_lock * -> sb_start_pagefault */ static inline void sb_start_pagefault(struct super_block *sb) @@ -1721,7 +1709,11 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino extern int vfs_rmdir(struct inode *, struct dentry *); extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); -extern int vfs_whiteout(struct inode *, struct dentry *); + +static inline int vfs_whiteout(struct inode *dir, struct dentry *dentry) +{ + return vfs_mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); +} extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag); @@ -1730,6 +1722,10 @@ int vfs_mkobj(struct dentry *, umode_t, int (*f)(struct dentry *, umode_t, void *), void *); +int vfs_fchown(struct file *file, uid_t user, gid_t group); +int vfs_fchmod(struct file *file, umode_t mode); +int vfs_utimes(const struct path *path, struct timespec64 *times); + extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT @@ -1745,19 +1741,6 @@ extern long compat_ptr_ioctl(struct file *file, unsigned int cmd, extern void inode_init_owner(struct inode *inode, const struct inode *dir, umode_t mode); extern bool may_open_dev(const struct path *path); -/* - * VFS FS_IOC_FIEMAP helper definitions. - */ -struct fiemap_extent_info { - unsigned int fi_flags; /* Flags as passed from user */ - unsigned int fi_extents_mapped; /* Number of mapped extents */ - unsigned int fi_extents_max; /* Size of fiemap_extent array */ - struct fiemap_extent __user *fi_extents_start; /* Start of - fiemap_extent array */ -}; -int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, - u64 phys, u64 len, u32 flags); -int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); /* * This is the "filldir" function type, used by readdir() to let @@ -1774,14 +1757,6 @@ struct dir_context { loff_t pos; }; -struct block_device_operations; - -/* These macros are for out of kernel modules to test that - * the kernel supports the unlocked_ioctl and compat_ioctl - * fields in struct file_operations. */ -#define HAVE_COMPAT_IOCTL 1 -#define HAVE_UNLOCKED_IOCTL 1 - /* * These flags let !MMU mmap() govern direct device mapping vs immediate * copying more easily for MAP_PRIVATE, especially for ROM filesystems. 
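Returning to the dir_context/actor pair retained above: a caller embeds struct dir_context and lets the actor accumulate state across entries. A self-contained sketch, assuming the v5.9-era filldir_t signature shown in this header (the entry-counting actor itself is illustrative, not kernel code):

#include <linux/fs.h>
#include <linux/kernel.h>

/* Illustrative sketch: count directory entries via the actor callback. */
struct example_count_ctx {
	struct dir_context ctx;		/* embedded, recovered via container_of() */
	unsigned int nr_entries;
};

static int example_count_actor(struct dir_context *ctx, const char *name,
			       int namlen, loff_t offset, u64 ino,
			       unsigned int d_type)
{
	struct example_count_ctx *c =
		container_of(ctx, struct example_count_ctx, ctx);

	c->nr_entries++;
	return 0;	/* keep iterating */
}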
@@ -1917,7 +1892,6 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, struct iovec *fast_pointer, struct iovec **ret_pointer); -extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_readv(struct file *, const struct iovec __user *, @@ -1982,27 +1956,27 @@ struct super_operations { /* * Inode flags - they have no relation to superblock flags now */ -#define S_SYNC 1 /* Writes are synced at once */ -#define S_NOATIME 2 /* Do not update access times */ -#define S_APPEND 4 /* Append-only file */ -#define S_IMMUTABLE 8 /* Immutable file */ -#define S_DEAD 16 /* removed, but still open directory */ -#define S_NOQUOTA 32 /* Inode is not counted to quota */ -#define S_DIRSYNC 64 /* Directory modifications are synchronous */ -#define S_NOCMTIME 128 /* Do not update file c/mtime */ -#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */ -#define S_PRIVATE 512 /* Inode is fs-internal */ -#define S_IMA 1024 /* Inode has an associated IMA struct */ -#define S_AUTOMOUNT 2048 /* Automount/referral quasi-directory */ -#define S_NOSEC 4096 /* no suid or xattr security attributes */ +#define S_SYNC (1 << 0) /* Writes are synced at once */ +#define S_NOATIME (1 << 1) /* Do not update access times */ +#define S_APPEND (1 << 2) /* Append-only file */ +#define S_IMMUTABLE (1 << 3) /* Immutable file */ +#define S_DEAD (1 << 4) /* removed, but still open directory */ +#define S_NOQUOTA (1 << 5) /* Inode is not counted to quota */ +#define S_DIRSYNC (1 << 6) /* Directory modifications are synchronous */ +#define S_NOCMTIME (1 << 7) /* Do not update file c/mtime */ +#define S_SWAPFILE (1 << 8) /* Do not truncate: swapon got its bmaps */ +#define S_PRIVATE (1 << 9) /* Inode is fs-internal */ +#define S_IMA (1 << 10) /* Inode has an associated IMA struct */ +#define S_AUTOMOUNT (1 << 11) /* Automount/referral quasi-directory */ +#define S_NOSEC (1 << 12) /* no suid or xattr security attributes */ #ifdef CONFIG_FS_DAX -#define S_DAX 8192 /* Direct Access, avoiding the page cache */ +#define S_DAX (1 << 13) /* Direct Access, avoiding the page cache */ #else -#define S_DAX 0 /* Make all the DAX code disappear */ +#define S_DAX 0 /* Make all the DAX code disappear */ #endif -#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */ -#define S_CASEFOLD 32768 /* Casefolded file */ -#define S_VERITY 65536 /* Verity file (using fs/verity/) */ +#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */ +#define S_CASEFOLD (1 << 15) /* Casefolded file */ +#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */ /* * Note that nosuid etc flags are inode-specific: setting some file-system @@ -2156,6 +2130,12 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * * I_CREATING New object's inode in the middle of setting up. * + * I_DONTCACHE Evict inode as soon as it is not used anymore. + * + * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists. + * Used to detect that mark_inode_dirty() should not move + * inode between dirty lists. + * * Q: What is the difference between I_WILL_FREE and I_FREEING? 
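The renumbering above from decimal literals to (1 << n) is behavior-preserving: each S_* constant still names a single i_flags bit, so policy checks remain plain mask tests. An illustrative helper, not from fs.h:

/* Illustrative: S_* inode flags compose as ordinary bitmasks. */
static inline bool example_inode_is_write_restricted(const struct inode *inode)
{
	return inode->i_flags & (S_APPEND | S_IMMUTABLE);
}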
*/ #define I_DIRTY_SYNC (1 << 0) @@ -2173,11 +2153,11 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define __I_DIRTY_TIME_EXPIRED 12 -#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) #define I_WB_SWITCH (1 << 13) #define I_OVL_INUSE (1 << 14) #define I_CREATING (1 << 15) +#define I_DONTCACHE (1 << 16) +#define I_SYNC_QUEUED (1 << 17) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) @@ -2261,18 +2241,9 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) -#ifdef CONFIG_BLOCK extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); -#else -static inline struct dentry *mount_bdev(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data, - int (*fill_super)(struct super_block *, void *, int)) -{ - return ERR_PTR(-ENODEV); -} -#endif extern struct dentry *mount_single(struct file_system_type *fs_type, int flags, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2281,14 +2252,7 @@ extern struct dentry *mount_nodev(struct file_system_type *fs_type, int (*fill_super)(struct super_block *, void *, int)); extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); void generic_shutdown_super(struct super_block *sb); -#ifdef CONFIG_BLOCK void kill_block_super(struct super_block *sb); -#else -static inline void kill_block_super(struct super_block *sb) -{ - BUG(); -} -#endif void kill_anon_super(struct super_block *sb); void kill_litter_super(struct super_block *sb); void deactivate_super(struct super_block *sb); @@ -2578,97 +2542,16 @@ extern struct kmem_cache *names_cachep; #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) #define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) -#ifdef CONFIG_BLOCK -extern int register_blkdev(unsigned int, const char *); -extern void unregister_blkdev(unsigned int, const char *); -extern void bdev_unhash_inode(dev_t dev); -extern struct block_device *bdget(dev_t); -extern struct block_device *bdgrab(struct block_device *bdev); -extern void bd_set_size(struct block_device *, loff_t size); -extern void bd_forget(struct inode *inode); -extern void bdput(struct block_device *); -extern void invalidate_bdev(struct block_device *); -extern void iterate_bdevs(void (*)(struct block_device *, void *), void *); -extern int sync_blockdev(struct block_device *bdev); -extern void kill_bdev(struct block_device *); -extern struct super_block *freeze_bdev(struct block_device *); -extern void emergency_thaw_all(void); -extern void emergency_thaw_bdev(struct super_block *sb); -extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); -extern int fsync_bdev(struct block_device *); - extern struct super_block *blockdev_superblock; - static inline bool sb_is_blkdev_sb(struct super_block *sb) { - return sb == blockdev_superblock; -} -#else -static inline void bd_forget(struct inode *inode) {} -static inline int sync_blockdev(struct block_device *bdev) { return 0; } -static inline void kill_bdev(struct block_device *bdev) {} -static inline void invalidate_bdev(struct block_device *bdev) {} - -static inline struct super_block *freeze_bdev(struct block_device *sb) -{ - return NULL; -} - -static inline int thaw_bdev(struct block_device *bdev, struct 
super_block *sb) -{ - return 0; + return IS_ENABLED(CONFIG_BLOCK) && sb == blockdev_superblock; } -static inline int emergency_thaw_bdev(struct super_block *sb) -{ - return 0; -} - -static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) -{ -} - -static inline bool sb_is_blkdev_sb(struct super_block *sb) -{ - return false; -} -#endif +void emergency_thaw_all(void); extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; -#ifdef CONFIG_BLOCK -extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); -extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); -extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); -extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); -extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, - void *holder); -extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, - void *holder); -extern struct block_device *bd_start_claiming(struct block_device *bdev, - void *holder); -extern void bd_finish_claiming(struct block_device *bdev, - struct block_device *whole, void *holder); -extern void bd_abort_claiming(struct block_device *bdev, - struct block_device *whole, void *holder); -extern void blkdev_put(struct block_device *bdev, fmode_t mode); - -#ifdef CONFIG_SYSFS -extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); -extern void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk); -#else -static inline int bd_link_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ - return 0; -} -static inline void bd_unlink_disk_holder(struct block_device *bdev, - struct gendisk *disk) -{ -} -#endif -#endif /* fs/char_dev.c */ #define CHRDEV_MAJOR_MAX 512 @@ -2699,32 +2582,12 @@ static inline void unregister_chrdev(unsigned int major, const char *name) __unregister_chrdev(major, 0, 256, name); } -/* fs/block_dev.c */ -#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */ -#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */ - -#ifdef CONFIG_BLOCK -#define BLKDEV_MAJOR_MAX 512 -extern const char *bdevname(struct block_device *bdev, char *buffer); -extern struct block_device *lookup_bdev(const char *); -extern void blkdev_show(struct seq_file *,off_t); - -#else -#define BLKDEV_MAJOR_MAX 0 -#endif - extern void init_special_inode(struct inode *, umode_t, dev_t); /* Invalid inode operations -- fs/bad_inode.c */ extern void make_bad_inode(struct inode *); extern bool is_bad_inode(struct inode *); -#ifdef CONFIG_BLOCK -extern int revalidate_disk(struct gendisk *); -extern int check_disk_change(struct block_device *); -extern int __invalidate_device(struct block_device *, bool); -extern int invalidate_partition(struct gendisk *, int); -#endif unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -2800,7 +2663,7 @@ static inline void filemap_set_wb_err(struct address_space *mapping, int err) } /** - * filemap_check_wb_error - has an error occurred since the mark was sampled? + * filemap_check_wb_err - has an error occurred since the mark was sampled? 
* @mapping: mapping to check for writeback errors * @since: previously-sampled errseq_t * @@ -2827,6 +2690,18 @@ static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) return errseq_sample(&mapping->wb_err); } +/** + * file_sample_sb_err - sample the current errseq_t to test for later errors + * @file: file pointer to be sampled + * + * Grab the most current superblock-level errseq_t value for the given + * struct file. + */ +static inline errseq_t file_sample_sb_err(struct file *file) +{ + return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err); +} + static inline int filemap_nr_thps(struct address_space *mapping) { #ifdef CONFIG_READ_ONLY_THP_FOR_FS @@ -3023,6 +2898,7 @@ extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, lo extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t, enum kernel_read_file_id); extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *); +ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos); extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *); extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *); extern struct file * open_exec(const char *); @@ -3049,8 +2925,10 @@ extern int inode_needs_sync(struct inode *inode); extern int generic_delete_inode(struct inode *inode); static inline int generic_drop_inode(struct inode *inode) { - return !inode->i_nlink || inode_unhashed(inode); + return !inode->i_nlink || inode_unhashed(inode) || + (inode->i_state & I_DONTCACHE); } +extern void d_mark_dontcache(struct inode *inode); extern struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval, int (*test)(struct inode *, void *), @@ -3070,6 +2948,9 @@ extern struct inode *find_inode_nowait(struct super_block *, int (*match)(struct inode *, unsigned long, void *), void *data); +extern struct inode *find_inode_rcu(struct super_block *, unsigned long, + int (*)(struct inode *, void *), void *); +extern struct inode *find_inode_by_ino_rcu(struct super_block *, unsigned long); extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); extern int insert_inode_locked(struct inode *); #ifdef CONFIG_DEBUG_LOCK_ALLOC @@ -3082,6 +2963,21 @@ extern void discard_new_inode(struct inode *); extern unsigned int get_next_ino(void); extern void evict_inodes(struct super_block *sb); +/* + * Userspace may rely on the inode number being non-zero. For example, glibc + * simply ignores files with zero i_ino in unlink() and other places. + * + * As an additional complication, if userspace was compiled with + * _FILE_OFFSET_BITS=32 on a 64-bit kernel we'll only end up reading out the + * lower 32 bits, so we need to check that those aren't zero explicitly. With + * _FILE_OFFSET_BITS=64, this may cause some harmless false-negatives, but + * better safe than sorry.
+ */ +static inline bool is_zero_ino(ino_t ino) +{ + return (u32)ino == 0; +} + extern void __iget(struct inode * inode); extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); @@ -3107,10 +3003,6 @@ static inline void remove_inode_hash(struct inode *inode) extern void inode_sb_list_add(struct inode *inode); -#ifdef CONFIG_BLOCK -extern int bdev_read_only(struct block_device *); -#endif -extern int set_blocksize(struct block_device *, int); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); @@ -3124,6 +3016,8 @@ extern int generic_file_rw_checks(struct file *file_in, struct file *file_out); extern int generic_copy_file_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t *count, unsigned int flags); +extern ssize_t generic_file_buffered_read(struct kiocb *iocb, + struct iov_iter *to, ssize_t already_read); extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *); extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); @@ -3299,14 +3193,6 @@ static inline int vfs_fstat(int fd, struct kstat *stat) extern const char *vfs_get_link(struct dentry *, struct delayed_call *); extern int vfs_readlink(struct dentry *, char __user *, int); -extern int __generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, - loff_t start, loff_t len, - get_block_t *get_block); -extern int generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, u64 start, - u64 len, get_block_t *get_block); - extern struct file_system_type *get_filesystem(struct file_system_type *fs); extern void put_filesystem(struct file_system_type *fs); extern struct file_system_type *get_fs_type(const char *name); @@ -3394,11 +3280,6 @@ extern void setattr_copy(struct inode *inode, const struct iattr *attr); extern int file_update_time(struct file *file); -static inline bool io_is_direct(struct file *filp) -{ - return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); -} - static inline bool vma_is_dax(const struct vm_area_struct *vma) { return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); @@ -3423,7 +3304,7 @@ static inline int iocb_flags(struct file *file) int res = 0; if (file->f_flags & O_APPEND) res |= IOCB_APPEND; - if (io_is_direct(file)) + if (file->f_flags & O_DIRECT) res |= IOCB_DIRECT; if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) res |= IOCB_DSYNC; @@ -3434,22 +3315,28 @@ static inline int iocb_flags(struct file *file) static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) { + int kiocb_flags = 0; + + if (!flags) + return 0; if (unlikely(flags & ~RWF_SUPPORTED)) return -EOPNOTSUPP; if (flags & RWF_NOWAIT) { if (!(ki->ki_filp->f_mode & FMODE_NOWAIT)) return -EOPNOTSUPP; - ki->ki_flags |= IOCB_NOWAIT; + kiocb_flags |= IOCB_NOWAIT | IOCB_NOIO; } if (flags & RWF_HIPRI) - ki->ki_flags |= IOCB_HIPRI; + kiocb_flags |= IOCB_HIPRI; if (flags & RWF_DSYNC) - ki->ki_flags |= IOCB_DSYNC; + kiocb_flags |= IOCB_DSYNC; if (flags & RWF_SYNC) - ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); + kiocb_flags |= (IOCB_DSYNC | IOCB_SYNC); if (flags & RWF_APPEND) - ki->ki_flags |= IOCB_APPEND; + kiocb_flags |= IOCB_APPEND; + + ki->ki_flags |= kiocb_flags; return 0; } @@ -3536,11 +3423,11 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf, struct ctl_table; int proc_nr_files(struct ctl_table *table, int write, - 
void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int proc_nr_dentry(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int proc_nr_inodes(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); int __init get_filesystem_list(char *buf); #define __FMODE_EXEC ((__force int) FMODE_EXEC) diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index e6c3e4c61dad..37e1e8f7f08d 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -85,7 +85,7 @@ struct p_log { * Superblock creation fills in ->root whereas reconfiguration begins with this * already set. * - * See Documentation/filesystems/mount_api.txt + * See Documentation/filesystems/mount_api.rst */ struct fs_context { const struct fs_context_operations *ops; @@ -109,6 +109,7 @@ struct fs_context { enum fs_context_phase phase:8; /* The phase the context is in */ bool need_free:1; /* Need to call ops->free() */ bool global:1; /* Goes into &init_user_ns */ + bool oldapi:1; /* Coming from mount(2) */ }; struct fs_context_operations { diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 2eab6d5f6736..aab0ffc6bac6 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -120,7 +120,7 @@ static inline bool fs_validate_description(const char *name, #define fsparam_u32oct(NAME, OPT) \ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8) #define fsparam_u32hex(NAME, OPT) \ - __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *16)) + __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16) #define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL) #define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL) #define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array) diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index cf1015abfbf2..783b48dedb72 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h @@ -9,7 +9,7 @@ struct fs_struct { int users; spinlock_t lock; - seqcount_t seq; + seqcount_spinlock_t seq; int umask; int in_exec; struct path root, pwd; diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index d5ba431b5d63..3f0b19dcfae7 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -6,7 +6,7 @@ * * NOTE!!! See: * - * Documentation/filesystems/caching/backend-api.txt + * Documentation/filesystems/caching/backend-api.rst * * for a description of the cache backend interface declared here. */ @@ -46,7 +46,7 @@ struct fscache_cache_tag { unsigned long flags; #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ atomic_t usage; - char name[0]; /* tag name */ + char name[]; /* tag name */ }; /* @@ -454,7 +454,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object) * Set the maximum size an object is permitted to reach, implying the highest * byte that may be written. Intended to be called by the attr_changed() op. * - * See Documentation/filesystems/caching/backend-api.txt for a complete + * See Documentation/filesystems/caching/backend-api.rst for a complete * description. */ static inline diff --git a/include/linux/fscache.h b/include/linux/fscache.h index ad044c0cb1f3..a1c928fe98e7 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -6,7 +6,7 @@ * * NOTE!!! 
See: * - * Documentation/filesystems/caching/netfs-api.txt + * Documentation/filesystems/caching/netfs-api.rst * * for a description of the network filesystem interface declared here. */ @@ -233,7 +233,7 @@ extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_ * * Register a filesystem as desiring caching services if they're available. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -253,7 +253,7 @@ int fscache_register_netfs(struct fscache_netfs *netfs) * Indicate that a filesystem no longer desires caching services for the * moment. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -270,7 +270,7 @@ void fscache_unregister_netfs(struct fscache_netfs *netfs) * Acquire a specific cache referral tag that can be used to select a specific * cache in which to cache an index. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -288,7 +288,7 @@ struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) * * Release a reference to a cache referral tag previously looked up. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -315,7 +315,7 @@ void fscache_release_cache_tag(struct fscache_cache_tag *tag) * that can be used to locate files. This is done by requesting a cookie for * each index in the path to the file. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -351,7 +351,7 @@ struct fscache_cookie *fscache_acquire_cookie( * provided to update the auxiliary data in the cache before the object is * disconnected. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -394,7 +394,7 @@ int fscache_check_consistency(struct fscache_cookie *cookie, * cookie. The auxiliary data on the cookie will be updated first if @aux_data * is set. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -410,7 +410,7 @@ void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data) * * Permit data-storage cache objects to be pinned in the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -425,7 +425,7 @@ int fscache_pin_cookie(struct fscache_cookie *cookie) * * Permit data-storage cache objects to be unpinned from the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -441,7 +441,7 @@ void fscache_unpin_cookie(struct fscache_cookie *cookie) * changed. This includes the data size. These attributes will be obtained * through the get_attr() cookie definition op. 
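As a usage sketch of the attribute-change hook just described: fscache_attr_changed() is the API documented above, while the inode-to-cookie accessor and the function name here are hypothetical.

/* Sketch: tell the cache that the object shrank after a truncate. */
static void example_netfs_truncate(struct inode *inode, loff_t newsize)
{
	struct fscache_cookie *cookie = example_cookie_of(inode); /* hypothetical */

	truncate_setsize(inode, newsize);
	if (fscache_attr_changed(cookie) < 0)
		pr_debug("example: cache attribute update failed\n");
}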
* - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -463,7 +463,7 @@ int fscache_attr_changed(struct fscache_cookie *cookie) * * This can be called with spinlocks held. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -479,7 +479,7 @@ void fscache_invalidate(struct fscache_cookie *cookie) * * Wait for the invalidation of an object to complete. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -498,7 +498,7 @@ void fscache_wait_on_invalidate(struct fscache_cookie *cookie) * cookie so that a write to that object within the space can always be * honoured. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -533,7 +533,7 @@ int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) * Else, if the page is unbacked, -ENODATA is returned and a block may have * been allocated in the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -582,7 +582,7 @@ int fscache_read_or_alloc_page(struct fscache_cookie *cookie, * regard to different pages, the return values are prioritised in that order. * Any pages submitted for reading are removed from the pages list. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -617,7 +617,7 @@ int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, * Else, a block will be allocated if one wasn't already, and 0 will be * returned * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -667,7 +667,7 @@ void fscache_readpages_cancel(struct fscache_cookie *cookie, * be cleared at the completion of the write to indicate the success or failure * of the operation. Note that the completion may happen before the return. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -693,7 +693,7 @@ int fscache_write_page(struct fscache_cookie *cookie, * Note that this cannot cancel any outstanding I/O operations between this * page and the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -711,7 +711,7 @@ void fscache_uncache_page(struct fscache_cookie *cookie, * * Ask the cache if a page is being written to the cache. * - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline @@ -731,7 +731,7 @@ bool fscache_check_page_write(struct fscache_cookie *cookie, * Ask the cache to wake us up when a page is no longer being written to the * cache. 
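The page-write tracking calls documented here are typically paired: the non-blocking fscache_check_page_write() gates a call to its sleeping counterpart fscache_wait_on_page_write(), which this header also provides. A minimal sketch:

/* Sketch: block until the cache has finished storing @page, if it is. */
static void example_wait_for_store(struct fscache_cookie *cookie,
				   struct page *page)
{
	if (fscache_check_page_write(cookie, page))
		fscache_wait_on_page_write(cookie, page);
}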
* - * See Documentation/filesystems/caching/netfs-api.txt for a complete + * See Documentation/filesystems/caching/netfs-api.rst for a complete * description. */ static inline diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index e3c2d2a15525..991ff8575d0e 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -15,12 +15,15 @@ #include <linux/fs.h> #include <linux/mm.h> +#include <linux/parser.h> #include <linux/slab.h> #include <uapi/linux/fscrypt.h> #define FS_CRYPTO_BLOCK_SIZE 16 +union fscrypt_context; struct fscrypt_info; +struct seq_file; struct fscrypt_str { unsigned char *name; @@ -56,25 +59,36 @@ struct fscrypt_name { struct fscrypt_operations { unsigned int flags; const char *key_prefix; - int (*get_context)(struct inode *, void *, size_t); - int (*set_context)(struct inode *, const void *, size_t, void *); - bool (*dummy_context)(struct inode *); - bool (*empty_dir)(struct inode *); + int (*get_context)(struct inode *inode, void *ctx, size_t len); + int (*set_context)(struct inode *inode, const void *ctx, size_t len, + void *fs_data); + const union fscrypt_context *(*get_dummy_context)( + struct super_block *sb); + bool (*empty_dir)(struct inode *inode); unsigned int max_namelen; bool (*has_stable_inodes)(struct super_block *sb); void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); + int (*get_num_devices)(struct super_block *sb); + void (*get_devices)(struct super_block *sb, + struct request_queue **devs); }; -static inline bool fscrypt_has_encryption_key(const struct inode *inode) +static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) { - /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */ - return READ_ONCE(inode->i_crypt_info) != NULL; + /* + * Pairs with the cmpxchg_release() in fscrypt_get_encryption_info(). + * I.e., another task may publish ->i_crypt_info concurrently, executing + * a RELEASE barrier. We need to use smp_load_acquire() here to safely + * ACQUIRE the memory the other task published. + */ + return smp_load_acquire(&inode->i_crypt_info); } /** * fscrypt_needs_contents_encryption() - check whether an inode needs * contents encryption + * @inode: the inode to check * * Return: %true iff the inode is an encrypted regular file and the kernel was * built with fscrypt support. 
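The acquire/release pairing called out in fscrypt_get_info()'s comment below is the standard pointer-publication pattern. Modeled outside fscrypt for illustration, with every name except the barriers and ->i_crypt_info hypothetical:

/* Sketch of the publish side that smp_load_acquire() pairs with. */
static int example_publish_info(struct inode *inode, struct fscrypt_info *ci)
{
	/*
	 * cmpxchg_release() orders all initialisation of *ci before the
	 * pointer becomes visible; a reader doing
	 * smp_load_acquire(&inode->i_crypt_info) then sees it fully built.
	 */
	if (cmpxchg_release(&inode->i_crypt_info, NULL, ci) != NULL)
		return -EEXIST;	/* another task won the race */
	return 0;
}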
@@ -87,10 +101,12 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode); } -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +static inline const union fscrypt_context * +fscrypt_get_dummy_context(struct super_block *sb) { - return inode->i_sb->s_cop->dummy_context && - inode->i_sb->s_cop->dummy_context(inode); + if (!sb->s_cop->get_dummy_context) + return NULL; + return sb->s_cop->get_dummy_context(sb); } /* @@ -106,22 +122,21 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry) } /* crypto.c */ -extern void fscrypt_enqueue_decrypt_work(struct work_struct *); - -extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, - unsigned int len, - unsigned int offs, - gfp_t gfp_flags); -extern int fscrypt_encrypt_block_inplace(const struct inode *inode, - struct page *page, unsigned int len, - unsigned int offs, u64 lblk_num, - gfp_t gfp_flags); - -extern int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, - unsigned int offs); -extern int fscrypt_decrypt_block_inplace(const struct inode *inode, - struct page *page, unsigned int len, - unsigned int offs, u64 lblk_num); +void fscrypt_enqueue_decrypt_work(struct work_struct *); + +struct page *fscrypt_encrypt_pagecache_blocks(struct page *page, + unsigned int len, + unsigned int offs, + gfp_t gfp_flags); +int fscrypt_encrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num, gfp_t gfp_flags); + +int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len, + unsigned int offs); +int fscrypt_decrypt_block_inplace(const struct inode *inode, struct page *page, + unsigned int len, unsigned int offs, + u64 lblk_num); static inline bool fscrypt_is_bounce_page(struct page *page) { @@ -133,78 +148,90 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) return (struct page *)page_private(bounce_page); } -extern void fscrypt_free_bounce_page(struct page *bounce_page); +void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ -extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); -extern int fscrypt_ioctl_get_policy(struct file *, void __user *); -extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); -extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); -extern int fscrypt_has_permitted_context(struct inode *, struct inode *); -extern int fscrypt_inherit_context(struct inode *, struct inode *, - void *, bool); +int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg); +int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg); +int fscrypt_has_permitted_context(struct inode *parent, struct inode *child); +int fscrypt_inherit_context(struct inode *parent, struct inode *child, + void *fs_data, bool preload); + +struct fscrypt_dummy_context { + const union fscrypt_context *ctx; +}; + +int fscrypt_set_test_dummy_encryption(struct super_block *sb, + const substring_t *arg, + struct fscrypt_dummy_context *dummy_ctx); +void fscrypt_show_test_dummy_encryption(struct seq_file *seq, char sep, + struct super_block *sb); +static inline void +fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx) +{ + kfree(dummy_ctx->ctx); + dummy_ctx->ctx = NULL; +} + /* keyring.c */ -extern void 
fscrypt_sb_free(struct super_block *sb); -extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); -extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); -extern int fscrypt_ioctl_remove_key_all_users(struct file *filp, - void __user *arg); -extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); +void fscrypt_sb_free(struct super_block *sb); +int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); +int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); +int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *arg); +int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); /* keysetup.c */ -extern int fscrypt_get_encryption_info(struct inode *); -extern void fscrypt_put_encryption_info(struct inode *); -extern void fscrypt_free_inode(struct inode *); -extern int fscrypt_drop_inode(struct inode *inode); +int fscrypt_get_encryption_info(struct inode *inode); +void fscrypt_put_encryption_info(struct inode *inode); +void fscrypt_free_inode(struct inode *inode); +int fscrypt_drop_inode(struct inode *inode); /* fname.c */ -extern int fscrypt_setup_filename(struct inode *, const struct qstr *, - int lookup, struct fscrypt_name *); +int fscrypt_setup_filename(struct inode *inode, const struct qstr *iname, + int lookup, struct fscrypt_name *fname); static inline void fscrypt_free_filename(struct fscrypt_name *fname) { kfree(fname->crypto_buf.name); } -extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, - struct fscrypt_str *); -extern void fscrypt_fname_free_buffer(struct fscrypt_str *); -extern int fscrypt_fname_disk_to_usr(const struct inode *inode, - u32 hash, u32 minor_hash, - const struct fscrypt_str *iname, - struct fscrypt_str *oname); -extern bool fscrypt_match_name(const struct fscrypt_name *fname, - const u8 *de_name, u32 de_name_len); -extern u64 fscrypt_fname_siphash(const struct inode *dir, - const struct qstr *name); +int fscrypt_fname_alloc_buffer(const struct inode *inode, u32 max_encrypted_len, + struct fscrypt_str *crypto_str); +void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str); +int fscrypt_fname_disk_to_usr(const struct inode *inode, + u32 hash, u32 minor_hash, + const struct fscrypt_str *iname, + struct fscrypt_str *oname); +bool fscrypt_match_name(const struct fscrypt_name *fname, + const u8 *de_name, u32 de_name_len); +u64 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name); /* bio.c */ -extern void fscrypt_decrypt_bio(struct bio *); -extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, - unsigned int); +void fscrypt_decrypt_bio(struct bio *bio); +int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, + sector_t pblk, unsigned int len); /* hooks.c */ -extern int fscrypt_file_open(struct inode *inode, struct file *filp); -extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, - struct dentry *dentry); -extern int __fscrypt_prepare_rename(struct inode *old_dir, - struct dentry *old_dentry, - struct inode *new_dir, - struct dentry *new_dentry, - unsigned int flags); -extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, - struct fscrypt_name *fname); -extern int fscrypt_prepare_setflags(struct inode *inode, - unsigned int oldflags, unsigned int flags); -extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, - unsigned int max_len, - struct fscrypt_str *disk_link); -extern int __fscrypt_encrypt_symlink(struct inode *inode, const char 
*target, - unsigned int len, - struct fscrypt_str *disk_link); -extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, - unsigned int max_size, - struct delayed_call *done); +int fscrypt_file_open(struct inode *inode, struct file *filp); +int __fscrypt_prepare_link(struct inode *inode, struct inode *dir, + struct dentry *dentry); +int __fscrypt_prepare_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags); +int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry, + struct fscrypt_name *fname); +int fscrypt_prepare_setflags(struct inode *inode, + unsigned int oldflags, unsigned int flags); +int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, + unsigned int max_len, + struct fscrypt_str *disk_link); +int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, + unsigned int len, struct fscrypt_str *disk_link); +const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, + unsigned int max_size, + struct delayed_call *done); static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { @@ -212,9 +239,9 @@ static inline void fscrypt_set_ops(struct super_block *sb, } #else /* !CONFIG_FS_ENCRYPTION */ -static inline bool fscrypt_has_encryption_key(const struct inode *inode) +static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode) { - return false; + return NULL; } static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) @@ -222,9 +249,10 @@ static inline bool fscrypt_needs_contents_encryption(const struct inode *inode) return false; } -static inline bool fscrypt_dummy_context_enabled(struct inode *inode) +static inline const union fscrypt_context * +fscrypt_get_dummy_context(struct super_block *sb) { - return false; + return NULL; } static inline void fscrypt_handle_d_move(struct dentry *dentry) @@ -319,6 +347,20 @@ static inline int fscrypt_inherit_context(struct inode *parent, return -EOPNOTSUPP; } +struct fscrypt_dummy_context { +}; + +static inline void fscrypt_show_test_dummy_encryption(struct seq_file *seq, + char sep, + struct super_block *sb) +{ +} + +static inline void +fscrypt_free_dummy_context(struct fscrypt_dummy_context *dummy_ctx) +{ +} + /* keyring.c */ static inline void fscrypt_sb_free(struct super_block *sb) { @@ -503,8 +545,101 @@ static inline void fscrypt_set_ops(struct super_block *sb, #endif /* !CONFIG_FS_ENCRYPTION */ +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + +bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode); + +void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, u64 first_lblk, + gfp_t gfp_mask); + +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask); + +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk); + +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh); + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return false; +} + +static inline void fscrypt_set_bio_crypt_ctx(struct bio *bio, + const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) { } + +static inline void fscrypt_set_bio_crypt_ctx_bh( + struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) { } + +static inline bool fscrypt_mergeable_bio(struct bio *bio, + const struct 
inode *inode, + u64 next_lblk) +{ + return true; +} + +static inline bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + return true; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +/** + * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline + * encryption + * @inode: an inode. If encrypted, its key must be set up. + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the block layer via blk-crypto rather + * than in the filesystem layer. + */ +static inline bool fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return fscrypt_needs_contents_encryption(inode) && + __fscrypt_inode_uses_inline_crypto(inode); +} + +/** + * fscrypt_inode_uses_fs_layer_crypto() - test whether an inode uses fs-layer + * encryption + * @inode: an inode. If encrypted, its key must be set up. + * + * Return: true if the inode requires file contents encryption and if the + * encryption should be done in the filesystem layer rather than in the + * block layer via blk-crypto. + */ +static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode) +{ + return fscrypt_needs_contents_encryption(inode) && + !__fscrypt_inode_uses_inline_crypto(inode); +} + +/** + * fscrypt_has_encryption_key() - check whether an inode has had its key set up + * @inode: the inode to check + * + * Return: %true if the inode has had its encryption key set up, else %false. + * + * Usually this should be preceded by fscrypt_get_encryption_info() to try to + * set up the key first. + */ +static inline bool fscrypt_has_encryption_key(const struct inode *inode) +{ + return fscrypt_get_info(inode) != NULL; +} + /** - * fscrypt_require_key - require an inode's encryption key + * fscrypt_require_key() - require an inode's encryption key * @inode: the inode we need the key for * * If the inode is encrypted, set up its encryption key if not already done. 
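For an encrypted regular file with its key set up, exactly one of the two helpers above returns true, so a filesystem's read path can branch on them directly. A minimal sketch of such a branch (the function name is hypothetical; the fscrypt calls are the ones declared in this header):

static void example_readpage_done(struct inode *inode, struct page *page)
{
	/* fs-layer crypto: the filesystem decrypts the pagecache blocks */
	if (fscrypt_inode_uses_fs_layer_crypto(inode) &&
	    fscrypt_decrypt_pagecache_blocks(page, PAGE_SIZE, 0) != 0)
		SetPageError(page);
	/* with fscrypt_inode_uses_inline_crypto() the data was already
	 * decrypted by blk-crypto below the filesystem; nothing to do */
}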
@@ -530,7 +665,8 @@ static inline int fscrypt_require_key(struct inode *inode) } /** - * fscrypt_prepare_link - prepare to link an inode into a possibly-encrypted directory + * fscrypt_prepare_link() - prepare to link an inode into a possibly-encrypted + * directory * @old_dentry: an existing dentry for the inode being linked * @dir: the target directory * @dentry: negative dentry for the target filename @@ -557,7 +693,8 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry, } /** - * fscrypt_prepare_rename - prepare for a rename between possibly-encrypted directories + * fscrypt_prepare_rename() - prepare for a rename between possibly-encrypted + * directories * @old_dir: source directory * @old_dentry: dentry for source file * @new_dir: target directory @@ -590,7 +727,8 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir, } /** - * fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory + * fscrypt_prepare_lookup() - prepare to lookup a name in a possibly-encrypted + * directory * @dir: directory being searched * @dentry: filename being looked up * @fname: (output) the name to use to search the on-disk directory @@ -623,7 +761,8 @@ static inline int fscrypt_prepare_lookup(struct inode *dir, } /** - * fscrypt_prepare_setattr - prepare to change a possibly-encrypted inode's attributes + * fscrypt_prepare_setattr() - prepare to change a possibly-encrypted inode's + * attributes * @dentry: dentry through which the inode is being changed * @attr: attributes to change * @@ -648,7 +787,7 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry, } /** - * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink + * fscrypt_prepare_symlink() - prepare to create a possibly-encrypted symlink * @dir: directory in which the symlink is being created * @target: plaintext symlink target * @len: length of @target excluding null terminator @@ -676,7 +815,7 @@ static inline int fscrypt_prepare_symlink(struct inode *dir, unsigned int max_len, struct fscrypt_str *disk_link) { - if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) + if (IS_ENCRYPTED(dir) || fscrypt_get_dummy_context(dir->i_sb) != NULL) return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); disk_link->name = (unsigned char *)target; @@ -687,7 +826,7 @@ static inline int fscrypt_prepare_symlink(struct inode *dir, } /** - * fscrypt_encrypt_symlink - encrypt the symlink target if needed + * fscrypt_encrypt_symlink() - encrypt the symlink target if needed * @inode: symlink inode * @target: plaintext symlink target * @len: length of @target excluding null terminator diff --git a/include/linux/fsl/bestcomm/bestcomm.h b/include/linux/fsl/bestcomm/bestcomm.h index a0e2e6b19b57..154e541ce57e 100644 --- a/include/linux/fsl/bestcomm/bestcomm.h +++ b/include/linux/fsl/bestcomm/bestcomm.h @@ -27,7 +27,7 @@ */ struct bcom_bd { u32 status; - u32 data[0]; /* variable payload size */ + u32 data[]; /* variable payload size */ }; /* ======================================================================== */ diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h index 4875dd38af7e..2d9203314865 100644 --- a/include/linux/fsl/enetc_mdio.h +++ b/include/linux/fsl/enetc_mdio.h @@ -15,6 +15,7 @@ #define ENETC_PCS_IF_MODE_SGMII_EN BIT(0) #define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1) #define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2)) +#define ENETC_PCS_IF_MODE_DUPLEX_HALF BIT(3) /* Not a mistake, the SerDes PLL needs to be set at 
3.125 GHz by Reset * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 2b5f8366dbe1..a428c61ead6e 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -339,7 +339,7 @@ struct fsl_mc_io { * This field is only meaningful if the * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set */ - spinlock_t spinlock; /* serializes mc_send_command() */ + raw_spinlock_t spinlock; /* serializes mc_send_command() */ }; }; @@ -433,6 +433,11 @@ extern struct device_type fsl_mc_bus_dpmcp_type; extern struct device_type fsl_mc_bus_dpmac_type; extern struct device_type fsl_mc_bus_dprtc_type; extern struct device_type fsl_mc_bus_dpseci_type; +extern struct device_type fsl_mc_bus_dpdmux_type; +extern struct device_type fsl_mc_bus_dpdcei_type; +extern struct device_type fsl_mc_bus_dpaiop_type; +extern struct device_type fsl_mc_bus_dpci_type; +extern struct device_type fsl_mc_bus_dpdmai_type; static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev) { @@ -454,6 +459,11 @@ static inline bool is_fsl_mc_bus_dpsw(const struct fsl_mc_device *mc_dev) return mc_dev->dev.type == &fsl_mc_bus_dpsw_type; } +static inline bool is_fsl_mc_bus_dpdmux(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpdmux_type; +} + static inline bool is_fsl_mc_bus_dpbp(const struct fsl_mc_device *mc_dev) { return mc_dev->dev.type == &fsl_mc_bus_dpbp_type; @@ -484,6 +494,26 @@ static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev) return mc_dev->dev.type == &fsl_mc_bus_dpseci_type; } +static inline bool is_fsl_mc_bus_dpdcei(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpdcei_type; +} + +static inline bool is_fsl_mc_bus_dpaiop(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpaiop_type; +} + +static inline bool is_fsl_mc_bus_dpci(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpci_type; +} + +static inline bool is_fsl_mc_bus_dpdmai(const struct fsl_mc_device *mc_dev) +{ + return mc_dev->dev.type == &fsl_mc_bus_dpdmai_type; +} + /* * Data Path Buffer Pool (DPBP) API * Contains initialization APIs and runtime control APIs for DPBP diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index 75884563059f..884b8f8ca06d 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h @@ -135,7 +135,7 @@ struct ptp_qoriq_registers { #define DEFAULT_CKSEL 1 #define DEFAULT_TMR_PRSC 2 #define DEFAULT_FIPER1_PERIOD 1000000000 -#define DEFAULT_FIPER2_PERIOD 100000 +#define DEFAULT_FIPER2_PERIOD 1000000000 struct ptp_qoriq { void __iomem *base; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 5ab28f6c7d26..f8acddcf54fb 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -23,19 +23,14 @@ * have changed (i.e. renamed over). * * Unlike fsnotify_parent(), the event will be reported regardless of the - * FS_EVENT_ON_CHILD mask on the parent inode. + * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only + * the child is interested and not the parent. */ static inline void fsnotify_name(struct inode *dir, __u32 mask, struct inode *child, const struct qstr *name, u32 cookie) { - fsnotify(dir, mask, child, FSNOTIFY_EVENT_INODE, name, cookie); - /* - * Send another flavor of the event without child inode data and - * without the specific event type (e.g. FS_CREATE|FS_IS_DIR). 
- * The name is relative to the dir inode the event is reported to. - */ - fsnotify(dir, FS_DIR_MODIFY, dir, FSNOTIFY_EVENT_INODE, name, 0); + fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); } static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, @@ -44,38 +39,55 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); } -/* - * Simple wrappers to consolidate calls fsnotify_parent()/fsnotify() when - * an event is on a file/dentry. - */ -static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) +static inline void fsnotify_inode(struct inode *inode, __u32 mask) +{ + if (S_ISDIR(inode->i_mode)) + mask |= FS_ISDIR; + + fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0); +} + +/* Notify this dentry's parent about a child's events. */ +static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, + const void *data, int data_type) { struct inode *inode = d_inode(dentry); - if (S_ISDIR(inode->i_mode)) + if (S_ISDIR(inode->i_mode)) { mask |= FS_ISDIR; - fsnotify_parent(dentry, mask, inode, FSNOTIFY_EVENT_INODE); - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + /* sb/mount marks are not interested in name of directory */ + if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) + goto notify_child; + } + + /* disconnected dentry cannot notify parent */ + if (IS_ROOT(dentry)) + goto notify_child; + + return __fsnotify_parent(dentry, mask, data, data_type); + +notify_child: + return fsnotify(mask, data, data_type, NULL, NULL, inode, 0); +} + +/* + * Simple wrappers to consolidate calls to fsnotify_parent() when an event + * is on a file/dentry. + */ +static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) +{ + fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE); } static inline int fsnotify_file(struct file *file, __u32 mask) { const struct path *path = &file->f_path; - struct inode *inode = file_inode(file); - int ret; if (file->f_mode & FMODE_NONOTIFY) return 0; - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - ret = fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); - if (ret) - return ret; - - return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } /* Simple call site for access decisions */ @@ -108,12 +120,7 @@ static inline int fsnotify_perm(struct file *file, int mask) */ static inline void fsnotify_link_count(struct inode *inode) { - __u32 mask = FS_ATTRIB; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_ATTRIB); } /* @@ -128,7 +135,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, u32 fs_cookie = fsnotify_get_cookie(); __u32 old_dir_mask = FS_MOVED_FROM; __u32 new_dir_mask = FS_MOVED_TO; - __u32 mask = FS_MOVE_SELF; const struct qstr *new_name = &moved->d_name; if (old_dir == new_dir) @@ -137,7 +143,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, if (isdir) { old_dir_mask |= FS_ISDIR; new_dir_mask |= FS_ISDIR; - mask |= FS_ISDIR; } fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); @@ -145,9 +150,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, if (target) fsnotify_link_count(target); - - if (source) - fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0); + 
fsnotify_inode(source, FS_MOVE_SELF); audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); } @@ -172,12 +175,7 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) */ static inline void fsnotify_inoderemove(struct inode *inode) { - __u32 mask = FS_DELETE_SELF; - - if (S_ISDIR(inode->i_mode)) - mask |= FS_ISDIR; - - fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_DELETE_SELF); __fsnotify_inode_delete(inode); } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index f0c506405b54..f8529a3a2923 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -47,11 +47,13 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ -#define FS_DIR_MODIFY 0x00080000 /* Directory entry was modified */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ -/* This inode cares about things that happen to its children. Always set for - * dnotify and inotify. */ +/* + * Set on inode mark that cares about things that happen to its children. + * Always set for dnotify and inotify. + * Set on inode/sb/mount marks that care about parent/name info. + */ #define FS_EVENT_ON_CHILD 0x08000000 #define FS_DN_RENAME 0x10000000 /* file renamed */ @@ -67,21 +69,28 @@ * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event * when a directory entry inside a child subdir changes. */ -#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | \ - FS_DIR_MODIFY) +#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) /* - * This is a list of all events that may get sent to a parent based on fs event - * happening to inodes inside that directory. + * This is a list of all events that may get sent to a parent that is watching + * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory. */ #define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ FS_OPEN | FS_OPEN_EXEC) +/* + * This is a list of all events that may get sent with the parent inode as the + * @to_tell argument of fsnotify(). + * It may include events that can be sent to an inode/sb/mount mark, but cannot + * be sent to a parent watching children. + */ +#define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD) + /* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ FS_EVENTS_POSS_ON_CHILD | \ @@ -108,18 +117,41 @@ struct mem_cgroup; * these operations for each relevant group. 
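With FS_DIR_MODIFY removed, a directory-entry change is reported exactly once: a single fsnotify() call carries the child inode as event data and the directory plus name alongside it. A condensed sketch of what fsnotify_dirent()/fsnotify_name() above boil down to for a create (the wrapper name is illustrative):

static void example_report_create(struct inode *dir, struct dentry *dentry)
{
	/* one event: FS_CREATE, child as data, dir + name attached */
	fsnotify(FS_CREATE, d_inode(dentry), FSNOTIFY_EVENT_INODE,
		 dir, &dentry->d_name, NULL, 0);
}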
* * handle_event - main call for a group to handle an fs event + * @group: group to notify + * @mask: event type and flags + * @data: object that event happened on + * @data_type: type of object for fanotify_data_XXX() accessors + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to + * @file_name: optional file name associated with event + * @cookie: inotify rename cookie + * @iter_info: array of marks from this group that are interested in the event + * + * handle_inode_event - simple variant of handle_event() for groups that only + * have inode marks and don't have ignore mask + * @mark: mark to notify + * @mask: event type and flags + * @inode: inode that event happened on + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to. + * @file_name: optional file name associated with event + * * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. The group - * MUST be holding a reference on each mark and that reference must be - * dropped in this function. inotify uses this function to send - * userspace messages that marks have been removed. + * MUST be holding a reference on each mark and that reference must be + * dropped in this function. inotify uses this function to send + * userspace messages that marks have been removed. */ struct fsnotify_ops { - int (*handle_event)(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, + int (*handle_event)(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); + int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *file_name); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); void (*free_event)(struct fsnotify_event *event); @@ -220,12 +252,11 @@ enum fsnotify_data_type { FSNOTIFY_EVENT_INODE, }; -static inline const struct inode *fsnotify_data_inode(const void *data, - int data_type) +static inline struct inode *fsnotify_data_inode(const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_INODE: - return data; + return (struct inode *)data; case FSNOTIFY_EVENT_PATH: return d_inode(((const struct path *)data)->dentry); default: @@ -246,6 +277,7 @@ static inline const struct path *fsnotify_data_path(const void *data, enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, + FSNOTIFY_OBJ_TYPE_CHILD, FSNOTIFY_OBJ_TYPE_VFSMOUNT, FSNOTIFY_OBJ_TYPE_SB, FSNOTIFY_OBJ_TYPE_COUNT, @@ -253,6 +285,7 @@ enum fsnotify_obj_type { }; #define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE) +#define FSNOTIFY_OBJ_TYPE_CHILD_FL (1U << FSNOTIFY_OBJ_TYPE_CHILD) #define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT) #define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB) #define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1) @@ -297,6 +330,7 @@ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ } FSNOTIFY_ITER_FUNCS(inode, INODE) +FSNOTIFY_ITER_FUNCS(child, CHILD) FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) FSNOTIFY_ITER_FUNCS(sb, SB) @@ -377,15 +411,29 @@ struct fsnotify_mark { /* called from the vfs helpers 
*/ /* main fsnotify call to send events */ -extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, - int data_type, const struct qstr *name, u32 cookie); -extern int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, +extern int fsnotify(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *name, + struct inode *inode, u32 cookie); +extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); extern u32 fsnotify_get_cookie(void); +static inline __u32 fsnotify_parent_needed_mask(__u32 mask) +{ + /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */ + if (!(mask & FS_EVENT_ON_CHILD)) + return 0; + /* + * This object might be watched by a mark that cares about parent/name + * info, does it care about the specific set of events that can be + * reported with parent/name info? + */ + return mask & FS_EVENTS_POSS_TO_PARENT; +} + static inline int fsnotify_inode_watches_children(struct inode *inode) { /* FS_EVENT_ON_CHILD is set if the inode may care */ @@ -535,13 +583,14 @@ static inline void fsnotify_init_event(struct fsnotify_event *event, #else -static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, - int data_type, const struct qstr *name, u32 cookie) +static inline int fsnotify(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *name, + struct inode *inode, u32 cookie) { return 0; } -static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, +static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { return 0; diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h index ecc604e61d61..c1144a450392 100644 --- a/include/linux/fsverity.h +++ b/include/linux/fsverity.h @@ -115,29 +115,34 @@ struct fsverity_operations { static inline struct fsverity_info *fsverity_get_info(const struct inode *inode) { - /* pairs with the cmpxchg() in fsverity_set_info() */ - return READ_ONCE(inode->i_verity_info); + /* + * Pairs with the cmpxchg_release() in fsverity_set_info(). + * I.e., another task may publish ->i_verity_info concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. 
+ */ + return smp_load_acquire(&inode->i_verity_info); } /* enable.c */ -extern int fsverity_ioctl_enable(struct file *filp, const void __user *arg); +int fsverity_ioctl_enable(struct file *filp, const void __user *arg); /* measure.c */ -extern int fsverity_ioctl_measure(struct file *filp, void __user *arg); +int fsverity_ioctl_measure(struct file *filp, void __user *arg); /* open.c */ -extern int fsverity_file_open(struct inode *inode, struct file *filp); -extern int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); -extern void fsverity_cleanup_inode(struct inode *inode); +int fsverity_file_open(struct inode *inode, struct file *filp); +int fsverity_prepare_setattr(struct dentry *dentry, struct iattr *attr); +void fsverity_cleanup_inode(struct inode *inode); /* verify.c */ -extern bool fsverity_verify_page(struct page *page); -extern void fsverity_verify_bio(struct bio *bio); -extern void fsverity_enqueue_verify_work(struct work_struct *work); +bool fsverity_verify_page(struct page *page); +void fsverity_verify_bio(struct bio *bio); +void fsverity_enqueue_verify_work(struct work_struct *work); #else /* !CONFIG_FS_VERITY */ @@ -200,6 +205,7 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) /** * fsverity_active() - do reads from the inode need to go through fs-verity? + * @inode: inode to check * * This checks whether ->i_verity_info has been set. * @@ -207,6 +213,8 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work) * be verified or not. Don't use IS_VERITY() for this purpose; it's subject to * a race condition where the file is being read concurrently with * FS_IOC_ENABLE_VERITY completing. (S_VERITY is set before ->i_verity_info.) + * + * Return: true if reads need to go through fs-verity, otherwise false */ static inline bool fsverity_active(const struct inode *inode) { diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index ab4bd15cbcdb..e5c2d5cc6e6a 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -58,9 +58,6 @@ struct ftrace_direct_func; const char * ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, unsigned long *off, char **modname, char *sym); -int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, - char *type, char *name, - char *module_name, int *exported); #else static inline const char * ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, @@ -68,6 +65,13 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, { return NULL; } +#endif + +#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) +int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *name, + char *module_name, int *exported); +#else static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) @@ -76,14 +80,12 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val } #endif - #ifdef CONFIG_FUNCTION_TRACER extern int ftrace_enabled; extern int ftrace_enable_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); struct ftrace_ops; @@ -207,6 +209,7 @@ struct ftrace_ops { struct ftrace_ops_hash old_hash; unsigned long trampoline; unsigned long trampoline_size; + struct list_head list; #endif }; @@ -342,9 +345,8 @@ static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, extern int stack_tracer_enabled; 
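ftrace_enable_sysctl() above, and stack_trace_sysctl() and tracepoint_printk_sysctl() below, all drop __user from their buffer argument: proc handlers now receive kernel pointers. A handler under the new convention could look like this (illustrative name; proc_dointvec() is the real generic helper):

static int example_sysctl_handler(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	/* buffer is a kernel pointer now; no copy_from_user() dance */
	return proc_dointvec(table, write, buffer, lenp, ppos);
}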
-int stack_trace_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); /* DO NOT MODIFY THIS VARIABLE DIRECTLY! */ DECLARE_PER_CPU(int, disable_stack_tracer); @@ -1028,8 +1030,7 @@ extern void disable_trace_on_warning(void); extern int __disable_trace_on_warning; int tracepoint_printk_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); #else /* CONFIG_TRACING */ static inline void disable_trace_on_warning(void) { } diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index ccda97dc7f8b..0abd9a1d2852 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h @@ -2,15 +2,6 @@ #ifndef _LINUX_FTRACE_IRQ_H #define _LINUX_FTRACE_IRQ_H - -#ifdef CONFIG_FTRACE_NMI_ENTER -extern void arch_ftrace_nmi_enter(void); -extern void arch_ftrace_nmi_exit(void); -#else -static inline void arch_ftrace_nmi_enter(void) { } -static inline void arch_ftrace_nmi_exit(void) { } -#endif - #ifdef CONFIG_HWLAT_TRACER extern bool trace_hwlat_callback_enabled; extern void trace_hwlat_callback(bool enter); @@ -22,12 +13,10 @@ static inline void ftrace_nmi_enter(void) if (trace_hwlat_callback_enabled) trace_hwlat_callback(true); #endif - arch_ftrace_nmi_enter(); } static inline void ftrace_nmi_exit(void) { - arch_ftrace_nmi_exit(); #ifdef CONFIG_HWLAT_TRACER if (trace_hwlat_callback_enabled) trace_hwlat_callback(false); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index e0abafbb17f8..9506f8ec0974 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -171,5 +171,7 @@ struct fwnode_operations { #define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev) extern u32 fw_devlink_get_flags(void); +void fw_devlink_pause(void); +void fw_devlink_resume(void); #endif diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index 02393c0c98f9..bfd00320c7f3 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -44,7 +44,7 @@ struct genradix_root; struct __genradix { - struct genradix_root __rcu *root; + struct genradix_root *root; }; /* diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 9b3fffdf4011..4ab853461dff 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -19,13 +19,12 @@ #include <linux/blk_types.h> #include <asm/local.h> -#ifdef CONFIG_BLOCK - #define dev_to_disk(device) container_of((device), struct gendisk, part0.__dev) #define dev_to_part(device) container_of((device), struct hd_struct, __dev) #define disk_to_dev(disk) (&(disk)->part0.__dev) #define part_to_dev(part) (&((part)->__dev)) +extern const struct device_type disk_type; extern struct device_type part_type; extern struct class block_class; @@ -39,15 +38,6 @@ extern struct class block_class; #include <linux/fs.h> #include <linux/workqueue.h> -struct disk_stats { - u64 nsecs[NR_STAT_GROUPS]; - unsigned long sectors[NR_STAT_GROUPS]; - unsigned long ios[NR_STAT_GROUPS]; - unsigned long merges[NR_STAT_GROUPS]; - unsigned long io_ticks; - local_t in_flight[2]; -}; - #define PARTITION_META_INFO_VOLNAMELTH 64 /* * Enough for the string representation of any kind of UUID plus NULL. @@ -68,7 +58,13 @@ struct hd_struct { * can be non-atomic on 32bit machines with 64bit sector_t. 
*/ sector_t nr_sects; +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) seqcount_t nr_sects_seq; +#endif + unsigned long stamp; + struct disk_stats __percpu *dkstats; + struct percpu_ref ref; + sector_t alignment_offset; unsigned int discard_alignment; struct device __dev; @@ -78,13 +74,6 @@ struct hd_struct { #ifdef CONFIG_FAIL_MAKE_REQUEST int make_it_fail; #endif - unsigned long stamp; -#ifdef CONFIG_SMP - struct disk_stats __percpu *dkstats; -#else - struct disk_stats dkstats; -#endif - struct percpu_ref ref; struct rcu_work rcu_work; }; @@ -169,8 +158,6 @@ struct disk_part_tbl { struct disk_events; struct badblocks; -#if defined(CONFIG_BLK_DEV_INTEGRITY) - struct blk_integrity { const struct blk_integrity_profile *profile; unsigned char flags; @@ -179,8 +166,6 @@ struct blk_integrity { unsigned char tag_size; }; -#endif /* CONFIG_BLK_DEV_INTEGRITY */ - struct gendisk { /* major, first_minor and minors are input parameters only, * don't use directly. Use disk_devt() and disk_max_parts(). @@ -217,11 +202,20 @@ struct gendisk { #ifdef CONFIG_BLK_DEV_INTEGRITY struct kobject integrity_kobj; #endif /* CONFIG_BLK_DEV_INTEGRITY */ +#if IS_ENABLED(CONFIG_CDROM) + struct cdrom_device_info *cdi; +#endif int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; }; +#if IS_REACHABLE(CONFIG_CDROM) +#define disk_to_cdi(disk) ((disk)->cdi) +#else +#define disk_to_cdi(disk) NULL +#endif + static inline struct gendisk *part_to_disk(struct hd_struct *part) { if (likely(part)) { @@ -265,6 +259,13 @@ static inline void disk_put_part(struct hd_struct *part) put_device(part_to_dev(part)); } +static inline void hd_sects_seq_init(struct hd_struct *p) +{ +#if BITS_PER_LONG==32 && defined(CONFIG_SMP) + seqcount_init(&p->nr_sects_seq); +#endif +} + /* * Smarter partition iterator without context limits. 
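Since nr_sects_seq now exists only where a 64-bit sector_t cannot be read atomically (32-bit SMP builds), callers initialize it through the new hd_sects_seq_init() helper above rather than open-coding the #ifdef. A sketch of an allocation path using it (hypothetical snippet; only the helper comes from this header):

struct hd_struct *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p)
	hd_sects_seq_init(p);	/* compiles to nothing except on 32-bit SMP */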
*/ @@ -335,12 +336,9 @@ static inline void set_capacity(struct gendisk *disk, sector_t size) disk->part0.nr_sects = size; } -extern dev_t blk_lookup_devt(const char *name, int partno); - int bdev_disk_changed(struct block_device *bdev, bool invalidate); int blk_add_partitions(struct gendisk *disk, struct block_device *bdev); -int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev); -extern void printk_all_partitions(void); +int blk_drop_partitions(struct block_device *bdev); extern struct gendisk *__alloc_disk_node(int minors, int node_id); extern struct kobject *get_disk_and_module(struct gendisk *disk); @@ -371,10 +369,40 @@ extern void blk_unregister_region(dev_t devt, unsigned long range); #define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) -#else /* CONFIG_BLOCK */ +int register_blkdev(unsigned int major, const char *name); +void unregister_blkdev(unsigned int major, const char *name); -static inline void printk_all_partitions(void) { } +int revalidate_disk(struct gendisk *disk); +int check_disk_change(struct block_device *bdev); +int __invalidate_device(struct block_device *bdev, bool kill_dirty); +void bd_set_size(struct block_device *bdev, loff_t size); +/* for drivers/char/raw.c: */ +int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); +long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); + +#ifdef CONFIG_SYSFS +int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); +void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); +#else +static inline int bd_link_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ + return 0; +} +static inline void bd_unlink_disk_holder(struct block_device *bdev, + struct gendisk *disk) +{ +} +#endif /* CONFIG_SYSFS */ + +#ifdef CONFIG_BLOCK +void printk_all_partitions(void); +dev_t blk_lookup_devt(const char *name, int partno); +#else /* CONFIG_BLOCK */ +static inline void printk_all_partitions(void) +{ +} static inline dev_t blk_lookup_devt(const char *name, int partno) { dev_t devt = MKDEV(0, 0); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 4aba4c86c626..67a0774e080b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -110,6 +110,11 @@ struct vm_area_struct; * the caller guarantees the allocation will allow more memory to be freed * very shortly e.g. process exiting or swapping. Users either should * be the MM or co-ordinating closely with the VM (e.g. swap over NFS). + * Users of this flag have to be extremely careful to not deplete the reserve + * completely and implement a throttling mechanism which controls the + * consumption of the reserve based on the amount of freed memory. + * Usage of a pre-allocated pool (e.g. mempool) should always be considered + * before using this flag. * * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves. * This takes precedence over the %__GFP_MEMALLOC flag if both are set. 
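The new __GFP_MEMALLOC guidance points to mempools as the safer pattern: a pre-sized pool bounds worst-case memory use and throttles allocators by blocking until an element is returned, instead of draining the emergency reserves. A sketch of that alternative (names are illustrative; the mempool API is real):

static mempool_t *example_pool;	/* created once, e.g. at driver init */

static int example_init(void)
{
	/* eight pre-allocated 256-byte elements guarantee forward progress */
	example_pool = mempool_create_kmalloc_pool(8, 256);
	return example_pool ? 0 : -ENOMEM;
}

static void *example_alloc(void)
{
	/* blocks until an element frees up rather than dipping into the
	 * __GFP_MEMALLOC reserves */
	return mempool_alloc(example_pool, GFP_NOIO);
}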
@@ -307,7 +312,7 @@ struct vm_area_struct; #define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) #define GFP_MOVABLE_SHIFT 3 -static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) +static inline int gfp_migratetype(const gfp_t gfp_flags) { VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE); diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index b8fc92c177eb..d1cef5c2715c 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -253,6 +253,19 @@ struct gpio_irq_chip { * Store old irq_chip irq_disable callback */ void (*irq_disable)(struct irq_data *data); + /** + * @irq_unmask: + * + * Store old irq_chip irq_unmask callback + */ + void (*irq_unmask)(struct irq_data *data); + + /** + * @irq_mask: + * + * Store old irq_chip irq_mask callback + */ + void (*irq_mask)(struct irq_data *data); }; /** @@ -267,9 +280,9 @@ struct gpio_irq_chip { * @free: optional hook for chip-specific deactivation, such as * disabling module power and clock; may sleep * @get_direction: returns direction for signal "offset", 0=out, 1=in, - * (same as GPIOF_DIR_XXX), or negative error. - * It is recommended to always implement this function, even on - * input-only or output-only gpio chips. + * (same as GPIO_LINE_DIRECTION_OUT / GPIO_LINE_DIRECTION_IN), + * or negative error. It is recommended to always implement this + * function, even on input-only or output-only gpio chips. * @direction_input: configures signal "offset" as input, or returns error * This can be omitted on input-only or output-only gpio chips. * @direction_output: configures signal "offset" as output, or returns error @@ -349,30 +362,30 @@ struct gpio_chip { struct module *owner; int (*request)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); void (*free)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*get_direction)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*direction_input)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*direction_output)(struct gpio_chip *gc, - unsigned offset, int value); + unsigned int offset, int value); int (*get)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); int (*get_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); void (*set)(struct gpio_chip *gc, - unsigned offset, int value); + unsigned int offset, int value); void (*set_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); int (*set_config)(struct gpio_chip *gc, - unsigned offset, + unsigned int offset, unsigned long config); int (*to_irq)(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); void (*dbg_show)(struct seq_file *s, struct gpio_chip *gc); @@ -459,7 +472,23 @@ struct gpio_chip { }; extern const char *gpiochip_is_requested(struct gpio_chip *gc, - unsigned offset); + unsigned int offset); + +/** + * for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range + * @chip: the chip to query + * @i: loop variable + * @base: first GPIO in the range + * @size: amount of GPIOs to check starting from @base + * @label: label of current GPIO + */ +#define for_each_requested_gpio_in_range(chip, i, base, size, label) \ + for (i = 0; i < size; i++) \ + if ((label = gpiochip_is_requested(chip, base + i)) == NULL) {} else + +/* Iterates over all requested GPIO of the given @chip */ +#define for_each_requested_gpio(chip, i, label) \ + 
for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label) /* add/remove chips */ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, @@ -468,25 +497,25 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, /** * gpiochip_add_data() - register a gpio_chip - * @chip: the chip to register, with chip->base initialized + * @gc: the chip to register, with gc->base initialized * @data: driver-private data associated with this chip * * Context: potentially before irqs will work * * When gpiochip_add_data() is called very early during boot, so that GPIOs - * can be freely used, the chip->parent device must be registered before + * can be freely used, the gc->parent device must be registered before * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * * gpiochip_add_data() must only be called after gpiolib initialization, * ie after core_initcall(). * - * If chip->base is negative, this requests dynamic assignment of + * If gc->base is negative, this requests dynamic assignment of * a range of valid GPIOs. * * Returns: * A negative errno if the chip can't be registered, such as because the - * chip->base is invalid or already associated with a different chip. + * gc->base is invalid or already associated with a different chip. * Otherwise it returns zero as a success code. */ #ifdef CONFIG_LOCKDEP @@ -496,8 +525,16 @@ extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, gpiochip_add_data_with_key(gc, data, &lock_key, \ &request_key); \ }) +#define devm_gpiochip_add_data(dev, gc, data) ({ \ + static struct lock_class_key lock_key; \ + static struct lock_class_key request_key; \ + devm_gpiochip_add_data_with_key(dev, gc, data, &lock_key, \ + &request_key); \ + }) #else #define gpiochip_add_data(gc, data) gpiochip_add_data_with_key(gc, data, NULL, NULL) +#define devm_gpiochip_add_data(dev, gc, data) \ + devm_gpiochip_add_data_with_key(dev, gc, data, NULL, NULL) #endif /* CONFIG_LOCKDEP */ static inline int gpiochip_add(struct gpio_chip *gc) @@ -505,8 +542,9 @@ static inline int gpiochip_add(struct gpio_chip *gc) return gpiochip_add_data(gc, NULL); } extern void gpiochip_remove(struct gpio_chip *gc); -extern int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, - void *data); +extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data, + struct lock_class_key *lock_key, + struct lock_class_key *request_key); extern struct gpio_chip *gpiochip_find(void *data, int (*match)(struct gpio_chip *gc, void *data)); @@ -599,6 +637,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc, bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset); +int gpiochip_irqchip_add_domain(struct gpio_chip *gc, + struct irq_domain *domain); + #ifdef CONFIG_LOCKDEP /* @@ -657,9 +698,9 @@ static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gc, } #endif /* CONFIG_LOCKDEP */ -int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset); -void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset); -int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset, +int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset); +void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset); +int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset, unsigned long config); /** diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 1ebe5be05d5f..781a053abbb9 100644 --- 
a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h @@ -20,8 +20,11 @@ enum gpio_lookup_flags { /** * struct gpiod_lookup - lookup table - * @chip_label: name of the chip the GPIO belongs to - * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO + * @key: either the name of the chip the GPIO belongs to, or the GPIO line name + * Note that GPIO line names are not guaranteed to be globally unique, + * so this will use the first match found! + * @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO, or + * U16_MAX to indicate that @key is a GPIO line name * @con_id: name of the GPIO from the device's point of view * @idx: index of the GPIO in case several GPIOs share the same name * @flags: bitmask of gpio_lookup_flags GPIO_* values @@ -30,7 +33,7 @@ enum gpio_lookup_flags { * functions using platform data. */ struct gpiod_lookup { - const char *chip_label; + const char *key; u16 chip_hwnum; const char *con_id; unsigned int idx; @@ -63,17 +66,17 @@ struct gpiod_hog { /* * Simple definition of a single GPIO under a con_id */ -#define GPIO_LOOKUP(_chip_label, _chip_hwnum, _con_id, _flags) \ - GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, 0, _flags) +#define GPIO_LOOKUP(_key, _chip_hwnum, _con_id, _flags) \ + GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, 0, _flags) /* * Use this macro if you need to have several GPIOs under the same con_id. * Each GPIO needs to use a different index and can be accessed using * gpiod_get_index() */ -#define GPIO_LOOKUP_IDX(_chip_label, _chip_hwnum, _con_id, _idx, _flags) \ +#define GPIO_LOOKUP_IDX(_key, _chip_hwnum, _con_id, _idx, _flags) \ { \ - .chip_label = _chip_label, \ + .key = _key, \ .chip_hwnum = _chip_hwnum, \ .con_id = _con_id, \ .idx = _idx, \ diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h new file mode 100644 index 000000000000..ad76f3d0a6ba --- /dev/null +++ b/include/linux/gpio/regmap.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _LINUX_GPIO_REGMAP_H +#define _LINUX_GPIO_REGMAP_H + +struct device; +struct gpio_regmap; +struct irq_domain; +struct regmap; + +#define GPIO_REGMAP_ADDR_ZERO ((unsigned int)(-1)) +#define GPIO_REGMAP_ADDR(addr) ((addr) ? : GPIO_REGMAP_ADDR_ZERO) + +/** + * struct gpio_regmap_config - Description of a generic regmap gpio_chip. + * @parent: The parent device + * @regmap: The regmap used to access the registers + * @label: (Optional) Descriptive name for GPIO controller. + * If not given, the name of the device is used. + * @ngpio: Number of GPIOs + * @names: (Optional) Array of names for gpios + * @reg_dat_base: (Optional) (in) register base address + * @reg_set_base: (Optional) set register base address + * @reg_clr_base: (Optional) clear register base address + * @reg_dir_in_base: (Optional) in setting register base address + * @reg_dir_out_base: (Optional) out setting register base address + * @reg_stride: (Optional) May be set if the registers (of the + * same type, dat, set, etc) are not consecutive. + * @ngpio_per_reg: Number of GPIOs per register + * @irq_domain: (Optional) IRQ domain if the controller is + * interrupt-capable + * @reg_mask_xlate: (Optional) Translates base address and GPIO + * offset to a register/bitmask pair. If not + * given the default gpio_regmap_simple_xlate() + * is used. + * + * The ->reg_mask_xlate translates a given base address and GPIO offset to + * register and mask pair. 
The base address is one of the given register + * base addresses in this structure. + * + * Although all register base addresses are marked as optional, there are + * several rules: + * 1. if you only have @reg_dat_base set, then it is input-only + * 2. if you only have @reg_set_base set, then it is output-only + * 3. if you have either @reg_dir_in_base or @reg_dir_out_base set, then + * you have to set both @reg_dat_base and @reg_set_base + * 4. if you have @reg_set_base set, you may also set @reg_clr_base to have + * two different registers for setting and clearing the output. This is + * also valid for the output-only case. + * 5. @reg_dir_in_base and @reg_dir_out_base are exclusive; is there really + * hardware which has redundant registers? + * + * Note: All base addresses may have the special value %GPIO_REGMAP_ADDR_ZERO + * which forces the address to the value 0. + */ +struct gpio_regmap_config { + struct device *parent; + struct regmap *regmap; + + const char *label; + int ngpio; + const char *const *names; + + unsigned int reg_dat_base; + unsigned int reg_set_base; + unsigned int reg_clr_base; + unsigned int reg_dir_in_base; + unsigned int reg_dir_out_base; + int reg_stride; + int ngpio_per_reg; + struct irq_domain *irq_domain; + + int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base, + unsigned int offset, unsigned int *reg, + unsigned int *mask); +}; + +struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config); +void gpio_regmap_unregister(struct gpio_regmap *gpio); +struct gpio_regmap *devm_gpio_regmap_register(struct device *dev, + const struct gpio_regmap_config *config); +void gpio_regmap_set_drvdata(struct gpio_regmap *gpio, void *data); +void *gpio_regmap_get_drvdata(struct gpio_regmap *gpio); + +#endif /* _LINUX_GPIO_REGMAP_H */ diff --git a/include/linux/greybus/greybus_protocols.h b/include/linux/greybus/greybus_protocols.h index dfbc6c39a74b..aeb8f9243545 100644 --- a/include/linux/greybus/greybus_protocols.h +++ b/include/linux/greybus/greybus_protocols.h @@ -345,7 +345,7 @@ struct gb_cap_get_ims_certificate_request { struct gb_cap_get_ims_certificate_response { __u8 result_code; - __u8 certificate[0]; + __u8 certificate[]; } __packed; /* CAP authenticate request/response */ @@ -358,7 +358,7 @@ struct gb_cap_authenticate_request { struct gb_cap_authenticate_response { __u8 result_code; __u8 response[64]; - __u8 signature[0]; + __u8 signature[]; } __packed; @@ -642,7 +642,7 @@ struct gb_hid_get_report_request { struct gb_hid_set_report_request { __u8 report_type; __u8 report_id; - __u8 report[0]; + __u8 report[]; } __packed; /* HID input report request, via interrupt pipe */ @@ -680,7 +680,7 @@ struct gb_i2c_transfer_op { struct gb_i2c_transfer_request { __le16 op_count; - struct gb_i2c_transfer_op ops[0]; /* op_count of these */ + struct gb_i2c_transfer_op ops[]; /* op_count of these */ } __packed; struct gb_i2c_transfer_response { __u8 data[0]; /* inbound data */ @@ -908,7 +908,7 @@ struct gb_spi_transfer_request { __u8 chip_select; /* of the spi device */ __u8 mode; /* of the spi device */ __le16 count; - struct gb_spi_transfer transfers[0]; /* count of these */ + struct gb_spi_transfer transfers[]; /* count of these */ } __packed; struct gb_spi_transfer_response { @@ -1188,7 +1188,7 @@ struct gb_svc_pwrmon_rail_count_get_response { struct gb_svc_pwrmon_rail_names_get_response { __u8 status; - __u8 name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; + __u8 name[][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE]; } __packed; #define 
GB_SVC_PWRMON_TYPE_CURR 0x01 @@ -1281,7 +1281,7 @@ struct gb_svc_intf_oops_request { struct gb_raw_send_request { __le32 len; - __u8 data[0]; + __u8 data[]; } __packed; @@ -1300,7 +1300,7 @@ struct gb_raw_send_request { /* Represents data from AP -> Module */ struct gb_uart_send_data_request { __le16 size; - __u8 data[0]; + __u8 data[]; } __packed; /* recv-data-request flags */ @@ -1313,7 +1313,7 @@ struct gb_uart_send_data_request { struct gb_uart_recv_data_request { __le16 size; __u8 flags; - __u8 data[0]; + __u8 data[]; } __packed; struct gb_uart_receive_credits_request { @@ -1382,14 +1382,14 @@ struct gb_loopback_transfer_request { __le32 len; __le32 reserved0; __le32 reserved1; - __u8 data[0]; + __u8 data[]; } __packed; struct gb_loopback_transfer_response { __le32 len; __le32 reserved0; __le32 reserved1; - __u8 data[0]; + __u8 data[]; } __packed; /* SDIO */ @@ -1530,13 +1530,13 @@ struct gb_sdio_transfer_request { __le16 data_blocks; __le16 data_blksz; - __u8 data[0]; + __u8 data[]; } __packed; struct gb_sdio_transfer_response { __le16 data_blocks; __le16 data_blksz; - __u8 data[0]; + __u8 data[]; } __packed; /* event request: generated by module and is defined as unidirectional */ @@ -1572,7 +1572,7 @@ struct gb_camera_configure_streams_request { __u8 flags; #define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01 __le16 padding; - struct gb_camera_stream_config_request config[0]; + struct gb_camera_stream_config_request config[]; } __packed; /* Greybus Camera Configure Streams response payload */ @@ -1593,7 +1593,7 @@ struct gb_camera_configure_streams_response { __u8 flags; __u8 padding[2]; __le32 data_rate; - struct gb_camera_stream_config_response config[0]; + struct gb_camera_stream_config_response config[]; }; /* Greybus Camera Capture request payload - response has no payload */ @@ -1602,7 +1602,7 @@ struct gb_camera_capture_request { __u8 streams; __u8 padding; __le16 num_frames; - __u8 settings[0]; + __u8 settings[]; } __packed; /* Greybus Camera Flush response payload - request has no payload */ @@ -1616,7 +1616,7 @@ struct gb_camera_metadata_request { __le16 frame_number; __u8 stream; __u8 padding; - __u8 metadata[0]; + __u8 metadata[]; } __packed; /* Lights */ @@ -1993,7 +1993,7 @@ struct gb_audio_integer64 { struct gb_audio_enumerated { __le32 items; __le16 names_length; - __u8 names[0]; + __u8 names[]; } __packed; struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */ @@ -2033,7 +2033,7 @@ struct gb_audio_widget { __u8 type; /* GB_AUDIO_WIDGET_TYPE_* */ __u8 state; /* GB_AUDIO_WIDGET_STATE_* */ __u8 ncontrols; - struct gb_audio_control ctl[0]; /* 'ncontrols' entries */ + struct gb_audio_control ctl[]; /* 'ncontrols' entries */ } __packed; struct gb_audio_route { @@ -2059,7 +2059,7 @@ struct gb_audio_topology { * struct gb_audio_widget widgets[num_widgets]; * struct gb_audio_route routes[num_routes]; */ - __u8 data[0]; + __u8 data[]; } __packed; struct gb_audio_get_topology_size_response { @@ -2157,7 +2157,7 @@ struct gb_audio_streaming_event_request { struct gb_audio_send_data_request { __le64 timestamp; - __u8 data[0]; + __u8 data[]; } __packed; @@ -2171,7 +2171,7 @@ struct gb_audio_send_data_request { struct gb_log_send_log_request { __le16 len; - __u8 msg[0]; + __u8 msg[]; } __packed; #endif /* __GREYBUS_PROTOCOLS_H */ diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 7c8b82f69288..754f67ac4326 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -2,31 +2,28 @@ #ifndef LINUX_HARDIRQ_H #define LINUX_HARDIRQ_H 
+#include <linux/context_tracking_state.h> #include <linux/preempt.h> #include <linux/lockdep.h> #include <linux/ftrace_irq.h> #include <linux/vtime.h> #include <asm/hardirq.h> - extern void synchronize_irq(unsigned int irq); extern bool synchronize_hardirq(unsigned int irq); -#if defined(CONFIG_TINY_RCU) - -static inline void rcu_nmi_enter(void) -{ -} +#ifdef CONFIG_NO_HZ_FULL +void __rcu_irq_enter_check_tick(void); +#else +static inline void __rcu_irq_enter_check_tick(void) { } +#endif -static inline void rcu_nmi_exit(void) +static __always_inline void rcu_irq_enter_check_tick(void) { + if (context_tracking_enabled()) + __rcu_irq_enter_check_tick(); } -#else -extern void rcu_nmi_enter(void); -extern void rcu_nmi_exit(void); -#endif - /* * It is safe to do non-atomic ops on ->hardirq_context, * because NMI handlers may not preempt and the ops are @@ -41,9 +38,24 @@ extern void rcu_nmi_exit(void); } while (0) /* + * Like __irq_enter() without time accounting for fast + * interrupts, e.g. reschedule IPI where time accounting + * is more expensive than the actual interrupt. + */ +#define __irq_enter_raw() \ + do { \ + preempt_count_add(HARDIRQ_OFFSET); \ + lockdep_hardirq_enter(); \ + } while (0) + +/* * Enter irq context (on NO_HZ, update jiffies): */ -extern void irq_enter(void); +void irq_enter(void); +/* + * Like irq_enter(), but RCU is already watching. + */ +void irq_enter_rcu(void); /* * Exit irq context without processing softirqs: @@ -56,37 +68,85 @@ extern void irq_enter(void); } while (0) /* + * Like __irq_exit() without time accounting + */ +#define __irq_exit_raw() \ + do { \ + lockdep_hardirq_exit(); \ + preempt_count_sub(HARDIRQ_OFFSET); \ + } while (0) + +/* * Exit irq context and process softirqs if needed: */ -extern void irq_exit(void); +void irq_exit(void); + +/* + * Like irq_exit(), but return with RCU watching. + */ +void irq_exit_rcu(void); #ifndef arch_nmi_enter #define arch_nmi_enter() do { } while (0) #define arch_nmi_exit() do { } while (0) #endif -#define nmi_enter() \ +#ifdef CONFIG_TINY_RCU +static inline void rcu_nmi_enter(void) { } +static inline void rcu_nmi_exit(void) { } +#else +extern void rcu_nmi_enter(void); +extern void rcu_nmi_exit(void); +#endif + +/* + * NMI vs Tracing + * -------------- + * + * We must not land in a tracer until (or after) we've changed preempt_count + * such that in_nmi() becomes true. To that effect all NMI C entry points must + * be marked 'notrace' and call nmi_enter() as soon as possible. + */ + +/* + * nmi_enter() can nest up to 15 times; see NMI_BITS. 
+ */ +#define __nmi_enter() \ do { \ + lockdep_off(); \ arch_nmi_enter(); \ printk_nmi_enter(); \ - lockdep_off(); \ - ftrace_nmi_enter(); \ - BUG_ON(in_nmi()); \ - preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ - rcu_nmi_enter(); \ + BUG_ON(in_nmi() == NMI_MASK); \ + __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ + } while (0) + +#define nmi_enter() \ + do { \ + __nmi_enter(); \ lockdep_hardirq_enter(); \ + rcu_nmi_enter(); \ + instrumentation_begin(); \ + ftrace_nmi_enter(); \ + instrumentation_end(); \ } while (0) -#define nmi_exit() \ +#define __nmi_exit() \ do { \ - lockdep_hardirq_exit(); \ - rcu_nmi_exit(); \ BUG_ON(!in_nmi()); \ - preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - ftrace_nmi_exit(); \ - lockdep_on(); \ + __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ printk_nmi_exit(); \ arch_nmi_exit(); \ + lockdep_on(); \ + } while (0) + +#define nmi_exit() \ + do { \ + instrumentation_begin(); \ + ftrace_nmi_exit(); \ + instrumentation_end(); \ + rcu_nmi_exit(); \ + lockdep_hardirq_exit(); \ + __nmi_exit(); \ } while (0) #endif /* LINUX_HARDIRQ_H */ diff --git a/include/linux/hashtable.h b/include/linux/hashtable.h index 78b6ea5fa8ba..f6c666730b8c 100644 --- a/include/linux/hashtable.h +++ b/include/linux/hashtable.h @@ -173,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node) * @member: the name of the hlist_node within the struct * @key: the key of the objects to iterate over */ -#define hash_for_each_possible_rcu(name, obj, member, key) \ +#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \ hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ - member) + member, ## cond) /** * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 9613d796cfb1..9850d59d6f1c 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -57,6 +57,7 @@ enum hdmi_infoframe_type { #define HDMI_SPD_INFOFRAME_SIZE 25 #define HDMI_AUDIO_INFOFRAME_SIZE 10 #define HDMI_DRM_INFOFRAME_SIZE 26 +#define HDMI_VENDOR_INFOFRAME_SIZE 4 #define HDMI_INFOFRAME_SIZE(type) \ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE) @@ -219,6 +220,8 @@ ssize_t hdmi_drm_infoframe_pack(struct hdmi_drm_infoframe *frame, void *buffer, ssize_t hdmi_drm_infoframe_pack_only(const struct hdmi_drm_infoframe *frame, void *buffer, size_t size); int hdmi_drm_infoframe_check(struct hdmi_drm_infoframe *frame); +int hdmi_drm_infoframe_unpack_only(struct hdmi_drm_infoframe *frame, + const void *buffer, size_t size); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, diff --git a/include/linux/hid.h b/include/linux/hid.h index 875f71132b14..c7044a14200e 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -959,34 +959,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) { * @max: maximal valid usage->code to consider later (out parameter) * @type: input event type (EV_KEY, EV_REL, ...) * @c: code which corresponds to this usage and type + * + * The value pointed to by @bit will be set to NULL if either @type is + * an unhandled event type, or if @c is out of range for @type. This + * can be used as an error condition. 
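The reordered nmi_enter()/nmi_exit() pairs above are what architecture NMI entry code is expected to call; per the "NMI vs Tracing" comment, the C entry point must be notrace. A shape-only sketch (the handler names are hypothetical):

#include <linux/hardirq.h>

struct pt_regs;
void handle_nmi_event(struct pt_regs *regs);	/* hypothetical */

notrace void do_nmi_example(struct pt_regs *regs)
{
	nmi_enter();		/* in_nmi() true before any traceable code */
	handle_nmi_event(regs);
	nmi_exit();		/* unwinds in exact reverse order */
}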
*/ static inline void hid_map_usage(struct hid_input *hidinput, struct hid_usage *usage, unsigned long **bit, int *max, - __u8 type, __u16 c) + __u8 type, unsigned int c) { struct input_dev *input = hidinput->input; - - usage->type = type; - usage->code = c; + unsigned long *bmap = NULL; + unsigned int limit = 0; switch (type) { case EV_ABS: - *bit = input->absbit; - *max = ABS_MAX; + bmap = input->absbit; + limit = ABS_MAX; break; case EV_REL: - *bit = input->relbit; - *max = REL_MAX; + bmap = input->relbit; + limit = REL_MAX; break; case EV_KEY: - *bit = input->keybit; - *max = KEY_MAX; + bmap = input->keybit; + limit = KEY_MAX; break; case EV_LED: - *bit = input->ledbit; - *max = LED_MAX; + bmap = input->ledbit; + limit = LED_MAX; break; } + + if (unlikely(c > limit || !bmap)) { + pr_warn_ratelimited("%s: Invalid code %d type %d\n", + input->name, c, type); + *bit = NULL; + return; + } + + usage->type = type; + usage->code = c; + *max = limit; + *bit = bmap; } /** @@ -1000,7 +1015,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput, __u8 type, __u16 c) { hid_map_usage(hidinput, usage, bit, max, type, c); - clear_bit(c, *bit); + if (*bit) + clear_bit(usage->code, *bit); } /** diff --git a/include/linux/highmem.h b/include/linux/highmem.h index ea5cdbd8c2c3..14e6202ce47f 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -32,8 +32,65 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size) #include <asm/kmap_types.h> #ifdef CONFIG_HIGHMEM +extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot); +extern void kunmap_atomic_high(void *kvaddr); #include <asm/highmem.h> +#ifndef ARCH_HAS_KMAP_FLUSH_TLB +static inline void kmap_flush_tlb(unsigned long addr) { } +#endif + +#ifndef kmap_prot +#define kmap_prot PAGE_KERNEL +#endif + +void *kmap_high(struct page *page); +static inline void *kmap(struct page *page) +{ + void *addr; + + might_sleep(); + if (!PageHighMem(page)) + addr = page_address(page); + else + addr = kmap_high(page); + kmap_flush_tlb((unsigned long)addr); + return addr; +} + +void kunmap_high(struct page *page); + +static inline void kunmap(struct page *page) +{ + might_sleep(); + if (!PageHighMem(page)) + return; + kunmap_high(page); +} + +/* + * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because + * no global lock is needed and because the kmap code must perform a global TLB + * invalidation when the kmap pool wraps. + * + * However when holding an atomic kmap it is not legal to sleep, so atomic + * kmaps are appropriate for short, tight code paths only. + * + * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap + * gives a more generic (and caching) interface. But kmap_atomic can + * be used in IRQ contexts, so in some (very limited) cases we need + * it. 
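Tied to the hid_map_usage() hardening above: a driver's input_mapping callback can now treat a NULL *bit as "code out of range" and ignore the usage. A sketch (callback name hypothetical; the return values follow the usual convention: 1 = mapped, 0 = default, -1 = ignore):

#include <linux/hid.h>
#include <linux/input.h>

static int my_input_mapping(struct hid_device *hdev, struct hid_input *hi,
			    struct hid_field *field, struct hid_usage *usage,
			    unsigned long **bit, int *max)
{
	if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
		hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_0);
		if (!*bit)	/* new error condition: invalid code/type */
			return -1;
		return 1;
	}
	return 0;
}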
+ */ +static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) +{ + preempt_disable(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + return kmap_atomic_high_prot(page, prot); +} +#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot) + /* declarations for linux/mm/highmem.c */ unsigned int nr_free_highpages(void); extern atomic_long_t _totalhigh_pages; @@ -77,15 +134,21 @@ static inline struct page *kmap_to_page(void *addr) static inline unsigned long totalhigh_pages(void) { return 0UL; } -#ifndef ARCH_HAS_KMAP static inline void *kmap(struct page *page) { might_sleep(); return page_address(page); } +static inline void kunmap_high(struct page *page) +{ +} + static inline void kunmap(struct page *page) { +#ifdef ARCH_HAS_FLUSH_ON_KUNMAP + kunmap_flush_on_unmap(page_address(page)); +#endif } static inline void *kmap_atomic(struct page *page) @@ -96,16 +159,20 @@ static inline void *kmap_atomic(struct page *page) } #define kmap_atomic_prot(page, prot) kmap_atomic(page) -static inline void __kunmap_atomic(void *addr) +static inline void kunmap_atomic_high(void *addr) { - pagefault_enable(); - preempt_enable(); + /* + * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic() + * handles re-enabling faults + preemption + */ +#ifdef ARCH_HAS_FLUSH_ON_KUNMAP + kunmap_flush_on_unmap(addr); +#endif } #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) #define kmap_flush_unused() do {} while(0) -#endif #endif /* CONFIG_HIGHMEM */ @@ -149,7 +216,9 @@ static inline void kmap_atomic_idx_pop(void) #define kunmap_atomic(addr) \ do { \ BUILD_BUG_ON(__same_type((addr), struct page *)); \ - __kunmap_atomic(addr); \ + kunmap_atomic_high(addr); \ + pagefault_enable(); \ + preempt_enable(); \ } while (0) diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 7475051100c7..866a0fa104c4 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h @@ -10,7 +10,7 @@ #define LINUX_HMM_H #include <linux/kconfig.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> #include <linux/device.h> #include <linux/migrate.h> @@ -19,51 +19,67 @@ #include <linux/mmu_notifier.h> /* - * hmm_pfn_flag_e - HMM flag enums + * On output: + * 0 - The page is faultable and a future call with + * HMM_PFN_REQ_FAULT could succeed. + * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at + * least readable. If dev_private_owner is !NULL then this could + * point at a DEVICE_PRIVATE page. + * HMM_PFN_WRITE - if the page memory can be written to (requires HMM_PFN_VALID) + * HMM_PFN_ERROR - accessing the pfn is impossible and the device should + * fail. ie poisoned memory, special pages, no vma, etc * - * Flags: - * HMM_PFN_VALID: pfn is valid. It has, at least, read permission. - * HMM_PFN_WRITE: CPU page table has write permission set - * - * The driver provides a flags array for mapping page protections to device - * PTE bits. If the driver valid bit for an entry is bit 3, - * i.e., (entry & (1 << 3)), then the driver must provide - * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3. - * Same logic apply to all flags. This is the same idea as vm_page_prot in vma - * except that this is per device driver rather than per architecture. + * On input: + * 0 - Return the current state of the page, do not fault it. + * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault() + * will fail + * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault() + * will fail. 
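The kmap()/kmap_atomic() consolidation above keeps the long-standing calling conventions, summarized in a sketch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Sleepable context: kmap() may block while the pkmap pool is full. */
static void copy_from_page_example(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap(page);

	memcpy(dst, vaddr, len);
	kunmap(page);
}

/* Atomic-safe variant: no sleeping between kmap_atomic() and
 * kunmap_atomic(), which now disable/re-enable pagefaults and
 * preemption symmetrically.
 */
static u32 peek_word_example(struct page *page)
{
	u32 *vaddr = kmap_atomic(page);
	u32 val = *vaddr;

	kunmap_atomic(vaddr);
	return val;
}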
Must be combined with HMM_PFN_REQ_FAULT. */ -enum hmm_pfn_flag_e { - HMM_PFN_VALID = 0, - HMM_PFN_WRITE, - HMM_PFN_FLAG_MAX +enum hmm_pfn_flags { + /* Output fields and flags */ + HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1), + HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2), + HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3), + HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8), + + /* Input flags */ + HMM_PFN_REQ_FAULT = HMM_PFN_VALID, + HMM_PFN_REQ_WRITE = HMM_PFN_WRITE, + + HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT, }; /* - * hmm_pfn_value_e - HMM pfn special value + * hmm_pfn_to_page() - return struct page pointed to by a device entry * - * Flags: - * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory - * HMM_PFN_NONE: corresponding CPU page table entry is pte_none() - * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the - * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not - * be mirrored by a device, because the entry will never have HMM_PFN_VALID - * set and the pfn value is undefined. + * This must be called under the caller 'user_lock' after a successful + * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID + * already. + */ +static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn) +{ + return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS); +} + +/* + * hmm_pfn_to_map_order() - return the CPU mapping size order * - * Driver provides values for none entry, error entry, and special entry. - * Driver can alias (i.e., use same value) error and special, but - * it should not alias none with error or special. + * This is optionally useful to optimize processing of the pfn result + * array. It indicates that the page starts at the order aligned VA and is + * 1<<order bytes long. Every pfn within an high order page will have the + * same pfn flags, both access protections and the map_order. The caller must + * be careful with edge cases as the start and end VA of the given page may + * extend past the range used with hmm_range_fault(). * - * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be: - * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisonous, - * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry, - * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one + * This must be called under the caller 'user_lock' after a successful + * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID + * already. */ -enum hmm_pfn_value_e { - HMM_PFN_ERROR, - HMM_PFN_NONE, - HMM_PFN_SPECIAL, - HMM_PFN_VALUE_MAX -}; +static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn) +{ + return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F; +} /* * struct hmm_range - track invalidation lock on virtual address range @@ -72,12 +88,9 @@ enum hmm_pfn_value_e { * @notifier_seq: result of mmu_interval_read_begin() * @start: range virtual start address (inclusive) * @end: range virtual end address (exclusive) - * @pfns: array of pfns (big enough for the range) - * @flags: pfn flags to match device driver page table - * @values: pfn value for some special case (none, special, error, ...) + * @hmm_pfns: array of pfns (big enough for the range) * @default_flags: default flags for the range (write, read, ... 
see hmm doc) * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter - * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT) * @dev_private_owner: owner of device private pages */ struct hmm_range { @@ -85,42 +98,16 @@ struct hmm_range { unsigned long notifier_seq; unsigned long start; unsigned long end; - uint64_t *pfns; - const uint64_t *flags; - const uint64_t *values; - uint64_t default_flags; - uint64_t pfn_flags_mask; - uint8_t pfn_shift; + unsigned long *hmm_pfns; + unsigned long default_flags; + unsigned long pfn_flags_mask; void *dev_private_owner; }; /* - * hmm_device_entry_to_page() - return struct page pointed to by a device entry - * @range: range use to decode device entry value - * @entry: device entry value to get corresponding struct page from - * Return: struct page pointer if entry is a valid, NULL otherwise - * - * If the device entry is valid (ie valid flag set) then return the struct page - * matching the entry value. Otherwise return NULL. - */ -static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range, - uint64_t entry) -{ - if (entry == range->values[HMM_PFN_NONE]) - return NULL; - if (entry == range->values[HMM_PFN_ERROR]) - return NULL; - if (entry == range->values[HMM_PFN_SPECIAL]) - return NULL; - if (!(entry & range->flags[HMM_PFN_VALID])) - return NULL; - return pfn_to_page(entry >> range->pfn_shift); -} - -/* * Please see Documentation/vm/hmm.rst for how to use the range API. */ -long hmm_range_fault(struct hmm_range *range); +int hmm_range_fault(struct hmm_range *range); /* * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range diff --git a/include/linux/host1x.h b/include/linux/host1x.h index c230b4e70d75..20c885d0bddc 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -48,6 +48,9 @@ struct host1x_client_ops { * @channel: host1x channel associated with this client * @syncpts: array of syncpoints requested for this client * @num_syncpts: number of syncpoints requested for this client + * @parent: pointer to parent structure + * @usecount: reference count for this structure + * @lock: mutex for mutually exclusive concurrency */ struct host1x_client { struct list_head list; @@ -325,10 +328,12 @@ int host1x_client_resume(struct host1x_client *client); struct tegra_mipi_device; -struct tegra_mipi_device *tegra_mipi_request(struct device *device); +struct tegra_mipi_device *tegra_mipi_request(struct device *device, + struct device_node *np); void tegra_mipi_free(struct tegra_mipi_device *device); int tegra_mipi_enable(struct tegra_mipi_device *device); int tegra_mipi_disable(struct tegra_mipi_device *device); int tegra_mipi_calibrate(struct tegra_mipi_device *device); +int tegra_mipi_wait(struct tegra_mipi_device *device); #endif diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 15c8ac313678..107cedd7019a 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -17,6 +17,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/percpu.h> +#include <linux/seqlock.h> #include <linux/timer.h> #include <linux/timerqueue.h> @@ -159,7 +160,7 @@ struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base; unsigned int index; clockid_t clockid; - seqcount_t seq; + seqcount_raw_spinlock_t seq; struct hrtimer *running; struct timerqueue_head active; ktime_t (*get_time)(void); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index cfbb0a87c5f0..8a8bc46a2432 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ 
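Pulling the reworked HMM pieces above together: request bits, result flags and the PFN now live in one unsigned long per page. A condensed sketch of the new calling convention (notifier setup, the retry loop and driver locking are elided; see Documentation/vm/hmm.rst):

#include <linux/hmm.h>
#include <linux/mmap_lock.h>
#include <linux/mmu_notifier.h>

static int fault_one_page_example(struct mmu_interval_notifier *ni,
				  struct mm_struct *mm, unsigned long addr)
{
	unsigned long pfns[1];
	struct hmm_range range = {
		.notifier = ni,
		.start = addr,
		.end = addr + PAGE_SIZE,
		.hmm_pfns = pfns,		/* was uint64_t *pfns */
		.default_flags = HMM_PFN_REQ_FAULT,
	};
	int ret;

	range.notifier_seq = mmu_interval_read_begin(ni);
	mmap_read_lock(mm);
	ret = hmm_range_fault(&range);	/* now 0 or -errno, not a count */
	mmap_read_unlock(mm);
	if (ret)
		return ret;	/* -EBUSY means retry from _read_begin() */

	if (pfns[0] & HMM_PFN_VALID) {
		struct page *page = hmm_pfn_to_page(pfns[0]);
		(void)page;	/* program the device PTE from it */
	}
	return 0;
}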
-42,7 +42,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec); extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, - unsigned long new_addr, unsigned long old_end, + unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd); extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, @@ -181,13 +181,6 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) -#ifdef CONFIG_DEBUG_VM -#define transparent_hugepage_debug_cow() \ - (transparent_hugepage_flags & \ - (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG)) -#else /* CONFIG_DEBUG_VM */ -#define transparent_hugepage_debug_cow() 0 -#endif /* CONFIG_DEBUG_VM */ extern unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, @@ -248,7 +241,7 @@ static inline int is_swap_pmd(pmd_t pmd) return !pmd_none(pmd) && !pmd_present(pmd); } -/* mmap_sem must be held on entry */ +/* mmap_lock must be held on entry */ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { @@ -265,9 +258,36 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, else return NULL; } -static inline int hpage_nr_pages(struct page *page) + +/** + * thp_head - Head page of a transparent huge page. + * @page: Any page (tail, head or regular) found in the page cache. + */ +static inline struct page *thp_head(struct page *page) { - if (unlikely(PageTransHuge(page))) + return compound_head(page); +} + +/** + * thp_order - Order of a transparent huge page. + * @page: Head page of a transparent huge page. + */ +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + if (PageHead(page)) + return HPAGE_PMD_ORDER; + return 0; +} + +/** + * thp_nr_pages - The number of regular pages in this huge page. + * @page: The head page of a huge page. + */ +static inline int thp_nr_pages(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + if (PageHead(page)) return HPAGE_PMD_NR; return 1; } @@ -324,9 +344,21 @@ static inline struct list_head *page_deferred_list(struct page *page) #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) -static inline int hpage_nr_pages(struct page *page) +static inline struct page *thp_head(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return page; +} + +static inline unsigned int thp_order(struct page *page) +{ + VM_BUG_ON_PGFLAGS(PageTail(page), page); + return 0; +} + +static inline int thp_nr_pages(struct page *page) { - VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PGFLAGS(PageTail(page), page); return 1; } @@ -450,4 +482,15 @@ static inline bool thp_migration_supported(void) } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +/** + * thp_size - Size of a transparent huge page. + * @page: Head page of a transparent huge page. + * + * Return: Number of bytes in this page. 
+ */ +static inline unsigned long thp_size(struct page *page) +{ + return PAGE_SIZE << thp_order(page); +} + #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 43a1cef8f0f1..d5cc5f802dd4 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -9,7 +9,8 @@ #include <linux/cgroup.h> #include <linux/list.h> #include <linux/kref.h> -#include <asm/pgtable.h> +#include <linux/pgtable.h> +#include <linux/gfp.h> struct ctl_table; struct user_struct; @@ -105,14 +106,13 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, void hugepage_put_subpool(struct hugepage_subpool *spool); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); -int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); -int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); - -#ifdef CONFIG_NUMA -int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -#endif +int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); +int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, @@ -165,7 +165,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz); pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz); -int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep); +int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep); void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, unsigned long *start, unsigned long *end); struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, @@ -204,8 +205,9 @@ static inline struct address_space *hugetlb_page_mapping_lock_write( return NULL; } -static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, - pte_t *ptep) +static inline int huge_pmd_unshare(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long *addr, pte_t *ptep) { return 0; } @@ -505,13 +507,10 @@ struct huge_bootmem_page { struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); -struct page *alloc_huge_page_node(struct hstate *h, int nid); struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask); + nodemask_t *nmask, gfp_t gfp_mask); struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address); -struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, - int nid, nodemask_t *nmask); int huge_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t idx); @@ -519,8 +518,8 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping, int __init __alloc_bootmem_huge_page(struct hstate *h); int __init alloc_bootmem_huge_page(struct hstate *h); -void __init hugetlb_bad_size(void); void __init hugetlb_add_hstate(unsigned order); +bool __init arch_hugetlb_valid_size(unsigned long size); struct 
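The new thp_head()/thp_order()/thp_nr_pages()/thp_size() helpers above replace the old hpage_nr_pages() and open-coded compound-page arithmetic; a small sketch of the intended call pattern:

#include <linux/highmem.h>
#include <linux/huge_mm.h>

static void zero_compound_example(struct page *page)
{
	struct page *head = thp_head(page);	/* any subpage -> head */
	int i;

	/* thp_nr_pages()/thp_size() must be handed the head page */
	for (i = 0; i < thp_nr_pages(head); i++)
		clear_highpage(head + i);
}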
hstate *size_to_hstate(unsigned long size); #ifndef HUGE_MAX_HSTATE @@ -591,6 +590,20 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h) #include <asm/hugetlb.h> +#ifndef is_hugepage_only_range +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, unsigned long len) +{ + return 0; +} +#define is_hugepage_only_range is_hugepage_only_range +#endif + +#ifndef arch_clear_hugepage_flags +static inline void arch_clear_hugepage_flags(struct page *page) { } +#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#endif + #ifndef arch_make_huge_pte static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, struct page *page, int writable) @@ -679,6 +692,27 @@ static inline bool hugepage_movable_supported(struct hstate *h) return true; } +/* Movability of hugepages depends on migration support. */ +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + if (hugepage_movable_supported(h)) + return GFP_HIGHUSER_MOVABLE; + else + return GFP_HIGHUSER; +} + +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + gfp_t modified_mask = htlb_alloc_mask(h); + + /* Some callers might want to enforce node */ + modified_mask |= (gfp_mask & __GFP_THISNODE); + + modified_mask |= (gfp_mask & __GFP_NOWARN); + + return modified_mask; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -746,13 +780,9 @@ static inline struct page *alloc_huge_page(struct vm_area_struct *vma, return NULL; } -static inline struct page *alloc_huge_page_node(struct hstate *h, int nid) -{ - return NULL; -} - static inline struct page * -alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask) +alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, + nodemask_t *nmask, gfp_t gfp_mask) { return NULL; } @@ -865,6 +895,16 @@ static inline bool hugepage_movable_supported(struct hstate *h) return false; } +static inline gfp_t htlb_alloc_mask(struct hstate *h) +{ + return 0; +} + +static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) +{ + return 0; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h index 6058c3844a76..78dd7035d1e5 100644 --- a/include/linux/hw_breakpoint.h +++ b/include/linux/hw_breakpoint.h @@ -72,7 +72,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context); extern int register_perf_hw_breakpoint(struct perf_event *bp); -extern int __register_perf_hw_breakpoint(struct perf_event *bp); extern void unregister_hw_breakpoint(struct perf_event *bp); extern void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events); @@ -80,6 +79,10 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp); extern int dbg_release_bp_slot(struct perf_event *bp); extern int reserve_bp_slot(struct perf_event *bp); extern void release_bp_slot(struct perf_event *bp); +int hw_breakpoint_weight(struct perf_event *bp); +int arch_reserve_bp_slot(struct perf_event *bp); +void arch_release_bp_slot(struct perf_event *bp); +void arch_unregister_hw_breakpoint(struct perf_event *bp); extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk); @@ -115,8 +118,6 @@ register_wide_hw_breakpoint(struct perf_event_attr *attr, void *context) { return NULL; } static inline int register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } -static inline int 
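htlb_alloc_mask() and htlb_modify_alloc_mask() above centralize the GFP choice that hugepage migration callers used to open-code; a sketch of the intended use with the new alloc_huge_page_nodemask() signature:

#include <linux/hugetlb.h>

static struct page *alloc_target_hugepage_example(struct hstate *h,
						  int preferred_nid,
						  nodemask_t *nmask,
						  gfp_t caller_gfp)
{
	/* keeps only the caller's __GFP_THISNODE/__GFP_NOWARN modifiers */
	gfp_t gfp_mask = htlb_modify_alloc_mask(h, caller_gfp);

	return alloc_huge_page_nodemask(h, preferred_nid, nmask, gfp_mask);
}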
-__register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } static inline void unregister_hw_breakpoint(struct perf_event *bp) { } static inline void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events) { } diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 5e609f25878c..363d4a814aa1 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -436,6 +436,9 @@ devm_hwmon_device_register_with_info(struct device *dev, void hwmon_device_unregister(struct device *dev); void devm_hwmon_device_unregister(struct device *dev); +int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel); + /** * hwmon_is_bad_char - Is the char invalid in a hwmon name * @ch: the char to be considered diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 692c89ccf5df..38100e80360a 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -117,7 +117,7 @@ struct hv_ring_buffer { * Ring data starts here + RingDataStartOffset * !!! DO NOT place any fields below this !!! */ - u8 buffer[0]; + u8 buffer[]; } __packed; struct hv_ring_buffer_info { @@ -313,7 +313,7 @@ struct vmadd_remove_transfer_page_set { struct gpa_range { u32 byte_count; u32 byte_offset; - u64 pfn_array[0]; + u64 pfn_array[]; }; /* @@ -425,7 +425,7 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, - CHANNELMSG_22 = 22, + CHANNELMSG_MODIFYCHANNEL = 22, CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; @@ -563,7 +563,7 @@ struct vmbus_channel_gpadl_header { u32 gpadl; u16 range_buflen; u16 rangecount; - struct gpa_range range[0]; + struct gpa_range range[]; } __packed; /* This is the followup packet that contains more PFNs. */ @@ -571,7 +571,7 @@ struct vmbus_channel_gpadl_body { struct vmbus_channel_message_header header; u32 msgnumber; u32 gpadl; - u64 pfn[0]; + u64 pfn[]; } __packed; struct vmbus_channel_gpadl_created { @@ -620,6 +620,13 @@ struct vmbus_channel_tl_connect_request { guid_t host_service_id; } __packed; +/* Modify Channel parameters, cf. vmbus_send_modifychannel() */ +struct vmbus_channel_modifychannel { + struct vmbus_channel_message_header header; + u32 child_relid; + u32 target_vp; +} __packed; + struct vmbus_channel_version_response { struct vmbus_channel_message_header header; u8 version_supported; @@ -672,7 +679,7 @@ struct vmbus_channel_msginfo { * The channel message that goes out on the "wire". * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header */ - unsigned char msg[0]; + unsigned char msg[]; }; struct vmbus_close_msg { @@ -689,11 +696,6 @@ union hv_connection_id { } u; }; -enum hv_numa_policy { - HV_BALANCED = 0, - HV_LOCALIZED, -}; - enum vmbus_device_type { HV_IDE = 0, HV_SCSI, @@ -771,6 +773,15 @@ struct vmbus_channel { void (*onchannel_callback)(void *context); void *channel_callback_context; + void (*change_target_cpu_callback)(struct vmbus_channel *channel, + u32 old, u32 new); + + /* + * Synchronize channel scheduling and channel removal; see the inline + * comments in vmbus_chan_sched() and vmbus_reset_channel_cb(). + */ + spinlock_t sched_lock; + /* * A channel can be marked for one of three modes of reading: * BATCHED - callback called from taslket and should read @@ -792,22 +803,16 @@ struct vmbus_channel { u64 sig_event; /* - * Starting with win8, this field will be used to specify - * the target virtual processor on which to deliver the interrupt for - * the host to guest communication. 
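Back at the hwmon hunk above, hwmon_notify_event() gives drivers a way to push alarm events to userspace; a sketch of a threshold interrupt handler using it (handler name hypothetical, channel 0 maps to the temp1_* attributes):

#include <linux/hwmon.h>
#include <linux/interrupt.h>

static irqreturn_t my_temp_alarm_irq(int irq, void *data)
{
	struct device *hwmon_dev = data; /* from *_register_with_info() */

	/* wakes poll()ers of temp1_max_alarm and raises a udev event */
	hwmon_notify_event(hwmon_dev, hwmon_temp, hwmon_temp_max_alarm, 0);
	return IRQ_HANDLED;
}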
- * Prior to win8, incoming channel interrupts would only - * be delivered on cpu 0. Setting this value to 0 would - * preserve the earlier behavior. + * Starting with win8, this field will be used to specify the + * target CPU on which to deliver the interrupt for the host + * to guest communication. + * + * Prior to win8, incoming channel interrupts would only be + * delivered on CPU 0. Setting this value to 0 would preserve + * the earlier behavior. */ - u32 target_vp; - /* The corresponding CPUID in the guest */ u32 target_cpu; /* - * State to manage the CPU affiliation of channels. - */ - struct cpumask alloced_cpus_in_node; - int numa_node; - /* * Support for sub-channels. For high performance devices, * it will be useful to have multiple sub-channels to support * a scalable communication infrastructure with the host. @@ -836,12 +841,6 @@ struct vmbus_channel { void (*chn_rescind_callback)(struct vmbus_channel *channel); /* - * The spinlock to protect the structure. It is being used to protect - * test-and-set access to various attributes of the structure as well - * as all sc_list operations. - */ - spinlock_t lock; - /* * All Sub-channels of a primary channel are linked here. */ struct list_head sc_list; @@ -854,11 +853,6 @@ struct vmbus_channel { * Support per-channel state for use by vmbus drivers. */ void *per_channel_state; - /* - * To support per-cpu lookup mapping of relid to channel, - * link up channels based on their CPU affinity. - */ - struct list_head percpu_list; /* * Defer freeing channel until after all cpu's have @@ -897,19 +891,14 @@ struct vmbus_channel { */ bool low_latency; + bool probe_done; + /* - * NUMA distribution policy: - * We support two policies: - * 1) Balanced: Here all performance critical channels are - * distributed evenly amongst all the NUMA nodes. - * This policy will be the default policy. - * 2) Localized: All channels of a given instance of a - * performance critical service will be assigned CPUs - * within a selected NUMA node. + * Cache the device ID here for easy access; this is useful, in + * particular, in situations where the channel's device_obj has + * not been allocated/initialized yet. */ - enum hv_numa_policy affinity_policy; - - bool probe_done; + u16 device_id; /* * We must offload the handling of the primary/sub channels @@ -964,12 +953,6 @@ static inline bool is_sub_channel(const struct vmbus_channel *c) return c->offermsg.offer.sub_channel_index != 0; } -static inline void set_channel_affinity_state(struct vmbus_channel *c, - enum hv_numa_policy policy) -{ - c->affinity_policy = policy; -} - static inline void set_channel_read_mode(struct vmbus_channel *c, enum hv_callback_mode mode) { @@ -1017,7 +1000,7 @@ static inline void clear_low_latency_mode(struct vmbus_channel *c) c->low_latency = false; } -void vmbus_onmessage(void *context); +void vmbus_onmessage(struct vmbus_channel_message_header *hdr); int vmbus_request_offers(void); @@ -1531,6 +1514,7 @@ extern __u32 vmbus_proto_version; int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, const guid_t *shv_host_servie_id); +int vmbus_send_modifychannel(u32 child_relid, u32 target_vp); void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. 
*/ diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h index d03071732db4..7c522fdd9ea7 100644 --- a/include/linux/i2c-algo-pca.h +++ b/include/linux/i2c-algo-pca.h @@ -53,6 +53,20 @@ #define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */ #define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */ +/** + * struct pca_i2c_bus_settings - The configured PCA i2c bus settings + * @mode: Configured i2c bus mode + * @tlow: Configured SCL LOW period + * @thi: Configured SCL HIGH period + * @clock_freq: The configured clock frequency + */ +struct pca_i2c_bus_settings { + int mode; + int tlow; + int thi; + int clock_freq; +}; + struct i2c_algo_pca_data { void *data; /* private low level data */ void (*write_byte) (void *data, int reg, int val); @@ -64,6 +78,7 @@ struct i2c_algo_pca_data { * For PCA9665, use the frequency you want here. */ unsigned int i2c_clock; unsigned int chip; + struct pca_i2c_bus_settings bus_settings; }; int i2c_pca_add_bus(struct i2c_adapter *); diff --git a/include/linux/i2c-smbus.h b/include/linux/i2c-smbus.h index 8c5459034f92..1e4e0de4ef8b 100644 --- a/include/linux/i2c-smbus.h +++ b/include/linux/i2c-smbus.h @@ -2,7 +2,7 @@ /* * i2c-smbus.h - SMBus extensions to the I2C protocol * - * Copyright (C) 2010 Jean Delvare <jdelvare@suse.de> + * Copyright (C) 2010-2019 Jean Delvare <jdelvare@suse.de> */ #ifndef _LINUX_I2C_SMBUS_H @@ -39,4 +39,10 @@ static inline int of_i2c_setup_smbus_alert(struct i2c_adapter *adap) } #endif +#if IS_ENABLED(CONFIG_I2C_SMBUS) && IS_ENABLED(CONFIG_DMI) +void i2c_register_spd(struct i2c_adapter *adap); +#else +static inline void i2c_register_spd(struct i2c_adapter *adap) { } +#endif + #endif /* _LINUX_I2C_SMBUS_H */ diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 49d29054e657..fc55ea41d323 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -56,7 +56,7 @@ struct property_entry; * on a bus (or read from them). Apart from two basic transfer functions to * transmit one message at a time, a more complex version can be used to * transmit an arbitrary number of messages without interruption. - * @count must be be less than 64k since msg.len is u16. + * @count must be less than 64k since msg.len is u16. */ int i2c_transfer_buffer_flags(const struct i2c_client *client, char *buf, int count, u16 flags); @@ -231,7 +231,6 @@ enum i2c_alert_protocol { * @detect: Callback for device detection * @address_list: The I2C addresses to probe (for detect) * @clients: List of detected clients we created (for i2c-core use only) - * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver. 
@@ -290,8 +289,6 @@ struct i2c_driver { int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; struct list_head clients; - - bool disable_i2c_core_irq_mapping; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) @@ -351,14 +348,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) return to_i2c_client(dev); } -static inline void *i2c_get_clientdata(const struct i2c_client *dev) +static inline void *i2c_get_clientdata(const struct i2c_client *client) { - return dev_get_drvdata(&dev->dev); + return dev_get_drvdata(&client->dev); } -static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) +static inline void i2c_set_clientdata(struct i2c_client *client, void *data) { - dev_set_drvdata(&dev->dev, data); + dev_set_drvdata(&client->dev, data); } /* I2C slave support */ @@ -408,7 +405,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; } * that are present. This information is used to grow the driver model tree. * For mainboards this is done statically using i2c_register_board_info(); * bus numbers identify adapters that aren't yet available. For add-on boards, - * i2c_new_device() does this dynamically with the adapter already known. + * i2c_new_client_device() does this dynamically with the adapter already known. */ struct i2c_board_info { char type[I2C_NAME_SIZE]; @@ -439,14 +436,12 @@ struct i2c_board_info { #if IS_ENABLED(CONFIG_I2C) -/* Add-on boards should register/unregister their devices; e.g. a board +/* + * Add-on boards should register/unregister their devices; e.g. a board * with integrated I2C, a config eeprom, sensors, and a codec that's * used in conjunction with the primary hardware. */ struct i2c_client * -i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); - -struct i2c_client * i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info); /* If you don't know the exact address of an I2C device, use this variant @@ -611,6 +606,14 @@ struct i2c_timings { * may configure padmux here for SDA/SCL line or something else they want. * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. + * @pinctrl: pinctrl used by GPIO recovery to change the state of the I2C pins. + * Optional. + * @pins_default: default pinctrl state of SCL/SDA lines, when they are assigned + * to the I2C bus. Optional. Populated internally for GPIO recovery, if + * state with the name PINCTRL_STATE_DEFAULT is found and pinctrl is valid. + * @pins_gpio: recovery pinctrl state of SCL/SDA lines, when they are used as + * GPIOs. Optional. Populated internally for GPIO recovery, if this state + * is called "gpio" or "recovery" and pinctrl is valid. 
*/ struct i2c_bus_recovery_info { int (*recover_bus)(struct i2c_adapter *adap); @@ -627,6 +630,9 @@ struct i2c_bus_recovery_info { /* gpio recovery */ struct gpio_desc *scl_gpiod; struct gpio_desc *sda_gpiod; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_gpio; }; int i2c_recover_bus(struct i2c_adapter *adap); @@ -1003,7 +1009,7 @@ static inline u32 i2c_acpi_find_bus_speed(struct device *dev) static inline struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { diff --git a/include/linux/icmp.h b/include/linux/icmp.h index 81ca84ce3119..0af4d210ee31 100644 --- a/include/linux/icmp.h +++ b/include/linux/icmp.h @@ -15,6 +15,7 @@ #include <linux/skbuff.h> #include <uapi/linux/icmp.h> +#include <uapi/linux/errqueue.h> static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb) { @@ -35,4 +36,8 @@ static inline bool icmp_is_err(int type) return false; } +void ip_icmp_error_rfc4884(const struct sk_buff *skb, + struct sock_ee_data_rfc4884 *out, + int thlen, int off); + #endif /* _LINUX_ICMP_H */ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index 33d379602314..1b3371ae8193 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -13,12 +13,32 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #include <linux/netdevice.h> #if IS_ENABLED(CONFIG_IPV6) -extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct in6_addr *force_saddr); +#if IS_BUILTIN(CONFIG_IPV6) +void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, + const struct in6_addr *force_saddr); +static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) +{ + icmp6_send(skb, type, code, info, NULL); +} +static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) +{ + BUILD_BUG_ON(fn != icmp6_send); + return 0; +} +#else +extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); +#endif + int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h index a445cd1a36c5..91a8612b8bf9 100644 --- a/include/linux/idle_inject.h +++ b/include/linux/idle_inject.h @@ -26,4 +26,8 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev, void idle_inject_get_duration(struct idle_inject_device *ii_dev, unsigned int *run_duration_us, unsigned int *idle_duration_us); + +void idle_inject_set_latency(struct idle_inject_device *ii_dev, + unsigned int latency_ns); + #endif /* __IDLE_INJECT_H__ */ diff --git a/include/linux/idr.h b/include/linux/idr.h index ac6e946b6767..3ade03e5c7af 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -171,7 +171,7 @@ static inline bool idr_is_empty(const struct idr *idr) */ static inline void idr_preload_end(void) { - preempt_enable(); + local_unlock(&radix_tree_preloads.lock); } /** diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 5d3e48d02033..c47f43e65a2f 100644 --- a/include/linux/ieee80211.h +++ 
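Note the i2c_acpi_new_device() stub change above: with ACPI disabled it now returns ERR_PTR(-ENODEV) instead of NULL, so callers can use the IS_ERR() convention unconditionally. A sketch:

#include <linux/err.h>
#include <linux/i2c.h>

static int attach_acpi_client_example(struct device *dev,
				      struct i2c_board_info *info)
{
	struct i2c_client *client = i2c_acpi_new_device(dev, 0, info);

	if (IS_ERR(client))		/* -ENODEV when CONFIG_ACPI=n */
		return PTR_ERR(client);
	/* ... use client ... */
	return 0;
}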
b/include/linux/ieee80211.h @@ -9,7 +9,7 @@ * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright (c) 2016 - 2017 Intel Deutschland GmbH - * Copyright (c) 2018 - 2019 Intel Corporation + * Copyright (c) 2018 - 2020 Intel Corporation */ #ifndef LINUX_IEEE80211_H @@ -105,6 +105,51 @@ /* extension, added by 802.11ad */ #define IEEE80211_STYPE_DMG_BEACON 0x0000 +#define IEEE80211_STYPE_S1G_BEACON 0x0010 + +/* bits unique to S1G beacon */ +#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100 + +/* see 802.11ah-2016 9.9 NDP CMAC frames */ +#define IEEE80211_S1G_1MHZ_NDP_BITS 25 +#define IEEE80211_S1G_1MHZ_NDP_BYTES 4 +#define IEEE80211_S1G_2MHZ_NDP_BITS 37 +#define IEEE80211_S1G_2MHZ_NDP_BYTES 5 + +#define IEEE80211_NDP_FTYPE_CTS 0 +#define IEEE80211_NDP_FTYPE_CF_END 0 +#define IEEE80211_NDP_FTYPE_PS_POLL 1 +#define IEEE80211_NDP_FTYPE_ACK 2 +#define IEEE80211_NDP_FTYPE_PS_POLL_ACK 3 +#define IEEE80211_NDP_FTYPE_BA 4 +#define IEEE80211_NDP_FTYPE_BF_REPORT_POLL 5 +#define IEEE80211_NDP_FTYPE_PAGING 6 +#define IEEE80211_NDP_FTYPE_PREQ 7 + +#define SM64(f, v) ((((u64)v) << f##_S) & f) + +/* NDP CMAC frame fields */ +#define IEEE80211_NDP_FTYPE 0x0000000000000007 +#define IEEE80211_NDP_FTYPE_S 0x0000000000000000 + +/* 1M Probe Request 11ah 9.9.3.1.1 */ +#define IEEE80211_NDP_1M_PREQ_ANO 0x0000000000000008 +#define IEEE80211_NDP_1M_PREQ_ANO_S 3 +#define IEEE80211_NDP_1M_PREQ_CSSID 0x00000000000FFFF0 +#define IEEE80211_NDP_1M_PREQ_CSSID_S 4 +#define IEEE80211_NDP_1M_PREQ_RTYPE 0x0000000000100000 +#define IEEE80211_NDP_1M_PREQ_RTYPE_S 20 +#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000 +#define IEEE80211_NDP_1M_PREQ_RSV 0x0000000001E00000 +/* 2M Probe Request 11ah 9.9.3.1.2 */ +#define IEEE80211_NDP_2M_PREQ_ANO 0x0000000000000008 +#define IEEE80211_NDP_2M_PREQ_ANO_S 3 +#define IEEE80211_NDP_2M_PREQ_CSSID 0x0000000FFFFFFFF0 +#define IEEE80211_NDP_2M_PREQ_CSSID_S 4 +#define IEEE80211_NDP_2M_PREQ_RTYPE 0x0000001000000000 +#define IEEE80211_NDP_2M_PREQ_RTYPE_S 36 + +#define IEEE80211_ANO_NETTYPE_WILD 15 /* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */ #define IEEE80211_CTL_EXT_POLL 0x2000 @@ -121,6 +166,21 @@ #define IEEE80211_MAX_SN IEEE80211_SN_MASK #define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1) + +/* PV1 Layout 11ah 9.8.3.1 */ +#define IEEE80211_PV1_FCTL_VERS 0x0003 +#define IEEE80211_PV1_FCTL_FTYPE 0x001c +#define IEEE80211_PV1_FCTL_STYPE 0x00e0 +#define IEEE80211_PV1_FCTL_TODS 0x0100 +#define IEEE80211_PV1_FCTL_MOREFRAGS 0x0200 +#define IEEE80211_PV1_FCTL_PM 0x0400 +#define IEEE80211_PV1_FCTL_MOREDATA 0x0800 +#define IEEE80211_PV1_FCTL_PROTECTED 0x1000 +#define IEEE80211_PV1_FCTL_END_SP 0x2000 +#define IEEE80211_PV1_FCTL_RELAYED 0x4000 +#define IEEE80211_PV1_FCTL_ACK_POLICY 0x8000 +#define IEEE80211_PV1_FCTL_CTL_EXT 0x0f00 + static inline bool ieee80211_sn_less(u16 sn1, u16 sn2) { return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1); @@ -148,6 +208,7 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2) #define IEEE80211_MAX_FRAG_THRESHOLD 2352 #define IEEE80211_MAX_RTS_THRESHOLD 2353 #define IEEE80211_MAX_AID 2007 +#define IEEE80211_MAX_AID_S1G 8191 #define IEEE80211_MAX_TIM_LEN 251 #define IEEE80211_MAX_MESH_PEERINGS 63 /* Maximum size for the MA-UNITDATA primitive, 802.11 standard section @@ -372,6 +433,17 @@ static inline bool ieee80211_is_data(__le16 fc) } /** + * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT + * @fc: frame control bytes in little-endian byteorder + 
*/ +static inline bool ieee80211_is_ext(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == + cpu_to_le16(IEEE80211_FTYPE_EXT); +} + + +/** * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in little-endian byteorder */ @@ -470,6 +542,18 @@ static inline bool ieee80211_is_beacon(__le16 fc) } /** + * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT && + * IEEE80211_STYPE_S1G_BEACON + * @fc: frame control bytes in little-endian byteorder + */ +static inline bool ieee80211_is_s1g_beacon(__le16 fc) +{ + return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | + IEEE80211_FCTL_STYPE)) == + cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON); +} + +/** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder */ @@ -716,7 +800,7 @@ struct ieee80211_msrment_ie { u8 token; u8 mode; u8 type; - u8 request[0]; + u8 request[]; } __packed; /** @@ -859,6 +943,7 @@ enum ieee80211_ht_chanwidth_values { * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ: 40 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ: 80 MHz channel width * @IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ: 160 MHz or 80+80 MHz channel width + * @IEEE80211_OPMODE_NOTIF_BW_160_80P80: 160 / 80+80 MHz indicator flag * @IEEE80211_OPMODE_NOTIF_RX_NSS_MASK: number of spatial streams mask * (the NSS value is the value of this field + 1) * @IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT: number of spatial streams shift @@ -866,11 +951,12 @@ enum ieee80211_ht_chanwidth_values { * using a beamforming steering matrix */ enum ieee80211_vht_opmode_bits { - IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 3, + IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK = 0x03, IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ = 0, IEEE80211_OPMODE_NOTIF_CHANWIDTH_40MHZ = 1, IEEE80211_OPMODE_NOTIF_CHANWIDTH_80MHZ = 2, IEEE80211_OPMODE_NOTIF_CHANWIDTH_160MHZ = 3, + IEEE80211_OPMODE_NOTIF_BW_160_80P80 = 0x04, IEEE80211_OPMODE_NOTIF_RX_NSS_MASK = 0x70, IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT = 4, IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80, @@ -898,6 +984,59 @@ struct ieee80211_addba_ext_ie { u8 data; } __packed; +/** + * struct ieee80211_s1g_bcn_compat_ie + * + * S1G Beacon Compatibility element + */ +struct ieee80211_s1g_bcn_compat_ie { + __le16 compat_info; + __le16 beacon_int; + __le32 tsf_completion; +} __packed; + +/** + * struct ieee80211_s1g_oper_ie + * + * S1G Operation element + */ +struct ieee80211_s1g_oper_ie { + u8 ch_width; + u8 oper_class; + u8 primary_ch; + u8 oper_ch; + __le16 basic_mcs_nss; +} __packed; + +/** + * struct ieee80211_aid_response_ie + * + * AID Response element + */ +struct ieee80211_aid_response_ie { + __le16 aid; + u8 switch_count; + __le16 response_int; +} __packed; + +struct ieee80211_s1g_cap { + u8 capab_info[10]; + u8 supp_mcs_nss[5]; +} __packed; + +struct ieee80211_ext { + __le16 frame_control; + __le16 duration; + union { + struct { + u8 sa[ETH_ALEN]; + __le32 timestamp; + u8 change_seq; + u8 variable[0]; + } __packed s1g_beacon; + } u; +} __packed __aligned(2); + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1065,6 +1204,7 @@ struct ieee80211_mgmt { /* Supported rates membership selectors */ #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY 126 +#define BSS_MEMBERSHIP_SELECTOR_HE_PHY 122 /* mgmt header + 1 byte category code */ #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u) @@ -1641,7 +1781,7 @@ struct 
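The S1G additions above follow the existing frame-control helper pattern, so classifying an S1G beacon in an RX path reduces to one test (function name hypothetical):

#include <linux/ieee80211.h>
#include <linux/skbuff.h>

static bool rx_is_s1g_beacon_example(const struct sk_buff *skb)
{
	const struct ieee80211_hdr *hdr = (const void *)skb->data;

	/* checks FTYPE == EXT and STYPE == S1G_BEACON in one mask */
	return ieee80211_is_s1g_beacon(hdr->frame_control);
}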
ieee80211_he_operation { __le32 he_oper_params; __le16 he_mcs_nss_set; /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */ - u8 optional[0]; + u8 optional[]; } __packed; /** @@ -1653,7 +1793,7 @@ struct ieee80211_he_operation { struct ieee80211_he_spr { u8 he_sr_control; /* Optional 0 to 19 bytes: depends on @he_sr_control */ - u8 optional[0]; + u8 optional[]; } __packed; /** @@ -1731,6 +1871,9 @@ struct ieee80211_mu_edca_param_set { * @ext_nss_bw_capable: indicates whether or not the local transmitter * (rate scaling algorithm) can deal with the new logic * (dot11VHTExtendedNSSBWCapable) + * @max_vht_nss: current maximum NSS as advertised by the STA in + * operating mode notification, can be 0 in which case the + * capability data will be used to derive this (from MCS support) * * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can * vary for a given BW/MCS. This function parses the data. @@ -1739,7 +1882,8 @@ struct ieee80211_mu_edca_param_set { */ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, enum ieee80211_vht_chanwidth bw, - int mcs, bool ext_nss_bw_capable); + int mcs, bool ext_nss_bw_capable, + unsigned int max_vht_nss); /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 @@ -1814,6 +1958,8 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40 #define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80 +#define IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT 3 + #define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01 #define IEEE80211_HE_MAC_CAP4_QTP 0x02 #define IEEE80211_HE_MAC_CAP4_BQR 0x04 @@ -1835,6 +1981,9 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, #define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40 #define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80 +#define IEEE80211_HE_VHT_MAX_AMPDU_FACTOR 20 +#define IEEE80211_HE_HT_MAX_AMPDU_FACTOR 16 + /* 802.11ax HE PHY capabilities */ #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04 @@ -2060,6 +2209,28 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 +/** + * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field + * @primary: primary channel + * @control: control flags + * @ccfs0: channel center frequency segment 0 + * @ccfs1: channel center frequency segment 1 + * @minrate: minimum rate (in 1 Mbps units) + */ +struct ieee80211_he_6ghz_oper { + u8 primary; +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH 0x3 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ 0 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ 1 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 +#define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 +#define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 + u8 control; + u8 ccfs0; + u8 ccfs1; + u8 minrate; +} __packed; + /* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the byte @@ -2086,7 +2257,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) oper_len++; if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO) - oper_len += 4; + oper_len += sizeof(struct ieee80211_he_6ghz_oper); /* Add the first byte (extension ID) to the total length */ oper_len++; @@ -2094,6 +2265,34 @@ 
ieee80211_he_oper_size(const u8 *he_oper_ie) return oper_len; } +/** + * ieee80211_he_6ghz_oper - obtain 6 GHz operation field + * @he_oper: HE operation element (must be pre-validated for size) + * but may be %NULL + * + * Return: a pointer to the 6 GHz operation field, or %NULL + */ +static inline const struct ieee80211_he_6ghz_oper * +ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper) +{ + const u8 *ret = (void *)&he_oper->optional; + u32 he_oper_params; + + if (!he_oper) + return NULL; + + he_oper_params = le32_to_cpu(he_oper->he_oper_params); + + if (!(he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)) + return NULL; + if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) + ret += 3; + if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) + ret++; + + return (void *)ret; +} + /* HE Spatial Reuse defines */ #define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4 #define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8 @@ -2130,6 +2329,86 @@ ieee80211_he_spr_size(const u8 *he_spr_ie) return spr_len; } +/* S1G Capabilities Information field */ +#define S1G_CAPAB_B0_S1G_LONG BIT(0) +#define S1G_CAPAB_B0_SGI_1MHZ BIT(1) +#define S1G_CAPAB_B0_SGI_2MHZ BIT(2) +#define S1G_CAPAB_B0_SGI_4MHZ BIT(3) +#define S1G_CAPAB_B0_SGI_8MHZ BIT(4) +#define S1G_CAPAB_B0_SGI_16MHZ BIT(5) +#define S1G_CAPAB_B0_SUPP_CH_WIDTH_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B0_SUPP_CH_WIDTH_SHIFT 6 + +#define S1G_CAPAB_B1_RX_LDPC BIT(0) +#define S1G_CAPAB_B1_TX_STBC BIT(1) +#define S1G_CAPAB_B1_RX_STBC BIT(2) +#define S1G_CAPAB_B1_SU_BFER BIT(3) +#define S1G_CAPAB_B1_SU_BFEE BIT(4) +#define S1G_CAPAB_B1_BFEE_STS_MASK (BIT(5) | BIT(6) | BIT(7)) +#define S1G_CAPAB_B1_BFEE_STS_SHIFT 5 + +#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_MASK (BIT(0) | BIT(1) | BIT(2)) +#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_SHIFT 0 +#define S1G_CAPAB_B2_MU_BFER BIT(3) +#define S1G_CAPAB_B2_MU_BFEE BIT(4) +#define S1G_CAPAB_B2_PLUS_HTC_VHT BIT(5) +#define S1G_CAPAB_B2_TRAVELING_PILOT_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B2_TRAVELING_PILOT_SHIFT 6 + +#define S1G_CAPAB_B3_RD_RESPONDER BIT(0) +#define S1G_CAPAB_B3_HT_DELAYED_BA BIT(1) +#define S1G_CAPAB_B3_MAX_MPDU_LEN BIT(2) +#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_MASK (BIT(3) | BIT(4)) +#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_SHIFT 3 +#define S1G_CAPAB_B3_MIN_MPDU_START_MASK (BIT(5) | BIT(6) | BIT(7)) +#define S1G_CAPAB_B3_MIN_MPDU_START_SHIFT 5 + +#define S1G_CAPAB_B4_UPLINK_SYNC BIT(0) +#define S1G_CAPAB_B4_DYNAMIC_AID BIT(1) +#define S1G_CAPAB_B4_BAT BIT(2) +#define S1G_CAPAB_B4_TIME_ADE BIT(3) +#define S1G_CAPAB_B4_NON_TIM BIT(4) +#define S1G_CAPAB_B4_GROUP_AID BIT(5) +#define S1G_CAPAB_B4_STA_TYPE_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B4_STA_TYPE_SHIFT 6 + +#define S1G_CAPAB_B5_CENT_AUTH_CONTROL BIT(0) +#define S1G_CAPAB_B5_DIST_AUTH_CONTROL BIT(1) +#define S1G_CAPAB_B5_AMSDU BIT(2) +#define S1G_CAPAB_B5_AMPDU BIT(3) +#define S1G_CAPAB_B5_ASYMMETRIC_BA BIT(4) +#define S1G_CAPAB_B5_FLOW_CONTROL BIT(5) +#define S1G_CAPAB_B5_SECTORIZED_BEAM_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B5_SECTORIZED_BEAM_SHIFT 6 + +#define S1G_CAPAB_B6_OBSS_MITIGATION BIT(0) +#define S1G_CAPAB_B6_FRAGMENT_BA BIT(1) +#define S1G_CAPAB_B6_NDP_PS_POLL BIT(2) +#define S1G_CAPAB_B6_RAW_OPERATION BIT(3) +#define S1G_CAPAB_B6_PAGE_SLICING BIT(4) +#define S1G_CAPAB_B6_TXOP_SHARING_IMP_ACK BIT(5) +#define S1G_CAPAB_B6_VHT_LINK_ADAPT_MASK (BIT(6) | BIT(7)) +#define S1G_CAPAB_B6_VHT_LINK_ADAPT_SHIFT 6 + +#define S1G_CAPAB_B7_TACK_AS_PS_POLL BIT(0) +#define S1G_CAPAB_B7_DUP_1MHZ BIT(1) +#define 
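The ieee80211_he_6ghz_oper() accessor above skips the optional VHT-operation and co-hosted-BSS bytes for the caller, so extracting, say, the 6 GHz channel width reduces to a NULL check plus a mask (sketch):

#include <linux/ieee80211.h>

static int he_6ghz_chanwidth_example(const struct ieee80211_he_operation *he_oper)
{
	const struct ieee80211_he_6ghz_oper *oper =
		ieee80211_he_6ghz_oper(he_oper);

	if (!oper)	/* no 6 GHz Operation Information present */
		return -1;
	return oper->control & IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH;
}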
S1G_CAPAB_B7_MCS_NEGOTIATION BIT(2) +#define S1G_CAPAB_B7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3) +#define S1G_CAPAB_B7_NDP_BFING_REPORT_POLL BIT(4) +#define S1G_CAPAB_B7_UNSOLICITED_DYN_AID BIT(5) +#define S1G_CAPAB_B7_SECTOR_TRAINING_OPERATION BIT(6) +#define S1G_CAPAB_B7_TEMP_PS_MODE_SWITCH BIT(7) + +#define S1G_CAPAB_B8_TWT_GROUPING BIT(0) +#define S1G_CAPAB_B8_BDT BIT(1) +#define S1G_CAPAB_B8_COLOR_MASK (BIT(2) | BIT(3) | BIT(4)) +#define S1G_CAPAB_B8_COLOR_SHIFT 2 +#define S1G_CAPAB_B8_TWT_REQUEST BIT(5) +#define S1G_CAPAB_B8_TWT_RESPOND BIT(6) +#define S1G_CAPAB_B8_PV1_FRAME BIT(7) + +#define S1G_CAPAB_B9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0) + /* Authentication algorithms */ #define WLAN_AUTH_OPEN 0 #define WLAN_AUTH_SHARED_KEY 1 @@ -2282,6 +2561,8 @@ enum ieee80211_statuscode { /* 802.11ai */ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108, WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109, + WLAN_STATUS_SAE_HASH_TO_ELEMENT = 126, + WLAN_STATUS_SAE_PK = 127, }; @@ -2525,8 +2806,14 @@ enum ieee80211_eid { WLAN_EID_QUIET_CHANNEL = 198, WLAN_EID_OPMODE_NOTIF = 199, + WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201, + + WLAN_EID_S1G_BCN_COMPAT = 213, + WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, + WLAN_EID_S1G_CAPABILITIES = 217, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, + WLAN_EID_S1G_OPERATION = 232, WLAN_EID_CAG_NUMBER = 237, WLAN_EID_AP_CSN = 239, WLAN_EID_FILS_INDICATION = 240, @@ -2554,9 +2841,19 @@ enum ieee80211_eid_ext { WLAN_EID_EXT_UORA = 37, WLAN_EID_EXT_HE_MU_EDCA = 38, WLAN_EID_EXT_HE_SPR = 39, + WLAN_EID_EXT_NDP_FEEDBACK_REPORT_PARAMSET = 41, + WLAN_EID_EXT_BSS_COLOR_CHG_ANN = 42, + WLAN_EID_EXT_QUIET_TIME_PERIOD_SETUP = 43, + WLAN_EID_EXT_ESS_REPORT = 45, + WLAN_EID_EXT_OPS = 46, + WLAN_EID_EXT_HE_BSS_LOAD = 47, WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, WLAN_EID_EXT_NON_INHERITANCE = 56, + WLAN_EID_EXT_KNOWN_BSSID = 57, + WLAN_EID_EXT_SHORT_SSID_LIST = 58, + WLAN_EID_EXT_HE_6GHZ_CAPA = 59, + WLAN_EID_EXT_UL_MU_POWER_CAPA = 60, }; /* Action category code */ @@ -2787,7 +3084,7 @@ enum ieee80211_tdls_actioncode { #define WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT BIT(7) /* Defines support for enhanced multi-bssid advertisement*/ -#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1) +#define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(3) /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 @@ -3038,13 +3335,17 @@ struct ieee80211_multiple_bssid_configuration { #define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7) #define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8) #define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9) +#define WLAN_AKM_SUITE_AP_PEER_KEY SUITE(0x000FAC, 10) #define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11) #define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12) +#define WLAN_AKM_SUITE_FT_8021X_SHA384 SUITE(0x000FAC, 13) #define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14) #define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15) #define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16) #define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17) #define WLAN_AKM_SUITE_OWE SUITE(0x000FAC, 18) +#define WLAN_AKM_SUITE_FT_PSK_SHA384 SUITE(0x000FAC, 19) +#define WLAN_AKM_SUITE_PSK_SHA384 SUITE(0x000FAC, 20) #define WLAN_MAX_KEY_LEN 32 @@ -3099,6 +3400,24 @@ struct ieee80211_tspec_ie { __le16 medium_time; } __packed; +struct ieee80211_he_6ghz_capa { + /* uses IEEE80211_HE_6GHZ_CAP_* below */ + __le16 capa; +} __packed; + +/* HE 6 GHz band capabilities */ +/* uses enum ieee80211_min_mpdu_spacing 
values */ +#define IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START 0x0007 +/* uses enum ieee80211_vht_max_ampdu_length_exp values */ +#define IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP 0x0038 +/* uses IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_* values */ +#define IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN 0x00c0 +/* WLAN_HT_CAP_SM_PS_* values */ +#define IEEE80211_HE_6GHZ_CAP_SM_PS 0x0600 +#define IEEE80211_HE_6GHZ_CAP_RD_RESPONDER 0x0800 +#define IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS 0x1000 +#define IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS 0x2000 + /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame @@ -3323,6 +3642,18 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size) #define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) #define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) +/* convert frequencies */ +#define MHZ_TO_KHZ(freq) ((freq) * 1000) +#define KHZ_TO_MHZ(freq) ((freq) / 1000) +#define PR_KHZ(f) KHZ_TO_MHZ(f), f % 1000 +#define KHZ_F "%d.%03d" + +/* convert powers */ +#define DBI_TO_MBI(gain) ((gain) * 100) +#define MBI_TO_DBI(gain) ((gain) / 100) +#define DBM_TO_MBM(gain) ((gain) * 100) +#define MBM_TO_DBM(gain) ((gain) / 100) + /** * ieee80211_action_contains_tpc - checks if the frame contains TPC element * @skb: the skb containing the frame, length will be checked @@ -3430,4 +3761,30 @@ static inline bool for_each_element_completed(const struct element *element, #define WLAN_RSNX_CAPA_PROTECTED_TWT BIT(4) #define WLAN_RSNX_CAPA_SAE_H2E BIT(5) +/* + * reduced neighbor report, based on Draft P802.11ax_D5.0, + * section 9.4.2.170 + */ +#define IEEE80211_AP_INFO_TBTT_HDR_TYPE 0x03 +#define IEEE80211_AP_INFO_TBTT_HDR_FILTERED 0x04 +#define IEEE80211_AP_INFO_TBTT_HDR_COLOC 0x08 +#define IEEE80211_AP_INFO_TBTT_HDR_COUNT 0xF0 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM 8 +#define IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM 12 + +#define IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED 0x01 +#define IEEE80211_RNR_TBTT_PARAMS_SAME_SSID 0x02 +#define IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID 0x04 +#define IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID 0x08 +#define IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS 0x10 +#define IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE 0x20 +#define IEEE80211_RNR_TBTT_PARAMS_COLOC_AP 0x40 + +struct ieee80211_neighbor_ap_info { + u8 tbtt_info_hdr; + u8 tbtt_info_len; + u8 op_class; + u8 channel; +} __packed; + #endif /* LINUX_IEEE80211_H */ diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index 9e57c4411734..6479a38e52fa 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -47,6 +47,9 @@ struct br_ip_list { #define BR_BCAST_FLOOD BIT(14) #define BR_NEIGH_SUPPRESS BIT(15) #define BR_ISOLATED BIT(16) +#define BR_MRP_AWARE BIT(17) +#define BR_MRP_LOST_CONT BIT(18) +#define BR_MRP_LOST_IN_CONT BIT(19) #define BR_DEFAULT_AGEING_TIME (300 * HZ) diff --git a/include/linux/if_team.h b/include/linux/if_team.h index ec7e4bd07f82..add607943c95 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -67,7 +67,7 @@ struct team_port { u16 queue_id; struct list_head qom_list; /* node in queue override mapping list */ struct rcu_head rcu; - long mode_priv[0]; + long mode_priv[]; }; static inline struct team_port *team_port_get_rcu(const struct net_device *dev) @@ -102,10 +102,7 @@ static inline bool team_port_dev_txable(const struct net_device *port_dev) static inline void team_netpoll_send_skb(struct team_port *port, struct sk_buff *skb) { - struct netpoll *np = port->np; - - if (np) - netpoll_send_skb(np, 
skb); + netpoll_send_skb(port->np, skb); } #else static inline void team_netpoll_send_skb(struct team_port *port, diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index b05e855f1ddd..41a518336673 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -25,6 +25,8 @@ #define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ #define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ +#define VLAN_MAX_DEPTH 8 /* Max. number of nested VLAN tags parsed */ + /* * struct vlan_hdr - vlan header * @h_vlan_TCI: priority and VLAN ID @@ -577,10 +579,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, int *depth) { - unsigned int vlan_depth = skb->mac_len; + unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; /* if type is 802.1Q/AD then the header should already be * present at mac_len - VLAN_HLEN (if mac_len > 0), or at @@ -595,13 +597,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, vlan_depth = ETH_HLEN; } do { - struct vlan_hdr *vh; + struct vlan_hdr vhdr, *vh; - if (unlikely(!pskb_may_pull(skb, - vlan_depth + VLAN_HLEN))) + vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + if (unlikely(!vh || !--parse_depth)) return 0; - vh = (struct vlan_hdr *)(skb->data + vlan_depth); type = vh->h_vlan_encapsulated_proto; vlan_depth += VLAN_HLEN; } while (eth_type_vlan(type)); @@ -620,11 +621,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type, * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. */ -static inline __be16 vlan_get_protocol(struct sk_buff *skb) +static inline __be16 vlan_get_protocol(const struct sk_buff *skb) { return __vlan_get_protocol(skb, skb->protocol, NULL); } +/* A getter for the SKB protocol field which will handle VLAN tags consistently + * whether VLAN acceleration is enabled or not. + */ +static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan) +{ + if (!skip_vlan) + /* VLAN acceleration strips the VLAN header from the skb and + * moves it to skb->vlan_proto + */ + return skb_vlan_tag_present(skb) ? 
skb->vlan_proto : skb->protocol; + + return vlan_get_protocol(skb); +} + static inline void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr) { diff --git a/include/linux/igmp.h b/include/linux/igmp.h index faa6586a5783..64ce8cd1cfaf 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -123,7 +123,7 @@ extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf,int ifindex); extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, struct ip_msfilter __user *optval, int __user *optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, - struct group_filter __user *optval, int __user *optlen); + struct sockaddr_storage __user *p); extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif, int sdif); extern void ip_mc_init_dev(struct in_device *); diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 5a127c0ed200..a3a838dcf8e4 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -133,62 +133,4 @@ void ad_sd_cleanup_buffer_and_trigger(struct iio_dev *indio_dev); int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig); -#define __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ - _storagebits, _shift, _extend_name, _type, _mask_all) \ - { \ - .type = (_type), \ - .differential = (_channel2 == -1 ? 0 : 1), \ - .indexed = 1, \ - .channel = (_channel1), \ - .channel2 = (_channel2), \ - .address = (_address), \ - .extend_name = (_extend_name), \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ - BIT(IIO_CHAN_INFO_OFFSET), \ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ - .info_mask_shared_by_all = _mask_all, \ - .scan_index = (_si), \ - .scan_type = { \ - .sign = 'u', \ - .realbits = (_bits), \ - .storagebits = (_storagebits), \ - .shift = (_shift), \ - .endianness = IIO_BE, \ - }, \ - } - -#define AD_SD_DIFF_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel1, _channel2, _address, _bits, \ - _storagebits, _shift, NULL, IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_SHORTED_CHANNEL(_si, _channel, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel, _channel, _address, _bits, \ - _storagebits, _shift, "shorted", IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_CHANNEL(_si, _channel, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ - _storagebits, _shift, NULL, IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_CHANNEL_NO_SAMP_FREQ(_si, _channel, _address, _bits, \ - _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ - _storagebits, _shift, NULL, IIO_VOLTAGE, 0) - -#define AD_SD_TEMP_CHANNEL(_si, _address, _bits, _storagebits, _shift) \ - __AD_SD_CHANNEL(_si, 0, -1, _address, _bits, \ - _storagebits, _shift, NULL, IIO_TEMP, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - -#define AD_SD_SUPPLY_CHANNEL(_si, _channel, _address, _bits, _storagebits, \ - _shift) \ - __AD_SD_CHANNEL(_si, _channel, -1, _address, _bits, \ - _storagebits, _shift, "supply", IIO_VOLTAGE, \ - BIT(IIO_CHAN_INFO_SAMP_FREQ)) - #endif diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h new file mode 100644 index 000000000000..c5d48e1c2d36 --- /dev/null +++ b/include/linux/iio/adc/adi-axi-adc.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Analog Devices Generic AXI 
ADC IP core driver/library + * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip + * + * Copyright 2012-2020 Analog Devices Inc. + */ +#ifndef __ADI_AXI_ADC_H__ +#define __ADI_AXI_ADC_H__ + +struct device; +struct iio_chan_spec; + +/** + * struct adi_axi_adc_chip_info - Chip specific information + * @name Chip name + * @id Chip ID (usually product ID) + * @channels Channel specifications of type @struct axi_adc_chan_spec + * @num_channels Number of @channels + * @scale_table Supported scales by the chip; tuples of 2 ints + * @num_scales Number of scales in the table + * @max_rate Maximum sampling rate supported by the device + */ +struct adi_axi_adc_chip_info { + const char *name; + unsigned int id; + + const struct iio_chan_spec *channels; + unsigned int num_channels; + + const unsigned int (*scale_table)[2]; + int num_scales; + + unsigned long max_rate; +}; + +/** + * struct adi_axi_adc_conv - data of the ADC attached to the AXI ADC + * @chip_info chip info details for the client ADC + * @preenable_setup op to run in the client before enabling the AXI ADC + * @reg_access IIO debugfs_reg_access hook for the client ADC + * @read_raw IIO read_raw hook for the client ADC + * @write_raw IIO write_raw hook for the client ADC + */ +struct adi_axi_adc_conv { + const struct adi_axi_adc_chip_info *chip_info; + + int (*preenable_setup)(struct adi_axi_adc_conv *conv); + int (*reg_access)(struct adi_axi_adc_conv *conv, unsigned int reg, + unsigned int writeval, unsigned int *readval); + int (*read_raw)(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask); + int (*write_raw)(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + int val, int val2, long mask); +}; + +struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev, + size_t sizeof_priv); + +void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv); + +#endif diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 016d8a068353..ff15c61bf319 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -11,7 +11,7 @@ #include <linux/kref.h> #include <linux/spinlock.h> #include <linux/mutex.h> -#include <linux/iio/buffer.h> +#include <linux/iio/buffer_impl.h> struct iio_dma_buffer_queue; struct iio_dma_buffer_ops; diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index b3a57444a886..0e503db71289 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -14,4 +14,7 @@ struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, const char *channel); void iio_dmaengine_buffer_free(struct iio_buffer *buffer); +struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev, + const char *channel); + #endif diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index a4d2d8061ef6..a63dc07b7350 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -94,12 +94,6 @@ struct iio_buffer { unsigned int watermark; /* private: */ - /* - * @scan_el_attrs: Control of scan elements if that scan mode - * control method is used. - */ - struct attribute_group *scan_el_attrs; - /* @scan_timestamp: Does the scan mode include a timestamp. */ bool scan_timestamp; @@ -115,9 +109,6 @@ struct iio_buffer { */ struct attribute_group scan_el_group; - /* @stufftoread: Flag to indicate new data. */ - bool stufftoread; - /* @attrs: Standard attributes of the buffer. 
*/ const struct attribute **attrs; diff --git a/include/linux/iio/common/cros_ec_sensors_core.h b/include/linux/iio/common/cros_ec_sensors_core.h index 7bc961defa87..caa8bb279a34 100644 --- a/include/linux/iio/common/cros_ec_sensors_core.h +++ b/include/linux/iio/common/cros_ec_sensors_core.h @@ -42,6 +42,10 @@ typedef irqreturn_t (*cros_ec_sensors_capture_t)(int irq, void *p); * @resp: motion sensor response structure * @type: type of motion sensor * @loc: location where the motion sensor is placed + * @range_updated: True if the range of the sensor has been + * updated. + * @curr_range: If updated, the current range value. + * It will be reapplied at every resume. * @calib: calibration parameters. Note that trigger * captured data will always provide the calibrated * data @@ -65,6 +69,9 @@ struct cros_ec_sensors_core_state { enum motionsensor_type type; enum motionsensor_location loc; + bool range_updated; + int curr_range; + struct calib_data { s16 offset; u16 scale; @@ -114,7 +121,9 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask); -/* List of extended channel specification for all sensors */ +extern const struct dev_pm_ops cros_ec_sensors_pm_ops; + +/* List of extended channel specification for all sensors. */ extern const struct iio_chan_spec_ext_info cros_ec_sensors_ext_info[]; extern const struct attribute *cros_ec_sensor_fifo_attributes[]; diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h index 2bde8c912d4d..c4118dcb8e05 100644 --- a/include/linux/iio/consumer.h +++ b/include/linux/iio/consumer.h @@ -64,15 +64,6 @@ void iio_channel_release(struct iio_channel *chan); struct iio_channel *devm_iio_channel_get(struct device *dev, const char *consumer_channel); /** - * devm_iio_channel_release() - Resource managed version of - * iio_channel_release(). - * @dev: Pointer to consumer device for which resource - * is allocared. - * @chan: The channel to be released. - */ -void devm_iio_channel_release(struct device *dev, struct iio_channel *chan); - -/** * iio_channel_get_all() - get all channels associated with a client * @dev: Pointer to consumer device. * @@ -106,15 +97,6 @@ void iio_channel_release_all(struct iio_channel *chan); */ struct iio_channel *devm_iio_channel_get_all(struct device *dev); -/** - * devm_iio_channel_release_all() - Resource managed version of - * iio_channel_release_all(). - * @dev: Pointer to consumer device for which resource - * is allocared. - * @chan: Array channel to be released. 
- */ -void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan); - struct iio_cb_buffer; /** * iio_channel_get_all_cb() - register callback for triggered capture diff --git a/include/linux/iio/hw-consumer.h b/include/linux/iio/hw-consumer.h index 44d48bb1d39f..e8255c2e33bc 100644 --- a/include/linux/iio/hw-consumer.h +++ b/include/linux/iio/hw-consumer.h @@ -14,7 +14,6 @@ struct iio_hw_consumer; struct iio_hw_consumer *iio_hw_consumer_alloc(struct device *dev); void iio_hw_consumer_free(struct iio_hw_consumer *hwc); struct iio_hw_consumer *devm_iio_hw_consumer_alloc(struct device *dev); -void devm_iio_hw_consumer_free(struct device *dev, struct iio_hw_consumer *hwc); int iio_hw_consumer_enable(struct iio_hw_consumer *hwc); void iio_hw_consumer_disable(struct iio_hw_consumer *hwc); diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h new file mode 100644 index 000000000000..f2e94196d31f --- /dev/null +++ b/include/linux/iio/iio-opaque.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _INDUSTRIAL_IO_OPAQUE_H_ +#define _INDUSTRIAL_IO_OPAQUE_H_ + +/** + * struct iio_dev_opaque - industrial I/O device opaque information + * @indio_dev: public industrial I/O device information + * @event_interface: event chrdevs associated with interrupt lines + * @buffer_list: list of all buffers currently attached + * @channel_attr_list: keep track of automatically created channel + * attributes + * @chan_attr_group: group for all attrs in base directory + * @debugfs_dentry: device specific debugfs dentry + * @cached_reg_addr: cached register address for debugfs reads + * @read_buf: read buffer to be used for the initial reg read + * @read_buf_len: data length in @read_buf + */ +struct iio_dev_opaque { + struct iio_dev indio_dev; + struct iio_event_interface *event_interface; + struct list_head buffer_list; + struct list_head channel_attr_list; + struct attribute_group chan_attr_group; +#if defined(CONFIG_DEBUG_FS) + struct dentry *debugfs_dentry; + unsigned cached_reg_addr; + char read_buf[20]; + unsigned int read_buf_len; +#endif +}; + +#define to_iio_dev_opaque(indio_dev) \ + container_of(indio_dev, struct iio_dev_opaque, indio_dev) + +#endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 25c87507a1fa..e2df67a3b9ab 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -488,11 +488,9 @@ struct iio_buffer_setup_ops { * @currentmode: [DRIVER] current operating mode * @dev: [DRIVER] device structure, should be assigned a parent * and owner - * @event_interface: [INTERN] event chrdevs associated with interrupt lines * @buffer: [DRIVER] any buffer present - * @buffer_list: [INTERN] list of all buffers currently attached * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @mlock: [DRIVER] lock used to prevent simultaneous device state + * @mlock: [INTERN] lock used to prevent simultaneous device state * changes * @available_scan_masks: [DRIVER] optional array of allowed bitmasks * @masklength: [INTERN] the length of the mask established from @@ -506,9 +504,6 @@ struct iio_buffer_setup_ops { * @pollfunc_event: [DRIVER] function run on events trigger being received * @channels: [DRIVER] channel specification structure table * @num_channels: [DRIVER] number of channels specified in @channels. - * @channel_attr_list: [INTERN] keep track of automatically created channel - * attributes - * @chan_attr_group: [INTERN] group for all attrs in base directory * @name: [DRIVER] name of the device. 
* @label: [DRIVER] unique name to identify which device this is * @info: [DRIVER] callbacks and constant info from driver @@ -520,8 +515,8 @@ struct iio_buffer_setup_ops { * @groups: [INTERN] attribute groups * @groupcounter: [INTERN] index of next attribute group * @flags: [INTERN] file ops related flags including busy flag. - * @debugfs_dentry: [INTERN] device specific debugfs dentry. - * @cached_reg_addr: [INTERN] cached register address for debugfs reads. + * @priv: [DRIVER] reference to driver's private information + * **MUST** be accessed **ONLY** via iio_priv() helper */ struct iio_dev { int id; @@ -531,10 +526,7 @@ struct iio_dev { int currentmode; struct device dev; - struct iio_event_interface *event_interface; - struct iio_buffer *buffer; - struct list_head buffer_list; int scan_bytes; struct mutex mlock; @@ -551,8 +543,6 @@ struct iio_dev { struct iio_chan_spec const *channels; int num_channels; - struct list_head channel_attr_list; - struct attribute_group chan_attr_group; const char *name; const char *label; const struct iio_info *info; @@ -565,12 +555,7 @@ struct iio_dev { int groupcounter; unsigned long flags; -#if defined(CONFIG_DEBUG_FS) - struct dentry *debugfs_dentry; - unsigned cached_reg_addr; - char read_buf[20]; - unsigned int read_buf_len; -#endif + void *priv; }; const struct iio_chan_spec @@ -593,9 +578,6 @@ void iio_device_unregister(struct iio_dev *indio_dev); * calls iio_device_register() internally. Refer to that function for more * information. * - * If an iio_dev registered with this function needs to be unregistered - * separately, devm_iio_device_unregister() must be used. - * * RETURNS: * 0 on success, negative error number on failure. */ @@ -603,7 +585,6 @@ void iio_device_unregister(struct iio_dev *indio_dev); __devm_iio_device_register((dev), (indio_dev), THIS_MODULE) int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, struct module *this_mod); -void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev); int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); int iio_device_claim_direct_mode(struct iio_dev *indio_dev); void iio_device_release_direct_mode(struct iio_dev *indio_dev); @@ -653,6 +634,26 @@ static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev) return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL; } +/** + * iio_device_set_parent() - assign parent device to the IIO device object + * @indio_dev: IIO device structure + * @parent: reference to parent device object + * + * This utility must be called between IIO device allocation + * (via devm_iio_device_alloc()) & IIO device registration + * (via {devm_}iio_device_register()). + * By default, the device allocation will also assign a parent device to + * the IIO device object. In cases where devm_iio_device_alloc() is used, + * sometimes the parent device must be different than the device used to + * manage the allocation. + * In that case, this helper should be used to change the parent, hence the + * requirement to call this between allocation & registration. 
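+ *
+ * A minimal sketch of the intended call order (hypothetical devm-managed
+ * driver; here the allocation lifetime is tied to @dev while the logical
+ * parent is a different device):
+ *
+ *	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ *	if (!indio_dev)
+ *		return -ENOMEM;
+ *	iio_device_set_parent(indio_dev, dev->parent);
+ *	...
+ *	return devm_iio_device_register(dev, indio_dev);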
+ **/ +static inline void iio_device_set_parent(struct iio_dev *indio_dev, + struct device *parent) +{ + indio_dev->dev.parent = parent; +} /** * iio_device_set_drvdata() - Set device driver data @@ -673,34 +674,25 @@ static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data) * * Returns the data previously set with iio_device_set_drvdata() */ -static inline void *iio_device_get_drvdata(struct iio_dev *indio_dev) +static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev) { return dev_get_drvdata(&indio_dev->dev); } /* Can we make this smaller? */ #define IIO_ALIGN L1_CACHE_BYTES -struct iio_dev *iio_device_alloc(int sizeof_priv); +struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); +/* The information at the returned address is guaranteed to be cacheline aligned */ static inline void *iio_priv(const struct iio_dev *indio_dev) { - return (char *)indio_dev + ALIGN(sizeof(struct iio_dev), IIO_ALIGN); -} - -static inline struct iio_dev *iio_priv_to_dev(void *priv) -{ - return (struct iio_dev *)((char *)priv - - ALIGN(sizeof(struct iio_dev), IIO_ALIGN)); + return indio_dev->priv; } void iio_device_free(struct iio_dev *indio_dev); -int devm_iio_device_match(struct device *dev, void *res, void *data); -struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv); -void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev); +struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv); struct iio_trigger *devm_iio_trigger_alloc(struct device *dev, const char *fmt, ...); -void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig); - /** * iio_buffer_enabled() - helper function to test if the buffer is enabled * @indio_dev: IIO device structure for device @@ -717,10 +709,7 @@ static inline bool iio_buffer_enabled(struct iio_dev *indio_dev) * @indio_dev: IIO device structure for device **/ #if defined(CONFIG_DEBUG_FS) -static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) -{ - return indio_dev->debugfs_dentry; -} +struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev); #else static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) { diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index dd8219138c2e..2df67448f0d1 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -83,10 +83,13 @@ struct adis_data { * @trig: IIO trigger object data * @data: ADIS chip variant specific data * @burst: ADIS burst transfer information + * @burst_extra_len: Burst extra length. Should only be used by devices that can + * dynamically change their burst mode length. 
* @state_lock: Lock used by the device to protect state + * @msg: SPI message object + * @xfer: SPI transfer objects to be used for a @msg + * @current_page: Some ADIS devices have registers, this selects current page + * @irq_flag: IRQ handling flags as passed to request_irq() + * @buffer: Data buffer for information read from the device + * @tx: DMA safe TX buffer for SPI transfers + * @rx: DMA safe RX buffer for SPI transfers @@ -97,7 +100,7 @@ struct adis { const struct adis_data *data; struct adis_burst *burst; - + unsigned int burst_extra_len; /** * The state_lock is meant to be used during operations that require * a sequence of SPI R/W in order to protect the SPI transfer @@ -113,6 +116,7 @@ struct adis { struct spi_message msg; struct spi_transfer *xfer; unsigned int current_page; + unsigned long irq_flag; void *buffer; uint8_t tx[10] ____cacheline_aligned; @@ -331,6 +335,65 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg, return ret; } +int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask, + const u32 val, u8 size); +/** + * adis_update_bits_base() - ADIS Update bits function - Locked version + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @mask: Bitmask to change + * @val: Value to be written + * @size: Size of the register to update + * + * Updates the desired bits of @reg in accordance with @mask and @val. + */ +static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, + const u32 mask, const u32 val, u8 size) +{ + int ret; + + mutex_lock(&adis->state_lock); + ret = __adis_update_bits_base(adis, reg, mask, val, size); + mutex_unlock(&adis->state_lock); + return ret; +} + +/** + * adis_update_bits() - Wrapper macro for adis_update_bits_base - Locked version + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @mask: Bitmask to change + * @val: Value to be written + * + * This macro evaluates the size of @val at compile time and calls + * adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for + * @val can lead to undesired behavior if the register to update is 16-bit. + */ +#define adis_update_bits(adis, reg, mask, val) ({ \ + BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ + __builtin_choose_expr(sizeof(val) == 4, \ + adis_update_bits_base(adis, reg, mask, val, 4), \ + adis_update_bits_base(adis, reg, mask, val, 2)); \ +}) + +/** + * __adis_update_bits() - Wrapper macro for __adis_update_bits_base - Unlocked version + * @adis: The adis device + * @reg: The address of the lower of the two registers + * @mask: Bitmask to change + * @val: Value to be written + * + * This macro evaluates the size of @val at compile time and calls + * __adis_update_bits_base() accordingly. Be aware that using MACROS/DEFINES for + * @val can lead to undesired behavior if the register to update is 16-bit. 
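+ *
+ * A short usage sketch (register and bit names are hypothetical); since
+ * @val is declared with an explicit 16-bit type, the macro resolves to a
+ * 2-byte update, and the unlocked variant assumes state_lock is already
+ * held by the caller:
+ *
+ *	u16 val = FOO_FILT_CTRL_4TAPS;
+ *
+ *	ret = __adis_update_bits(adis, FOO_REG_FILT_CTRL,
+ *				 FOO_FILT_CTRL_TAPS_MASK, val);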
+ */ +#define __adis_update_bits(adis, reg, mask, val) ({ \ + BUILD_BUG_ON(sizeof(val) == 1 || sizeof(val) == 8); \ + __builtin_choose_expr(sizeof(val) == 4, \ + __adis_update_bits_base(adis, reg, mask, val, 4), \ + __adis_update_bits_base(adis, reg, mask, val, 2)); \ +}) + int adis_enable_irq(struct adis *adis, bool enable); int __adis_check_status(struct adis *adis); int __adis_initial_startup(struct adis *adis); @@ -441,18 +504,25 @@ int adis_single_conversion(struct iio_dev *indio_dev, * @en burst mode enabled * @reg_cmd register command that triggers burst * @extra_len extra length to account in the SPI RX buffer + * @burst_max_len holds the maximum burst size when the device supports + * more than one burst mode with different sizes */ struct adis_burst { bool en; unsigned int reg_cmd; - unsigned int extra_len; + const u32 extra_len; + const u32 burst_max_len; }; +int +devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, + irq_handler_t trigger_handler); int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)); void adis_cleanup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev); +int devm_adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev); void adis_remove_trigger(struct adis *adis); @@ -461,6 +531,13 @@ int adis_update_scan_mode(struct iio_dev *indio_dev, #else /* CONFIG_IIO_BUFFER */ +static inline int +devm_adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, + irq_handler_t trigger_handler) +{ + return 0; +} + static inline int adis_setup_buffer_and_trigger(struct adis *adis, struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *)) { @@ -472,6 +549,12 @@ static inline void adis_cleanup_buffer_and_trigger(struct adis *adis, { } +static inline int devm_adis_probe_trigger(struct adis *adis, + struct iio_dev *indio_dev) +{ + return 0; +} + static inline int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev) { diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index 764659e01b68..1fc1efa7799d 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -9,6 +9,5 @@ struct iio_buffer *iio_kfifo_allocate(void); void iio_kfifo_free(struct iio_buffer *r); struct iio_buffer *devm_iio_kfifo_allocate(struct device *dev); -void devm_iio_kfifo_free(struct device *dev, struct iio_buffer *r); #endif diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 84995e2967ac..cad8325903f9 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -141,9 +141,6 @@ int __devm_iio_trigger_register(struct device *dev, **/ void iio_trigger_unregister(struct iio_trigger *trig_info); -void devm_iio_trigger_unregister(struct device *dev, - struct iio_trigger *trig_info); - /** * iio_trigger_set_immutable() - set an immutable trigger on destination * diff --git a/include/linux/iio/trigger_consumer.h b/include/linux/iio/trigger_consumer.h index c3c6ba5ec423..3aa2f132dd67 100644 --- a/include/linux/iio/trigger_consumer.h +++ b/include/linux/iio/trigger_consumer.h @@ -50,11 +50,4 @@ irqreturn_t iio_pollfunc_store_time(int irq, void *p); void iio_trigger_notify_done(struct iio_trigger *trig); -/* - * Two functions for common case where all that happens is a pollfunc - * is attached and detached from a trigger - */ -int iio_triggered_buffer_postenable(struct iio_dev *indio_dev); -int 
iio_triggered_buffer_predisable(struct iio_dev *indio_dev); - #endif diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h index 238ad30ce166..e99c91799359 100644 --- a/include/linux/iio/triggered_buffer.h +++ b/include/linux/iio/triggered_buffer.h @@ -18,7 +18,5 @@ int devm_iio_triggered_buffer_setup(struct device *dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), const struct iio_buffer_setup_ops *ops); -void devm_iio_triggered_buffer_cleanup(struct device *dev, - struct iio_dev *indio_dev); #endif diff --git a/include/linux/ima.h b/include/linux/ima.h index aefe758f4466..d15100de6cdd 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -18,13 +18,14 @@ extern int ima_file_check(struct file *file, int mask); extern void ima_post_create_tmpfile(struct inode *inode); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); +extern int ima_file_mprotect(struct vm_area_struct *vma, unsigned long prot); extern int ima_load_data(enum kernel_load_data_id id); extern int ima_read_file(struct file *file, enum kernel_read_file_id id); extern int ima_post_read_file(struct file *file, void *buf, loff_t size, enum kernel_read_file_id id); extern void ima_post_path_mknod(struct dentry *dentry); extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); -extern void ima_kexec_cmdline(const void *buf, int size); +extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); #ifdef CONFIG_IMA_KEXEC extern void ima_add_kexec_buffer(struct kimage *image); @@ -70,6 +71,12 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) return 0; } +static inline int ima_file_mprotect(struct vm_area_struct *vma, + unsigned long prot) +{ + return 0; +} + static inline int ima_load_data(enum kernel_load_data_id id) { return 0; @@ -96,7 +103,7 @@ static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size) return -EOPNOTSUPP; } -static inline void ima_kexec_cmdline(const void *buf, int size) {} +static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} #endif /* CONFIG_IMA */ #ifndef CONFIG_IMA_KEXEC diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h index 00d7e8e919c6..54c02c84906a 100644 --- a/include/linux/indirect_call_wrapper.h +++ b/include/linux/indirect_call_wrapper.h @@ -23,6 +23,16 @@ likely(f == f2) ? f2(__VA_ARGS__) : \ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ }) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \ + ({ \ + likely(f == f3) ? f3(__VA_ARGS__) : \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \ + }) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \ + ({ \ + likely(f == f4) ? f4(__VA_ARGS__) : \ + INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \ + }) #define INDIRECT_CALLABLE_DECLARE(f) f #define INDIRECT_CALLABLE_SCOPE @@ -30,6 +40,8 @@ #else #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) #define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) 
f(__VA_ARGS__) #define INDIRECT_CALLABLE_DECLARE(f) #define INDIRECT_CALLABLE_SCOPE static #endif diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h index ce9ed1c0602f..0ef2d800fda7 100644 --- a/include/linux/inet_diag.h +++ b/include/linux/inet_diag.h @@ -71,7 +71,11 @@ static inline size_t inet_diag_msg_attrs_size(void) + nla_total_size(1) /* INET_DIAG_SKV6ONLY */ #endif + nla_total_size(4) /* INET_DIAG_MARK */ - + nla_total_size(4); /* INET_DIAG_CLASS_ID */ + + nla_total_size(4) /* INET_DIAG_CLASS_ID */ +#ifdef CONFIG_SOCK_CGROUP_DATA + + nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */ +#endif + ; } int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb, struct inet_diag_msg *r, int ext, diff --git a/include/linux/init_syscalls.h b/include/linux/init_syscalls.h new file mode 100644 index 000000000000..92045d18cbfc --- /dev/null +++ b/include/linux/init_syscalls.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +int __init init_mount(const char *dev_name, const char *dir_name, + const char *type_page, unsigned long flags, void *data_page); +int __init init_umount(const char *name, int flags); +int __init init_chdir(const char *filename); +int __init init_chroot(const char *filename); +int __init init_chown(const char *filename, uid_t user, gid_t group, int flags); +int __init init_chmod(const char *filename, umode_t mode); +int __init init_eaccess(const char *filename); +int __init init_stat(const char *filename, struct kstat *stat, int flags); +int __init init_mknod(const char *filename, umode_t mode, unsigned int dev); +int __init init_link(const char *oldname, const char *newname); +int __init init_symlink(const char *oldname, const char *newname); +int __init init_unlink(const char *pathname); +int __init init_mkdir(const char *pathname, umode_t mode); +int __init init_rmdir(const char *pathname); +int __init init_utimes(char *filename, struct timespec64 *ts); +int __init init_dup(struct file *file); diff --git a/include/linux/initrd.h b/include/linux/initrd.h index aa5914355728..8db6f8c8030b 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h @@ -2,12 +2,6 @@ #define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */ -/* 1 = load ramdisk, 0 = don't load */ -extern int rd_doload; - -/* 1 = prompt for ramdisk, 0 = don't prompt */ -extern int rd_prompt; - /* starting block # of image */ extern int rd_image_start; diff --git a/include/linux/instrumentation.h b/include/linux/instrumentation.h new file mode 100644 index 000000000000..93e2ad67fc10 --- /dev/null +++ b/include/linux/instrumentation.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_INSTRUMENTATION_H +#define __LINUX_INSTRUMENTATION_H + +#if defined(CONFIG_DEBUG_ENTRY) && defined(CONFIG_STACK_VALIDATION) + +/* Begin/end of an instrumentation safe region */ +#define instrumentation_begin() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_begin\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) + +/* + * Because instrumentation_{begin,end}() can nest, objtool validation considers + * _begin() a +1 and _end() a -1 and computes a sum over the instructions. + * When the value is greater than 0, we consider instrumentation allowed. + * + * There is a problem with code like: + * + * noinstr void foo() + * { + * instrumentation_begin(); + * ... + * if (cond) { + * instrumentation_begin(); + * ... 
+ * instrumentation_end(); + * } + * bar(); + * instrumentation_end(); + * } + * + * If instrumentation_end() would be an empty label, like all the other + * annotations, the inner _end(), which is at the end of a conditional block, + * would land on the instruction after the block. + * + * If we then consider the sum of the !cond path, we'll see that the call to + * bar() is with a 0-value, even though, we meant it to happen with a positive + * value. + * + * To avoid this, have _end() be a NOP instruction, this ensures it will be + * part of the condition block and does not escape. + */ +#define instrumentation_end() ({ \ + asm volatile("%c0: nop\n\t" \ + ".pushsection .discard.instr_end\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#else +# define instrumentation_begin() do { } while(0) +# define instrumentation_end() do { } while(0) +#endif + +#endif /* __LINUX_INSTRUMENTATION_H */ diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h new file mode 100644 index 000000000000..43e6ea591975 --- /dev/null +++ b/include/linux/instrumented.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * This header provides generic wrappers for memory access instrumentation that + * the compiler cannot emit for: KASAN, KCSAN. + */ +#ifndef _LINUX_INSTRUMENTED_H +#define _LINUX_INSTRUMENTED_H + +#include <linux/compiler.h> +#include <linux/kasan-checks.h> +#include <linux/kcsan-checks.h> +#include <linux/types.h> + +/** + * instrument_read - instrument regular read access + * + * Instrument a regular read access. The instrumentation should be inserted + * before the actual read happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_read(v, size); +} + +/** + * instrument_write - instrument regular write access + * + * Instrument a regular write access. The instrumentation should be inserted + * before the actual write happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_write(v, size); +} + +/** + * instrument_atomic_read - instrument atomic read access + * + * Instrument an atomic read access. The instrumentation should be inserted + * before the actual read happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_atomic_read(const volatile void *v, size_t size) +{ + kasan_check_read(v, size); + kcsan_check_atomic_read(v, size); +} + +/** + * instrument_atomic_write - instrument atomic write access + * + * Instrument an atomic write access. The instrumentation should be inserted + * before the actual write happens. + * + * @ptr address of access + * @size size of access + */ +static __always_inline void instrument_atomic_write(const volatile void *v, size_t size) +{ + kasan_check_write(v, size); + kcsan_check_atomic_write(v, size); +} + +/** + * instrument_copy_to_user - instrument reads of copy_to_user + * + * Instrument reads from kernel memory, that are due to copy_to_user (and + * variants). The instrumentation must be inserted before the accesses. 
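+ *
+ * A sketch of the expected call pattern (simplified, not the exact
+ * uaccess implementation):
+ *
+ *	instrument_copy_to_user(to, from, n);
+ *	n = raw_copy_to_user(to, from, n);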
+ * + * @to destination address + * @from source address + * @n number of bytes to copy + */ +static __always_inline void +instrument_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + kasan_check_read(from, n); + kcsan_check_read(from, n); +} + +/** + * instrument_copy_from_user - instrument writes of copy_from_user + * + * Instrument writes to kernel memory, that are due to copy_from_user (and + * variants). The instrumentation should be inserted before the accesses. + * + * @to destination address + * @from source address + * @n number of bytes to copy + */ +static __always_inline void +instrument_copy_from_user(const void *to, const void __user *from, unsigned long n) +{ + kasan_check_write(to, n); + kcsan_check_write(to, n); +} + +#endif /* _LINUX_INSTRUMENTED_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 980234ae0312..b1ed2f25f7c0 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -19,6 +19,7 @@ #include <linux/iommu.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/dmar.h> +#include <linux/ioasid.h> #include <asm/cacheflush.h> #include <asm/iommu.h> @@ -40,8 +41,12 @@ #define DMA_PTE_SNP BIT_ULL(11) #define DMA_FL_PTE_PRESENT BIT_ULL(0) +#define DMA_FL_PTE_US BIT_ULL(2) #define DMA_FL_PTE_XD BIT_ULL(63) +#define ADDR_WIDTH_5LEVEL (57) +#define ADDR_WIDTH_4LEVEL (48) + #define CONTEXT_TT_MULTI_LEVEL 0 #define CONTEXT_TT_DEV_IOTLB 1 #define CONTEXT_TT_PASS_THROUGH 2 @@ -166,6 +171,7 @@ #define ecap_smpwc(e) (((e) >> 48) & 0x1) #define ecap_flts(e) (((e) >> 47) & 0x1) #define ecap_slts(e) (((e) >> 46) & 0x1) +#define ecap_vcs(e) (((e) >> 44) & 0x1) #define ecap_smts(e) (((e) >> 43) & 0x1) #define ecap_dit(e) ((e >> 41) & 0x1) #define ecap_pasid(e) ((e >> 40) & 0x1) @@ -191,6 +197,9 @@ #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) #define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ +/* Virtual command interface capability */ +#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */ + /* IOTLB_REG */ #define DMA_TLB_FLUSH_GRANU_OFFSET 60 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) @@ -284,6 +293,9 @@ /* PRS_REG */ #define DMA_PRS_PPR ((u32)1) +#define DMA_PRS_PRO ((u32)2) + +#define DMA_VCS_PAS ((u64)1) #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ do { \ @@ -324,6 +336,8 @@ enum { #define QI_IWD_STATUS_DATA(d) (((u64)d) << 32) #define QI_IWD_STATUS_WRITE (((u64)1) << 5) +#define QI_IWD_FENCE (((u64)1) << 6) +#define QI_IWD_PRQ_DRAIN (((u64)1) << 7) #define QI_IOTLB_DID(did) (((u64)did) << 16) #define QI_IOTLB_DR(dr) (((u64)dr) << 7) @@ -331,7 +345,7 @@ enum { #define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4)) #define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK) #define QI_IOTLB_IH(ih) (((u64)ih) << 6) -#define QI_IOTLB_AM(am) (((u8)am)) +#define QI_IOTLB_AM(am) (((u8)am) & 0x3f) #define QI_CC_FM(fm) (((u64)fm) << 48) #define QI_CC_SID(sid) (((u64)sid) << 32) @@ -350,20 +364,24 @@ enum { #define QI_PC_DID(did) (((u64)did) << 16) #define QI_PC_GRAN(gran) (((u64)gran) << 4) -#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0)) -#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) +/* PASID cache invalidation granu */ +#define QI_PC_ALL_PASIDS 0 +#define QI_PC_PASID_SEL 1 #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) -#define QI_EIOTLB_AM(am) (((u64)am)) +#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f) #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) #define QI_EIOTLB_DID(did) 
(((u64)did) << 16) #define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4) +/* QI Dev-IOTLB inv granu */ +#define QI_DEV_IOTLB_GRAN_ALL 1 +#define QI_DEV_IOTLB_GRAN_PASID_SEL 0 + #define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK) #define QI_DEV_EIOTLB_SIZE (((u64)1) << 11) -#define QI_DEV_EIOTLB_GLOB(g) ((u64)g) -#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) +#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32) #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \ @@ -480,6 +498,23 @@ struct context_entry { u64 hi; }; +/* si_domain contains multiple devices */ +#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0) + +/* + * When VT-d works in the scalable mode, it allows DMA translation to + * happen through either first level or second level page table. This + * bit marks that the DMA translation for the domain goes through the + * first level page table, otherwise, it goes through the second level. + */ +#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1) + +/* + * Domain represents a virtual machine which demands iommu nested + * translation mode support. + */ +#define DOMAIN_FLAG_NESTING_MODE BIT(2) + struct dmar_domain { int nid; /* node id */ @@ -529,6 +564,7 @@ struct intel_iommu { u64 reg_size; /* size of hw register set */ u64 cap; u64 ecap; + u64 vccap; u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ raw_spinlock_t register_lock; /* protect register handling */ int seq_id; /* sequence id of the iommu */ @@ -549,6 +585,8 @@ struct intel_iommu { #ifdef CONFIG_INTEL_IOMMU_SVM struct page_req_dsc *prq; unsigned char prq_name[16]; /* Name for PRQ interrupt */ + struct completion prq_complete; + struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */ #endif struct q_inval *qi; /* Queued invalidation info */ u32 *iommu_state; /* Store iommu states between suspend and resume.*/ @@ -561,6 +599,8 @@ struct intel_iommu { struct iommu_device iommu; /* IOMMU core code handle */ int node; u32 flags; /* Software defined flags */ + + struct dmar_drhd_unit *drhd; }; /* PCI domain-device relationship */ @@ -571,6 +611,7 @@ struct device_domain_info { struct list_head auxiliary_domains; /* auxiliary domains * attached to this device */ + u32 segment; /* PCI segment number */ u8 bus; /* PCI bus number */ u8 devfn; /* PCI devfn number */ u16 pfsid; /* SRIOV physical function source ID */ @@ -595,6 +636,12 @@ static inline void __iommu_flush_cache( clflush_cache_range(addr, size); } +/* Convert generic struct iommu_domain to private struct dmar_domain */ +static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct dmar_domain, domain); +} + /* * 0: readable * 1: writable @@ -653,9 +700,23 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type); extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, u16 qdep, u64 addr, unsigned mask); + void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, unsigned long npages, bool ih); -extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); + +void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, + u32 pasid, u16 qdep, u64 addr, + unsigned int size_order); +void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu, + int pasid); + +int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, + unsigned 
int count, unsigned long options); +/* + * Options used in qi_submit_sync: + * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8. + */ +#define QI_OPT_WAIT_DRAIN BIT(0) extern int dmar_ir_support(void); @@ -667,11 +728,22 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info, void iommu_flush_write_buffer(struct intel_iommu *iommu); int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev); struct dmar_domain *find_domain(struct device *dev); +struct device_domain_info *get_domain_info(struct device *dev); +struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn); #ifdef CONFIG_INTEL_IOMMU_SVM extern void intel_svm_check(struct intel_iommu *iommu); extern int intel_svm_enable_prq(struct intel_iommu *iommu); extern int intel_svm_finish_prq(struct intel_iommu *iommu); +int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, + struct iommu_gpasid_bind_data *data); +int intel_svm_unbind_gpasid(struct device *dev, int pasid); +struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, + void *drvdata); +void intel_svm_unbind(struct iommu_sva *handle); +int intel_svm_get_pasid(struct iommu_sva *handle); +int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt, + struct iommu_page_response *msg); struct svm_dev_ops; @@ -680,6 +752,8 @@ struct intel_svm_dev { struct rcu_head rcu; struct device *dev; struct svm_dev_ops *ops; + struct iommu_sva sva; + int pasid; int users; u16 did; u16 dev_iotlb:1; @@ -689,14 +763,14 @@ struct intel_svm_dev { struct intel_svm { struct mmu_notifier notifier; struct mm_struct *mm; + struct intel_iommu *iommu; int flags; int pasid; + int gpasid; /* In case that guest PASID is different from host PASID */ struct list_head devs; struct list_head list; }; - -extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev); #else static inline void intel_svm_check(struct intel_iommu *iommu) {} #endif diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index d7c403d0dd27..c9e7e601950d 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -21,7 +21,6 @@ struct svm_dev_ops { #define SVM_REQ_EXEC (1<<1) #define SVM_REQ_PRIV (1<<0) - /* * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" * PASID for the current process. Even if a PASID already exists, a new one @@ -44,90 +43,17 @@ struct svm_dev_ops { * do such IOTLB flushes automatically. */ #define SVM_FLAG_SUPERVISOR_MODE (1<<1) - -#ifdef CONFIG_INTEL_IOMMU_SVM - -/** - * intel_svm_bind_mm() - Bind the current process to a PASID - * @dev: Device to be granted access - * @pasid: Address for allocated PASID - * @flags: Flags. Later for requesting supervisor mode, etc. - * @ops: Callbacks to device driver - * - * This function attempts to enable PASID support for the given device. - * If the @pasid argument is non-%NULL, a PASID is allocated for access - * to the MM of the current process. - * - * By using a %NULL value for the @pasid argument, this function can - * be used to simply validate that PASID support is available for the - * given device — i.e. that it is behind an IOMMU which has the - * requisite support, and is enabled. - * - * Page faults are handled transparently by the IOMMU code, and there - * should be no need for the device driver to be involved. If a page - * fault cannot be handled (i.e. 
is an invalid address rather than - * just needs paging in), then the page request will be completed by - * the core IOMMU code with appropriate status, and the device itself - * can then report the resulting fault to its driver via whatever - * mechanism is appropriate. - * - * Multiple calls from the same process may result in the same PASID - * being re-used. A reference count is kept. - */ -extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, - struct svm_dev_ops *ops); - -/** - * intel_svm_unbind_mm() - Unbind a specified PASID - * @dev: Device for which PASID was allocated - * @pasid: PASID value to be unbound - * - * This function allows a PASID to be retired when the device no - * longer requires access to the address space of a given process. - * - * If the use count for the PASID in question reaches zero, the - * PASID is revoked and may no longer be used by hardware. - * - * Device drivers are required to ensure that no access (including - * page requests) is currently outstanding for the PASID in question, - * before calling this function. +/* + * The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest + * processes. Compared to the host bind, the primary differences are: + * 1. mm life cycle management + * 2. fault reporting */ -extern int intel_svm_unbind_mm(struct device *dev, int pasid); - -/** - * intel_svm_is_pasid_valid() - check if pasid is valid - * @dev: Device for which PASID was allocated - * @pasid: PASID value to be checked - * - * This function checks if the specified pasid is still valid. A - * valid pasid means the backing mm is still having a valid user. - * For kernel callers init_mm is always valid. for other mm, if mm->mm_users - * is non-zero, it is valid. - * - * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid - * 1 if pasid is valid. +#define SVM_FLAG_GUEST_MODE (1<<2) +/* + * The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space, + * which requires guest and host PASID translation in both directions. 
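+ *
+ * In a guest bind the two flags are typically combined (an illustrative
+ * fragment only, not a complete call site):
+ *
+ *	flags = SVM_FLAG_GUEST_MODE | SVM_FLAG_GUEST_PASID;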
*/ -extern int intel_svm_is_pasid_valid(struct device *dev, int pasid); - -#else /* CONFIG_INTEL_IOMMU_SVM */ - -static inline int intel_svm_bind_mm(struct device *dev, int *pasid, - int flags, struct svm_dev_ops *ops) -{ - return -ENOSYS; -} - -static inline int intel_svm_unbind_mm(struct device *dev, int pasid) -{ - BUG(); -} - -static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid) -{ - return -EINVAL; -} -#endif /* CONFIG_INTEL_IOMMU_SVM */ - -#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) +#define SVM_FLAG_GUEST_PASID (1<<3) #endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index efb3ce892c20..3582176a1eca 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -29,6 +29,7 @@ enum rapl_domain_reg_id { RAPL_DOMAIN_REG_PERF, RAPL_DOMAIN_REG_POLICY, RAPL_DOMAIN_REG_INFO, + RAPL_DOMAIN_REG_PL4, RAPL_DOMAIN_REG_MAX, }; @@ -38,12 +39,14 @@ enum rapl_primitives { ENERGY_COUNTER, POWER_LIMIT1, POWER_LIMIT2, + POWER_LIMIT4, FW_LOCK, PL1_ENABLE, /* power limit 1, aka long term */ PL1_CLAMP, /* allow frequency to go below OS request */ PL2_ENABLE, /* power limit 2, aka short term, instantaneous */ PL2_CLAMP, + PL4_ENABLE, /* power limit 4, aka max peak power */ TIME_WINDOW1, /* long term */ TIME_WINDOW2, /* short term */ @@ -65,7 +68,7 @@ struct rapl_domain_data { unsigned long timestamp; }; -#define NR_POWER_LIMITS (2) +#define NR_POWER_LIMITS (3) struct rapl_power_limit { struct powercap_zone_constraint *constraint; int prim_id; /* primitive ID used to enable */ diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 0c494534b4d3..4735518de515 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -41,6 +41,7 @@ struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users + * @inter_set: whether inter-provider pairs will be configured with @set * @data: pointer to private data */ struct icc_provider { @@ -53,6 +54,7 @@ struct icc_provider { struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; + bool inter_set; void *data; }; @@ -103,6 +105,7 @@ void icc_node_del(struct icc_node *node); int icc_nodes_remove(struct icc_provider *provider); int icc_provider_add(struct icc_provider *provider); int icc_provider_del(struct icc_provider *provider); +struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec); #else @@ -117,7 +120,7 @@ static inline struct icc_node *icc_node_create(int id) return ERR_PTR(-ENOTSUPP); } -void icc_node_destroy(int id) +static inline void icc_node_destroy(int id) { } @@ -126,16 +129,16 @@ static inline int icc_link_create(struct icc_node *node, const int dst_id) return -ENOTSUPP; } -int icc_link_destroy(struct icc_node *src, struct icc_node *dst) +static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst) { return -ENOTSUPP; } -void icc_node_add(struct icc_node *node, struct icc_provider *provider) +static inline void icc_node_add(struct icc_node *node, struct icc_provider *provider) { } -void icc_node_del(struct icc_node *node) +static inline void icc_node_del(struct icc_node *node) { } @@ -154,6 +157,11 @@ static inline int icc_provider_del(struct icc_provider *provider) return -ENOTSUPP; } +static inline struct icc_node 
*of_icc_get_from_provider(struct of_phandle_args *spec) +{ + return ERR_PTR(-ENOTSUPP); +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_PROVIDER_H */ diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index d70a914cba11..3a63d98613fc 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -28,9 +28,14 @@ struct device; struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id); struct icc_path *of_icc_get(struct device *dev, const char *name); +struct icc_path *devm_of_icc_get(struct device *dev, const char *name); +struct icc_path *of_icc_get_by_index(struct device *dev, int idx); void icc_put(struct icc_path *path); +int icc_enable(struct icc_path *path); +int icc_disable(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); void icc_set_tag(struct icc_path *path, u32 tag); +const char *icc_get_name(struct icc_path *path); #else @@ -46,10 +51,31 @@ static inline struct icc_path *of_icc_get(struct device *dev, return NULL; } +static inline struct icc_path *devm_of_icc_get(struct device *dev, + const char *name) +{ + return NULL; +} + +static inline struct icc_path *of_icc_get_by_index(struct device *dev, int idx) +{ + return NULL; +} + static inline void icc_put(struct icc_path *path) { } +static inline int icc_enable(struct icc_path *path) +{ + return 0; +} + +static inline int icc_disable(struct icc_path *path) +{ + return 0; +} + static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) { return 0; @@ -59,6 +85,11 @@ static inline void icc_set_tag(struct icc_path *path, u32 tag) { } +static inline const char *icc_get_name(struct icc_path *path) +{ + return NULL; +} + #endif /* CONFIG_INTERCONNECT */ #endif /* __LINUX_INTERCONNECT_H */ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 80f637c3a6f3..f9aee3538461 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -585,6 +585,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) /* Tasklets --- multithreaded analogue of BHs. + This API is deprecated. Please consider using threaded IRQs instead: + https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de + Main feature differing them of generic softirqs: tasklet is running only on one CPU simultaneously. 
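The hunk that follows replaces the tasklet's 'unsigned long data' cookie with a callback that receives the tasklet pointer itself, so a handler recovers its state with container_of() rather than a cast. A minimal driver sketch against the new tasklet_setup()/from_tasklet() pair (foo_dev and its handler are hypothetical):

	#include <linux/interrupt.h>

	struct foo_dev {
		struct tasklet_struct tasklet;
		/* ... driver state ... */
	};

	/* New-style handler: takes the tasklet, not an unsigned long. */
	static void foo_do_tasklet(struct tasklet_struct *t)
	{
		/* from_tasklet() is container_of() under the hood. */
		struct foo_dev *foo = from_tasklet(foo, t, tasklet);

		/* ... process foo ... */
	}

	static void foo_init(struct foo_dev *foo)
	{
		tasklet_setup(&foo->tasklet, foo_do_tasklet);
	}

Code not yet converted keeps the old prototype via DECLARE_TASKLET_OLD() and DECLARE_TASKLET_DISABLED_OLD(), added in the same hunk.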
@@ -608,16 +611,42 @@ struct tasklet_struct struct tasklet_struct *next; unsigned long state; atomic_t count; - void (*func)(unsigned long); + bool use_callback; + union { + void (*func)(unsigned long data); + void (*callback)(struct tasklet_struct *t); + }; unsigned long data; }; -#define DECLARE_TASKLET(name, func, data) \ -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } +#define DECLARE_TASKLET(name, _callback) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(0), \ + .callback = _callback, \ + .use_callback = true, \ +} -#define DECLARE_TASKLET_DISABLED(name, func, data) \ -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } +#define DECLARE_TASKLET_DISABLED(name, _callback) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(1), \ + .callback = _callback, \ + .use_callback = true, \ +} +#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \ + container_of(callback_tasklet, typeof(*var), tasklet_fieldname) + +#define DECLARE_TASKLET_OLD(name, _func) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(0), \ + .func = _func, \ +} + +#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \ +struct tasklet_struct name = { \ + .count = ATOMIC_INIT(1), \ + .func = _func, \ +} enum { @@ -686,6 +715,8 @@ extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); +extern void tasklet_setup(struct tasklet_struct *t, + void (*callback)(struct tasklet_struct *)); /* * Autoprobing for irqs: @@ -760,8 +791,10 @@ extern int arch_early_irq_init(void); /* * We want to know which function is an entrypoint of a hardirq or a softirq. 
*/ -#define __irq_entry __attribute__((__section__(".irqentry.text"))) -#define __softirq_entry \ - __attribute__((__section__(".softirqentry.text"))) +#ifndef __irq_entry +# define __irq_entry __attribute__((__section__(".irqentry.text"))) +#endif + +#define __softirq_entry __attribute__((__section__(".softirqentry.text"))) #endif diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index ae21b72cce85..f32522bb3aa5 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -57,7 +57,7 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) #ifndef ioread64_hi_lo #define ioread64_hi_lo ioread64_hi_lo -static inline u64 ioread64_hi_lo(void __iomem *addr) +static inline u64 ioread64_hi_lo(const void __iomem *addr) { u32 low, high; @@ -79,7 +79,7 @@ static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) #ifndef ioread64be_hi_lo #define ioread64be_hi_lo ioread64be_hi_lo -static inline u64 ioread64be_hi_lo(void __iomem *addr) +static inline u64 ioread64be_hi_lo(const void __iomem *addr) { u32 low, high; diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index faaa842dbdb9..448a21435dba 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -57,7 +57,7 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) #ifndef ioread64_lo_hi #define ioread64_lo_hi ioread64_lo_hi -static inline u64 ioread64_lo_hi(void __iomem *addr) +static inline u64 ioread64_lo_hi(const void __iomem *addr) { u32 low, high; @@ -79,7 +79,7 @@ static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) #ifndef ioread64be_lo_hi #define ioread64be_lo_hi ioread64be_lo_hi -static inline u64 ioread64be_lo_hi(void __iomem *addr) +static inline u64 ioread64be_lo_hi(const void __iomem *addr) { u32 low, high; diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index b336622612f3..c75e4d3d8833 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/bug.h> #include <linux/io.h> +#include <linux/pgtable.h> #include <asm/page.h> /* @@ -99,7 +100,6 @@ io_mapping_unmap(void __iomem *vaddr) #else #include <linux/uaccess.h> -#include <asm/pgtable.h> /* Create the io_mapping object*/ static inline struct io_mapping * @@ -107,9 +107,12 @@ io_mapping_init_wc(struct io_mapping *iomap, resource_size_t base, unsigned long size) { + iomap->iomem = ioremap_wc(base, size); + if (!iomap->iomem) + return NULL; + iomap->base = base; iomap->size = size; - iomap->iomem = ioremap_wc(base, size); #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... 
*/ iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); #elif defined(pgprot_writecombine) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 53d53c6c2be9..23285ba645db 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -155,7 +155,7 @@ struct io_pgtable_cfg { */ struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); + phys_addr_t paddr, size_t size, int prot, gfp_t gfp); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 8b09463dae0d..4d1d3c3469e9 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -155,8 +155,7 @@ loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_readpage(struct page *page, const struct iomap_ops *ops); -int iomap_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages, const struct iomap_ops *ops); +void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); int iomap_set_page_dirty(struct page *page); int iomap_is_partially_uptodate(struct page *page, unsigned long from, unsigned long count); @@ -178,7 +177,7 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, - loff_t start, loff_t len, const struct iomap_ops *ops); + u64 start, u64 len, const struct iomap_ops *ops); loff_t iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops); loff_t iomap_seek_data(struct inode *inode, loff_t offset, @@ -252,6 +251,8 @@ int iomap_writepages(struct address_space *mapping, struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); + blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap, + struct bio *bio, loff_t file_offset); }; ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 7ef8b0bda695..fee209efb756 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -31,12 +31,6 @@ * if the IOMMU page table format is equivalent. */ #define IOMMU_PRIV (1 << 5) -/* - * Non-coherent masters can use this page protection flag to set cacheable - * memory attributes for only a transparent outer level of cache, also known as - * the last-level or system cache. - */ -#define IOMMU_SYS_CACHE_ONLY (1 << 6) struct iommu_ops; struct iommu_group; @@ -53,8 +47,6 @@ struct iommu_fault_event; typedef int (*iommu_fault_handler_t)(struct iommu_domain *, struct device *, unsigned long, int, void *); -typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *, - void *); typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *); struct iommu_domain_geometry { @@ -171,25 +163,6 @@ enum iommu_dev_features { #define IOMMU_PASID_INVALID (-1U) -/** - * struct iommu_sva_ops - device driver callbacks for an SVA context - * - * @mm_exit: called when the mm is about to be torn down by exit_mmap. After - * @mm_exit returns, the device must not issue any more transaction - * with the PASID given as argument. - * - * The @mm_exit handler is allowed to sleep. 
Be careful about the - * locks taken in @mm_exit, because they might lead to deadlocks if - * they are also held when dropping references to the mm. Consider the - * following call chain: - * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A) - * Using mmput_async() prevents this scenario. - * - */ -struct iommu_sva_ops { - iommu_mm_exit_handler_t mm_exit; -}; - #ifdef CONFIG_IOMMU_API /** @@ -223,8 +196,10 @@ struct iommu_iotlb_gather { * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush * queue * @iova_to_phys: translate iova to physical address - * @add_device: add device to iommu grouping - * @remove_device: remove device from iommu grouping + * @probe_device: Add device to iommu driver handling + * @release_device: Remove device from iommu driver handling + * @probe_finalize: Do final setup work after the device is added to an IOMMU + * group and attached to the groups domain * @device_group: find iommu group for a particular device * @domain_get_attr: Query domain attributes * @domain_set_attr: Change domain attributes @@ -248,6 +223,10 @@ struct iommu_iotlb_gather { * @cache_invalidate: invalidate translation caches * @sva_bind_gpasid: bind guest pasid and mm * @sva_unbind_gpasid: unbind guest pasid and mm + * @def_domain_type: device default domain type, return value: + * - IOMMU_DOMAIN_IDENTITY: must use an identity domain + * - IOMMU_DOMAIN_DMA: must use a dma domain + * - 0: use the default setting * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops */ @@ -269,8 +248,9 @@ struct iommu_ops { void (*iotlb_sync)(struct iommu_domain *domain, struct iommu_iotlb_gather *iotlb_gather); phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); - int (*add_device)(struct device *dev); - void (*remove_device)(struct device *dev); + struct iommu_device *(*probe_device)(struct device *dev); + void (*release_device)(struct device *dev); + void (*probe_finalize)(struct device *dev); struct iommu_group *(*device_group)(struct device *dev); int (*domain_get_attr)(struct iommu_domain *domain, enum iommu_attr attr, void *data); @@ -318,6 +298,8 @@ struct iommu_ops { int (*sva_unbind_gpasid)(struct device *dev, int pasid); + int (*def_domain_type)(struct device *dev); + unsigned long pgsize_bitmap; struct module *owner; }; @@ -369,6 +351,7 @@ struct iommu_fault_param { * * @fault_param: IOMMU detected device fault reporting data * @fwspec: IOMMU fwspec data + * @iommu_dev: IOMMU device this device is linked to * @priv: IOMMU Driver private data * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 
@@ -378,6 +361,7 @@ struct dev_iommu { struct mutex lock; struct iommu_fault_param *fault_param; struct iommu_fwspec *fwspec; + struct iommu_device *iommu_dev; void *priv; }; @@ -430,6 +414,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); +extern int bus_iommu_probe(struct bus_type *bus); extern bool iommu_present(struct bus_type *bus); extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); @@ -470,8 +455,6 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); extern void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list); -extern int iommu_request_dm_for_dev(struct device *dev); -extern int iommu_request_dma_domain_for_dev(struct device *dev); extern void iommu_set_default_passthrough(bool cmd_line); extern void iommu_set_default_translated(bool cmd_line); extern bool iommu_default_passthrough(void); @@ -515,7 +498,6 @@ extern int iommu_page_response(struct device *dev, struct iommu_page_response *msg); extern int iommu_group_id(struct iommu_group *group); -extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, @@ -605,7 +587,6 @@ struct iommu_fwspec { */ struct iommu_sva { struct device *dev; - const struct iommu_sva_ops *ops; }; int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, @@ -653,8 +634,6 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata); void iommu_sva_unbind_device(struct iommu_sva *handle); -int iommu_sva_set_ops(struct iommu_sva *handle, - const struct iommu_sva_ops *ops); int iommu_sva_get_pasid(struct iommu_sva *handle); #else /* CONFIG_IOMMU_API */ @@ -793,16 +772,6 @@ static inline int iommu_get_group_resv_regions(struct iommu_group *group, return -ENODEV; } -static inline int iommu_request_dm_for_dev(struct device *dev) -{ - return -ENODEV; -} - -static inline int iommu_request_dma_domain_for_dev(struct device *dev) -{ - return -ENODEV; -} - static inline void iommu_set_default_passthrough(bool cmd_line) { } @@ -1058,12 +1027,6 @@ static inline void iommu_sva_unbind_device(struct iommu_sva *handle) { } -static inline int iommu_sva_set_ops(struct iommu_sva *handle, - const struct iommu_sva_ops *ops) -{ - return -EINVAL; -} - static inline int iommu_sva_get_pasid(struct iommu_sva *handle) { return IOMMU_PASID_INVALID; @@ -1094,6 +1057,22 @@ static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) } #endif /* CONFIG_IOMMU_API */ +/** + * iommu_map_sgtable - Map the given buffer to the IOMMU domain + * @domain: The IOMMU domain to perform the mapping + * @iova: The start address to map the buffer + * @sgt: The sg_table object describing the buffer + * @prot: IOMMU protection bits + * + * Creates a mapping at @iova for the buffer described by a scatterlist + * stored in the given sg_table object in the provided IOMMU domain. 
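+ *
+ * The return value is that of iommu_map_sg(): the number of bytes
+ * mapped, or 0 on failure. A minimal usage sketch, assuming a
+ * hypothetical caller that has already reserved @iova and built the
+ * sg_table:
+ *
+ * .. code-block:: c
+ *
+ *	size_t mapped;
+ *
+ *	mapped = iommu_map_sgtable(domain, iova, &sgt,
+ *				   IOMMU_READ | IOMMU_WRITE);
+ *	if (!mapped)
+ *		return -ENOMEM;	// nothing was mapped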
+ */ +static inline size_t iommu_map_sgtable(struct iommu_domain *domain, + unsigned long iova, struct sg_table *sgt, int prot) +{ + return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot); +} + #ifdef CONFIG_IOMMU_DEBUGFS extern struct dentry *iommu_debugfs_dir; void iommu_debugfs_setup(void); diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index cb20c733b15a..bc89ac625f26 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -58,6 +58,48 @@ }) /** + * read_poll_timeout_atomic - Periodically poll an address until a condition is + * met or a timeout occurs + * @op: accessor function (takes @addr as its only argument) + * @addr: Address to poll + * @val: Variable to read the value into + * @cond: Break condition (usually involving @val) + * @delay_us: Time to udelay between reads in us (0 tight-loops). Should + * be less than ~10us since udelay is used (see + * Documentation/timers/timers-howto.rst). + * @timeout_us: Timeout in us, 0 means never timeout + * @delay_before_read: if it is true, delay @delay_us before read. + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @args is stored in @val. + * + * When available, you'll probably want to use one of the specialized + * macros defined below rather than this macro directly. + */ +#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \ + delay_before_read, args...) \ +({ \ + u64 __timeout_us = (timeout_us); \ + unsigned long __delay_us = (delay_us); \ + ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ + if (delay_before_read && __delay_us) \ + udelay(__delay_us); \ + for (;;) { \ + (val) = op(args); \ + if (cond) \ + break; \ + if (__timeout_us && \ + ktime_compare(ktime_get(), __timeout) > 0) { \ + (val) = op(args); \ + break; \ + } \ + if (__delay_us) \ + udelay(__delay_us); \ + } \ + (cond) ? 0 : -ETIMEDOUT; \ +}) + +/** * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs * @op: accessor function (takes @addr as its only argument) * @addr: Address to poll @@ -96,25 +138,7 @@ * macros defined below rather than this macro directly. */ #define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ -({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __delay_us = (delay_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - for (;;) { \ - (val) = op(addr); \ - if (cond) \ - break; \ - if (__timeout_us && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - (val) = op(addr); \ - break; \ - } \ - if (__delay_us) \ - udelay(__delay_us); \ - } \ - (cond) ? 
0 : -ETIMEDOUT; \ -}) - + read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr) #define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index a9b9170b5dd2..6c2b06fe8beb 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -103,6 +103,7 @@ struct resource { #define IORESOURCE_MEM_32BIT (3<<3) #define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ #define IORESOURCE_MEM_EXPANSIONROM (1<<6) +#define IORESOURCE_MEM_DRIVER_MANAGED (1<<7) /* PnP I/O specific bits (IORESOURCE_BITS) */ #define IORESOURCE_IO_16BIT_ADDR (1<<0) @@ -301,5 +302,11 @@ struct resource *devm_request_free_mem_region(struct device *dev, struct resource *request_free_mem_region(struct resource *base, unsigned long size, const char *name); +#ifdef CONFIG_IO_STRICT_DEVMEM +void revoke_devmem(struct resource *res); +#else +static inline void revoke_devmem(struct resource *res) { }; +#endif + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index c309f43bde45..a06a78c67f19 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -68,6 +68,8 @@ struct ipc_namespace { struct user_namespace *user_ns; struct ucounts *ucounts; + struct llist_node mnt_llist; + struct ns_common ns; } __randomize_layout; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 2cb445a8fc9e..a44789d027cc 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -223,7 +223,7 @@ struct ipv6_pinfo { /* * Packed in 16bits. - * Omit one shift by by putting the signed field at MSB. + * Omit one shift by putting the signed field at MSB. */ #if defined(__BIG_ENDIAN_BITFIELD) __s16 hop_limit:9; @@ -283,6 +283,7 @@ struct ipv6_pinfo { autoflowlabel:1, autoflowlabel_set:1, mc_all:1, + recverr_rfc4884:1, rtalert_isolate:1; __u8 min_hopcount; __u8 tclass; diff --git a/include/linux/irq.h b/include/linux/irq.h index 8d5bc2c237d7..1b7f4dfee35b 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -213,6 +213,8 @@ struct irq_data { * required * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked * from actual interrupt context. + * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call + * irq_chip::irq_set_affinity() when deactivated. 
*/ enum { IRQD_TRIGGER_MASK = 0xf, @@ -237,6 +239,7 @@ enum { IRQD_CAN_RESERVE = (1 << 26), IRQD_MSI_NOMASK_QUIRK = (1 << 27), IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28), + IRQD_AFFINITY_ON_ACTIVATE = (1 << 29), }; #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) @@ -421,6 +424,16 @@ static inline bool irqd_msi_nomask_quirk(struct irq_data *d) return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK; } +static inline void irqd_set_affinity_on_activate(struct irq_data *d) +{ + __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; +} + +static inline bool irqd_affinity_on_activate(struct irq_data *d) +{ + return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; +} + #undef __irqd_to_state static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h index 4500d453a63e..ab831e5ae748 100644 --- a/include/linux/irq_sim.h +++ b/include/linux/irq_sim.h @@ -1,41 +1,26 @@ /* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2017-2018 Bartosz Golaszewski <brgl@bgdev.pl> + * Copyright (C) 2020 Bartosz Golaszewski <bgolaszewski@baylibre.com> */ #ifndef _LINUX_IRQ_SIM_H #define _LINUX_IRQ_SIM_H -#include <linux/irq_work.h> #include <linux/device.h> +#include <linux/fwnode.h> +#include <linux/irqdomain.h> /* * Provides a framework for allocating simulated interrupts which can be * requested like normal irqs and enqueued from process context. */ -struct irq_sim_work_ctx { - struct irq_work work; - unsigned long *pending; -}; - -struct irq_sim_irq_ctx { - int irqnum; - bool enabled; -}; - -struct irq_sim { - struct irq_sim_work_ctx work_ctx; - int irq_base; - unsigned int irq_count; - struct irq_sim_irq_ctx *irqs; -}; - -int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs); -int devm_irq_sim_init(struct device *dev, struct irq_sim *sim, - unsigned int num_irqs); -void irq_sim_fini(struct irq_sim *sim); -void irq_sim_fire(struct irq_sim *sim, unsigned int offset); -int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset); +struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode, + unsigned int num_irqs); +struct irq_domain *devm_irq_domain_create_sim(struct device *dev, + struct fwnode_handle *fwnode, + unsigned int num_irqs); +void irq_domain_remove_sim(struct irq_domain *domain); #endif /* _LINUX_IRQ_SIM_H */ diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 3b752e80c017..30823780c192 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -2,7 +2,7 @@ #ifndef _LINUX_IRQ_WORK_H #define _LINUX_IRQ_WORK_H -#include <linux/llist.h> +#include <linux/smp_types.h> /* * An entry can be in one of four states: @@ -13,19 +13,14 @@ * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed */ -#define IRQ_WORK_PENDING BIT(0) -#define IRQ_WORK_BUSY BIT(1) - -/* Doesn't want IPI, wait for tick: */ -#define IRQ_WORK_LAZY BIT(2) -/* Run hard IRQ context, even on RT */ -#define IRQ_WORK_HARD_IRQ BIT(3) - -#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) - struct irq_work { - atomic_t flags; - struct llist_node llnode; + union { + struct __call_single_node node; + struct { + struct llist_node llnode; + atomic_t flags; + }; + }; void (*func)(struct irq_work *); }; @@ -53,9 +48,11 @@ void irq_work_sync(struct irq_work *work); void irq_work_run(void); bool irq_work_needs_cpu(void); +void irq_work_single(void *arg); #else static inline bool irq_work_needs_cpu(void) { return false; } static inline void irq_work_run(void) { } +static inline void 
irq_work_single(void *arg) { } #endif #endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 950e4b2458f0..67351aac65ef 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -12,7 +12,9 @@ #define _LINUX_IRQCHIP_H #include <linux/acpi.h> +#include <linux/module.h> #include <linux/of.h> +#include <linux/platform_device.h> /* * This macro must be used by the different irqchip drivers to declare @@ -26,6 +28,28 @@ */ #define IRQCHIP_DECLARE(name, compat, fn) OF_DECLARE_2(irqchip, name, compat, fn) +extern int platform_irqchip_probe(struct platform_device *pdev); + +#define IRQCHIP_PLATFORM_DRIVER_BEGIN(drv_name) \ +static const struct of_device_id drv_name##_irqchip_match_table[] = { + +#define IRQCHIP_MATCH(compat, fn) { .compatible = compat, .data = fn }, + +#define IRQCHIP_PLATFORM_DRIVER_END(drv_name) \ + {}, \ +}; \ +MODULE_DEVICE_TABLE(of, drv_name##_irqchip_match_table); \ +static struct platform_driver drv_name##_driver = { \ + .probe = platform_irqchip_probe, \ + .driver = { \ + .name = #drv_name, \ + .owner = THIS_MODULE, \ + .of_match_table = drv_name##_irqchip_match_table, \ + .suppress_bind_attrs = true, \ + }, \ +}; \ +builtin_platform_driver(drv_name##_driver) + /* * This macro must be used by the different irqchip drivers to declare * the association between their version and their initialization function. @@ -39,8 +63,9 @@ * @fn: initialization function */ #define IRQCHIP_ACPI_DECLARE(name, subtable, validate, data, fn) \ - ACPI_DECLARE_PROBE_ENTRY(irqchip, name, ACPI_SIG_MADT, \ - subtable, validate, data, fn) + ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(irqchip, name, \ + ACPI_SIG_MADT, subtable, \ + validate, data, fn) #ifdef CONFIG_IRQCHIP void irqchip_init(void); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 6c36b6cc3edf..f6d092fdb93d 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h @@ -19,7 +19,6 @@ #define GICD_CLRSPI_NSR 0x0048 #define GICD_SETSPI_SR 0x0050 #define GICD_CLRSPI_SR 0x0058 -#define GICD_SEIR 0x0068 #define GICD_IGROUPR 0x0080 #define GICD_ISENABLER 0x0100 #define GICD_ICENABLER 0x0180 @@ -119,14 +118,11 @@ #define GICR_WAKER 0x0014 #define GICR_SETLPIR 0x0040 #define GICR_CLRLPIR 0x0048 -#define GICR_SEIR GICD_SEIR #define GICR_PROPBASER 0x0070 #define GICR_PENDBASER 0x0078 #define GICR_INVLPIR 0x00A0 #define GICR_INVALLR 0x00B0 #define GICR_SYNCR 0x00C0 -#define GICR_MOVLPIR 0x0100 -#define GICR_MOVALLR 0x0110 #define GICR_IDREGS GICD_IDREGS #define GICR_PIDR2 GICD_PIDR2 diff --git a/include/linux/irqchip/arm-vic.h b/include/linux/irqchip/arm-vic.h index a158b97242c7..f2b11d1df23d 100644 --- a/include/linux/irqchip/arm-vic.h +++ b/include/linux/irqchip/arm-vic.h @@ -9,17 +9,6 @@ #include <linux/types.h> -#define VIC_RAW_STATUS 0x08 -#define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ -#define VIC_INT_ENABLE_CLEAR 0x14 - -struct device_node; -struct pt_regs; - -void __vic_init(void __iomem *base, int parent_irq, int irq_start, - u32 vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); -int vic_init_cascaded(void __iomem *base, unsigned int parent_irq, - u32 vic_sources, u32 resume_sources); #endif diff --git a/include/linux/irqchip/irq-bcm2836.h b/include/linux/irqchip/irq-bcm2836.h index f224d6f9e550..ac5719d8f56b 100644 --- a/include/linux/irqchip/irq-bcm2836.h +++ b/include/linux/irqchip/irq-bcm2836.h @@ -30,7 
+30,7 @@ */ #define LOCAL_MAILBOX_INT_CONTROL0 0x050 /* - * The CPU's interrupt status register. Bits are defined by the the + * The CPU's interrupt status register. Bits are defined by the * LOCAL_IRQ_* bits below. */ #define LOCAL_IRQ_PENDING0 0x060 diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h index 216e5adf80ce..dca379c0d7eb 100644 --- a/include/linux/irqchip/irq-omap-intc.h +++ b/include/linux/irqchip/irq-omap-intc.h @@ -2,7 +2,7 @@ /** * irq-omap-intc.h - INTC Idle Functions * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * * Author: Felipe Balbi <balbi@ti.com> */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 8f2820c5e69e..5745491303e0 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -22,7 +22,6 @@ struct pt_regs; * @irq_common_data: per irq and chip data passed down to chip functions * @kstat_irqs: irq stats per cpu * @handle_irq: highlevel irq-events handler - * @preflow_handler: handler called before the flow handler (currently used by sparc) * @action: the irq action chain * @status_use_accessors: status information * @core_internal_state__do_not_mess_with_it: core internal status information @@ -58,9 +57,6 @@ struct irq_desc { struct irq_data irq_data; unsigned int __percpu *kstat_irqs; irq_flow_handler_t handle_irq; -#ifdef CONFIG_IRQ_PREFLOW_FASTEOI - irq_preflow_handler_t preflow_handler; -#endif struct irqaction *action; /* IRQ action list */ unsigned int status_use_accessors; unsigned int core_internal_state__do_not_mess_with_it; @@ -268,15 +264,4 @@ irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, } } -#ifdef CONFIG_IRQ_PREFLOW_FASTEOI -static inline void -__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - desc->preflow_handler = handler; -} -#endif - #endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 8d062e86d954..b37350c4fe37 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -450,6 +450,7 @@ extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq, struct irq_chip *chip, void *chip_data, irq_flow_handler_t handler, void *handler_data, const char *handler_name); +extern void irq_domain_reset_irq_data(struct irq_data *irq_data); #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY extern struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent, unsigned int flags, unsigned int size, @@ -491,7 +492,6 @@ extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, irq_hw_number_t hwirq, struct irq_chip *chip, void *chip_data); -extern void irq_domain_reset_irq_data(struct irq_data *irq_data); extern void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs); diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 61a9ced3aa50..3ed4e8771b64 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -14,31 +14,54 @@ #include <linux/typecheck.h> #include <asm/irqflags.h> +#include <asm/percpu.h> /* Currently lockdep_softirqs_on/off is used only by lockdep */ #ifdef CONFIG_PROVE_LOCKING extern void lockdep_softirqs_on(unsigned long ip); extern void lockdep_softirqs_off(unsigned long ip); + extern void lockdep_hardirqs_on_prepare(unsigned long ip); extern void lockdep_hardirqs_on(unsigned long ip); extern void 
lockdep_hardirqs_off(unsigned long ip); #else static inline void lockdep_softirqs_on(unsigned long ip) { } static inline void lockdep_softirqs_off(unsigned long ip) { } + static inline void lockdep_hardirqs_on_prepare(unsigned long ip) { } static inline void lockdep_hardirqs_on(unsigned long ip) { } static inline void lockdep_hardirqs_off(unsigned long ip) { } #endif #ifdef CONFIG_TRACE_IRQFLAGS - extern void trace_hardirqs_on(void); - extern void trace_hardirqs_off(void); -# define lockdep_hardirq_context(p) ((p)->hardirq_context) + +/* Per-task IRQ trace events information. */ +struct irqtrace_events { + unsigned int irq_events; + unsigned long hardirq_enable_ip; + unsigned long hardirq_disable_ip; + unsigned int hardirq_enable_event; + unsigned int hardirq_disable_event; + unsigned long softirq_disable_ip; + unsigned long softirq_enable_ip; + unsigned int softirq_disable_event; + unsigned int softirq_enable_event; +}; + +DECLARE_PER_CPU(int, hardirqs_enabled); +DECLARE_PER_CPU(int, hardirq_context); + +extern void trace_hardirqs_on_prepare(void); +extern void trace_hardirqs_off_finish(void); +extern void trace_hardirqs_on(void); +extern void trace_hardirqs_off(void); + +# define lockdep_hardirq_context() (raw_cpu_read(hardirq_context)) # define lockdep_softirq_context(p) ((p)->softirq_context) -# define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled) +# define lockdep_hardirqs_enabled() (this_cpu_read(hardirqs_enabled)) # define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled) -# define lockdep_hardirq_enter() \ -do { \ - if (!current->hardirq_context++) \ - current->hardirq_threaded = 0; \ +# define lockdep_hardirq_enter() \ +do { \ + if (__this_cpu_inc_return(hardirq_context) == 1)\ + current->hardirq_threaded = 0; \ } while (0) # define lockdep_hardirq_threaded() \ do { \ @@ -46,7 +69,7 @@ do { \ } while (0) # define lockdep_hardirq_exit() \ do { \ - current->hardirq_context--; \ + __this_cpu_dec(hardirq_context); \ } while (0) # define lockdep_softirq_enter() \ do { \ @@ -96,17 +119,19 @@ do { \ } while (0) #else -# define trace_hardirqs_on() do { } while (0) -# define trace_hardirqs_off() do { } while (0) -# define lockdep_hardirq_context(p) 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled(p) 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } while (0) -# define lockdep_hardirq_threaded() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) +# define trace_hardirqs_on_prepare() do { } while (0) +# define trace_hardirqs_off_finish() do { } while (0) +# define trace_hardirqs_on() do { } while (0) +# define trace_hardirqs_off() do { } while (0) +# define lockdep_hardirq_context() 0 +# define lockdep_softirq_context(p) 0 +# define lockdep_hardirqs_enabled() 0 +# define lockdep_softirqs_enabled(p) 0 +# define lockdep_hardirq_enter() do { } while (0) +# define lockdep_hardirq_threaded() do { } while (0) +# define lockdep_hardirq_exit() do { } while (0) +# define lockdep_softirq_enter() do { } while (0) +# define lockdep_softirq_exit() do { } while (0) # define lockdep_hrtimer_enter(__hrtimer) false # define lockdep_hrtimer_exit(__context) do { } while (0) # define lockdep_posixtimer_enter() do { } while (0) @@ -157,26 +182,33 @@ do { \ * if !TRACE_IRQFLAGS. 
*/ #ifdef CONFIG_TRACE_IRQFLAGS -#define local_irq_enable() \ - do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) -#define local_irq_disable() \ - do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) + +#define local_irq_enable() \ + do { \ + trace_hardirqs_on(); \ + raw_local_irq_enable(); \ + } while (0) + +#define local_irq_disable() \ + do { \ + bool was_disabled = raw_irqs_disabled();\ + raw_local_irq_disable(); \ + if (!was_disabled) \ + trace_hardirqs_off(); \ + } while (0) + #define local_irq_save(flags) \ do { \ raw_local_irq_save(flags); \ - trace_hardirqs_off(); \ + if (!raw_irqs_disabled_flags(flags)) \ + trace_hardirqs_off(); \ } while (0) - #define local_irq_restore(flags) \ do { \ - if (raw_irqs_disabled_flags(flags)) { \ - raw_local_irq_restore(flags); \ - trace_hardirqs_off(); \ - } else { \ + if (!raw_irqs_disabled_flags(flags)) \ trace_hardirqs_on(); \ - raw_local_irq_restore(flags); \ - } \ + raw_local_irq_restore(flags); \ } while (0) #define safe_halt() \ @@ -190,10 +222,7 @@ do { \ #define local_irq_enable() do { raw_local_irq_enable(); } while (0) #define local_irq_disable() do { raw_local_irq_disable(); } while (0) -#define local_irq_save(flags) \ - do { \ - raw_local_irq_save(flags); \ - } while (0) +#define local_irq_save(flags) do { raw_local_irq_save(flags); } while (0) #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) #define safe_halt() do { raw_safe_halt(); } while (0) diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h index 1e6f4e7123d6..c30f454a9518 100644 --- a/include/linux/irqhandler.h +++ b/include/linux/irqhandler.h @@ -10,6 +10,5 @@ struct irq_desc; struct irq_data; typedef void (*irq_flow_handler_t)(struct irq_desc *desc); -typedef void (*irq_preflow_handler_t)(struct irq_data *data); #endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f613d8529863..08f904943ab2 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -27,6 +27,7 @@ #include <linux/timer.h> #include <linux/slab.h> #include <linux/bit_spinlock.h> +#include <linux/blkdev.h> #include <crypto/hash.h> #endif @@ -766,6 +767,11 @@ struct journal_s int j_errno; /** + * @j_abort_mutex: Lock the whole aborting procedure. + */ + struct mutex j_abort_mutex; + + /** * @j_sb_buffer: The first part of the superblock buffer. */ struct buffer_head *j_sb_buffer; @@ -1247,7 +1253,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3) #define JBD2_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file * data write error in ordered * mode */ -#define JBD2_REC_ERR 0x080 /* The errno in the sb has been recorded */ /* * Function declarations for the journaling transaction and buffer @@ -1376,7 +1381,7 @@ extern int jbd2_journal_dirty_metadata (handle_t *, struct buffer_head *); extern int jbd2_journal_forget (handle_t *, struct buffer_head *); extern int jbd2_journal_invalidatepage(journal_t *, struct page *, unsigned int, unsigned int); -extern int jbd2_journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); +extern int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page); extern int jbd2_journal_stop(handle_t *); extern int jbd2_journal_flush (journal_t *); extern void jbd2_journal_lock_updates (journal_t *); diff --git a/include/linux/jhash.h b/include/linux/jhash.h index ba2f6a9776b6..cfb62e9f37be 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -5,7 +5,7 @@ * * Copyright (C) 2006. 
Bob Jenkins (bob_jenkins@burtleburtle.net) * - * http://burtleburtle.net/bob/hash/ + * https://burtleburtle.net/bob/hash/ * * These are the credits from Bob's sources: * @@ -86,17 +86,17 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) } /* Last block: affect all 32 bits of (c) */ switch (length) { - case 12: c += (u32)k[11]<<24; /* fall through */ - case 11: c += (u32)k[10]<<16; /* fall through */ - case 10: c += (u32)k[9]<<8; /* fall through */ - case 9: c += k[8]; /* fall through */ - case 8: b += (u32)k[7]<<24; /* fall through */ - case 7: b += (u32)k[6]<<16; /* fall through */ - case 6: b += (u32)k[5]<<8; /* fall through */ - case 5: b += k[4]; /* fall through */ - case 4: a += (u32)k[3]<<24; /* fall through */ - case 3: a += (u32)k[2]<<16; /* fall through */ - case 2: a += (u32)k[1]<<8; /* fall through */ + case 12: c += (u32)k[11]<<24; fallthrough; + case 11: c += (u32)k[10]<<16; fallthrough; + case 10: c += (u32)k[9]<<8; fallthrough; + case 9: c += k[8]; fallthrough; + case 8: b += (u32)k[7]<<24; fallthrough; + case 7: b += (u32)k[6]<<16; fallthrough; + case 6: b += (u32)k[5]<<8; fallthrough; + case 5: b += k[4]; fallthrough; + case 4: a += (u32)k[3]<<24; fallthrough; + case 3: a += (u32)k[2]<<16; fallthrough; + case 2: a += (u32)k[1]<<8; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ @@ -132,8 +132,8 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) /* Handle the last 3 u32's */ switch (length) { - case 3: c += k[2]; /* fall through */ - case 2: b += k[1]; /* fall through */ + case 3: c += k[2]; fallthrough; + case 2: b += k[1]; fallthrough; case 1: a += k[0]; __jhash_final(a, b, c); case 0: /* Nothing left to add */ diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 3526c0aee954..32809624d422 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -68,7 +68,7 @@ * Lacking toolchain and or architecture support, static keys fall back to a * simple conditional branch. * - * Additional babbling in: Documentation/static-keys.txt + * Additional babbling in: Documentation/staging/static-keys.rst */ #ifndef __ASSEMBLY__ diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 657a83b943f0..481273f0c72d 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -18,6 +18,7 @@ #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) +struct cred; struct module; static inline int is_kernel_inittext(unsigned long addr) @@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname); int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* How and when do we show kallsyms values? 
*/ -extern int kallsyms_show_value(void); +extern bool kallsyms_show_value(const struct cred *cred); #else /* !CONFIG_KALLSYMS */ @@ -158,16 +159,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u return -ERANGE; } -static inline int kallsyms_show_value(void) +static inline bool kallsyms_show_value(const struct cred *cred) { return false; } #endif /*CONFIG_KALLSYMS*/ -static inline void print_ip_sym(unsigned long ip) +static inline void print_ip_sym(const char *loglvl, unsigned long ip) { - printk("[<%px>] %pS\n", (void *) ip, (void *) ip); + printk("%s[<%px>] %pS\n", loglvl, (void *) ip, (void *) ip); } #endif /*_LINUX_KALLSYMS_H*/ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 31314ca7c635..087fba34b209 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -11,8 +11,8 @@ struct task_struct; #ifdef CONFIG_KASAN +#include <linux/pgtable.h> #include <asm/kasan.h> -#include <asm/pgtable.h> extern unsigned char kasan_early_shadow_page[PAGE_SIZE]; extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE]; @@ -38,7 +38,6 @@ extern void kasan_disable_current(void); void kasan_unpoison_shadow(const void *address, size_t size); void kasan_unpoison_task_stack(struct task_struct *task); -void kasan_unpoison_stack_above_sp_to(const void *watermark); void kasan_alloc_pages(struct page *page, unsigned int order); void kasan_free_pages(struct page *page, unsigned int order); @@ -101,7 +100,6 @@ void kasan_restore_multi_shot(bool enabled); static inline void kasan_unpoison_shadow(const void *address, size_t size) {} static inline void kasan_unpoison_task_stack(struct task_struct *task) {} -static inline void kasan_unpoison_stack_above_sp_to(const void *watermark) {} static inline void kasan_enable_current(void) {} static inline void kasan_disable_current(void) {} @@ -174,11 +172,13 @@ static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; } void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); +void kasan_record_aux_stack(void *ptr); #else /* CONFIG_KASAN_GENERIC */ static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} +static inline void kasan_record_aux_stack(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h index cc8fa109cfa3..9d12c970f18f 100644 --- a/include/linux/kconfig.h +++ b/include/linux/kconfig.h @@ -2,6 +2,8 @@ #ifndef __LINUX_KCONFIG_H #define __LINUX_KCONFIG_H +/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */ + #include <generated/autoconf.h> #ifdef CONFIG_CPU_BIG_ENDIAN diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h new file mode 100644 index 000000000000..c5f6c1dcf7e3 --- /dev/null +++ b/include/linux/kcsan-checks.h @@ -0,0 +1,432 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_CHECKS_H +#define _LINUX_KCSAN_CHECKS_H + +/* Note: Only include what is already included by compiler.h. 
*/ +#include <linux/compiler_attributes.h> +#include <linux/types.h> + +/* + * ACCESS TYPE MODIFIERS + * + * <none>: normal read access; + * WRITE : write access; + * ATOMIC: access is atomic; + * ASSERT: access is not a regular access, but an assertion; + * SCOPED: access is a scoped access; + */ +#define KCSAN_ACCESS_WRITE 0x1 +#define KCSAN_ACCESS_ATOMIC 0x2 +#define KCSAN_ACCESS_ASSERT 0x4 +#define KCSAN_ACCESS_SCOPED 0x8 + +/* + * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used + * even in compilation units that selectively disable KCSAN, but must use KCSAN + * to validate access to an address. Never use these in header files! + */ +#ifdef CONFIG_KCSAN +/** + * __kcsan_check_access - check generic access for races + * + * @ptr: address of access + * @size: size of access + * @type: access type modifier + */ +void __kcsan_check_access(const volatile void *ptr, size_t size, int type); + +/** + * kcsan_disable_current - disable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_disable_current(void); + +/** + * kcsan_enable_current - re-enable KCSAN for the current context + * + * Supports nesting. + */ +void kcsan_enable_current(void); +void kcsan_enable_current_nowarn(void); /* Safe in uaccess regions. */ + +/** + * kcsan_nestable_atomic_begin - begin nestable atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_nestable_atomic_begin(void); + +/** + * kcsan_nestable_atomic_end - end nestable atomic region + */ +void kcsan_nestable_atomic_end(void); + +/** + * kcsan_flat_atomic_begin - begin flat atomic region + * + * Accesses within the atomic region may appear to race with other accesses but + * should be considered atomic. + */ +void kcsan_flat_atomic_begin(void); + +/** + * kcsan_flat_atomic_end - end flat atomic region + */ +void kcsan_flat_atomic_end(void); + +/** + * kcsan_atomic_next - consider following accesses as atomic + * + * Force treating the next n memory accesses for the current context as atomic + * operations. + * + * @n: number of following memory accesses to treat as atomic. + */ +void kcsan_atomic_next(int n); + +/** + * kcsan_set_access_mask - set access mask + * + * Set the access mask for all accesses for the current context if non-zero. + * Only value changes to bits set in the mask will be reported. + * + * @mask: bitmask + */ +void kcsan_set_access_mask(unsigned long mask); + +/* Scoped access information. */ +struct kcsan_scoped_access { + struct list_head list; + const volatile void *ptr; + size_t size; + int type; +}; +/* + * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes + * out of scope; relies on attribute "cleanup", which is supported by all + * compilers that support KCSAN. + */ +#define __kcsan_cleanup_scoped \ + __maybe_unused __attribute__((__cleanup__(kcsan_end_scoped_access))) + +/** + * kcsan_begin_scoped_access - begin scoped access + * + * Begin scoped access and initialize @sa, which will cause KCSAN to + * continuously check the memory range in the current thread until + * kcsan_end_scoped_access() is called for @sa. + * + * Scoped accesses are implemented by appending @sa to an internal list for the + * current execution context, and then checked on every call into the KCSAN + * runtime. 
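+ *
+ * A minimal sketch of direct usage, for illustration only (most code
+ * should use the ASSERT_EXCLUSIVE_*_SCOPED() macros further below,
+ * which wrap this interface; @obj here is hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	struct kcsan_scoped_access sa;
+ *
+ *	kcsan_begin_scoped_access(&obj->state, sizeof(obj->state),
+ *				  KCSAN_ACCESS_SCOPED, &sa);
+ *	// obj->state is race-checked until the matching end call.
+ *	kcsan_end_scoped_access(&sa);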
+ * + * @ptr: address of access + * @size: size of access + * @type: access type modifier + * @sa: struct kcsan_scoped_access to use for the scope of the access + */ +struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa); + +/** + * kcsan_end_scoped_access - end scoped access + * + * End a scoped access, which will stop KCSAN checking the memory range. + * Requires that kcsan_begin_scoped_access() was previously called once for @sa. + * + * @sa: a previously initialized struct kcsan_scoped_access + */ +void kcsan_end_scoped_access(struct kcsan_scoped_access *sa); + + +#else /* CONFIG_KCSAN */ + +static inline void __kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } + +static inline void kcsan_disable_current(void) { } +static inline void kcsan_enable_current(void) { } +static inline void kcsan_enable_current_nowarn(void) { } +static inline void kcsan_nestable_atomic_begin(void) { } +static inline void kcsan_nestable_atomic_end(void) { } +static inline void kcsan_flat_atomic_begin(void) { } +static inline void kcsan_flat_atomic_end(void) { } +static inline void kcsan_atomic_next(int n) { } +static inline void kcsan_set_access_mask(unsigned long mask) { } + +struct kcsan_scoped_access { }; +#define __kcsan_cleanup_scoped __maybe_unused +static inline struct kcsan_scoped_access * +kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type, + struct kcsan_scoped_access *sa) { return sa; } +static inline void kcsan_end_scoped_access(struct kcsan_scoped_access *sa) { } + +#endif /* CONFIG_KCSAN */ + +#ifdef __SANITIZE_THREAD__ +/* + * Only calls into the runtime when the particular compilation unit has KCSAN + * instrumentation enabled. May be used in header files. + */ +#define kcsan_check_access __kcsan_check_access + +/* + * Only use these to disable KCSAN for accesses in the current compilation unit; + * calls into libraries may still perform KCSAN checks. + */ +#define __kcsan_disable_current kcsan_disable_current +#define __kcsan_enable_current kcsan_enable_current_nowarn +#else +static inline void kcsan_check_access(const volatile void *ptr, size_t size, + int type) { } +static inline void __kcsan_enable_current(void) { } +static inline void __kcsan_disable_current(void) { } +#endif + +/** + * __kcsan_check_read - check regular read access for races + * + * @ptr: address of access + * @size: size of access + */ +#define __kcsan_check_read(ptr, size) __kcsan_check_access(ptr, size, 0) + +/** + * __kcsan_check_write - check regular write access for races + * + * @ptr: address of access + * @size: size of access + */ +#define __kcsan_check_write(ptr, size) \ + __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) + +/** + * kcsan_check_read - check regular read access for races + * + * @ptr: address of access + * @size: size of access + */ +#define kcsan_check_read(ptr, size) kcsan_check_access(ptr, size, 0) + +/** + * kcsan_check_write - check regular write access for races + * + * @ptr: address of access + * @size: size of access + */ +#define kcsan_check_write(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE) + +/* + * Check for atomic accesses: if atomic accesses are not ignored, this simply + * aliases to kcsan_check_access(), otherwise becomes a no-op. + */ +#ifdef CONFIG_KCSAN_IGNORE_ATOMICS +#define kcsan_check_atomic_read(...) do { } while (0) +#define kcsan_check_atomic_write(...) 
do { } while (0) +#else +#define kcsan_check_atomic_read(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC) +#define kcsan_check_atomic_write(ptr, size) \ + kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE) +#endif + +/** + * ASSERT_EXCLUSIVE_WRITER - assert no concurrent writes to @var + * + * Assert that there are no concurrent writes to @var; other readers are + * allowed. This assertion can be used to specify properties of concurrent code, + * where violation cannot be detected as a normal data race. + * + * For example, if we only have a single writer, but multiple concurrent + * readers, to avoid data races, all these accesses must be marked; even + * concurrent marked writes racing with the single writer are bugs. + * Unfortunately, due to being marked, they are no longer data races. For cases + * like these, we can use the macro as follows: + * + * .. code-block:: c + * + * void writer(void) { + * spin_lock(&update_foo_lock); + * ASSERT_EXCLUSIVE_WRITER(shared_foo); + * WRITE_ONCE(shared_foo, ...); + * spin_unlock(&update_foo_lock); + * } + * void reader(void) { + * // update_foo_lock does not need to be held! + * ... = READ_ONCE(shared_foo); + * } + * + * Note: ASSERT_EXCLUSIVE_WRITER_SCOPED(), if applicable, performs more thorough + * checking if a clear scope where no concurrent writes are expected exists. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_WRITER(var) \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT) + +/* + * Helper macros for implementation of for ASSERT_EXCLUSIVE_*_SCOPED(). @id is + * expected to be unique for the scope in which instances of kcsan_scoped_access + * are declared. + */ +#define __kcsan_scoped_name(c, suffix) __kcsan_scoped_##c##suffix +#define __ASSERT_EXCLUSIVE_SCOPED(var, type, id) \ + struct kcsan_scoped_access __kcsan_scoped_name(id, _) \ + __kcsan_cleanup_scoped; \ + struct kcsan_scoped_access *__kcsan_scoped_name(id, _dummy_p) \ + __maybe_unused = kcsan_begin_scoped_access( \ + &(var), sizeof(var), KCSAN_ACCESS_SCOPED | (type), \ + &__kcsan_scoped_name(id, _)) + +/** + * ASSERT_EXCLUSIVE_WRITER_SCOPED - assert no concurrent writes to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_WRITER(). + * + * Assert that there are no concurrent writes to @var for the duration of the + * scope in which it is introduced. This provides a better way to fully cover + * the enclosing scope, compared to multiple ASSERT_EXCLUSIVE_WRITER(), and + * increases the likelihood for KCSAN to detect racing accesses. + * + * For example, it allows finding race-condition bugs that only occur due to + * state changes within the scope itself: + * + * .. code-block:: c + * + * void writer(void) { + * spin_lock(&update_foo_lock); + * { + * ASSERT_EXCLUSIVE_WRITER_SCOPED(shared_foo); + * WRITE_ONCE(shared_foo, 42); + * ... + * // shared_foo should still be 42 here! + * } + * spin_unlock(&update_foo_lock); + * } + * void buggy(void) { + * if (READ_ONCE(shared_foo) == 42) + * WRITE_ONCE(shared_foo, 1); // bug! + * } + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_WRITER_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_ASSERT, __COUNTER__) + +/** + * ASSERT_EXCLUSIVE_ACCESS - assert no concurrent accesses to @var + * + * Assert that there are no concurrent accesses to @var (no readers nor + * writers). This assertion can be used to specify properties of concurrent + * code, where violation cannot be detected as a normal data race. 
 + * + * For example, where exclusive access is expected after determining no other + * users of an object are left, but the object is not actually freed. We can + * check that this property actually holds as follows: + * + * .. code-block:: c + * + * if (refcount_dec_and_test(&obj->refcnt)) { + * ASSERT_EXCLUSIVE_ACCESS(*obj); + * do_some_cleanup(obj); + * release_for_reuse(obj); + * } + * + * Note: + * + * 1. ASSERT_EXCLUSIVE_ACCESS_SCOPED(), if applicable, performs more thorough + * checking when there is a clear scope in which no concurrent accesses are expected. + * + * 2. For cases where the object is freed, `KASAN <kasan.html>`_ is a better + * fit to detect use-after-free bugs. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS(var) \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT) + +/** + * ASSERT_EXCLUSIVE_ACCESS_SCOPED - assert no concurrent accesses to @var in scope + * + * Scoped variant of ASSERT_EXCLUSIVE_ACCESS(). + * + * Assert that there are no concurrent accesses to @var (no readers nor writers) + * for the entire duration of the scope in which it is introduced. This provides + * a better way to fully cover the enclosing scope, compared to multiple + * ASSERT_EXCLUSIVE_ACCESS(), and increases the likelihood for KCSAN to detect + * racing accesses. + * + * @var: variable to assert on + */ +#define ASSERT_EXCLUSIVE_ACCESS_SCOPED(var) \ + __ASSERT_EXCLUSIVE_SCOPED(var, KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, __COUNTER__) + +/** + * ASSERT_EXCLUSIVE_BITS - assert no concurrent writes to subset of bits in @var + * + * Bit-granular variant of ASSERT_EXCLUSIVE_WRITER(). + * + * Assert that there are no concurrent writes to a subset of bits in @var; + * concurrent readers are permitted. This assertion captures more detailed + * bit-level properties, compared to the other (word granularity) assertions. + * Only the bits set in @mask are checked for concurrent modifications, while + * ignoring the remaining bits, i.e. concurrent writes (or reads) to ~mask bits + * are ignored. + * + * Use this for variables where some bits must not be modified concurrently, + * yet other bits are expected to be modified concurrently. + * + * For example, variables where, after initialization, some bits are read-only, + * but other bits may still be modified concurrently. A reader may wish to + * assert that this is true as follows: + * + * .. code-block:: c + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (READ_ONCE(flags) & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Note: The access that immediately follows ASSERT_EXCLUSIVE_BITS() is assumed + * to access the masked bits only, and KCSAN optimistically assumes it is + * therefore safe, even in the presence of data races, and marking it with + * READ_ONCE() is optional from KCSAN's point of view. We caution, however, that + * it may still be advisable to do so, since we cannot reason about all compiler + * optimizations when it comes to bit manipulations (on the reader and writer + * side). If you are sure nothing can go wrong, the above can be written simply + * as: + * + * .. code-block:: c + * + * ASSERT_EXCLUSIVE_BITS(flags, READ_ONLY_MASK); + * foo = (flags & READ_ONLY_MASK) >> READ_ONLY_SHIFT; + * + * Another example, where this may be used, is when certain bits of @var may + * only be modified when holding the appropriate lock, but other bits may still + * be modified concurrently.
Writers could then + * use the assertion as follows: + * + * .. code-block:: c + * + * spin_lock(&foo_lock); + * ASSERT_EXCLUSIVE_BITS(flags, FOO_MASK); + * old_flags = flags; + * new_flags = (old_flags & ~FOO_MASK) | (new_foo << FOO_SHIFT); + * if (cmpxchg(&flags, old_flags, new_flags) != old_flags) { ... } + * spin_unlock(&foo_lock); + * + * @var: variable to assert on + * @mask: only check for modifications to bits set in @mask + */ +#define ASSERT_EXCLUSIVE_BITS(var, mask) \ + do { \ + kcsan_set_access_mask(mask); \ + __kcsan_check_access(&(var), sizeof(var), KCSAN_ACCESS_ASSERT);\ + kcsan_set_access_mask(0); \ + kcsan_atomic_next(1); \ + } while (0) + +#endif /* _LINUX_KCSAN_CHECKS_H */
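The scoped assertions above are thin wrappers around the begin/end API earlier in this file: __ASSERT_EXCLUSIVE_SCOPED() passes KCSAN_ACCESS_SCOPED plus the assert type to kcsan_begin_scoped_access() and relies on __kcsan_cleanup_scoped to end the access on scope exit. Where the region of interest does not map onto a single lexical scope, the pair can be open-coded; a minimal sketch, reusing the illustrative shared_foo from the examples above:

.. code-block:: c

    struct kcsan_scoped_access sa;

    kcsan_begin_scoped_access(&shared_foo, sizeof(shared_foo),
                              KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ASSERT, &sa);
    /* ... region in which no concurrent writers to shared_foo are expected ... */
    kcsan_end_scoped_access(&sa);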
diff --git a/include/linux/kcsan.h b/include/linux/kcsan.h new file mode 100644 index 000000000000..53340d8789f9 --- /dev/null +++ b/include/linux/kcsan.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_KCSAN_H +#define _LINUX_KCSAN_H + +#include <linux/kcsan-checks.h> +#include <linux/types.h> + +#ifdef CONFIG_KCSAN + +/* + * Context for each thread of execution: for tasks, this is stored in + * task_struct, and interrupts access internal per-CPU storage. + */ +struct kcsan_ctx { + int disable_count; /* disable counter */ + int atomic_next; /* number of following atomic ops */ + + /* + * We distinguish between: (a) nestable atomic regions that may contain + * other nestable regions; and (b) flat atomic regions that do not keep + * track of nesting. Both (a) and (b) are entirely independent of each + * other, and a flat region may be started in a nestable region or + * vice-versa. + * + * This is required because, for example, in the annotations for + * seqlocks, we declare seqlock writer critical sections as (a) nestable + * atomic regions, but reader critical sections as (b) flat atomic + * regions; however, we have encountered cases where seqlock reader critical + * sections are contained within writer critical sections (the opposite + * may be possible, too). + * + * To support these cases, we independently track the depth of nesting + * for (a), and whether the leaf level is flat for (b). + */ + int atomic_nest_count; + bool in_flat_atomic; + + /* + * Access mask for all accesses if non-zero. + */ + unsigned long access_mask; + + /* List of scoped accesses. */ + struct list_head scoped_accesses; +}; + +/** + * kcsan_init - initialize KCSAN runtime + */ +void kcsan_init(void); + +#else /* CONFIG_KCSAN */ + +static inline void kcsan_init(void) { } + +#endif /* CONFIG_KCSAN */ + +#endif /* _LINUX_KCSAN_H */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 24cd447659e0..0125a677b67f 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -125,7 +125,7 @@ extern const char *kdb_diemsg; #define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do * not use keyboard */ -extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */ +extern unsigned int kdb_flags; /* Global flags, see kdb_state for per cpu state */ extern void kdb_save_flags(void); extern void kdb_restore_flags(void); diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 9b7a8d74a9d6..c25b8e41c0ea 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -17,7 +17,6 @@ #include <asm/byteorder.h> #include <asm/div64.h> #include <uapi/linux/kernel.h> -#include <asm/div64.h> #define STACK_MAGIC 0xdeadbeef @@ -34,6 +33,7 @@ #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a)) #define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define PTR_ALIGN_DOWN(p, a) ((typeof(p))ALIGN_DOWN((unsigned long)(p), (a))) #define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) /* generic data direction definitions */ @@ -186,7 +186,7 @@ * lower_32_bits - return bits 0-31 of a number * @n: the number we're accessing */ -#define lower_32_bits(n) ((u32)(n)) +#define lower_32_bits(n) ((u32)((n) & 0xffffffff)) struct completion; struct pt_regs; @@ -321,8 +321,7 @@ void panic(const char *fmt, ...) __noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); extern void oops_enter(void); extern void oops_exit(void); -void print_oops_end_marker(void); -extern int oops_may_print(void); +extern bool oops_may_print(void); void do_exit(long error_code) __noreturn; void complete_and_exit(struct completion *, long) __noreturn; @@ -346,7 +345,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res); * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Preferred over simple_strtoul(). Return code must be checked. */ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res) { @@ -374,7 +373,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign * @res: Where to write the result of the conversion on success. * * Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error. - * Used as a replacement for the simple_strtoull. Return code must be checked. + * Preferred over simple_strtol(). Return code must be checked.
*/ static inline int __must_check kstrtol(const char *s, unsigned int base, long *res) { @@ -520,6 +519,12 @@ static inline u32 int_sqrt64(u64 x) } #endif +#ifdef CONFIG_SMP +extern unsigned int sysctl_oops_all_cpu_backtrace; +#else +#define sysctl_oops_all_cpu_backtrace 0 +#endif /* CONFIG_SMP */ + extern void bust_spinlocks(int yes); extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ extern int panic_timeout; @@ -528,6 +533,8 @@ extern int panic_on_oops; extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int panic_on_warn; +extern unsigned long panic_on_taint; +extern bool panic_on_taint_nousertaint; extern int sysctl_panic_on_rcu_stall; extern int sysctl_panic_on_stackoverflow; @@ -596,6 +603,7 @@ extern enum system_states { #define TAINT_AUX 16 #define TAINT_RANDSTRUCT 17 #define TAINT_FLAGS_COUNT 18 +#define TAINT_FLAGS_MAX ((1UL << TAINT_FLAGS_COUNT) - 1) struct taint_flag { char c_true; /* character printed when tainted */
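The two comment updates above nudge callers from the old simple_strtoul()/simple_strtol() helpers, which silently ignore conversion errors, toward the checked kstrto*() family. A minimal sketch of the intended pattern (buf stands in for some illustrative NUL-terminated string):

.. code-block:: c

    unsigned long val;
    int ret;

    ret = kstrtoul(buf, 10, &val);  /* base 10; base 0 would auto-detect 0x/0 prefixes */
    if (ret)
        return ret;                 /* -ERANGE on overflow, -EINVAL on a parse error */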
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 1776eb2e43a4..9e93bef52968 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -183,17 +183,24 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len); -void * __weak arch_kexec_kernel_image_load(struct kimage *image); -int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); -int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, - Elf_Shdr *section, - const Elf_Shdr *relsec, - const Elf_Shdr *symtab); +/* Architectures may override the below functions */ +int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len); +void *arch_kexec_kernel_image_load(struct kimage *image); +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kexec_apply_relocations(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#ifdef CONFIG_KEXEC_SIG +int arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len); +#endif +int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); @@ -208,7 +215,7 @@ struct crash_mem_range { struct crash_mem { unsigned int max_nr_ranges; unsigned int nr_ranges; - struct crash_mem_range ranges[0]; + struct crash_mem_range ranges[]; }; extern int crash_exclude_mem_range(struct crash_mem *mem, diff --git a/include/linux/key.h b/include/linux/key.h index 6cf8e71cf8b7..0f2e24f13c2b 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -71,6 +71,23 @@ struct net; #define KEY_PERM_UNDEF 0xffffffff +/* + * The permissions required on a key that we're looking up. + */ +enum key_need_perm { + KEY_NEED_UNSPECIFIED, /* Needed permission unspecified */ + KEY_NEED_VIEW, /* Require permission to view attributes */ + KEY_NEED_READ, /* Require permission to read content */ + KEY_NEED_WRITE, /* Require permission to update / modify */ + KEY_NEED_SEARCH, /* Require permission to search (keyring) or find (key) */ + KEY_NEED_LINK, /* Require permission to link */ + KEY_NEED_SETATTR, /* Require permission to change attributes */ + KEY_NEED_UNLINK, /* Require permission to unlink key */ + KEY_SYSADMIN_OVERRIDE, /* Special: override by CAP_SYS_ADMIN */ + KEY_AUTHTOKEN_OVERRIDE, /* Special: override by possession of auth token */ + KEY_DEFER_PERM_CHECK, /* Special: permission check is deferred */ +}; + struct seq_file; struct user_struct; struct signal_struct; @@ -176,6 +193,9 @@ struct key { struct list_head graveyard_link; struct rb_node serial_node; }; +#ifdef CONFIG_KEY_NOTIFICATIONS + struct watch_list *watchers; /* Entities watching this key for changes */ +#endif struct rw_semaphore sem; /* change vs change sem */ struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ @@ -417,20 +437,9 @@ static inline key_serial_t key_serial(const struct key *key) extern void key_set_timeout(struct key *, unsigned); extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, - key_perm_t perm); + enum key_need_perm need_perm); extern void key_free_user_ns(struct user_namespace *); -/* - * The permissions required on a key that we're looking up. - */ -#define KEY_NEED_VIEW 0x01 /* Require permission to view attributes */ -#define KEY_NEED_READ 0x02 /* Require permission to read content */ -#define KEY_NEED_WRITE 0x04 /* Require permission to update / modify */ -#define KEY_NEED_SEARCH 0x08 /* Require permission to search (keyring) or find (key) */ -#define KEY_NEED_LINK 0x10 /* Require permission to link */ -#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ -#define KEY_NEED_ALL 0x3f /* All the above permissions */ - static inline short key_read_state(const struct key *key) { /* Barrier versus mark_key_instantiated(). */ diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h new file mode 100644 index 000000000000..18f3f5346843 --- /dev/null +++ b/include/linux/keyslot-manager.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2019 Google LLC + */ + +#ifndef __LINUX_KEYSLOT_MANAGER_H +#define __LINUX_KEYSLOT_MANAGER_H + +#include <linux/bio.h> +#include <linux/blk-crypto.h> + +struct blk_keyslot_manager; + +/** + * struct blk_ksm_ll_ops - functions to manage keyslots in hardware + * @keyslot_program: Program the specified key into the specified slot in the + * inline encryption hardware. + * @keyslot_evict: Evict key from the specified keyslot in the hardware. + * The key is provided so that e.g. dm layers can evict + * keys from the devices that they map over. + * Returns 0 on success, -errno otherwise. + * + * This structure should be provided by storage device drivers when they set up + * a keyslot manager - this structure holds the function ptrs that the keyslot + * manager will use to manipulate keyslots in the hardware.
 + */ +struct blk_ksm_ll_ops { + int (*keyslot_program)(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot); + int (*keyslot_evict)(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + unsigned int slot); +}; + +struct blk_keyslot_manager { + /* + * The struct blk_ksm_ll_ops that this keyslot manager will use + * to perform operations like programming and evicting keys on the + * device + */ + struct blk_ksm_ll_ops ksm_ll_ops; + + /* + * The maximum number of bytes supported for specifying the data unit + * number. + */ + unsigned int max_dun_bytes_supported; + + /* + * Array of size BLK_ENCRYPTION_MODE_MAX of bitmasks that represent + * whether a crypto mode and data unit size are supported. The i'th + * bit of crypto_modes_supported[crypto_mode] is set iff a data unit + * size of (1 << i) is supported. We only support data unit sizes + * that are powers of 2. + */ + unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX]; + + /* Device for runtime power management (NULL if none) */ + struct device *dev; + + /* Here onwards are *private* fields for internal keyslot manager use */ + + unsigned int num_slots; + + /* Protects programming and evicting keys from the device */ + struct rw_semaphore lock; + + /* List of idle slots, with least recently used slot at front */ + wait_queue_head_t idle_slots_wait_queue; + struct list_head idle_slots; + spinlock_t idle_slots_lock; + + /* + * Hash table which maps struct blk_crypto_key * to keyslots, so that we + * can find a key's keyslot in O(1) time rather than O(num_slots). + * Protected by 'lock'. + */ + struct hlist_head *slot_hashtable; + unsigned int log_slot_ht_size; + + /* Per-keyslot data */ + struct blk_ksm_keyslot *slots; +}; + +int blk_ksm_init(struct blk_keyslot_manager *ksm, unsigned int num_slots); + +blk_status_t blk_ksm_get_slot_for_key(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, + struct blk_ksm_keyslot **slot_ptr); + +unsigned int blk_ksm_get_slot_idx(struct blk_ksm_keyslot *slot); + +void blk_ksm_put_slot(struct blk_ksm_keyslot *slot); + +bool blk_ksm_crypto_cfg_supported(struct blk_keyslot_manager *ksm, + const struct blk_crypto_config *cfg); + +int blk_ksm_evict_key(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key); + +void blk_ksm_reprogram_all_keys(struct blk_keyslot_manager *ksm); + +void blk_ksm_destroy(struct blk_keyslot_manager *ksm); + +#endif /* __LINUX_KEYSLOT_MANAGER_H */
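To give a feel for the new header: a storage driver embeds a struct blk_keyslot_manager in its device, initializes it with blk_ksm_init(), and then fills in the hardware hooks and capability fields. The sketch below is illustrative only; the my_* names, struct my_device, the slot count and the 4096-byte data-unit capability are assumptions, not part of this patch:

.. code-block:: c

    static int my_keyslot_program(struct blk_keyslot_manager *ksm,
                                  const struct blk_crypto_key *key,
                                  unsigned int slot)
    {
        /* Program @key into hardware keyslot @slot. */
        return 0;
    }

    static int my_keyslot_evict(struct blk_keyslot_manager *ksm,
                                const struct blk_crypto_key *key,
                                unsigned int slot)
    {
        /* Clear hardware keyslot @slot. */
        return 0;
    }

    static int my_crypto_setup(struct my_device *dev)
    {
        int err = blk_ksm_init(&dev->ksm, 8 /* keyslots */);

        if (err)
            return err;
        dev->ksm.ksm_ll_ops.keyslot_program = my_keyslot_program;
        dev->ksm.ksm_ll_ops.keyslot_evict = my_keyslot_evict;
        dev->ksm.max_dun_bytes_supported = 8;
        /* Bit 12 set: 4096-byte data units for AES-256-XTS. */
        dev->ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = 1 << 12;
        return 0;
    }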
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index b072aeb1fd78..477b8b7c908f 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h @@ -177,6 +177,17 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code, struct pt_regs *regs); /** + * kgdb_arch_handle_qxfer_pkt - Handle architecture specific GDB XML + * packets. + * @remcom_in_buffer: The buffer of the packet we have read. + * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into. + */ + +extern void +kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, + char *remcom_out_buffer); + +/** * kgdb_call_nmi_hook - Call kgdb_nmicallback() on the current CPU * @ignored: This parameter is only here to match the prototype. * @@ -269,12 +280,14 @@ struct kgdb_arch { * @write_char: Pointer to a function that will write one char. * @flush: Pointer to a function that will flush any pending writes. * @init: Pointer to a function that will initialize the device. + * @deinit: Pointer to a function that will deinit the device. Implies that + * this I/O driver is temporary and expects to be replaced. Called when + * an I/O driver is replaced or explicitly unregistered. * @pre_exception: Pointer to a function that will do any prep work for * the I/O driver. * @post_exception: Pointer to a function that will do any cleanup work * for the I/O driver. - * @is_console: 1 if the end device is a console 0 if the I/O device is - * not a console + * @cons: valid if the I/O device is a console; else NULL. */ struct kgdb_io { const char *name; @@ -282,9 +295,10 @@ struct kgdb_io { void (*write_char) (u8); void (*flush) (void); int (*init) (void); + void (*deinit) (void); void (*pre_exception) (void); void (*post_exception) (void); - int is_console; + struct console *cons; }; extern const struct kgdb_arch arch_kgdb_ops; @@ -298,7 +312,7 @@ extern bool kgdb_nmi_poll_knock(void); #else static inline int kgdb_register_nmi_console(void) { return 0; } static inline int kgdb_unregister_nmi_console(void) { return 0; } -static inline bool kgdb_nmi_poll_knock(void) { return 1; } +static inline bool kgdb_nmi_poll_knock(void) { return true; } #endif extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); @@ -311,6 +325,7 @@ extern int kgdb_hex2mem(char *buf, char *mem, int count); extern int kgdb_isremovedbreak(unsigned long addr); extern void kgdb_schedule_breakpoint(void); +extern int kgdb_has_hit_break(unsigned long addr); extern int kgdb_handle_exception(int ex_vector, int signo, int err_code, @@ -323,7 +338,7 @@ extern void gdbstub_exit(int status); extern int kgdb_single_step; extern atomic_t kgdb_active; #define in_dbg_master() \ - (raw_smp_processor_id() == atomic_read(&kgdb_active)) + (irqs_disabled() && (smp_processor_id() == atomic_read(&kgdb_active))) extern bool dbg_is_early; extern void __init dbg_late_init(void); extern void kgdb_panic(const char *msg); diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index bc45ea1efbf7..c941b7377321 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -15,6 +15,7 @@ extern int __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags); +extern void khugepaged_min_free_kbytes_update(void); #ifdef CONFIG_SHMEM extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr); #else @@ -85,6 +86,10 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr) { } + +static inline void khugepaged_min_free_kbytes_update(void) +{ +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* _LINUX_KHUGEPAGED_H */ diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 2e7a1e032c71..3378bcbe585e 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -25,9 +25,8 @@ enum kmsg_dump_reason { KMSG_DUMP_PANIC, KMSG_DUMP_OOPS, KMSG_DUMP_EMERG, - KMSG_DUMP_RESTART, - KMSG_DUMP_HALT, - KMSG_DUMP_POWEROFF, + KMSG_DUMP_SHUTDOWN, + KMSG_DUMP_MAX }; /** @@ -71,6 +70,8 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper); int kmsg_dump_register(struct kmsg_dumper *dumper); int kmsg_dump_unregister(struct kmsg_dumper *dumper); + +const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason); #else static inline void kmsg_dump(enum kmsg_dump_reason reason) { @@ -112,6 +113,11 @@ static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) { return -EINVAL; } + +static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason) +{ + return
"Disabled"; +} #endif #endif /* _LINUX_KMSG_DUMP_H */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index e2ca0a292e21..ea30529fba08 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -7,7 +7,7 @@ * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> * Copyright (c) 2006-2008 Novell Inc. * - * Please read Documentation/kobject.txt before using the kobject + * Please read Documentation/core-api/kobject.rst before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. */ @@ -29,7 +29,7 @@ #include <linux/uidgid.h> #define UEVENT_HELPER_PATH_LEN 256 -#define UEVENT_NUM_ENVP 32 /* number of env pointers */ +#define UEVENT_NUM_ENVP 64 /* number of env pointers */ #define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ #ifdef CONFIG_UEVENT_HELPER @@ -59,7 +59,6 @@ enum kobject_action { KOBJ_OFFLINE, KOBJ_BIND, KOBJ_UNBIND, - KOBJ_MAX }; struct kobject { diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index 069aa2ebef90..2b5b64256cf4 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h @@ -8,7 +8,7 @@ * * Split from kobject.h by David Howells (dhowells@redhat.com) * - * Please read Documentation/kobject.txt before using the kobject + * Please read Documentation/core-api/kobject.rst before using the kobject * interface, ESPECIALLY the parts about reference counts and object * destructors. */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 04bdaf01112c..8aab327b5539 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -161,7 +161,7 @@ struct kretprobe_instance { kprobe_opcode_t *ret_addr; struct task_struct *task; void *fp; - char data[0]; + char data[]; }; struct kretprobe_blackpoint { @@ -227,7 +227,6 @@ extern int arch_prepare_kprobe(struct kprobe *p); extern void arch_arm_kprobe(struct kprobe *p); extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); -extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); extern int arch_populate_kprobe_blacklist(void); @@ -242,6 +241,7 @@ struct kprobe_insn_cache { struct mutex mutex; void *(*alloc)(void); /* allocate insn page */ void (*free)(void *); /* free insn page */ + const char *sym; /* symbol for insn pages */ struct list_head pages; /* list of kprobe_insn_page */ size_t insn_size; /* size of instruction slot */ int nr_garbage; @@ -272,6 +272,10 @@ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ { \ return __is_insn_slot_addr(&kprobe_##__name##_slots, addr); \ } +#define KPROBE_INSN_PAGE_SYM "kprobe_insn_page" +#define KPROBE_OPTINSN_PAGE_SYM "kprobe_optinsn_page" +int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, + unsigned long *value, char *type, char *sym); #else /* __ARCH_WANT_KPROBES_INSN_SLOT */ #define DEFINE_INSN_CACHE_OPS(__name) \ static inline bool is_kprobe_##__name##_slot(unsigned long addr) \ @@ -312,7 +316,7 @@ DEFINE_INSN_CACHE_OPS(optinsn); #ifdef CONFIG_SYSCTL extern int sysctl_kprobes_optimization; extern int proc_kprobes_optimization_handler(struct ctl_table *table, - int write, void __user *buffer, + int write, void *buffer, size_t *length, loff_t *ppos); #endif extern void wait_for_kprobe_optimizer(void); @@ -350,6 +354,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void) return this_cpu_ptr(&kprobe_ctlblk); } +extern struct kprobe kprobe_busy; +void 
kprobe_busy_begin(void); +void kprobe_busy_end(void); + kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset); int register_kprobe(struct kprobe *p); void unregister_kprobe(struct kprobe *p); @@ -365,6 +373,8 @@ void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); +void kprobe_free_init_mem(void); + int disable_kprobe(struct kprobe *kp); int enable_kprobe(struct kprobe *kp); @@ -373,6 +383,11 @@ void dump_kprobe(struct kprobe *kp); void *alloc_insn_page(void); void free_insn_page(void *page); +int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, + char *sym); + +int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, + char *type, char *sym); #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) @@ -422,6 +437,9 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) static inline void kprobe_flush_task(struct task_struct *tk) { } +static inline void kprobe_free_init_mem(void) +{ +} static inline int disable_kprobe(struct kprobe *kp) { return -ENOSYS; @@ -435,6 +453,11 @@ static inline bool within_kprobe_blacklist(unsigned long addr) { return true; } +static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, + char *type, char *sym) +{ + return -ERANGE; +} #endif /* CONFIG_KPROBES */ static inline int disable_kretprobe(struct kretprobe *rp) { diff --git a/include/linux/ksm.h b/include/linux/ksm.h index e48b1e453ff5..161e8164abcf 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -53,8 +53,6 @@ struct page *ksm_might_need_to_copy(struct page *page, void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); -bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, unsigned long address); #else /* !CONFIG_KSM */ @@ -88,11 +86,6 @@ static inline void rmap_walk_ksm(struct page *page, static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) { } -static inline bool reuse_ksm_page(struct page *page, - struct vm_area_struct *vma, unsigned long address) -{ - return false; -} #endif /* CONFIG_MMU */ #endif /* !CONFIG_KSM */ diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 8bbcaad7ef0f..65b81e0c494d 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -5,6 +5,8 @@ #include <linux/err.h> #include <linux/sched.h> +struct mm_struct; + __printf(4, 5) struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void *data, @@ -57,6 +59,7 @@ bool kthread_should_stop(void); bool kthread_should_park(void); bool __kthread_should_park(struct task_struct *k); bool kthread_freezable_should_stop(bool *was_frozen); +void *kthread_func(struct task_struct *k); void *kthread_data(struct task_struct *k); void *kthread_probe_data(struct task_struct *k); int kthread_park(struct task_struct *k); @@ -198,6 +201,9 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work); void kthread_destroy_worker(struct kthread_worker *worker); +void kthread_use_mm(struct mm_struct *mm); +void kthread_unuse_mm(struct mm_struct *mm); + struct cgroup_subsys_state; #ifdef CONFIG_BLK_CGROUP diff --git a/include/linux/ktime.h b/include/linux/ktime.h index 42d2e6ac35f2..a12b5523cc18 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -23,6 +23,7 @@ #include <linux/time.h> #include 
<linux/jiffies.h> +#include <asm/bug.h> /* Nanosecond scalar representation for kernel time values */ typedef s64 ktime_t; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 131cc1527d68..05e3c2fb3ef7 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -23,7 +23,7 @@ #include <linux/irqflags.h> #include <linux/context_tracking.h> #include <linux/irqbypass.h> -#include <linux/swait.h> +#include <linux/rcuwait.h> #include <linux/refcount.h> #include <linux/nospec.h> #include <asm/signal.h> @@ -206,12 +206,13 @@ struct kvm_async_pf { unsigned long addr; struct kvm_arch_async_pf arch; bool wakeup_all; + bool notpresent_injected; }; void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu); void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu); -int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, - unsigned long hva, struct kvm_arch_async_pf *arch); +bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, + unsigned long hva, struct kvm_arch_async_pf *arch); int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #endif @@ -277,7 +278,7 @@ struct kvm_vcpu { struct mutex mutex; struct kvm_run *run; - struct swait_queue_head wq; + struct rcuwait wait; struct pid __rcu *pid; int sigset_active; sigset_t sigset; @@ -318,7 +319,6 @@ struct kvm_vcpu { bool preempted; bool ready; struct kvm_vcpu_arch arch; - struct dentry *debugfs_dentry; }; static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) @@ -409,7 +409,7 @@ struct kvm_irq_routing_table { * Array indexed by gsi. Each entry contains list of irq chips * the gsi is connected to. */ - struct hlist_head map[0]; + struct hlist_head map[]; }; #endif @@ -503,6 +503,7 @@ struct kvm { struct srcu_struct srcu; struct srcu_struct irq_srcu; pid_t userspace_pid; + unsigned int max_halt_poll_ns; }; #define kvm_err(fmt, ...) 
\ @@ -733,6 +734,9 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); +int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, + void *data, unsigned int offset, + unsigned long len); int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, int offset, int len); int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, @@ -745,31 +749,53 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); -#define __kvm_put_guest(kvm, gfn, offset, value, type) \ +#define __kvm_get_guest(kvm, gfn, offset, v) \ ({ \ unsigned long __addr = gfn_to_hva(kvm, gfn); \ - type __user *__uaddr = (type __user *)(__addr + offset); \ + typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ int __ret = -EFAULT; \ \ if (!kvm_is_error_hva(__addr)) \ - __ret = put_user(value, __uaddr); \ + __ret = get_user(v, __uaddr); \ + __ret; \ +}) + +#define kvm_get_guest(kvm, gpa, v) \ +({ \ + gpa_t __gpa = gpa; \ + struct kvm *__kvm = kvm; \ + \ + __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \ + offset_in_page(__gpa), v); \ +}) + +#define __kvm_put_guest(kvm, gfn, offset, v) \ +({ \ + unsigned long __addr = gfn_to_hva(kvm, gfn); \ + typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \ + int __ret = -EFAULT; \ + \ + if (!kvm_is_error_hva(__addr)) \ + __ret = put_user(v, __uaddr); \ if (!__ret) \ mark_page_dirty(kvm, gfn); \ __ret; \ }) -#define kvm_put_guest(kvm, gpa, value, type) \ +#define kvm_put_guest(kvm, gpa, v) \ ({ \ gpa_t __gpa = gpa; \ struct kvm *__kvm = kvm; \ + \ __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \ - offset_in_page(__gpa), (value), type); \ + offset_in_page(__gpa), v); \ }) int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); +bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); @@ -812,6 +838,13 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible); void kvm_flush_remote_tlbs(struct kvm *kvm); void kvm_reload_remote_mmus(struct kvm *kvm); +#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE +int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min); +int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc); +void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); +void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); +#endif + bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except, unsigned long *vcpu_bitmap, cpumask_var_t tmp); @@ -869,7 +902,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state); int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg); -int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu); int kvm_arch_init(void *opaque); void kvm_arch_exit(void); @@ -884,7 +917,7 @@ void 
kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS -void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu); +void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry); #endif int kvm_arch_hardware_enable(void); @@ -959,12 +992,12 @@ static inline bool kvm_arch_has_assigned_device(struct kvm *kvm) } #endif -static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) +static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu) { #ifdef __KVM_HAVE_ARCH_WQP - return vcpu->arch.wqp; + return vcpu->arch.waitp; #else - return &vcpu->wq; + return &vcpu->wait; #endif } @@ -1133,6 +1166,11 @@ struct kvm_stats_debugfs_item { #define KVM_DBGFS_GET_MODE(dbgfs_item) \ ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644) +#define VM_STAT(n, x, ...) \ + { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ } +#define VCPU_STAT(n, x, ...) \ + { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ } + extern struct kvm_stats_debugfs_item debugfs_entries[]; extern struct dentry *kvm_debugfs_dir; @@ -1355,6 +1393,12 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val) } #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */ +static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot) +{ + return (memslot && memslot->id < KVM_USER_MEM_SLOTS && + !(memslot->flags & KVM_MEMSLOT_INVALID)); +} + struct kvm_vcpu *kvm_get_running_vcpu(void); struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); @@ -1406,8 +1450,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, } #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end, bool blockable); +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, + unsigned long start, unsigned long end); #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); @@ -1424,4 +1468,12 @@ int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, uintptr_t data, const char *name, struct task_struct **thread_ptr); +#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK +static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) +{ + vcpu->run->exit_reason = KVM_EXIT_INTR; + vcpu->stat.signal_exits++; +} +#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */ + #endif
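The reworked __kvm_get_guest()/__kvm_put_guest() wrappers earlier in this file now infer the access size from typeof(v) rather than taking an explicit type argument, and kvm_put_guest() loses its old fourth parameter. A usage sketch (kvm and gpa are assumed to be in scope; the update itself is illustrative):

.. code-block:: c

    u32 val;

    if (kvm_get_guest(kvm, gpa, val))   /* reads sizeof(val) bytes; -EFAULT on a bad hva */
        return;
    val |= 1;
    kvm_put_guest(kvm, gpa, val);       /* writes back and marks the page dirty */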
while + * holding MMU locks. Note, these caches act more like prefetch buffers than + * classical caches, i.e. objects are not returned to the cache on being freed. + */ +struct kvm_mmu_memory_cache { + int nobjs; + gfp_t gfp_zero; + struct kmem_cache *kmem_cache; + void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE]; +}; +#endif + + #endif /* __KVM_TYPES_H__ */ diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h index 9022f0c2e2e4..abe3d95f795b 100644 --- a/include/linux/latencytop.h +++ b/include/linux/latencytop.h @@ -38,8 +38,8 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter) void clear_tsk_latency_tracing(struct task_struct *p); -extern int sysctl_latencytop(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); +int sysctl_latencytop(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); #else diff --git a/include/linux/led-class-multicolor.h b/include/linux/led-class-multicolor.h new file mode 100644 index 000000000000..5116f9a866cc --- /dev/null +++ b/include/linux/led-class-multicolor.h @@ -0,0 +1,121 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* LED Multicolor class interface + * Copyright (C) 2019-20 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#ifndef _LINUX_MULTICOLOR_LEDS_H_INCLUDED +#define _LINUX_MULTICOLOR_LEDS_H_INCLUDED + +#include <linux/leds.h> +#include <dt-bindings/leds/common.h> + +struct mc_subled { + unsigned int color_index; + unsigned int brightness; + unsigned int intensity; + unsigned int channel; +}; + +struct led_classdev_mc { + /* led class device */ + struct led_classdev led_cdev; + unsigned int num_colors; + + struct mc_subled *subled_info; +}; + +static inline struct led_classdev_mc *lcdev_to_mccdev( + struct led_classdev *led_cdev) +{ + return container_of(led_cdev, struct led_classdev_mc, led_cdev); +} + +#if IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) +/** + * led_classdev_multicolor_register_ext - register a new object of led_classdev + * class with support for multicolor LEDs + * @parent: the multicolor LED to register + * @mcled_cdev: the led_classdev_mc structure for this device + * @init_data: the LED class multicolor device initialization data + * + * Returns: 0 on success or negative error value on failure + */ +int led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data); + +static inline int led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL); +} + +/** + * led_classdev_multicolor_unregister - unregisters an object of led_classdev + * class with support for multicolor LEDs + * @mcled_cdev: the multicolor LED to unregister + * + * Unregister a previously registered via led_classdev_multicolor_register + * object + */ +void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev); + +/* Calculate brightness for the monochrome LED cluster */ +int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev, + enum led_brightness brightness); + +int devm_led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data); + +static inline int devm_led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev, + NULL); +} + +void 
devm_led_classdev_multicolor_unregister(struct device *parent, + struct led_classdev_mc *mcled_cdev); +#else + +static inline int led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data) +{ + return -EINVAL; +} + +static inline int led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return led_classdev_multicolor_register_ext(parent, mcled_cdev, NULL); +} + +static inline void led_classdev_multicolor_unregister(struct led_classdev_mc *mcled_cdev) {}; +static inline int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev, + enum led_brightness brightness) +{ + return -EINVAL; +} + +static inline int devm_led_classdev_multicolor_register_ext(struct device *parent, + struct led_classdev_mc *mcled_cdev, + struct led_init_data *init_data) +{ + return -EINVAL; +} + +static inline int devm_led_classdev_multicolor_register(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{ + return devm_led_classdev_multicolor_register_ext(parent, mcled_cdev, + NULL); +} + +static inline void devm_led_classdev_multicolor_unregister(struct device *parent, + struct led_classdev_mc *mcled_cdev) +{}; + +#endif /* IS_ENABLED(CONFIG_LEDS_CLASS_MULTICOLOR) */ +#endif /* _LINUX_MULTICOLOR_LEDS_H_INCLUDED */ diff --git a/include/linux/leds-ti-lmu-common.h b/include/linux/leds-ti-lmu-common.h index 5eb111f38803..420b61e5a213 100644 --- a/include/linux/leds-ti-lmu-common.h +++ b/include/linux/leds-ti-lmu-common.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ // TI LMU Common Core -// Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2018 Texas Instruments Incorporated - https://www.ti.com/ #ifndef _TI_LMU_COMMON_H_ #define _TI_LMU_COMMON_H_ diff --git a/include/linux/leds.h b/include/linux/leds.h index 2451962d1ec5..6a8d6409c993 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -57,6 +57,10 @@ struct led_init_data { bool devname_mandatory; }; +struct led_hw_trigger_type { + int dummy; +}; + struct led_classdev { const char *name; enum led_brightness brightness; @@ -141,6 +145,9 @@ struct led_classdev { void *trigger_data; /* true if activated - deactivate routine uses it to do cleanup */ bool activated; + + /* LEDs that have private triggers have this set */ + struct led_hw_trigger_type *trigger_type; #endif #ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED @@ -345,6 +352,9 @@ struct led_trigger { int (*activate)(struct led_classdev *led_cdev); void (*deactivate)(struct led_classdev *led_cdev); + /* LED-private triggers have this set */ + struct led_hw_trigger_type *trigger_type; + /* LEDs under control by this trigger (for simple triggers) */ rwlock_t leddev_list_lock; struct list_head led_cdevs; diff --git a/include/linux/libata.h b/include/linux/libata.h index cffa4714bfa8..5f550eb27f81 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -22,6 +22,7 @@ #include <linux/acpi.h> #include <linux/cdrom.h> #include <linux/sched.h> +#include <linux/async.h> /* * Define if arch has non-standard setup. 
This is a _PCI_ standard @@ -420,6 +421,7 @@ enum { ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ + ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -609,7 +611,7 @@ struct ata_host { struct task_struct *eh_owner; struct ata_port *simplex_claimed; /* channel owning the DMA */ - struct ata_port *ports[0]; + struct ata_port *ports[]; }; struct ata_queued_cmd { @@ -872,6 +874,8 @@ struct ata_port { struct timer_list fastdrain_timer; unsigned long fastdrain_cnt; + async_cookie_t cookie; + int em_message_type; void *private_data; @@ -1092,6 +1096,11 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, #define ATA_SCSI_COMPAT_IOCTL /* empty */ #endif extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); +#if IS_REACHABLE(CONFIG_ATA) +bool ata_scsi_dma_need_drain(struct request *rq); +#else +#define ata_scsi_dma_need_drain NULL +#endif extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, unsigned int cmd, void __user *arg); extern bool ata_link_online(struct ata_link *link); @@ -1387,6 +1396,7 @@ extern struct device_attribute *ata_common_sdev_attrs[]; .ioctl = ata_scsi_ioctl, \ ATA_SCSI_COMPAT_IOCTL \ .queuecommand = ata_scsi_queuecmd, \ + .dma_need_drain = ata_scsi_dma_need_drain, \ .can_queue = ATA_DEF_QUEUE, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ .this_id = ATA_SHT_THIS_ID, \ diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 18da4059be09..01f251b6e36c 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -76,8 +76,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct device_node; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; - unsigned long bus_dsm_mask; unsigned long cmd_mask; + unsigned long dimm_family_mask; + unsigned long bus_family_mask; struct module *module; char *provider_name; struct device_node *of_node; @@ -85,6 +86,7 @@ struct nvdimm_bus_descriptor { int (*flush_probe)(struct nvdimm_bus_descriptor *nd_desc); int (*clear_to_send)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *data); + const struct nvdimm_bus_fw_ops *fw_ops; }; struct nd_cmd_desc { @@ -199,6 +201,49 @@ struct nvdimm_security_ops { int (*query_overwrite)(struct nvdimm *nvdimm); }; +enum nvdimm_fwa_state { + NVDIMM_FWA_INVALID, + NVDIMM_FWA_IDLE, + NVDIMM_FWA_ARMED, + NVDIMM_FWA_BUSY, + NVDIMM_FWA_ARM_OVERFLOW, +}; + +enum nvdimm_fwa_trigger { + NVDIMM_FWA_ARM, + NVDIMM_FWA_DISARM, +}; + +enum nvdimm_fwa_capability { + NVDIMM_FWA_CAP_INVALID, + NVDIMM_FWA_CAP_NONE, + NVDIMM_FWA_CAP_QUIESCE, + NVDIMM_FWA_CAP_LIVE, +}; + +enum nvdimm_fwa_result { + NVDIMM_FWA_RESULT_INVALID, + NVDIMM_FWA_RESULT_NONE, + NVDIMM_FWA_RESULT_SUCCESS, + NVDIMM_FWA_RESULT_NOTSTAGED, + NVDIMM_FWA_RESULT_NEEDRESET, + NVDIMM_FWA_RESULT_FAIL, +}; + +struct nvdimm_bus_fw_ops { + enum nvdimm_fwa_state (*activate_state) + (struct nvdimm_bus_descriptor *nd_desc); + enum nvdimm_fwa_capability (*capability) + (struct nvdimm_bus_descriptor *nd_desc); + int (*activate)(struct nvdimm_bus_descriptor *nd_desc); +}; + +struct nvdimm_fw_ops { + enum nvdimm_fwa_state (*activate_state)(struct nvdimm *nvdimm); + enum nvdimm_fwa_result (*activate_result)(struct nvdimm *nvdimm); + int (*arm)(struct nvdimm *nvdimm, enum 
nvdimm_fwa_trigger arg); +}; + void badrange_init(struct badrange *badrange); int badrange_add(struct badrange *badrange, u64 addr, u64 length); void badrange_forget(struct badrange *badrange, phys_addr_t start, @@ -224,14 +269,15 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq, const char *dimm_id, - const struct nvdimm_security_ops *sec_ops); + const struct nvdimm_security_ops *sec_ops, + const struct nvdimm_fw_ops *fw_ops); static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq) { return __nvdimm_create(nvdimm_bus, provider_data, groups, flags, - cmd_mask, num_flush, flush_wpq, NULL, NULL); + cmd_mask, num_flush, flush_wpq, NULL, NULL, NULL); } const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index ee8ec2e68055..1db223710b28 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h @@ -631,7 +631,6 @@ static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev, return last; } -typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); typedef sector_t (nvm_tgt_capacity_fn)(void *); typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, int flags); @@ -650,7 +649,7 @@ struct nvm_tgt_type { int flags; /* target entry points */ - nvm_tgt_make_rq_fn *make_rq; + const struct block_device_operations *bops; nvm_tgt_capacity_fn *capacity; /* module-specific init/teardown */ diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h new file mode 100644 index 000000000000..17b5943727d5 --- /dev/null +++ b/include/linux/linear_range.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2020 ROHM Semiconductors */ + +#ifndef LINEAR_RANGE_H +#define LINEAR_RANGE_H + +#include <linux/types.h> + +/** + * struct linear_range - table of selector - value pairs + * + * Define a lookup-table for range of values. Intended to help when looking + * for a register value matching certain physical measure (like voltage). + * Usable when increment of one in register always results in a constant increment + * of the physical measure (like voltage).
 + * + * @min: Lowest value in range + * @min_sel: Lowest selector for range + * @max_sel: Highest selector for range + * @step: Value step size + */ +struct linear_range { + unsigned int min; + unsigned int min_sel; + unsigned int max_sel; + unsigned int step; +}; + +unsigned int linear_range_values_in_range(const struct linear_range *r); +unsigned int linear_range_values_in_range_array(const struct linear_range *r, + int ranges); +unsigned int linear_range_get_max_value(const struct linear_range *r); + +int linear_range_get_value(const struct linear_range *r, unsigned int selector, + unsigned int *val); +int linear_range_get_value_array(const struct linear_range *r, int ranges, + unsigned int selector, unsigned int *val); +int linear_range_get_selector_low(const struct linear_range *r, + unsigned int val, unsigned int *selector, + bool *found); +int linear_range_get_selector_high(const struct linear_range *r, + unsigned int val, unsigned int *selector, + bool *found); +int linear_range_get_selector_low_array(const struct linear_range *r, + int ranges, unsigned int val, + unsigned int *selector, bool *found); + +#endif
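As a usage sketch, a regulator-style table in which selectors 0..15 map to 1.0 V plus 50 mV per step (the numbers are illustrative) can be described and queried like this:

.. code-block:: c

    static const struct linear_range volt_range = {
        .min     = 1000000,   /* microvolts at the lowest selector */
        .min_sel = 0,
        .max_sel = 15,
        .step    = 50000,     /* microvolts per selector increment */
    };

    unsigned int uv;
    int ret = linear_range_get_value(&volt_range, 7, &uv);
    /* On success: ret == 0 and uv == 1000000 + 7 * 50000 == 1350000. */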
diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 9280209d1f62..d796ec20d114 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -105,7 +105,7 @@ /* === DEPRECATED annotations === */ -#ifndef CONFIG_X86 +#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS #ifndef GLOBAL /* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */ #define GLOBAL(name) \ @@ -118,10 +118,10 @@ #define ENTRY(name) \ SYM_FUNC_START(name) #endif -#endif /* CONFIG_X86 */ +#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */ #endif /* LINKER_SCRIPT */ -#ifndef CONFIG_X86 +#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS #ifndef WEAK /* deprecated, use SYM_FUNC_START_WEAK* */ #define WEAK(name) \ @@ -143,7 +143,7 @@ #define ENDPROC(name) \ SYM_FUNC_END(name) #endif -#endif /* CONFIG_X86 */ +#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */ /* === generic annotations === */ diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index c664c27a29a0..f8397f300fcd 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -82,6 +82,12 @@ static inline int linkmode_equal(const unsigned long *src1, return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } +static inline int linkmode_intersects(const unsigned long *src1, + const unsigned long *src2) +{ + return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + static inline int linkmode_subset(const unsigned long *src1, const unsigned long *src2) { diff --git a/include/linux/list.h b/include/linux/list.h index aff44d34f4e4..0d0d17a10d25 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -283,6 +283,24 @@ static inline int list_empty(const struct list_head *head) } /** + * list_del_init_careful - deletes entry from list and reinitializes it. + * @entry: the element to delete from the list. + * + * This is the same as list_del_init(), except designed to be used + * together with list_empty_careful() in a way to guarantee ordering + * of other memory operations. + * + * Any memory operations done before a list_del_init_careful() are + * guaranteed to be visible after a list_empty_careful() test. + */ +static inline void list_del_init_careful(struct list_head *entry) +{ + __list_del_entry(entry); + entry->prev = entry; + smp_store_release(&entry->next, entry); +} + +/** * list_empty_careful - tests whether a list is empty and not being modified * @head: the list to test * @@ -297,7 +315,7 @@ static inline int list_empty(const struct list_head *head) */ static inline int list_empty_careful(const struct list_head *head) { - struct list_head *next = head->next; + struct list_head *next = smp_load_acquire(&head->next); return (next == head) && (next == head->prev); }
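The value of the new pair is the release/acquire ordering: the smp_store_release() in list_del_init_careful() pairs with the smp_load_acquire() in list_empty_careful(), so ordinary stores made before the delete are guaranteed visible to whoever observes the entry as empty. A sketch, assuming an illustrative waiter structure and helper (not from the patch):

.. code-block:: c

    struct waiter {                     /* illustrative */
        int woken;
        struct list_head entry;
    };

    /* Wake side: the plain store below is published by the release. */
    w->woken = 1;
    list_del_init_careful(&w->entry);

    /* Wait side: pairs with the release via the acquire load. */
    if (list_empty_careful(&w->entry))
        finish_wakeup(w);               /* w->woken is guaranteed to read as 1 */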
Interrupt flags to restore + */ +#define local_unlock_irqrestore(lock, flags) \ + __local_unlock_irqrestore(lock, flags) + +#endif diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h new file mode 100644 index 000000000000..4a8795b21d77 --- /dev/null +++ b/include/linux/local_lock_internal.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_LOCAL_LOCK_H +# error "Do not include directly, include linux/local_lock.h" +#endif + +#include <linux/percpu-defs.h> +#include <linux/lockdep.h> + +typedef struct { +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; + struct task_struct *owner; +#endif +} local_lock_t; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LL_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } +#else +# define LL_DEP_MAP_INIT(lockname) +#endif + +#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) } + +#define __local_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ + lockdep_init_map_wait(&(lock)->dep_map, #lock, &__key, 0, LD_WAIT_CONFIG);\ +} while (0) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static inline void local_lock_acquire(local_lock_t *l) +{ + lock_map_acquire(&l->dep_map); + DEBUG_LOCKS_WARN_ON(l->owner); + l->owner = current; +} + +static inline void local_lock_release(local_lock_t *l) +{ + DEBUG_LOCKS_WARN_ON(l->owner != current); + l->owner = NULL; + lock_map_release(&l->dep_map); +} + +#else /* CONFIG_DEBUG_LOCK_ALLOC */ +static inline void local_lock_acquire(local_lock_t *l) { } +static inline void local_lock_release(local_lock_t *l) { } +#endif /* !CONFIG_DEBUG_LOCK_ALLOC */ + +#define __local_lock(lock) \ + do { \ + preempt_disable(); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_lock_irq(lock) \ + do { \ + local_irq_disable(); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_lock_irqsave(lock, flags) \ + do { \ + local_irq_save(flags); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_unlock(lock) \ + do { \ + local_lock_release(this_cpu_ptr(lock)); \ + preempt_enable(); \ + } while (0) + +#define __local_unlock_irq(lock) \ + do { \ + local_lock_release(this_cpu_ptr(lock)); \ + local_irq_enable(); \ + } while (0) + +#define __local_unlock_irqrestore(lock, flags) \ + do { \ + local_lock_release(this_cpu_ptr(lock)); \ + local_irq_restore(flags); \ + } while (0) diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 206774ac6946..6a584b3e5c74 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -10,33 +10,16 @@ #ifndef __LINUX_LOCKDEP_H #define __LINUX_LOCKDEP_H +#include <linux/lockdep_types.h> +#include <linux/smp.h> +#include <asm/percpu.h> + struct task_struct; -struct lockdep_map; /* for sysctl */ extern int prove_locking; extern int lock_stat; -#define MAX_LOCKDEP_SUBCLASSES 8UL - -#include <linux/types.h> - -enum lockdep_wait_type { - LD_WAIT_INV = 0, /* not checked, catch all */ - - LD_WAIT_FREE, /* wait free, rcu etc.. */ - LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */ - -#ifdef CONFIG_PROVE_RAW_LOCK_NESTING - LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */ -#else - LD_WAIT_CONFIG = LD_WAIT_SPIN, -#endif - LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. 
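A sketch of the usage pattern these primitives enable: named per-CPU locking that documents what a formerly bare preempt_disable() actually protects. The ex_* names are invented:

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct ex_pcpu_buf {
	local_lock_t lock;
	unsigned int count;
};

static DEFINE_PER_CPU(struct ex_pcpu_buf, ex_buf) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void ex_account(void)
{
	/* Expands to preempt_disable() plus the lockdep acquire above. */
	local_lock(&ex_buf.lock);
	__this_cpu_inc(ex_buf.count);
	local_unlock(&ex_buf.lock);
}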
*/ - - LD_WAIT_MAX, /* must be last */ -}; - #ifdef CONFIG_LOCKDEP #include <linux/linkage.h> @@ -44,147 +27,6 @@ enum lockdep_wait_type { #include <linux/debug_locks.h> #include <linux/stacktrace.h> -/* - * We'd rather not expose kernel/lockdep_states.h this wide, but we do need - * the total number of states... :-( - */ -#define XXX_LOCK_USAGE_STATES (1+2*4) - -/* - * NR_LOCKDEP_CACHING_CLASSES ... Number of classes - * cached in the instance of lockdep_map - * - * Currently main class (subclass == 0) and signle depth subclass - * are cached in lockdep_map. This optimization is mainly targeting - * on rq->lock. double_rq_lock() acquires this highly competitive with - * single depth. - */ -#define NR_LOCKDEP_CACHING_CLASSES 2 - -/* - * A lockdep key is associated with each lock object. For static locks we use - * the lock address itself as the key. Dynamically allocated lock objects can - * have a statically or dynamically allocated key. Dynamically allocated lock - * keys must be registered before being used and must be unregistered before - * the key memory is freed. - */ -struct lockdep_subclass_key { - char __one_byte; -} __attribute__ ((__packed__)); - -/* hash_entry is used to keep track of dynamically allocated keys. */ -struct lock_class_key { - union { - struct hlist_node hash_entry; - struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; - }; -}; - -extern struct lock_class_key __lockdep_no_validate__; - -struct lock_trace; - -#define LOCKSTAT_POINTS 4 - -/* - * The lock-class itself. The order of the structure members matters. - * reinit_class() zeroes the key member and all subsequent members. - */ -struct lock_class { - /* - * class-hash: - */ - struct hlist_node hash_entry; - - /* - * Entry in all_lock_classes when in use. Entry in free_lock_classes - * when not in use. Instances that are being freed are on one of the - * zapped_classes lists. - */ - struct list_head lock_entry; - - /* - * These fields represent a directed graph of lock dependencies, - * to every node we attach a list of "forward" and a list of - * "backward" graph nodes. 
- */ - struct list_head locks_after, locks_before; - - const struct lockdep_subclass_key *key; - unsigned int subclass; - unsigned int dep_gen_id; - - /* - * IRQ/softirq usage tracking bits: - */ - unsigned long usage_mask; - const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; - - /* - * Generation counter, when doing certain classes of graph walking, - * to ensure that we check one node only once: - */ - int name_version; - const char *name; - - short wait_type_inner; - short wait_type_outer; - -#ifdef CONFIG_LOCK_STAT - unsigned long contention_point[LOCKSTAT_POINTS]; - unsigned long contending_point[LOCKSTAT_POINTS]; -#endif -} __no_randomize_layout; - -#ifdef CONFIG_LOCK_STAT -struct lock_time { - s64 min; - s64 max; - s64 total; - unsigned long nr; -}; - -enum bounce_type { - bounce_acquired_write, - bounce_acquired_read, - bounce_contended_write, - bounce_contended_read, - nr_bounce_types, - - bounce_acquired = bounce_acquired_write, - bounce_contended = bounce_contended_write, -}; - -struct lock_class_stats { - unsigned long contention_point[LOCKSTAT_POINTS]; - unsigned long contending_point[LOCKSTAT_POINTS]; - struct lock_time read_waittime; - struct lock_time write_waittime; - struct lock_time read_holdtime; - struct lock_time write_holdtime; - unsigned long bounces[nr_bounce_types]; -}; - -struct lock_class_stats lock_stats(struct lock_class *class); -void clear_lock_stats(struct lock_class *class); -#endif - -/* - * Map the lock object (the lock instance) to the lock-class object. - * This is embedded into specific lock instances: - */ -struct lockdep_map { - struct lock_class_key *key; - struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; - const char *name; - short wait_type_outer; /* can be taken in this context */ - short wait_type_inner; /* presents this context */ -#ifdef CONFIG_LOCK_STAT - int cpu; - unsigned long ip; -#endif -}; - static inline void lockdep_copy_map(struct lockdep_map *to, struct lockdep_map *from) { @@ -308,8 +150,27 @@ extern void lockdep_set_selftest_task(struct task_struct *task); extern void lockdep_init_task(struct task_struct *task); -extern void lockdep_off(void); -extern void lockdep_on(void); +/* + * Split the recrursion counter in two to readily detect 'off' vs recursion. + */ +#define LOCKDEP_RECURSION_BITS 16 +#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS) +#define LOCKDEP_RECURSION_MASK (LOCKDEP_OFF - 1) + +/* + * lockdep_{off,on}() are macros to avoid tracing and kprobes; not inlines due + * to header dependencies. 
+ */ + +#define lockdep_off() \ +do { \ + current->lockdep_recursion += LOCKDEP_OFF; \ +} while (0) + +#define lockdep_on() \ +do { \ + current->lockdep_recursion -= LOCKDEP_OFF; \ +} while (0) extern void lockdep_register_key(struct lock_class_key *key); extern void lockdep_unregister_key(struct lock_class_key *key); @@ -421,8 +282,6 @@ static inline void lock_set_subclass(struct lockdep_map *lock, extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip); -struct pin_cookie { unsigned int val; }; - #define NIL_COOKIE (struct pin_cookie){ .val = 0U, } extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock); @@ -501,10 +360,6 @@ static inline void lockdep_set_selftest_task(struct task_struct *task) # define lockdep_reset() do { debug_locks = 1; } while (0) # define lockdep_free_key_range(start, size) do { } while (0) # define lockdep_sys_exit() do { } while (0) -/* - * The class key takes no space if lockdep is disabled: - */ -struct lock_class_key { }; static inline void lockdep_register_key(struct lock_class_key *key) { @@ -514,11 +369,6 @@ static inline void lockdep_unregister_key(struct lock_class_key *key) { } -/* - * The lockdep_map takes no space if lockdep is disabled: - */ -struct lockdep_map { }; - #define lockdep_depth(tsk) (0) #define lockdep_is_held_type(l, r) (1) @@ -530,8 +380,6 @@ struct lockdep_map { }; #define lockdep_recursing(tsk) (0) -struct pin_cookie { }; - #define NIL_COOKIE (struct pin_cookie){ } #define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; }) @@ -684,38 +532,66 @@ do { \ lock_release(&(lock)->dep_map, _THIS_IP_); \ } while (0) -#define lockdep_assert_irqs_enabled() do { \ - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - !current->hardirqs_enabled, \ - "IRQs not enabled as expected\n"); \ - } while (0) +DECLARE_PER_CPU(int, hardirqs_enabled); +DECLARE_PER_CPU(int, hardirq_context); -#define lockdep_assert_irqs_disabled() do { \ - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - current->hardirqs_enabled, \ - "IRQs not disabled as expected\n"); \ - } while (0) +/* + * The below lockdep_assert_*() macros use raw_cpu_read() to access the above + * per-cpu variables. This is required because this_cpu_read() will potentially + * call into preempt/irq-disable and that obviously isn't right. This is also + * correct because when IRQs are enabled, it doesn't matter if we accidentally + * read the value from our previous CPU. 
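A sketch of how callers are expected to use these context assertions; the ex_* functions and lock are invented:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(ex_lock);	/* hypothetical */

static void ex_handle_irq(void)
{
	/* Cheap, lockdep-only runtime check of the calling context. */
	lockdep_assert_irqs_disabled();
	spin_lock(&ex_lock);
	/* ... */
	spin_unlock(&ex_lock);
}

static void ex_update_pcpu_stats(void)
{
	/* Per-CPU data needs preemption off, not a full lock. */
	lockdep_assert_preemption_disabled();
	/* ... */
}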
+ */ -#define lockdep_assert_in_irq() do { \ - WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - !current->hardirq_context, \ - "Not in hardirq as expected\n"); \ - } while (0) +#define lockdep_assert_irqs_enabled() \ +do { \ + WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \ +} while (0) + +#define lockdep_assert_irqs_disabled() \ +do { \ + WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \ +} while (0) + +#define lockdep_assert_in_irq() \ +do { \ + WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \ +} while (0) + +#define lockdep_assert_preemption_enabled() \ +do { \ + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ + debug_locks && \ + (preempt_count() != 0 || \ + !raw_cpu_read(hardirqs_enabled))); \ +} while (0) + +#define lockdep_assert_preemption_disabled() \ +do { \ + WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \ + debug_locks && \ + (preempt_count() == 0 && \ + raw_cpu_read(hardirqs_enabled))); \ +} while (0) #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) # define might_lock_nested(lock, subclass) do { } while (0) + # define lockdep_assert_irqs_enabled() do { } while (0) # define lockdep_assert_irqs_disabled() do { } while (0) # define lockdep_assert_in_irq() do { } while (0) + +# define lockdep_assert_preemption_enabled() do { } while (0) +# define lockdep_assert_preemption_disabled() do { } while (0) #endif #ifdef CONFIG_PROVE_RAW_LOCK_NESTING # define lockdep_assert_RT_in_threaded_ctx() do { \ WARN_ONCE(debug_locks && !current->lockdep_recursion && \ - current->hardirq_context && \ + lockdep_hardirq_context() && \ !(current->hardirq_threaded || current->irq_config), \ "Not in threaded context on PREEMPT_RT as expected\n"); \ } while (0) diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h new file mode 100644 index 000000000000..bb35b449f533 --- /dev/null +++ b/include/linux/lockdep_types.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Runtime locking correctness validator + * + * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra + * + * see Documentation/locking/lockdep-design.rst for more details. + */ +#ifndef __LINUX_LOCKDEP_TYPES_H +#define __LINUX_LOCKDEP_TYPES_H + +#include <linux/types.h> + +#define MAX_LOCKDEP_SUBCLASSES 8UL + +enum lockdep_wait_type { + LD_WAIT_INV = 0, /* not checked, catch all */ + + LD_WAIT_FREE, /* wait free, rcu etc.. */ + LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */ + +#ifdef CONFIG_PROVE_RAW_LOCK_NESTING + LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */ +#else + LD_WAIT_CONFIG = LD_WAIT_SPIN, +#endif + LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */ + + LD_WAIT_MAX, /* must be last */ +}; + +#ifdef CONFIG_LOCKDEP + +/* + * We'd rather not expose kernel/lockdep_states.h this wide, but we do need + * the total number of states... :-( + */ +#define XXX_LOCK_USAGE_STATES (1+2*4) + +/* + * NR_LOCKDEP_CACHING_CLASSES ... Number of classes + * cached in the instance of lockdep_map + * + * Currently main class (subclass == 0) and signle depth subclass + * are cached in lockdep_map. This optimization is mainly targeting + * on rq->lock. double_rq_lock() acquires this highly competitive with + * single depth. + */ +#define NR_LOCKDEP_CACHING_CLASSES 2 + +/* + * A lockdep key is associated with each lock object. For static locks we use + * the lock address itself as the key. 
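A sketch of the key lifetime rules this comment goes on to spell out for dynamically allocated locks (register before first use, unregister before freeing the key memory); ex_obj is invented:

#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct ex_obj {
	spinlock_t lock;
	struct lock_class_key key;
};

static struct ex_obj *ex_obj_alloc(void)
{
	struct ex_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	lockdep_register_key(&obj->key);	/* before first use */
	spin_lock_init(&obj->lock);
	lockdep_set_class(&obj->lock, &obj->key);
	return obj;
}

static void ex_obj_free(struct ex_obj *obj)
{
	lockdep_unregister_key(&obj->key);	/* before the key memory dies */
	kfree(obj);
}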
Dynamically allocated lock objects can + * have a statically or dynamically allocated key. Dynamically allocated lock + * keys must be registered before being used and must be unregistered before + * the key memory is freed. + */ +struct lockdep_subclass_key { + char __one_byte; +} __attribute__ ((__packed__)); + +/* hash_entry is used to keep track of dynamically allocated keys. */ +struct lock_class_key { + union { + struct hlist_node hash_entry; + struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; + }; +}; + +extern struct lock_class_key __lockdep_no_validate__; + +struct lock_trace; + +#define LOCKSTAT_POINTS 4 + +/* + * The lock-class itself. The order of the structure members matters. + * reinit_class() zeroes the key member and all subsequent members. + */ +struct lock_class { + /* + * class-hash: + */ + struct hlist_node hash_entry; + + /* + * Entry in all_lock_classes when in use. Entry in free_lock_classes + * when not in use. Instances that are being freed are on one of the + * zapped_classes lists. + */ + struct list_head lock_entry; + + /* + * These fields represent a directed graph of lock dependencies, + * to every node we attach a list of "forward" and a list of + * "backward" graph nodes. + */ + struct list_head locks_after, locks_before; + + const struct lockdep_subclass_key *key; + unsigned int subclass; + unsigned int dep_gen_id; + + /* + * IRQ/softirq usage tracking bits: + */ + unsigned long usage_mask; + const struct lock_trace *usage_traces[XXX_LOCK_USAGE_STATES]; + + /* + * Generation counter, when doing certain classes of graph walking, + * to ensure that we check one node only once: + */ + int name_version; + const char *name; + + short wait_type_inner; + short wait_type_outer; + +#ifdef CONFIG_LOCK_STAT + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; +#endif +} __no_randomize_layout; + +#ifdef CONFIG_LOCK_STAT +struct lock_time { + s64 min; + s64 max; + s64 total; + unsigned long nr; +}; + +enum bounce_type { + bounce_acquired_write, + bounce_acquired_read, + bounce_contended_write, + bounce_contended_read, + nr_bounce_types, + + bounce_acquired = bounce_acquired_write, + bounce_contended = bounce_contended_write, +}; + +struct lock_class_stats { + unsigned long contention_point[LOCKSTAT_POINTS]; + unsigned long contending_point[LOCKSTAT_POINTS]; + struct lock_time read_waittime; + struct lock_time write_waittime; + struct lock_time read_holdtime; + struct lock_time write_holdtime; + unsigned long bounces[nr_bounce_types]; +}; + +struct lock_class_stats lock_stats(struct lock_class *class); +void clear_lock_stats(struct lock_class *class); +#endif + +/* + * Map the lock object (the lock instance) to the lock-class object. 
+ * This is embedded into specific lock instances: + */ +struct lockdep_map { + struct lock_class_key *key; + struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; + const char *name; + short wait_type_outer; /* can be taken in this context */ + short wait_type_inner; /* presents this context */ +#ifdef CONFIG_LOCK_STAT + int cpu; + unsigned long ip; +#endif +}; + +struct pin_cookie { unsigned int val; }; + +#else /* !CONFIG_LOCKDEP */ + +/* + * The class key takes no space if lockdep is disabled: + */ +struct lock_class_key { }; + +/* + * The lockdep_map takes no space if lockdep is disabled: + */ +struct lockdep_map { }; + +struct pin_cookie { }; + +#endif /* !LOCKDEP */ + +#endif /* __LINUX_LOCKDEP_TYPES_H */ diff --git a/include/linux/log2.h b/include/linux/log2.h index 83a4a3ca3e8a..c619ec6eff4a 100644 --- a/include/linux/log2.h +++ b/include/linux/log2.h @@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n) #define roundup_pow_of_two(n) \ ( \ __builtin_constant_p(n) ? ( \ - (n == 1) ? 1 : \ + ((n) == 1) ? 1 : \ (1UL << (ilog2((n) - 1) + 1)) \ ) : \ __roundup_pow_of_two(n) \ diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index 99d629fd9944..28f23b341c1c 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -75,6 +75,7 @@ struct common_audit_data { #define LSM_AUDIT_DATA_IBPKEY 13 #define LSM_AUDIT_DATA_IBENDPORT 14 #define LSM_AUDIT_DATA_LOCKDOWN 15 +#define LSM_AUDIT_DATA_NOTIFICATION 16 union { struct path path; struct dentry *dentry; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 5616b2567aa7..2a8c74d99015 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -15,7 +15,7 @@ */ /* - * The macro LSM_HOOK is used to define the data structures required by the + * The macro LSM_HOOK is used to define the data structures required by * the LSM framework using the pattern: * * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...) 
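The log2.h hunk above adds only parentheses around the macro parameter; a contrived illustration of the expansion hazard this closes (macro names invented):

#define BROKEN_IS_ONE(n)	(n == 1)
#define FIXED_IS_ONE(n)		((n) == 1)

/*
 * BROKEN_IS_ONE(a ? b : c) expands to (a ? b : c == 1): the comparison
 * binds to c alone.  FIXED_IS_ONE(a ? b : c) compares the selected
 * value, which is what roundup_pow_of_two() needs for expression
 * arguments.
 */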
@@ -49,7 +49,8 @@ LSM_HOOK(int, 0, syslog, int type) LSM_HOOK(int, 0, settime, const struct timespec64 *ts, const struct timezone *tz) LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) -LSM_HOOK(int, 0, bprm_set_creds, struct linux_binprm *bprm) +LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm) +LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file) LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm) @@ -149,7 +150,7 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) -LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name) +LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name) LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, struct kernfs_node *kn) LSM_HOOK(int, 0, file_permission, struct file *file, int mask) @@ -190,6 +191,8 @@ LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, int flags) +LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old, + int flags) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) @@ -253,6 +256,15 @@ LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx, u32 *ctxlen) +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, + const struct cred *cred, struct watch_notification *n) +#endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */ + +#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) +LSM_HOOK(int, 0, watch_key, struct key *key) +#endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */ + #ifdef CONFIG_SECURITY_NETWORK LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, struct sock *newsk) @@ -348,7 +360,7 @@ LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred, unsigned long flags) LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key) LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm need_perm) LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer) #endif /* CONFIG_KEYS */ diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 988ca0df7824..9e2e3e63719d 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -34,40 +34,48 @@ * * Security hooks for program execution operations. * - * @bprm_set_creds: - * Save security information in the bprm->security field, typically based - * on information about the bprm->file, for later use by the apply_creds - * hook. This hook may also optionally check permissions (e.g. for - * transitions between security domains). - * This hook may be called multiple times during a single execve, e.g. for - * interpreters. The hook can tell whether it has already been called by - * checking to see if @bprm->security is non-NULL. 
If so, then the hook - * may decide either to retain the security information saved earlier or - * to replace it. The hook must set @bprm->secureexec to 1 if a "secure - * exec" has happened as a result of this hook call. The flag is used to - * indicate the need for a sanitized execution environment, and is also - * passed in the ELF auxiliary table on the initial stack to indicate - * whether libc should enable secure mode. + * @bprm_creds_for_exec: + * If the setup in prepare_exec_creds did not set up @bprm->cred->security + * properly for executing @bprm->file, update the LSM's portion of + * @bprm->cred->security to be what commit_creds needs to install for the + * new program. This hook may also optionally check permissions + * (e.g. for transitions between security domains). + * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to + * request libc enable secure mode. + * @bprm contains the linux_binprm structure. + * Return 0 if the hook is successful and permission is granted. + * @bprm_creds_from_file: + * If @file is setpcap, suid, sgid or otherwise marked to change + * privilege upon exec, update @bprm->cred to reflect that change. + * This is called after finding the binary that will be executed + * without an interpreter. This ensures that the credentials will not + * be derived from a script that the binary will need to reopen, which + * when reopened may end up being a completely different file. This + * hook may also optionally check permissions (e.g. for transitions + * between security domains). + * The hook must set @bprm->secureexec to 1 if AT_SECURE should be set to + * request libc enable secure mode. + * The hook must add to @bprm->per_clear any personality flags that + * should be cleared from current->personality. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_check_security: * This hook mediates the point when a search for a binary handler will - * begin. It allows a check the @bprm->security value which is set in the - * preceding set_creds call. The primary difference from set_creds is - * that the argv list and envp list are reliably available in @bprm. This - * hook may be called multiple times during a single execve; and in each - * pass set_creds is called first. + * begin. It allows a check against the @bprm->cred->security value + * which was set in the preceding creds_for_exec call. The argv list and + * envp list are reliably available in @bprm. This hook may be called + * multiple times during a single execve. * @bprm contains the linux_binprm structure. * Return 0 if the hook is successful and permission is granted. * @bprm_committing_creds: * Prepare to install the new security attributes of a process being * transformed by an execve operation, based on the old credentials * pointed to by @current->cred and the information set in @bprm->cred by - * the bprm_set_creds hook. @bprm points to the linux_binprm structure. - * This hook is a good place to perform state changes on the process such - * as closing open file descriptors to which access will no longer be - * granted when the attributes are changed. This is called immediately - * before commit_creds(). + * the bprm_creds_for_exec hook. @bprm points to the linux_binprm + * structure. This hook is a good place to perform state changes on the + * process such as closing open file descriptors to which access will no + * longer be granted when the attributes are changed.
This is called + * immediately before commit_creds(). * @bprm_committed_creds: * Tidy up after the installation of the new security attributes of a * process being transformed by an execve operation. The new credentials @@ -77,7 +85,7 @@ * state. This is called immediately after commit_creds(). * * Security hooks for mount using fs_context. - * [See also Documentation/filesystems/mount_api.txt] + * [See also Documentation/filesystems/mount_api.rst] * * @fs_context_dup: * Allocate and attach a security structure to sc->security. This pointer @@ -651,6 +659,15 @@ * @old is the set of credentials that are being replaces * @flags contains one of the LSM_SETID_* values. * Return 0 on success. + * @task_fix_setgid: + * Update the module's state after setting one or more of the group + * identity attributes of the current process. The @flags parameter + * indicates which of the set*gid system calls invoked this hook. + * @new is the set of credentials that will be installed. Modifications + * should be made to this rather than to @current->cred. + * @old is the set of credentials that are being replaced. + * @flags contains one of the LSM_SETID_* values. + * Return 0 on success. * @task_setpgid: * Check permission before setting the process group identifier of the * process @p to @pgid. @@ -798,7 +815,7 @@ * structure. Note that the security field was not added directly to the * socket structure, but rather, the socket security information is stored * in the associated inode. Typically, the inode alloc_security hook will - * allocate and and attach security information to + * allocate and attach security information to * SOCK_INODE(sock)->i_security. This hook may be used to update the * SOCK_INODE(sock)->i_security field with additional information that * wasn't available when the inode was allocated. @@ -1437,6 +1454,20 @@ * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. * + * Security hooks for the general notification queue: + * + * @post_notification: + * Check to see if a watch notification can be posted to a particular + * queue. + * @w_cred: The credentials of the whoever set the watch. + * @cred: The event-triggerer's credentials + * @n: The notification being posted + * + * @watch_key: + * Check to see if a process is allowed to watch for event notifications + * from a key or keyring. + * @key: The key to watch. + * * Security hooks for using the eBPF maps and programs functionalities through * eBPF syscalls. 
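A sketch of a minimal LSM wiring up the new task_fix_setgid hook documented above; the "example" module and ex_* symbols are invented:

#include <linux/lsm_hooks.h>

static int ex_task_fix_setgid(struct cred *new, const struct cred *old,
			      int flags)
{
	/* Refresh module state for the new group IDs; 0 grants the change. */
	return 0;
}

static struct security_hook_list ex_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(task_fix_setgid, ex_task_fix_setgid),
};

static int __init ex_lsm_init(void)
{
	security_add_hooks(ex_hooks, ARRAY_SIZE(ex_hooks), "example");
	return 0;
}

DEFINE_LSM(example) = {
	.name = "example",
	.init = ex_lsm_init,
};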
* diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h index a4dc45fbec0a..05eea1aef5aa 100644 --- a/include/linux/mailbox/mtk-cmdq-mailbox.h +++ b/include/linux/mailbox/mtk-cmdq-mailbox.h @@ -17,6 +17,7 @@ #define CMDQ_JUMP_PASS CMDQ_INST_SIZE #define CMDQ_WFE_UPDATE BIT(31) +#define CMDQ_WFE_UPDATE_VALUE BIT(16) #define CMDQ_WFE_WAIT BIT(15) #define CMDQ_WFE_WAIT_VALUE 0x1 @@ -59,6 +60,7 @@ enum cmdq_code { CMDQ_CODE_JUMP = 0x10, CMDQ_CODE_WFE = 0x20, CMDQ_CODE_EOC = 0x40, + CMDQ_CODE_LOGIC = 0xa0, }; enum cmdq_cb_status { @@ -88,4 +90,6 @@ struct cmdq_pkt { void *cl; }; +u8 cmdq_get_shift_pa(struct mbox_chan *chan); + #endif /* __MTK_CMDQ_MAILBOX_H__ */ diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index af6b11d4d673..ff7b7607c8cf 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -15,10 +15,12 @@ #define MARVELL_PHY_ID_88E1149R 0x01410e50 #define MARVELL_PHY_ID_88E1240 0x01410e30 #define MARVELL_PHY_ID_88E1318S 0x01410e90 +#define MARVELL_PHY_ID_88E1340S 0x01410dc0 #define MARVELL_PHY_ID_88E1116R 0x01410e40 #define MARVELL_PHY_ID_88E1510 0x01410dd0 #define MARVELL_PHY_ID_88E1540 0x01410eb0 #define MARVELL_PHY_ID_88E1545 0x01410ea0 +#define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 #define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 diff --git a/include/linux/math64.h b/include/linux/math64.h index 11a267413e8e..3381d9e33c4e 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -263,6 +263,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) } #endif /* mul_u64_u32_div */ +u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); + #define DIV64_U64_ROUND_UP(ll, d) \ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) @@ -279,4 +281,23 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) #define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \ ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); }) +/* + * DIV_S64_ROUND_CLOSEST - signed 64bit divide with 32bit divisor rounded to nearest integer + * @dividend: signed 64bit dividend + * @divisor: signed 32bit divisor + * + * Divide signed 64bit dividend by signed 32bit divisor + * and round to closest integer. + * + * Return: dividend / divisor rounded to nearest integer + */ +#define DIV_S64_ROUND_CLOSEST(dividend, divisor)( \ +{ \ + s64 __x = (dividend); \ + s32 __d = (divisor); \ + ((__x > 0) == (__d > 0)) ? \ + div_s64((__x + (__d / 2)), __d) : \ + div_s64((__x - (__d / 2)), __d); \ +} \ +) #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h index 917e4bb2ed71..898cbf00332a 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -9,8 +9,16 @@ #include <uapi/linux/mdio.h> #include <linux/mod_devicetable.h> +/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit + * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. + */ +#define MII_ADDR_C45 (1<<30) +#define MII_DEVADDR_C45_SHIFT 16 +#define MII_REGADDR_C45_MASK GENMASK(15, 0) + struct gpio_desc; struct mii_bus; +struct reset_control; /* Multiple levels of nesting are possible. 
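Worked values for the new DIV_S64_ROUND_CLOSEST() in the math64.h hunk above (editorial illustration; not from the patch):

/*
 *   DIV_S64_ROUND_CLOSEST( 7,  2) -> div_s64( 7 + 1,  2) ==  4
 *   DIV_S64_ROUND_CLOSEST(-7,  2) -> div_s64(-7 - 1,  2) == -4
 *   DIV_S64_ROUND_CLOSEST(-7, -2) -> div_s64(-7 + -1, -2) ==  4
 *
 * The half-divisor bias is added when the signs agree and subtracted
 * when they differ, so exact halves round away from zero in both
 * directions; plain truncating division would give 3, -3 and 3.
 */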
However typically this is * limited to nested DSA like layer, a MUX layer, and the normal @@ -326,6 +334,30 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val); int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set); +static inline u32 mdiobus_c45_addr(int devad, u16 regnum) +{ + return MII_ADDR_C45 | devad << MII_DEVADDR_C45_SHIFT | regnum; +} + +static inline int __mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad, + u16 regnum) +{ + return __mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum)); +} + +static inline int __mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad, + u16 regnum, u16 val) +{ + return __mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), + val); +} + +static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad, + u16 regnum) +{ + return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum)); +} + int mdiobus_register_device(struct mdio_device *mdiodev); int mdiobus_unregister_device(struct mdio_device *mdiodev); bool mdiobus_is_registered_device(struct mii_bus *bus, int addr); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 6bc37a731d27..9d925db0d355 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -41,7 +41,7 @@ enum memblock_flags { /** * struct memblock_region - represents a memory region - * @base: physical address of the region + * @base: base address of the region * @size: size of the region * @flags: memory region attributes * @nid: NUMA node id @@ -50,7 +50,7 @@ struct memblock_region { phys_addr_t base; phys_addr_t size; enum memblock_flags flags; -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_NEED_MULTIPLE_NODES int nid; #endif }; @@ -75,18 +75,14 @@ struct memblock_type { * struct memblock - memblock allocator metadata * @bottom_up: is bottom up direction? * @current_limit: physical address of the current allocation limit - * @memory: usabe memory regions + * @memory: usable memory regions * @reserved: reserved memory regions - * @physmem: all physical memory */ struct memblock { bool bottom_up; /* is bottom up direction? */ phys_addr_t current_limit; struct memblock_type memory; struct memblock_type reserved; -#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP - struct memblock_type physmem; -#endif }; extern struct memblock memblock; @@ -145,6 +141,30 @@ void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, void __memblock_free_late(phys_addr_t base, phys_addr_t size); +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP +static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, + phys_addr_t *out_start, + phys_addr_t *out_end) +{ + extern struct memblock_type physmem; + + __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type, + out_start, out_end, NULL); +} + +/** + * for_each_physmem_range - iterate through physmem areas not included in type. + * @i: u64 used as loop variable + * @type: ptr to memblock_type which excludes from the iteration, can be %NULL + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + */ +#define for_each_physmem_range(i, type, p_start, p_end) \ + for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \ + i != (u64)ULLONG_MAX; \ + __next_physmem_range(&i, type, p_start, p_end)) +#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */ + /** * for_each_mem_range - iterate through memblock areas from type_a and not * included in type_b. Or just type_a if type_b is NULL. 
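A sketch of a clause-45 register access with the helpers added in the mdio.h hunk above; ex_read_pma_ctrl is invented:

#include <linux/mdio.h>

static int ex_read_pma_ctrl(struct mii_bus *bus, int phy_addr)
{
	/*
	 * mdiobus_c45_addr() packs the MMD into bits 20:16 and sets
	 * MII_ADDR_C45, switching the bus driver to clause 45 framing;
	 * this is equivalent to mdiobus_read(bus, phy_addr,
	 * MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_CTRL1).
	 */
	return mdiobus_c45_read(bus, phy_addr, MDIO_MMD_PMAPMD, MDIO_CTRL1);
}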
@@ -215,7 +235,6 @@ static inline bool memblock_is_nomap(struct memblock_region *m) return m->flags & MEMBLOCK_NOMAP; } -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, @@ -234,7 +253,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, @@ -275,6 +293,9 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, #define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \ for (; i != U64_MAX; \ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) + +int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); + #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ /** @@ -310,10 +331,10 @@ void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ nid, flags, p_start, p_end, p_nid) -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP int memblock_set_node(phys_addr_t base, phys_addr_t size, struct memblock_type *type, int nid); +#ifdef CONFIG_NEED_MULTIPLE_NODES static inline void memblock_set_region_node(struct memblock_region *r, int nid) { r->nid = nid; @@ -332,7 +353,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r) { return 0; } -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ +#endif /* CONFIG_NEED_MULTIPLE_NODES */ /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 977edd3b7bd8..d0b036123c6a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -23,19 +23,16 @@ #include <linux/page-flags.h> struct mem_cgroup; +struct obj_cgroup; struct page; struct mm_struct; struct kmem_cache; /* Cgroup-specific page state, on top of universal node page state */ enum memcg_stat_item { - MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS, - MEMCG_RSS, - MEMCG_RSS_HUGE, - MEMCG_SWAP, + MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS, MEMCG_SOCK, - /* XXX: why are these zone and not node counters? */ - MEMCG_KERNEL_STACK_KB, + MEMCG_PERCPU_B, MEMCG_NR_STAT, }; @@ -45,17 +42,12 @@ enum memcg_memory_event { MEMCG_MAX, MEMCG_OOM, MEMCG_OOM_KILL, + MEMCG_SWAP_HIGH, MEMCG_SWAP_MAX, MEMCG_SWAP_FAIL, MEMCG_NR_MEMORY_EVENTS, }; -enum mem_cgroup_protection { - MEMCG_PROT_NONE, - MEMCG_PROT_LOW, - MEMCG_PROT_MIN, -}; - struct mem_cgroup_reclaim_cookie { pg_data_t *pgdat; unsigned int generation; @@ -73,8 +65,8 @@ struct mem_cgroup_id { /* * Per memcg event counter is incremented at every pagein/pageout. With THP, - * it will be incremated by the number of pages. This counter is used for - * for trigger some periodic events. This is straightforward and better + * it will be incremented by the number of pages. This counter is used + * to trigger some periodic events. This is straightforward and better * than using jiffies etc. to handle periodic memcg event. */ enum mem_cgroup_events_target { @@ -195,6 +187,22 @@ struct memcg_cgwb_frn { }; /* + * Bucket for arbitrarily byte-sized objects charged to a memory + * cgroup. 
The bucket can be reparented in one piece when the cgroup + * is destroyed, without having to round up the individual references + * of all live memory objects in the wild. + */ +struct obj_cgroup { + struct percpu_ref refcnt; + struct mem_cgroup *memcg; + atomic_t nr_charged_bytes; + union { + struct list_head list; + struct rcu_head rcu; + }; +}; + +/* * The memory controller data structure. The memory controller controls both * page cache and RSS per cgroup. We would eventually like to provide * statistics based on the statistics developed by Rik Van Riel for clock-pro, @@ -215,9 +223,6 @@ struct mem_cgroup { struct page_counter kmem; struct page_counter tcpmem; - /* Upper bound of normal memory consumption range */ - unsigned long high; - /* Range enforcement for interrupt charges */ struct work_struct high_work; @@ -305,7 +310,8 @@ struct mem_cgroup { /* Index in the kmem_cache->memcg_params.memcg_caches array */ int kmemcg_id; enum memcg_kmem_state kmem_state; - struct list_head kmem_caches; + struct obj_cgroup __rcu *objcg; + struct list_head objcg_list; /* list of inherited objcgs */ #endif #ifdef CONFIG_CGROUP_WRITEBACK @@ -334,6 +340,13 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; +static __always_inline bool memcg_stat_item_in_bytes(int idx) +{ + if (idx == MEMCG_PERCPU_B) + return true; + return vmstat_item_in_bytes(idx); +} + static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); @@ -344,12 +357,49 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, bool in_low_reclaim) { if (mem_cgroup_disabled()) return 0; + /* + * There is no reclaim protection applied to a targeted reclaim. + * We are special casing this specific case here because + * mem_cgroup_protected calculation is not robust enough to keep + * the protection invariant for calculated effective values for + * parallel reclaimers with different reclaim target. This is + * especially a problem for tail memcgs (as they have pages on LRU) + * which would want to have effective values 0 for targeted reclaim + * but a different value for external reclaim. + * + * Example + * Let's have global and A's reclaim in parallel: + * | + * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G) + * |\ + * | C (low = 1G, usage = 2.5G) + * B (low = 1G, usage = 0.5G) + * + * For the global reclaim + * A.elow = A.low + * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow + * C.elow = min(C.usage, C.low) + * + * With the effective values resetting we have A reclaim + * A.elow = 0 + * B.elow = B.low + * C.elow = C.low + * + * If the global reclaim races with A's reclaim then + * B.elow = C.elow = 0 because children_low_usage > A.elow) + * is possible and reclaiming B would be violating the protection. 
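A sketch of the byte-sized charging flow this structure enables, using the objcg helpers introduced in the hunks that follow; ex_charge_bytes is invented and error handling is minimal:

#include <linux/memcontrol.h>

static int ex_charge_bytes(size_t bytes)
{
	struct obj_cgroup *objcg;
	int ret;

	objcg = get_obj_cgroup_from_current();	/* takes a reference */
	if (!objcg)
		return 0;	/* root cgroup or accounting bypassed */

	ret = obj_cgroup_charge(objcg, GFP_KERNEL, bytes);
	if (ret) {
		obj_cgroup_put(objcg);
		return ret;
	}
	/*
	 * Keep the reference with the allocation; drop it after
	 * obj_cgroup_uncharge(objcg, bytes) when the object is freed.
	 */
	return 0;
}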
+ * + */ + if (root == memcg) + return 0; + if (in_low_reclaim) return READ_ONCE(memcg->memory.emin); @@ -357,19 +407,39 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, READ_ONCE(memcg->memory.elow)); } -enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root, - struct mem_cgroup *memcg); +void mem_cgroup_calculate_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg); + +static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg) +{ + /* + * The root memcg doesn't account charges, and doesn't support + * protection. + */ + return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg); + +} + +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) +{ + if (!mem_cgroup_supports_protection(memcg)) + return false; + + return READ_ONCE(memcg->memory.elow) >= + page_counter_read(&memcg->memory); +} + +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) +{ + if (!mem_cgroup_supports_protection(memcg)) + return false; + + return READ_ONCE(memcg->memory.emin) >= + page_counter_read(&memcg->memory); +} + +int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); -int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcgp, - bool compound); -int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, struct mem_cgroup **memcgp, - bool compound); -void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, - bool lrucare, bool compound); -void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg, - bool compound); void mem_cgroup_uncharge(struct page *page); void mem_cgroup_uncharge_list(struct list_head *page_list); @@ -429,6 +499,33 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){ return css ? container_of(css, struct mem_cgroup, css) : NULL; } +static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg) +{ + return percpu_ref_tryget(&objcg->refcnt); +} + +static inline void obj_cgroup_get(struct obj_cgroup *objcg) +{ + percpu_ref_get(&objcg->refcnt); +} + +static inline void obj_cgroup_put(struct obj_cgroup *objcg) +{ + percpu_ref_put(&objcg->refcnt); +} + +/* + * After the initialization objcg->memcg is always pointing at + * a valid memcg, but can be atomically swapped to the parent memcg. + * + * The caller must ensure that the returned memcg won't be released: + * e.g. acquire the rcu_read_lock or css_set_lock. 
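A sketch of the reclaim-side pattern these helpers replace mem_cgroup_protected() with, modelled on the mm/vmscan.c iteration loop; target_memcg is the reclaim root and sc the usual struct scan_control:

	struct mem_cgroup *memcg;

	memcg = mem_cgroup_iter(target_memcg, NULL, NULL);
	do {
		mem_cgroup_calculate_protection(target_memcg, memcg);

		if (mem_cgroup_below_min(memcg)) {
			/* Hard protection: never reclaim from this group. */
			continue;
		} else if (mem_cgroup_below_low(memcg)) {
			/* Soft protection: skip unless low was breached. */
			if (!sc->memcg_low_reclaim) {
				sc->memcg_low_skipped = 1;
				continue;
			}
		}
		/* ... shrink this memcg's LRU lists ... */
	} while ((memcg = mem_cgroup_iter(target_memcg, memcg, NULL)));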
+ */ +static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) +{ + return READ_ONCE(objcg->memcg); +} + static inline void mem_cgroup_put(struct mem_cgroup *memcg) { if (memcg) @@ -533,7 +630,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec, struct mem_cgroup_per_node *mz; mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - return mz->lru_zone_size[zone_idx][lru]; + return READ_ONCE(mz->lru_zone_size[zone_idx][lru]); } void mem_cgroup_handle_over_high(void); @@ -570,7 +667,7 @@ struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim, void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); #ifdef CONFIG_MEMCG_SWAP -extern int do_swap_account; +extern bool cgroup_memory_noswap; #endif struct mem_cgroup *lock_page_memcg(struct page *page); @@ -692,11 +789,34 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return x; } +void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, + int val); void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val); + void mod_memcg_obj_state(void *p, int idx, int val); +static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_lruvec_slab_state(p, idx, val); + local_irq_restore(flags); +} + +static inline void mod_memcg_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ + unsigned long flags; + + local_irq_save(flags); + __mod_memcg_lruvec_state(lruvec, idx, val); + local_irq_restore(flags); +} + static inline void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -710,16 +830,17 @@ static inline void mod_lruvec_state(struct lruvec *lruvec, static inline void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx, int val) { + struct page *head = compound_head(page); /* rmap on tail pages */ pg_data_t *pgdat = page_pgdat(page); struct lruvec *lruvec; /* Untracked pages have no memcg, no lruvec. 
Update only the node */ - if (!page->mem_cgroup) { + if (!head->mem_cgroup) { __mod_node_page_state(pgdat, idx, val); return; } - lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat); + lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat); __mod_lruvec_state(lruvec, idx, val); } @@ -837,47 +958,32 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg, +static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, bool in_low_reclaim) { return 0; } -static inline enum mem_cgroup_protection mem_cgroup_protected( - struct mem_cgroup *root, struct mem_cgroup *memcg) -{ - return MEMCG_PROT_NONE; -} - -static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, - gfp_t gfp_mask, - struct mem_cgroup **memcgp, - bool compound) +static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg) { - *memcgp = NULL; - return 0; } -static inline int mem_cgroup_try_charge_delay(struct page *page, - struct mm_struct *mm, - gfp_t gfp_mask, - struct mem_cgroup **memcgp, - bool compound) +static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg) { - *memcgp = NULL; - return 0; + return false; } -static inline void mem_cgroup_commit_charge(struct page *page, - struct mem_cgroup *memcg, - bool lrucare, bool compound) +static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) { + return false; } -static inline void mem_cgroup_cancel_charge(struct page *page, - struct mem_cgroup *memcg, - bool compound) +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask) { + return 0; } static inline void mem_cgroup_uncharge(struct page *page) @@ -1094,6 +1200,11 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return node_page_state(lruvec_pgdat(lruvec), idx); } +static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, + enum node_stat_item idx, int val) +{ +} + static inline void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -1126,6 +1237,14 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, __mod_node_page_state(page_pgdat(page), idx, val); } +static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx, + int val) +{ + struct page *page = virt_to_head_page(p); + + mod_node_page_state(page_pgdat(page), idx, val); +} + static inline void mod_memcg_obj_state(void *p, int idx, int val) { } @@ -1279,6 +1398,19 @@ static inline void dec_lruvec_page_state(struct page *page, mod_lruvec_page_state(page, idx, -1); } +static inline struct lruvec *parent_lruvec(struct lruvec *lruvec) +{ + struct mem_cgroup *memcg; + + memcg = lruvec_memcg(lruvec); + if (!memcg) + return NULL; + memcg = parent_mem_cgroup(memcg); + if (!memcg) + return NULL; + return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec)); +} + #ifdef CONFIG_CGROUP_WRITEBACK struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb); @@ -1365,9 +1497,6 @@ static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg, } #endif -struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); -void memcg_kmem_put_cache(struct kmem_cache *cachep); - #ifdef CONFIG_MEMCG_KMEM int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp, unsigned int nr_pages); @@ -1375,8 +1504,12 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages); int __memcg_kmem_charge_page(struct page *page, gfp_t 
gfp, int order); void __memcg_kmem_uncharge_page(struct page *page, int order); +struct obj_cgroup *get_obj_cgroup_from_current(void); + +int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size); +void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size); + extern struct static_key_false memcg_kmem_enabled_key; -extern struct workqueue_struct *memcg_kmem_cache_wq; extern int memcg_nr_cache_ids; void memcg_get_cache_ids(void); @@ -1392,7 +1525,19 @@ void memcg_put_cache_ids(void); static inline bool memcg_kmem_enabled(void) { - return static_branch_unlikely(&memcg_kmem_enabled_key); + return static_branch_likely(&memcg_kmem_enabled_key); +} + +static inline bool memcg_kmem_bypass(void) +{ + if (in_interrupt()) + return true; + + /* Allow remote memcg charging in kthread contexts. */ + if ((!current->mm || (current->flags & PF_KTHREAD)) && + !current->active_memcg) + return true; + return false; } static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp, diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 93d9ada74ddd..375515803cd8 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -314,19 +314,13 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} #ifdef CONFIG_MEMORY_HOTREMOVE -extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages); extern void try_offline_node(int nid); extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); extern int remove_memory(int nid, u64 start, u64 size); extern void __remove_memory(int nid, u64 start, u64 size); +extern int offline_and_remove_memory(int nid, u64 start, u64 size); #else -static inline bool is_mem_section_removable(unsigned long pfn, - unsigned long nr_pages) -{ - return false; -} - static inline void try_offline_node(int nid) {} static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) @@ -349,6 +343,8 @@ extern void __ref free_area_init_core_hotplug(int nid); extern int __add_memory(int nid, u64 start, u64 size); extern int add_memory(int nid, u64 start, u64 size); extern int add_memory_resource(int nid, struct resource *resource); +extern int add_memory_driver_managed(int nid, u64 start, u64 size, + const char *resource_name); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); extern void remove_pfn_range_from_zone(struct zone *zone, diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 8165278c348a..5f1c74df264d 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -6,7 +6,7 @@ #ifndef _LINUX_MEMPOLICY_H #define _LINUX_MEMPOLICY_H 1 - +#include <linux/sched.h> #include <linux/mmzone.h> #include <linux/dax.h> #include <linux/slab.h> @@ -28,10 +28,10 @@ struct mm_struct; * the process policy is used. Interrupts ignore the memory policy * of the current process. * - * Locking policy for interlave: + * Locking policy for interleave: * In process context there is no locking because only the process accesses * its own state. All vma manipulation is somewhat protected by a down_read on - * mmap_sem. + * mmap_lock. * * Freeing policy: * Mempolicy objects are reference counted. 
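A sketch of the new driver-managed hotplug entry point declared above, patterned after dax/kmem-style users; the function and resource names are invented:

#include <linux/memory_hotplug.h>

static int ex_hotplug_region(int nid, u64 start, u64 size)
{
	/*
	 * The resource name must look like "System RAM ($DRIVER)" so the
	 * region stays distinguishable from firmware-provided RAM in
	 * /proc/iomem and can be skipped by kexec auto-placement.
	 */
	return add_memory_driver_managed(nid, start, size,
					 "System RAM (example_driver)");
}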
A mempolicy will be freed when @@ -152,6 +152,15 @@ extern int huge_node(struct vm_area_struct *vma, extern bool init_nodemask_of_mempolicy(nodemask_t *mask); extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, const nodemask_t *mask); +extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy); + +static inline nodemask_t *policy_nodemask_current(gfp_t gfp) +{ + struct mempolicy *mpol = get_task_policy(current); + + return policy_nodemask(gfp, mpol); +} + extern unsigned int mempolicy_slab_node(void); extern enum zone_type policy_zone; @@ -281,5 +290,10 @@ static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma, static inline void mpol_put_task_policy(struct task_struct *task) { } + +static inline nodemask_t *policy_nodemask_current(gfp_t gfp) +{ + return NULL; +} #endif /* CONFIG_NUMA */ #endif diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 5f5b2df06e61..e5862746751b 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -46,11 +46,10 @@ struct vmem_altmap { * wakeup is used to coordinate physical address space management (ex: * fs truncate/hole punch) vs pinned pages (ex: device dma). * - * MEMORY_DEVICE_DEVDAX: + * MEMORY_DEVICE_GENERIC: * Host memory that has similar access semantics as System RAM i.e. DMA - * coherent and supports page pinning. In contrast to - * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax - * character device. + * coherent and supports page pinning. This is for example used by DAX devices + * that expose memory using a character device. * * MEMORY_DEVICE_PCI_P2PDMA: * Device memory residing in a PCI BAR intended for use with Peer-to-Peer @@ -60,7 +59,7 @@ enum memory_type { /* 0 is reserved to catch uninitialized type fields */ MEMORY_DEVICE_PRIVATE = 1, MEMORY_DEVICE_FS_DAX, - MEMORY_DEVICE_DEVDAX, + MEMORY_DEVICE_GENERIC, MEMORY_DEVICE_PCI_P2PDMA, }; diff --git a/include/linux/memstick.h b/include/linux/memstick.h index 216a713bef7f..ebf73d4ee969 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -281,6 +281,7 @@ struct memstick_host { struct memstick_dev *card; unsigned int retries; + bool removing; /* Notify the host that some requests are pending. 
*/ void (*request)(struct memstick_host *host); @@ -288,7 +289,7 @@ struct memstick_host { int (*set_param)(struct memstick_host *host, enum memstick_param param, int value); - unsigned long private[0] ____cacheline_aligned; + unsigned long private[] ____cacheline_aligned; }; struct memstick_driver { diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index d01d1299e49d..4b35baa14d30 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h @@ -14,7 +14,7 @@ #define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource)) -#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\ +#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, _use_of_reg, _match) \ { \ .name = (_name), \ .resources = (_res), \ @@ -22,24 +22,32 @@ .platform_data = (_pdata), \ .pdata_size = (_pdsize), \ .of_compatible = (_compat), \ + .of_reg = (_of_reg), \ + .use_of_reg = (_use_of_reg), \ .acpi_match = (_match), \ .id = (_id), \ } -#define OF_MFD_CELL(_name, _res, _pdata, _pdsize,_id, _compat) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) \ +#define OF_MFD_CELL_REG(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _of_reg, true, NULL) -#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) \ +#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, 0, false, NULL) -#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ - MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) \ +#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, _match) -#define MFD_CELL_RES(_name, _res) \ - MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) \ +#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \ + MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, 0, false, NULL) -#define MFD_CELL_NAME(_name) \ - MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) \ +#define MFD_CELL_RES(_name, _res) \ + MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, 0, false, NULL) + +#define MFD_CELL_NAME(_name) \ + MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, 0, false, NULL) + +#define MFD_DEP_LEVEL_NORMAL 0 +#define MFD_DEP_LEVEL_HIGH 1 struct irq_domain; struct property_entry; @@ -58,6 +66,7 @@ struct mfd_cell_acpi_match { struct mfd_cell { const char *name; int id; + int level; int (*enable)(struct platform_device *dev); int (*disable)(struct platform_device *dev); @@ -70,14 +79,24 @@ struct mfd_cell { size_t pdata_size; /* device properties passed to the sub devices drivers */ - struct property_entry *properties; + const struct property_entry *properties; /* * Device Tree compatible string - * See: Documentation/devicetree/usage-model.txt Chapter 2.2 for details + * See: Documentation/devicetree/usage-model.rst Chapter 2.2 for details */ const char *of_compatible; + /* + * Address as defined in Device Tree. 
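A sketch of the of_reg matching these new mfd_cell fields enable: two sub-devices sharing one compatible are told apart by their DT unit address. Cell and compatible names are invented:

#include <linux/mfd/core.h>

static const struct mfd_cell ex_cells[] = {
	/* Matches the child node at reg = <0x10>. */
	OF_MFD_CELL_REG("ex-adc", NULL, NULL, 0, 0, "vendor,ex-adc", 0x10),
	/* Matches the child node at reg = <0x20>. */
	OF_MFD_CELL_REG("ex-adc", NULL, NULL, 0, 1, "vendor,ex-adc", 0x20),
};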
Used to complement 'of_compatible' + * (above) when matching OF nodes with devices that have identical + * compatible strings + */ + const u64 of_reg; + + /* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */ + bool use_of_reg; + /* Matches ACPI */ const struct mfd_cell_acpi_match *acpi_match; @@ -135,6 +154,7 @@ static inline int mfd_add_hotplug_devices(struct device *parent, } extern void mfd_remove_devices(struct device *parent); +extern void mfd_remove_devices_late(struct device *parent); extern int devm_mfd_add_devices(struct device *dev, int id, const struct mfd_cell *cells, int n_devs, diff --git a/include/linux/mfd/da9055/pdata.h b/include/linux/mfd/da9055/pdata.h index eac48e483190..d3f126990ad0 100644 --- a/include/linux/mfd/da9055/pdata.h +++ b/include/linux/mfd/da9055/pdata.h @@ -35,7 +35,7 @@ struct da9055_pdata { int *gpio_rsel; /* * Regulator mode control bits value (GPI offset) that - * that controls the regulator state, 0 if not available. + * controls the regulator state, 0 if not available. */ enum gpio_select *reg_ren; /* diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index 5cd06ab26352..fa7a43f02f27 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -35,6 +35,7 @@ enum da9063_variant_codes { PMIC_DA9063_AD = 0x3, PMIC_DA9063_BB = 0x5, PMIC_DA9063_CA = 0x6, + PMIC_DA9063_DA = 0x7, }; /* Interrupts */ diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h index ba706b0e28c2..1dbabf1b3cb8 100644 --- a/include/linux/mfd/da9063/registers.h +++ b/include/linux/mfd/da9063/registers.h @@ -292,8 +292,10 @@ #define DA9063_BB_REG_GP_ID_19 0x134 /* Chip ID and variant */ -#define DA9063_REG_CHIP_ID 0x181 -#define DA9063_REG_CHIP_VARIANT 0x182 +#define DA9063_REG_DEVICE_ID 0x181 +#define DA9063_REG_VARIANT_ID 0x182 +#define DA9063_REG_CUSTOMER_ID 0x183 +#define DA9063_REG_CONFIG_ID 0x184 /* * PMIC registers bits @@ -929,9 +931,6 @@ #define DA9063_RTC_CLOCK 0x40 #define DA9063_OUT_32K_EN 0x80 -/* DA9063_REG_CHIP_VARIANT */ -#define DA9063_CHIP_VARIANT_SHIFT 4 - /* DA9063_REG_BUCK_ILIM_A (addr=0x9A) */ #define DA9063_BIO_ILIM_MASK 0x0F #define DA9063_BMEM_ILIM_MASK 0xF0 @@ -1065,4 +1064,10 @@ #define DA9063_MON_A10_IDX_LDO9 0x04 #define DA9063_MON_A10_IDX_LDO10 0x05 +/* DA9063_REG_VARIANT_ID (addr=0x182) */ +#define DA9063_VARIANT_ID_VRC_SHIFT 0 +#define DA9063_VARIANT_ID_VRC_MASK 0x0F +#define DA9063_VARIANT_ID_MRC_SHIFT 4 +#define DA9063_VARIANT_ID_MRC_MASK 0xF0 + #endif /* _DA9063_REG_H */ diff --git a/include/linux/mfd/gsc.h b/include/linux/mfd/gsc.h new file mode 100644 index 000000000000..6bd639c285b4 --- /dev/null +++ b/include/linux/mfd/gsc.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (C) 2020 Gateworks Corporation + */ +#ifndef __LINUX_MFD_GSC_H_ +#define __LINUX_MFD_GSC_H_ + +#include <linux/regmap.h> + +/* Device Addresses */ +#define GSC_MISC 0x20 +#define GSC_UPDATE 0x21 +#define GSC_GPIO 0x23 +#define GSC_HWMON 0x29 +#define GSC_EEPROM0 0x50 +#define GSC_EEPROM1 0x51 +#define GSC_EEPROM2 0x52 +#define GSC_EEPROM3 0x53 +#define GSC_RTC 0x68 + +/* Register offsets */ +enum { + GSC_CTRL_0 = 0x00, + GSC_CTRL_1 = 0x01, + GSC_TIME = 0x02, + GSC_TIME_ADD = 0x06, + GSC_IRQ_STATUS = 0x0A, + GSC_IRQ_ENABLE = 0x0B, + GSC_FW_CRC = 0x0C, + GSC_FW_VER = 0x0E, + GSC_WP = 0x0F, +}; + +/* Bit definitions */ +#define GSC_CTRL_0_PB_HARD_RESET 0 +#define GSC_CTRL_0_PB_CLEAR_SECURE_KEY 1 +#define GSC_CTRL_0_PB_SOFT_POWER_DOWN 2 +#define 
GSC_CTRL_0_PB_BOOT_ALTERNATE 3 +#define GSC_CTRL_0_PERFORM_CRC 4 +#define GSC_CTRL_0_TAMPER_DETECT 5 +#define GSC_CTRL_0_SWITCH_HOLD 6 + +#define GSC_CTRL_1_SLEEP_ENABLE 0 +#define GSC_CTRL_1_SLEEP_ACTIVATE 1 +#define GSC_CTRL_1_SLEEP_ADD 2 +#define GSC_CTRL_1_SLEEP_NOWAKEPB 3 +#define GSC_CTRL_1_WDT_TIME 4 +#define GSC_CTRL_1_WDT_ENABLE 5 +#define GSC_CTRL_1_SWITCH_BOOT_ENABLE 6 +#define GSC_CTRL_1_SWITCH_BOOT_CLEAR 7 + +#define GSC_IRQ_PB 0 +#define GSC_IRQ_KEY_ERASED 1 +#define GSC_IRQ_EEPROM_WP 2 +#define GSC_IRQ_RESV 3 +#define GSC_IRQ_GPIO 4 +#define GSC_IRQ_TAMPER 5 +#define GSC_IRQ_WDT_TIMEOUT 6 +#define GSC_IRQ_SWITCH_HOLD 7 + +int gsc_read(void *context, unsigned int reg, unsigned int *val); +int gsc_write(void *context, unsigned int reg, unsigned int val); + +struct gsc_dev { + struct device *dev; + + struct i2c_client *i2c; /* 0x20: interrupt controller, WDT */ + struct i2c_client *i2c_hwmon; /* 0x29: hwmon, fan controller */ + + struct regmap *regmap; + + unsigned int fwver; + unsigned short fwcrc; +}; + +#endif /* __LINUX_MFD_GSC_H_ */ diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h index bbc64484c021..2cadf8897c64 100644 --- a/include/linux/mfd/hi6421-pmic.h +++ b/include/linux/mfd/hi6421-pmic.h @@ -5,7 +5,7 @@ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2014> Linaro Ltd. - * http://www.linaro.org + * https://www.linaro.org * * Author: Guodong Xu <guodong.xu@linaro.org> */ diff --git a/include/linux/mfd/intel_pmc_bxt.h b/include/linux/mfd/intel_pmc_bxt.h new file mode 100644 index 000000000000..f51a43d25ffd --- /dev/null +++ b/include/linux/mfd/intel_pmc_bxt.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MFD_INTEL_PMC_BXT_H +#define MFD_INTEL_PMC_BXT_H + +/* GCR reg offsets from GCR base */ +#define PMC_GCR_PMC_CFG_REG 0x08 +#define PMC_GCR_TELEM_DEEP_S0IX_REG 0x78 +#define PMC_GCR_TELEM_SHLW_S0IX_REG 0x80 + +/* PMC_CFG_REG bit masks */ +#define PMC_CFG_NO_REBOOT_EN BIT(4) + +/** + * struct intel_pmc_dev - Intel PMC device structure + * @dev: Pointer to the parent PMC device + * @scu: Pointer to the SCU IPC device data structure + * @gcr_mem_base: Virtual base address of GCR (Global Configuration Registers) + * @gcr_lock: Lock used to serialize access to GCR registers + * @telem_base: Pointer to telemetry SSRAM base resource or %NULL if not + * available + */ +struct intel_pmc_dev { + struct device *dev; + struct intel_scu_ipc_dev *scu; + void __iomem *gcr_mem_base; + spinlock_t gcr_lock; + struct resource *telem_base; +}; + +#if IS_ENABLED(CONFIG_MFD_INTEL_PMC_BXT) +int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, u64 *data); +int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, u32 mask, u32 val); +int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data); +#else +static inline int intel_pmc_gcr_read64(struct intel_pmc_dev *pmc, u32 offset, + u64 *data) +{ + return -ENOTSUPP; +} + +static inline int intel_pmc_gcr_update(struct intel_pmc_dev *pmc, u32 offset, + u32 mask, u32 val) +{ + return -ENOTSUPP; +} + +static inline int intel_pmc_s0ix_counter_read(struct intel_pmc_dev *pmc, u64 *data) +{ + return -ENOTSUPP; +} +#endif + +#endif /* MFD_INTEL_PMC_BXT_H */ diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index bfecd6bd4990..6a88e34cb955 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h @@ -13,6 +13,20 @@ #include <linux/regmap.h> +/** + * 
struct intel_soc_pmic - Intel SoC PMIC data + * @irq: Master interrupt number of the parent PMIC device + * @regmap: Pointer to the parent PMIC device regmap structure + * @irq_chip_data: IRQ chip data for the PMIC itself + * @irq_chip_data_pwrbtn: Chained IRQ chip data for the Power Button + * @irq_chip_data_tmu: Chained IRQ chip data for the Time Management Unit + * @irq_chip_data_bcu: Chained IRQ chip data for the Burst Control Unit + * @irq_chip_data_adc: Chained IRQ chip data for the General Purpose ADC + * @irq_chip_data_chgr: Chained IRQ chip data for the External Charger + * @irq_chip_data_crit: Chained IRQ chip data for the Critical Event Handler + * @dev: Pointer to the parent PMIC device + * @scu: Pointer to the SCU IPC device data structure + */ struct intel_soc_pmic { int irq; struct regmap *regmap; @@ -24,6 +38,7 @@ struct intel_soc_pmic { struct regmap_irq_chip_data *irq_chip_data_chgr; struct regmap_irq_chip_data *irq_chip_data_crit; struct device *dev; + struct intel_scu_ipc_dev *scu; }; int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, diff --git a/include/linux/mfd/khadas-mcu.h b/include/linux/mfd/khadas-mcu.h new file mode 100644 index 000000000000..a99ba2ed0e4e --- /dev/null +++ b/include/linux/mfd/khadas-mcu.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Khadas System control Microcontroller Register map + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong <narmstrong@baylibre.com> + */ + +#ifndef MFD_KHADAS_MCU_H +#define MFD_KHADAS_MCU_H + +#define KHADAS_MCU_PASSWD_VEN_0_REG 0x00 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_1_REG 0x01 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_2_REG 0x02 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_3_REG 0x03 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_4_REG 0x04 /* RO */ +#define KHADAS_MCU_PASSWD_VEN_5_REG 0x05 /* RO */ +#define KHADAS_MCU_MAC_0_REG 0x06 /* RO */ +#define KHADAS_MCU_MAC_1_REG 0x07 /* RO */ +#define KHADAS_MCU_MAC_2_REG 0x08 /* RO */ +#define KHADAS_MCU_MAC_3_REG 0x09 /* RO */ +#define KHADAS_MCU_MAC_4_REG 0x0a /* RO */ +#define KHADAS_MCU_MAC_5_REG 0x0b /* RO */ +#define KHADAS_MCU_USID_0_REG 0x0c /* RO */ +#define KHADAS_MCU_USID_1_REG 0x0d /* RO */ +#define KHADAS_MCU_USID_2_REG 0x0e /* RO */ +#define KHADAS_MCU_USID_3_REG 0x0f /* RO */ +#define KHADAS_MCU_USID_4_REG 0x10 /* RO */ +#define KHADAS_MCU_USID_5_REG 0x11 /* RO */ +#define KHADAS_MCU_VERSION_0_REG 0x12 /* RO */ +#define KHADAS_MCU_VERSION_1_REG 0x13 /* RO */ +#define KHADAS_MCU_DEVICE_NO_0_REG 0x14 /* RO */ +#define KHADAS_MCU_DEVICE_NO_1_REG 0x15 /* RO */ +#define KHADAS_MCU_FACTORY_TEST_REG 0x16 /* R */ +#define KHADAS_MCU_BOOT_MODE_REG 0x20 /* RW */ +#define KHADAS_MCU_BOOT_EN_WOL_REG 0x21 /* RW */ +#define KHADAS_MCU_BOOT_EN_RTC_REG 0x22 /* RW */ +#define KHADAS_MCU_BOOT_EN_EXP_REG 0x23 /* RW */ +#define KHADAS_MCU_BOOT_EN_IR_REG 0x24 /* RW */ +#define KHADAS_MCU_BOOT_EN_DCIN_REG 0x25 /* RW */ +#define KHADAS_MCU_BOOT_EN_KEY_REG 0x26 /* RW */ +#define KHADAS_MCU_KEY_MODE_REG 0x27 /* RW */ +#define KHADAS_MCU_LED_MODE_ON_REG 0x28 /* RW */ +#define KHADAS_MCU_LED_MODE_OFF_REG 0x29 /* RW */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_REG 0x2c /* RW */ +#define KHADAS_MCU_MAC_SWITCH_REG 0x2d /* RW */ +#define KHADAS_MCU_MCU_SLEEP_MODE_REG 0x2e /* RW */ +#define KHADAS_MCU_IR_CODE1_0_REG 0x2f /* RW */ +#define KHADAS_MCU_IR_CODE1_1_REG 0x30 /* RW */ +#define KHADAS_MCU_IR_CODE1_2_REG 0x31 /* RW */ +#define KHADAS_MCU_IR_CODE1_3_REG 0x32 /* RW */ +#define KHADAS_MCU_USB_PCIE_SWITCH_REG 0x33 /* RW */ 
+#define KHADAS_MCU_IR_CODE2_0_REG 0x34 /* RW */ +#define KHADAS_MCU_IR_CODE2_1_REG 0x35 /* RW */ +#define KHADAS_MCU_IR_CODE2_2_REG 0x36 /* RW */ +#define KHADAS_MCU_IR_CODE2_3_REG 0x37 /* RW */ +#define KHADAS_MCU_PASSWD_USER_0_REG 0x40 /* RW */ +#define KHADAS_MCU_PASSWD_USER_1_REG 0x41 /* RW */ +#define KHADAS_MCU_PASSWD_USER_2_REG 0x42 /* RW */ +#define KHADAS_MCU_PASSWD_USER_3_REG 0x43 /* RW */ +#define KHADAS_MCU_PASSWD_USER_4_REG 0x44 /* RW */ +#define KHADAS_MCU_PASSWD_USER_5_REG 0x45 /* RW */ +#define KHADAS_MCU_USER_DATA_0_REG 0x46 /* RW 56 bytes */ +#define KHADAS_MCU_PWR_OFF_CMD_REG 0x80 /* WO */ +#define KHADAS_MCU_PASSWD_START_REG 0x81 /* WO */ +#define KHADAS_MCU_CHECK_VEN_PASSWD_REG 0x82 /* WO */ +#define KHADAS_MCU_CHECK_USER_PASSWD_REG 0x83 /* WO */ +#define KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG 0x86 /* RO */ +#define KHADAS_MCU_WOL_INIT_START_REG 0x87 /* WO */ +#define KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG 0x88 /* WO */ + +enum { + KHADAS_BOARD_VIM1 = 0x1, + KHADAS_BOARD_VIM2, + KHADAS_BOARD_VIM3, + KHADAS_BOARD_EDGE = 0x11, + KHADAS_BOARD_EDGE_V, +}; + +/** + * struct khadas_mcu - Khadas MCU structure + * @dev: device reference used for logs + * @regmap: register map + */ +struct khadas_mcu { + struct device *dev; + struct regmap *regmap; +}; + +#endif /* MFD_KHADAS_MCU_H */ diff --git a/include/linux/mfd/lp873x.h b/include/linux/mfd/lp873x.h index edbec8350a49..5546688c7da7 100644 --- a/include/linux/mfd/lp873x.h +++ b/include/linux/mfd/lp873x.h @@ -1,7 +1,7 @@ /* * Functions to access LP873X power management chip. * - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/lp87565.h b/include/linux/mfd/lp87565.h index ce965354bbad..43716aca46fa 100644 --- a/include/linux/mfd/lp87565.h +++ b/include/linux/mfd/lp87565.h @@ -2,7 +2,7 @@ /* * Functions to access LP87565 power management chip. 
* - * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __LINUX_MFD_LP87565_H diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index fa9595dd42ba..601cbbc10370 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -21,7 +21,6 @@ struct gpio_desc; struct pinctrl_map; -struct madera_codec_pdata; /** * struct madera_pdata - Configuration data for Madera devices diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index e798c81aec31..311f7d3d2323 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -131,7 +131,7 @@ enum max77693_pmic_reg { #define FLASH_INT_FLED1_SHORT BIT(3) #define FLASH_INT_OVER_CURRENT BIT(4) -/* Fast charge timer in in hours */ +/* Fast charge timer in hours */ #define DEFAULT_FAST_CHARGE_TIMER 4 /* microamps */ #define DEFAULT_TOP_OFF_THRESHOLD_CURRENT 150000 diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h index 061af220dcd3..79c020bd0c70 100644 --- a/include/linux/mfd/max8998.h +++ b/include/linux/mfd/max8998.h @@ -39,6 +39,7 @@ enum { MAX8998_ENVICHG, MAX8998_ESAFEOUT1, MAX8998_ESAFEOUT2, + MAX8998_CHARGER, }; /** diff --git a/include/linux/mfd/mp2629.h b/include/linux/mfd/mp2629.h new file mode 100644 index 000000000000..89b706900b57 --- /dev/null +++ b/include/linux/mfd/mp2629.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2020 Monolithic Power Systems, Inc + */ + +#ifndef __MP2629_H__ +#define __MP2629_H__ + +#include <linux/device.h> +#include <linux/regmap.h> + +struct mp2629_data { + struct device *dev; + struct regmap *regmap; +}; + +enum mp2629_adc_chan { + MP2629_BATT_VOLT, + MP2629_SYSTEM_VOLT, + MP2629_INPUT_VOLT, + MP2629_BATT_CURRENT, + MP2629_INPUT_CURRENT, + MP2629_ADC_CHAN_END +}; + +#endif diff --git a/include/linux/mfd/mt6358/core.h b/include/linux/mfd/mt6358/core.h new file mode 100644 index 000000000000..c5a11b7458d4 --- /dev/null +++ b/include/linux/mfd/mt6358/core.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. 
+ */ + +#ifndef __MFD_MT6358_CORE_H__ +#define __MFD_MT6358_CORE_H__ + +#define MT6358_REG_WIDTH 16 + +struct irq_top_t { + int hwirq_base; + unsigned int num_int_regs; + unsigned int num_int_bits; + unsigned int en_reg; + unsigned int en_reg_shift; + unsigned int sta_reg; + unsigned int sta_reg_shift; + unsigned int top_offset; +}; + +struct pmic_irq_data { + unsigned int num_top; + unsigned int num_pmic_irqs; + unsigned short top_int_status_reg; + bool *enable_hwirq; + bool *cache_hwirq; +}; + +enum mt6358_irq_top_status_shift { + MT6358_BUCK_TOP = 0, + MT6358_LDO_TOP, + MT6358_PSC_TOP, + MT6358_SCK_TOP, + MT6358_BM_TOP, + MT6358_HK_TOP, + MT6358_AUD_TOP, + MT6358_MISC_TOP, +}; + +enum mt6358_irq_numbers { + MT6358_IRQ_VPROC11_OC = 0, + MT6358_IRQ_VPROC12_OC, + MT6358_IRQ_VCORE_OC, + MT6358_IRQ_VGPU_OC, + MT6358_IRQ_VMODEM_OC, + MT6358_IRQ_VDRAM1_OC, + MT6358_IRQ_VS1_OC, + MT6358_IRQ_VS2_OC, + MT6358_IRQ_VPA_OC, + MT6358_IRQ_VCORE_PREOC, + MT6358_IRQ_VFE28_OC = 16, + MT6358_IRQ_VXO22_OC, + MT6358_IRQ_VRF18_OC, + MT6358_IRQ_VRF12_OC, + MT6358_IRQ_VEFUSE_OC, + MT6358_IRQ_VCN33_OC, + MT6358_IRQ_VCN28_OC, + MT6358_IRQ_VCN18_OC, + MT6358_IRQ_VCAMA1_OC, + MT6358_IRQ_VCAMA2_OC, + MT6358_IRQ_VCAMD_OC, + MT6358_IRQ_VCAMIO_OC, + MT6358_IRQ_VLDO28_OC, + MT6358_IRQ_VA12_OC, + MT6358_IRQ_VAUX18_OC, + MT6358_IRQ_VAUD28_OC, + MT6358_IRQ_VIO28_OC, + MT6358_IRQ_VIO18_OC, + MT6358_IRQ_VSRAM_PROC11_OC, + MT6358_IRQ_VSRAM_PROC12_OC, + MT6358_IRQ_VSRAM_OTHERS_OC, + MT6358_IRQ_VSRAM_GPU_OC, + MT6358_IRQ_VDRAM2_OC, + MT6358_IRQ_VMC_OC, + MT6358_IRQ_VMCH_OC, + MT6358_IRQ_VEMC_OC, + MT6358_IRQ_VSIM1_OC, + MT6358_IRQ_VSIM2_OC, + MT6358_IRQ_VIBR_OC, + MT6358_IRQ_VUSB_OC, + MT6358_IRQ_VBIF28_OC, + MT6358_IRQ_PWRKEY = 48, + MT6358_IRQ_HOMEKEY, + MT6358_IRQ_PWRKEY_R, + MT6358_IRQ_HOMEKEY_R, + MT6358_IRQ_NI_LBAT_INT, + MT6358_IRQ_CHRDET, + MT6358_IRQ_CHRDET_EDGE, + MT6358_IRQ_VCDT_HV_DET, + MT6358_IRQ_RTC = 64, + MT6358_IRQ_FG_BAT0_H = 80, + MT6358_IRQ_FG_BAT0_L, + MT6358_IRQ_FG_CUR_H, + MT6358_IRQ_FG_CUR_L, + MT6358_IRQ_FG_ZCV, + MT6358_IRQ_FG_BAT1_H, + MT6358_IRQ_FG_BAT1_L, + MT6358_IRQ_FG_N_CHARGE_L, + MT6358_IRQ_FG_IAVG_H, + MT6358_IRQ_FG_IAVG_L, + MT6358_IRQ_FG_TIME_H, + MT6358_IRQ_FG_DISCHARGE, + MT6358_IRQ_FG_CHARGE, + MT6358_IRQ_BATON_LV = 96, + MT6358_IRQ_BATON_HT, + MT6358_IRQ_BATON_BAT_IN, + MT6358_IRQ_BATON_BAT_OUT, + MT6358_IRQ_BIF, + MT6358_IRQ_BAT_H = 112, + MT6358_IRQ_BAT_L, + MT6358_IRQ_BAT2_H, + MT6358_IRQ_BAT2_L, + MT6358_IRQ_BAT_TEMP_H, + MT6358_IRQ_BAT_TEMP_L, + MT6358_IRQ_AUXADC_IMP, + MT6358_IRQ_NAG_C_DLTV, + MT6358_IRQ_AUDIO = 128, + MT6358_IRQ_ACCDET = 133, + MT6358_IRQ_ACCDET_EINT0, + MT6358_IRQ_ACCDET_EINT1, + MT6358_IRQ_SPI_CMD_ALERT = 144, + MT6358_IRQ_NR, +}; + +#define MT6358_IRQ_BUCK_BASE MT6358_IRQ_VPROC11_OC +#define MT6358_IRQ_LDO_BASE MT6358_IRQ_VFE28_OC +#define MT6358_IRQ_PSC_BASE MT6358_IRQ_PWRKEY +#define MT6358_IRQ_SCK_BASE MT6358_IRQ_RTC +#define MT6358_IRQ_BM_BASE MT6358_IRQ_FG_BAT0_H +#define MT6358_IRQ_HK_BASE MT6358_IRQ_BAT_H +#define MT6358_IRQ_AUD_BASE MT6358_IRQ_AUDIO +#define MT6358_IRQ_MISC_BASE MT6358_IRQ_SPI_CMD_ALERT + +#define MT6358_IRQ_BUCK_BITS (MT6358_IRQ_VCORE_PREOC - MT6358_IRQ_BUCK_BASE + 1) +#define MT6358_IRQ_LDO_BITS (MT6358_IRQ_VBIF28_OC - MT6358_IRQ_LDO_BASE + 1) +#define MT6358_IRQ_PSC_BITS (MT6358_IRQ_VCDT_HV_DET - MT6358_IRQ_PSC_BASE + 1) +#define MT6358_IRQ_SCK_BITS (MT6358_IRQ_RTC - MT6358_IRQ_SCK_BASE + 1) +#define MT6358_IRQ_BM_BITS (MT6358_IRQ_BIF - MT6358_IRQ_BM_BASE + 1) +#define MT6358_IRQ_HK_BITS (MT6358_IRQ_NAG_C_DLTV - MT6358_IRQ_HK_BASE 
+ 1) +#define MT6358_IRQ_AUD_BITS (MT6358_IRQ_ACCDET_EINT1 - MT6358_IRQ_AUD_BASE + 1) +#define MT6358_IRQ_MISC_BITS \ + (MT6358_IRQ_SPI_CMD_ALERT - MT6358_IRQ_MISC_BASE + 1) + +#define MT6358_TOP_GEN(sp) \ +{ \ + .hwirq_base = MT6358_IRQ_##sp##_BASE, \ + .num_int_regs = \ + ((MT6358_IRQ_##sp##_BITS - 1) / MT6358_REG_WIDTH) + 1, \ + .num_int_bits = MT6358_IRQ_##sp##_BITS, \ + .en_reg = MT6358_##sp##_TOP_INT_CON0, \ + .en_reg_shift = 0x6, \ + .sta_reg = MT6358_##sp##_TOP_INT_STATUS0, \ + .sta_reg_shift = 0x2, \ + .top_offset = MT6358_##sp##_TOP, \ +} + +#endif /* __MFD_MT6358_CORE_H__ */ diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h new file mode 100644 index 000000000000..2ad0b312aa28 --- /dev/null +++ b/include/linux/mfd/mt6358/registers.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. + */ + +#ifndef __MFD_MT6358_REGISTERS_H__ +#define __MFD_MT6358_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6358_SWCID 0xa +#define MT6358_MISC_TOP_INT_CON0 0x188 +#define MT6358_MISC_TOP_INT_STATUS0 0x194 +#define MT6358_TOP_INT_STATUS0 0x19e +#define MT6358_SCK_TOP_INT_CON0 0x52e +#define MT6358_SCK_TOP_INT_STATUS0 0x53a +#define MT6358_EOSC_CALI_CON0 0x540 +#define MT6358_EOSC_CALI_CON1 0x542 +#define MT6358_RTC_MIX_CON0 0x544 +#define MT6358_RTC_MIX_CON1 0x546 +#define MT6358_RTC_MIX_CON2 0x548 +#define MT6358_RTC_DSN_ID 0x580 +#define MT6358_RTC_DSN_REV0 0x582 +#define MT6358_RTC_DBI 0x584 +#define MT6358_RTC_DXI 0x586 +#define MT6358_RTC_BBPU 0x588 +#define MT6358_RTC_IRQ_STA 0x58a +#define MT6358_RTC_IRQ_EN 0x58c +#define MT6358_RTC_CII_EN 0x58e +#define MT6358_RTC_AL_MASK 0x590 +#define MT6358_RTC_TC_SEC 0x592 +#define MT6358_RTC_TC_MIN 0x594 +#define MT6358_RTC_TC_HOU 0x596 +#define MT6358_RTC_TC_DOM 0x598 +#define MT6358_RTC_TC_DOW 0x59a +#define MT6358_RTC_TC_MTH 0x59c +#define MT6358_RTC_TC_YEA 0x59e +#define MT6358_RTC_AL_SEC 0x5a0 +#define MT6358_RTC_AL_MIN 0x5a2 +#define MT6358_RTC_AL_HOU 0x5a4 +#define MT6358_RTC_AL_DOM 0x5a6 +#define MT6358_RTC_AL_DOW 0x5a8 +#define MT6358_RTC_AL_MTH 0x5aa +#define MT6358_RTC_AL_YEA 0x5ac +#define MT6358_RTC_OSC32CON 0x5ae +#define MT6358_RTC_POWERKEY1 0x5b0 +#define MT6358_RTC_POWERKEY2 0x5b2 +#define MT6358_RTC_PDN1 0x5b4 +#define MT6358_RTC_PDN2 0x5b6 +#define MT6358_RTC_SPAR0 0x5b8 +#define MT6358_RTC_SPAR1 0x5ba +#define MT6358_RTC_PROT 0x5bc +#define MT6358_RTC_DIFF 0x5be +#define MT6358_RTC_CALI 0x5c0 +#define MT6358_RTC_WRTGR 0x5c2 +#define MT6358_RTC_CON 0x5c4 +#define MT6358_RTC_SEC_CTRL 0x5c6 +#define MT6358_RTC_INT_CNT 0x5c8 +#define MT6358_RTC_SEC_DAT0 0x5ca +#define MT6358_RTC_SEC_DAT1 0x5cc +#define MT6358_RTC_SEC_DAT2 0x5ce +#define MT6358_RTC_SEC_DSN_ID 0x600 +#define MT6358_RTC_SEC_DSN_REV0 0x602 +#define MT6358_RTC_SEC_DBI 0x604 +#define MT6358_RTC_SEC_DXI 0x606 +#define MT6358_RTC_TC_SEC_SEC 0x608 +#define MT6358_RTC_TC_MIN_SEC 0x60a +#define MT6358_RTC_TC_HOU_SEC 0x60c +#define MT6358_RTC_TC_DOM_SEC 0x60e +#define MT6358_RTC_TC_DOW_SEC 0x610 +#define MT6358_RTC_TC_MTH_SEC 0x612 +#define MT6358_RTC_TC_YEA_SEC 0x614 +#define MT6358_RTC_SEC_CK_PDN 0x616 +#define MT6358_RTC_SEC_WRTGR 0x618 +#define MT6358_PSC_TOP_INT_CON0 0x910 +#define MT6358_PSC_TOP_INT_STATUS0 0x91c +#define MT6358_BM_TOP_INT_CON0 0xc32 +#define MT6358_BM_TOP_INT_CON1 0xc38 +#define MT6358_BM_TOP_INT_STATUS0 0xc4a +#define MT6358_BM_TOP_INT_STATUS1 0xc4c +#define MT6358_HK_TOP_INT_CON0 0xf92 +#define MT6358_HK_TOP_INT_STATUS0 0xf9e +#define MT6358_BUCK_TOP_INT_CON0 0x1318 
+#define MT6358_BUCK_TOP_INT_STATUS0 0x1324 +#define MT6358_BUCK_VPROC11_CON0 0x1388 +#define MT6358_BUCK_VPROC11_DBG0 0x139e +#define MT6358_BUCK_VPROC11_DBG1 0x13a0 +#define MT6358_BUCK_VPROC11_ELR0 0x13a6 +#define MT6358_BUCK_VPROC12_CON0 0x1408 +#define MT6358_BUCK_VPROC12_DBG0 0x141e +#define MT6358_BUCK_VPROC12_DBG1 0x1420 +#define MT6358_BUCK_VPROC12_ELR0 0x1426 +#define MT6358_BUCK_VCORE_CON0 0x1488 +#define MT6358_BUCK_VCORE_DBG0 0x149e +#define MT6358_BUCK_VCORE_DBG1 0x14a0 +#define MT6358_BUCK_VCORE_ELR0 0x14aa +#define MT6358_BUCK_VGPU_CON0 0x1508 +#define MT6358_BUCK_VGPU_DBG0 0x151e +#define MT6358_BUCK_VGPU_DBG1 0x1520 +#define MT6358_BUCK_VGPU_ELR0 0x1526 +#define MT6358_BUCK_VMODEM_CON0 0x1588 +#define MT6358_BUCK_VMODEM_DBG0 0x159e +#define MT6358_BUCK_VMODEM_DBG1 0x15a0 +#define MT6358_BUCK_VMODEM_ELR0 0x15a6 +#define MT6358_BUCK_VDRAM1_CON0 0x1608 +#define MT6358_BUCK_VDRAM1_DBG0 0x161e +#define MT6358_BUCK_VDRAM1_DBG1 0x1620 +#define MT6358_BUCK_VDRAM1_ELR0 0x1626 +#define MT6358_BUCK_VS1_CON0 0x1688 +#define MT6358_BUCK_VS1_DBG0 0x169e +#define MT6358_BUCK_VS1_DBG1 0x16a0 +#define MT6358_BUCK_VS1_ELR0 0x16ae +#define MT6358_BUCK_VS2_CON0 0x1708 +#define MT6358_BUCK_VS2_DBG0 0x171e +#define MT6358_BUCK_VS2_DBG1 0x1720 +#define MT6358_BUCK_VS2_ELR0 0x172e +#define MT6358_BUCK_VPA_CON0 0x1788 +#define MT6358_BUCK_VPA_CON1 0x178a +#define MT6358_BUCK_VPA_ELR0 MT6358_BUCK_VPA_CON1 +#define MT6358_BUCK_VPA_DBG0 0x1792 +#define MT6358_BUCK_VPA_DBG1 0x1794 +#define MT6358_VPROC_ANA_CON0 0x180c +#define MT6358_VCORE_VGPU_ANA_CON0 0x1828 +#define MT6358_VMODEM_ANA_CON0 0x1888 +#define MT6358_VDRAM1_ANA_CON0 0x1896 +#define MT6358_VS1_ANA_CON0 0x18a2 +#define MT6358_VS2_ANA_CON0 0x18ae +#define MT6358_VPA_ANA_CON0 0x18ba +#define MT6358_LDO_TOP_INT_CON0 0x1a50 +#define MT6358_LDO_TOP_INT_CON1 0x1a56 +#define MT6358_LDO_TOP_INT_STATUS0 0x1a68 +#define MT6358_LDO_TOP_INT_STATUS1 0x1a6a +#define MT6358_LDO_VXO22_CON0 0x1a88 +#define MT6358_LDO_VXO22_CON1 0x1a96 +#define MT6358_LDO_VA12_CON0 0x1a9c +#define MT6358_LDO_VA12_CON1 0x1aaa +#define MT6358_LDO_VAUX18_CON0 0x1ab0 +#define MT6358_LDO_VAUX18_CON1 0x1abe +#define MT6358_LDO_VAUD28_CON0 0x1ac4 +#define MT6358_LDO_VAUD28_CON1 0x1ad2 +#define MT6358_LDO_VIO28_CON0 0x1ad8 +#define MT6358_LDO_VIO28_CON1 0x1ae6 +#define MT6358_LDO_VIO18_CON0 0x1aec +#define MT6358_LDO_VIO18_CON1 0x1afa +#define MT6358_LDO_VDRAM2_CON0 0x1b08 +#define MT6358_LDO_VDRAM2_CON1 0x1b16 +#define MT6358_LDO_VEMC_CON0 0x1b1c +#define MT6358_LDO_VEMC_CON1 0x1b2a +#define MT6358_LDO_VUSB_CON0_0 0x1b30 +#define MT6358_LDO_VUSB_CON1 0x1b40 +#define MT6358_LDO_VSRAM_PROC11_CON0 0x1b46 +#define MT6358_LDO_VSRAM_PROC11_DBG0 0x1b60 +#define MT6358_LDO_VSRAM_PROC11_DBG1 0x1b62 +#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON0 0x1b64 +#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON1 0x1b66 +#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON2 0x1b68 +#define MT6358_LDO_VSRAM_PROC11_TRACKING_CON3 0x1b6a +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON0 0x1b6c +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON1 0x1b6e +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON2 0x1b70 +#define MT6358_LDO_VSRAM_PROC12_TRACKING_CON3 0x1b72 +#define MT6358_LDO_VSRAM_WAKEUP_CON0 0x1b74 +#define MT6358_LDO_GON1_ELR_NUM 0x1b76 +#define MT6358_LDO_VDRAM2_ELR0 0x1b78 +#define MT6358_LDO_VSRAM_PROC12_CON0 0x1b88 +#define MT6358_LDO_VSRAM_PROC12_DBG0 0x1ba2 +#define MT6358_LDO_VSRAM_PROC12_DBG1 0x1ba4 +#define MT6358_LDO_VSRAM_OTHERS_CON0 0x1ba6 +#define MT6358_LDO_VSRAM_OTHERS_DBG0 0x1bc0 +#define 
MT6358_LDO_VSRAM_OTHERS_DBG1 0x1bc2 +#define MT6358_LDO_VSRAM_GPU_CON0 0x1bc8 +#define MT6358_LDO_VSRAM_GPU_DBG0 0x1be2 +#define MT6358_LDO_VSRAM_GPU_DBG1 0x1be4 +#define MT6358_LDO_VSRAM_CON0 0x1bee +#define MT6358_LDO_VSRAM_CON1 0x1bf0 +#define MT6358_LDO_VSRAM_CON2 0x1bf2 +#define MT6358_LDO_VSRAM_CON3 0x1bf4 +#define MT6358_LDO_VFE28_CON0 0x1c08 +#define MT6358_LDO_VFE28_CON1 0x1c16 +#define MT6358_LDO_VFE28_CON2 0x1c18 +#define MT6358_LDO_VFE28_CON3 0x1c1a +#define MT6358_LDO_VRF18_CON0 0x1c1c +#define MT6358_LDO_VRF18_CON1 0x1c2a +#define MT6358_LDO_VRF18_CON2 0x1c2c +#define MT6358_LDO_VRF18_CON3 0x1c2e +#define MT6358_LDO_VRF12_CON0 0x1c30 +#define MT6358_LDO_VRF12_CON1 0x1c3e +#define MT6358_LDO_VRF12_CON2 0x1c40 +#define MT6358_LDO_VRF12_CON3 0x1c42 +#define MT6358_LDO_VEFUSE_CON0 0x1c44 +#define MT6358_LDO_VEFUSE_CON1 0x1c52 +#define MT6358_LDO_VEFUSE_CON2 0x1c54 +#define MT6358_LDO_VEFUSE_CON3 0x1c56 +#define MT6358_LDO_VCN18_CON0 0x1c58 +#define MT6358_LDO_VCN18_CON1 0x1c66 +#define MT6358_LDO_VCN18_CON2 0x1c68 +#define MT6358_LDO_VCN18_CON3 0x1c6a +#define MT6358_LDO_VCAMA1_CON0 0x1c6c +#define MT6358_LDO_VCAMA1_CON1 0x1c7a +#define MT6358_LDO_VCAMA1_CON2 0x1c7c +#define MT6358_LDO_VCAMA1_CON3 0x1c7e +#define MT6358_LDO_VCAMA2_CON0 0x1c88 +#define MT6358_LDO_VCAMA2_CON1 0x1c96 +#define MT6358_LDO_VCAMA2_CON2 0x1c98 +#define MT6358_LDO_VCAMA2_CON3 0x1c9a +#define MT6358_LDO_VCAMD_CON0 0x1c9c +#define MT6358_LDO_VCAMD_CON1 0x1caa +#define MT6358_LDO_VCAMD_CON2 0x1cac +#define MT6358_LDO_VCAMD_CON3 0x1cae +#define MT6358_LDO_VCAMIO_CON0 0x1cb0 +#define MT6358_LDO_VCAMIO_CON1 0x1cbe +#define MT6358_LDO_VCAMIO_CON2 0x1cc0 +#define MT6358_LDO_VCAMIO_CON3 0x1cc2 +#define MT6358_LDO_VMC_CON0 0x1cc4 +#define MT6358_LDO_VMC_CON1 0x1cd2 +#define MT6358_LDO_VMC_CON2 0x1cd4 +#define MT6358_LDO_VMC_CON3 0x1cd6 +#define MT6358_LDO_VMCH_CON0 0x1cd8 +#define MT6358_LDO_VMCH_CON1 0x1ce6 +#define MT6358_LDO_VMCH_CON2 0x1ce8 +#define MT6358_LDO_VMCH_CON3 0x1cea +#define MT6358_LDO_VIBR_CON0 0x1d08 +#define MT6358_LDO_VIBR_CON1 0x1d16 +#define MT6358_LDO_VIBR_CON2 0x1d18 +#define MT6358_LDO_VIBR_CON3 0x1d1a +#define MT6358_LDO_VCN33_CON0_0 0x1d1c +#define MT6358_LDO_VCN33_CON0_1 0x1d2a +#define MT6358_LDO_VCN33_CON1 0x1d2c +#define MT6358_LDO_VCN33_BT_CON1 MT6358_LDO_VCN33_CON1 +#define MT6358_LDO_VCN33_WIFI_CON1 MT6358_LDO_VCN33_CON1 +#define MT6358_LDO_VCN33_CON2 0x1d2e +#define MT6358_LDO_VCN33_CON3 0x1d30 +#define MT6358_LDO_VLDO28_CON0_0 0x1d32 +#define MT6358_LDO_VLDO28_CON0_1 0x1d40 +#define MT6358_LDO_VLDO28_CON1 0x1d42 +#define MT6358_LDO_VLDO28_CON2 0x1d44 +#define MT6358_LDO_VLDO28_CON3 0x1d46 +#define MT6358_LDO_VSIM1_CON0 0x1d48 +#define MT6358_LDO_VSIM1_CON1 0x1d56 +#define MT6358_LDO_VSIM1_CON2 0x1d58 +#define MT6358_LDO_VSIM1_CON3 0x1d5a +#define MT6358_LDO_VSIM2_CON0 0x1d5c +#define MT6358_LDO_VSIM2_CON1 0x1d6a +#define MT6358_LDO_VSIM2_CON2 0x1d6c +#define MT6358_LDO_VSIM2_CON3 0x1d6e +#define MT6358_LDO_VCN28_CON0 0x1d88 +#define MT6358_LDO_VCN28_CON1 0x1d96 +#define MT6358_LDO_VCN28_CON2 0x1d98 +#define MT6358_LDO_VCN28_CON3 0x1d9a +#define MT6358_VRTC28_CON0 0x1d9c +#define MT6358_LDO_VBIF28_CON0 0x1d9e +#define MT6358_LDO_VBIF28_CON1 0x1dac +#define MT6358_LDO_VBIF28_CON2 0x1dae +#define MT6358_LDO_VBIF28_CON3 0x1db0 +#define MT6358_VCAMA1_ANA_CON0 0x1e08 +#define MT6358_VCAMA2_ANA_CON0 0x1e0c +#define MT6358_VCN33_ANA_CON0 0x1e28 +#define MT6358_VSIM1_ANA_CON0 0x1e2c +#define MT6358_VSIM2_ANA_CON0 0x1e30 +#define MT6358_VUSB_ANA_CON0 0x1e34 +#define MT6358_VEMC_ANA_CON0 
0x1e38 +#define MT6358_VLDO28_ANA_CON0 0x1e3c +#define MT6358_VIO28_ANA_CON0 0x1e40 +#define MT6358_VIBR_ANA_CON0 0x1e44 +#define MT6358_VMCH_ANA_CON0 0x1e48 +#define MT6358_VMC_ANA_CON0 0x1e4c +#define MT6358_VRF18_ANA_CON0 0x1e88 +#define MT6358_VCN18_ANA_CON0 0x1e8c +#define MT6358_VCAMIO_ANA_CON0 0x1e90 +#define MT6358_VIO18_ANA_CON0 0x1e94 +#define MT6358_VEFUSE_ANA_CON0 0x1e98 +#define MT6358_VRF12_ANA_CON0 0x1e9c +#define MT6358_VSRAM_PROC11_ANA_CON0 0x1ea0 +#define MT6358_VSRAM_PROC12_ANA_CON0 0x1ea4 +#define MT6358_VSRAM_OTHERS_ANA_CON0 0x1ea6 +#define MT6358_VSRAM_GPU_ANA_CON0 0x1ea8 +#define MT6358_VDRAM2_ANA_CON0 0x1eaa +#define MT6358_VCAMD_ANA_CON0 0x1eae +#define MT6358_VA12_ANA_CON0 0x1eb2 +#define MT6358_AUD_TOP_INT_CON0 0x2228 +#define MT6358_AUD_TOP_INT_STATUS0 0x2234 + +#endif /* __MFD_MT6358_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6360.h b/include/linux/mfd/mt6360.h new file mode 100644 index 000000000000..ea1304035d4d --- /dev/null +++ b/include/linux/mfd/mt6360.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 MediaTek Inc. + */ + +#ifndef __MT6360_H__ +#define __MT6360_H__ + +#include <linux/regmap.h> + +enum { + MT6360_SLAVE_PMU = 0, + MT6360_SLAVE_PMIC, + MT6360_SLAVE_LDO, + MT6360_SLAVE_TCPC, + MT6360_SLAVE_MAX, +}; + +#define MT6360_PMU_SLAVEID (0x34) +#define MT6360_PMIC_SLAVEID (0x1A) +#define MT6360_LDO_SLAVEID (0x64) +#define MT6360_TCPC_SLAVEID (0x4E) + +struct mt6360_pmu_data { + struct i2c_client *i2c[MT6360_SLAVE_MAX]; + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + unsigned int chip_rev; +}; + +/* PMU register definition */ +#define MT6360_PMU_DEV_INFO (0x00) +#define MT6360_PMU_CORE_CTRL1 (0x01) +#define MT6360_PMU_RST1 (0x02) +#define MT6360_PMU_CRCEN (0x03) +#define MT6360_PMU_RST_PAS_CODE1 (0x04) +#define MT6360_PMU_RST_PAS_CODE2 (0x05) +#define MT6360_PMU_CORE_CTRL2 (0x06) +#define MT6360_PMU_TM_PAS_CODE1 (0x07) +#define MT6360_PMU_TM_PAS_CODE2 (0x08) +#define MT6360_PMU_TM_PAS_CODE3 (0x09) +#define MT6360_PMU_TM_PAS_CODE4 (0x0A) +#define MT6360_PMU_IRQ_IND (0x0B) +#define MT6360_PMU_IRQ_MASK (0x0C) +#define MT6360_PMU_IRQ_SET (0x0D) +#define MT6360_PMU_SHDN_CTRL (0x0E) +#define MT6360_PMU_TM_INF (0x0F) +#define MT6360_PMU_I2C_CTRL (0x10) +#define MT6360_PMU_CHG_CTRL1 (0x11) +#define MT6360_PMU_CHG_CTRL2 (0x12) +#define MT6360_PMU_CHG_CTRL3 (0x13) +#define MT6360_PMU_CHG_CTRL4 (0x14) +#define MT6360_PMU_CHG_CTRL5 (0x15) +#define MT6360_PMU_CHG_CTRL6 (0x16) +#define MT6360_PMU_CHG_CTRL7 (0x17) +#define MT6360_PMU_CHG_CTRL8 (0x18) +#define MT6360_PMU_CHG_CTRL9 (0x19) +#define MT6360_PMU_CHG_CTRL10 (0x1A) +#define MT6360_PMU_CHG_CTRL11 (0x1B) +#define MT6360_PMU_CHG_CTRL12 (0x1C) +#define MT6360_PMU_CHG_CTRL13 (0x1D) +#define MT6360_PMU_CHG_CTRL14 (0x1E) +#define MT6360_PMU_CHG_CTRL15 (0x1F) +#define MT6360_PMU_CHG_CTRL16 (0x20) +#define MT6360_PMU_CHG_AICC_RESULT (0x21) +#define MT6360_PMU_DEVICE_TYPE (0x22) +#define MT6360_PMU_QC_CONTROL1 (0x23) +#define MT6360_PMU_QC_CONTROL2 (0x24) +#define MT6360_PMU_QC30_CONTROL1 (0x25) +#define MT6360_PMU_QC30_CONTROL2 (0x26) +#define MT6360_PMU_USB_STATUS1 (0x27) +#define MT6360_PMU_QC_STATUS1 (0x28) +#define MT6360_PMU_QC_STATUS2 (0x29) +#define MT6360_PMU_CHG_PUMP (0x2A) +#define MT6360_PMU_CHG_CTRL17 (0x2B) +#define MT6360_PMU_CHG_CTRL18 (0x2C) +#define MT6360_PMU_CHRDET_CTRL1 (0x2D) +#define MT6360_PMU_CHRDET_CTRL2 (0x2E) +#define MT6360_PMU_DPDN_CTRL (0x2F) +#define MT6360_PMU_CHG_HIDDEN_CTRL1 (0x30) +#define 
MT6360_PMU_CHG_HIDDEN_CTRL2 (0x31) +#define MT6360_PMU_CHG_HIDDEN_CTRL3 (0x32) +#define MT6360_PMU_CHG_HIDDEN_CTRL4 (0x33) +#define MT6360_PMU_CHG_HIDDEN_CTRL5 (0x34) +#define MT6360_PMU_CHG_HIDDEN_CTRL6 (0x35) +#define MT6360_PMU_CHG_HIDDEN_CTRL7 (0x36) +#define MT6360_PMU_CHG_HIDDEN_CTRL8 (0x37) +#define MT6360_PMU_CHG_HIDDEN_CTRL9 (0x38) +#define MT6360_PMU_CHG_HIDDEN_CTRL10 (0x39) +#define MT6360_PMU_CHG_HIDDEN_CTRL11 (0x3A) +#define MT6360_PMU_CHG_HIDDEN_CTRL12 (0x3B) +#define MT6360_PMU_CHG_HIDDEN_CTRL13 (0x3C) +#define MT6360_PMU_CHG_HIDDEN_CTRL14 (0x3D) +#define MT6360_PMU_CHG_HIDDEN_CTRL15 (0x3E) +#define MT6360_PMU_CHG_HIDDEN_CTRL16 (0x3F) +#define MT6360_PMU_CHG_HIDDEN_CTRL17 (0x40) +#define MT6360_PMU_CHG_HIDDEN_CTRL18 (0x41) +#define MT6360_PMU_CHG_HIDDEN_CTRL19 (0x42) +#define MT6360_PMU_CHG_HIDDEN_CTRL20 (0x43) +#define MT6360_PMU_CHG_HIDDEN_CTRL21 (0x44) +#define MT6360_PMU_CHG_HIDDEN_CTRL22 (0x45) +#define MT6360_PMU_CHG_HIDDEN_CTRL23 (0x46) +#define MT6360_PMU_CHG_HIDDEN_CTRL24 (0x47) +#define MT6360_PMU_CHG_HIDDEN_CTRL25 (0x48) +#define MT6360_PMU_BC12_CTRL (0x49) +#define MT6360_PMU_CHG_STAT (0x4A) +#define MT6360_PMU_RESV1 (0x4B) +#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEH (0x4E) +#define MT6360_PMU_TYPEC_OTP_TH_SEL_CODEL (0x4F) +#define MT6360_PMU_TYPEC_OTP_HYST_TH (0x50) +#define MT6360_PMU_TYPEC_OTP_CTRL (0x51) +#define MT6360_PMU_ADC_BAT_DATA_H (0x52) +#define MT6360_PMU_ADC_BAT_DATA_L (0x53) +#define MT6360_PMU_IMID_BACKBST_ON (0x54) +#define MT6360_PMU_IMID_BACKBST_OFF (0x55) +#define MT6360_PMU_ADC_CONFIG (0x56) +#define MT6360_PMU_ADC_EN2 (0x57) +#define MT6360_PMU_ADC_IDLE_T (0x58) +#define MT6360_PMU_ADC_RPT_1 (0x5A) +#define MT6360_PMU_ADC_RPT_2 (0x5B) +#define MT6360_PMU_ADC_RPT_3 (0x5C) +#define MT6360_PMU_ADC_RPT_ORG1 (0x5D) +#define MT6360_PMU_ADC_RPT_ORG2 (0x5E) +#define MT6360_PMU_BAT_OVP_TH_SEL_CODEH (0x5F) +#define MT6360_PMU_BAT_OVP_TH_SEL_CODEL (0x60) +#define MT6360_PMU_CHG_CTRL19 (0x61) +#define MT6360_PMU_VDDASUPPLY (0x62) +#define MT6360_PMU_BC12_MANUAL (0x63) +#define MT6360_PMU_CHGDET_FUNC (0x64) +#define MT6360_PMU_FOD_CTRL (0x65) +#define MT6360_PMU_CHG_CTRL20 (0x66) +#define MT6360_PMU_CHG_HIDDEN_CTRL26 (0x67) +#define MT6360_PMU_CHG_HIDDEN_CTRL27 (0x68) +#define MT6360_PMU_RESV2 (0x69) +#define MT6360_PMU_USBID_CTRL1 (0x6D) +#define MT6360_PMU_USBID_CTRL2 (0x6E) +#define MT6360_PMU_USBID_CTRL3 (0x6F) +#define MT6360_PMU_FLED_CFG (0x70) +#define MT6360_PMU_RESV3 (0x71) +#define MT6360_PMU_FLED1_CTRL (0x72) +#define MT6360_PMU_FLED_STRB_CTRL (0x73) +#define MT6360_PMU_FLED1_STRB_CTRL2 (0x74) +#define MT6360_PMU_FLED1_TOR_CTRL (0x75) +#define MT6360_PMU_FLED2_CTRL (0x76) +#define MT6360_PMU_RESV4 (0x77) +#define MT6360_PMU_FLED2_STRB_CTRL2 (0x78) +#define MT6360_PMU_FLED2_TOR_CTRL (0x79) +#define MT6360_PMU_FLED_VMIDTRK_CTRL1 (0x7A) +#define MT6360_PMU_FLED_VMID_RTM (0x7B) +#define MT6360_PMU_FLED_VMIDTRK_CTRL2 (0x7C) +#define MT6360_PMU_FLED_PWSEL (0x7D) +#define MT6360_PMU_FLED_EN (0x7E) +#define MT6360_PMU_FLED_Hidden1 (0x7F) +#define MT6360_PMU_RGB_EN (0x80) +#define MT6360_PMU_RGB1_ISNK (0x81) +#define MT6360_PMU_RGB2_ISNK (0x82) +#define MT6360_PMU_RGB3_ISNK (0x83) +#define MT6360_PMU_RGB_ML_ISNK (0x84) +#define MT6360_PMU_RGB1_DIM (0x85) +#define MT6360_PMU_RGB2_DIM (0x86) +#define MT6360_PMU_RGB3_DIM (0x87) +#define MT6360_PMU_RESV5 (0x88) +#define MT6360_PMU_RGB12_Freq (0x89) +#define MT6360_PMU_RGB34_Freq (0x8A) +#define MT6360_PMU_RGB1_Tr (0x8B) +#define MT6360_PMU_RGB1_Tf (0x8C) +#define MT6360_PMU_RGB1_TON_TOFF (0x8D) +#define 
MT6360_PMU_RGB2_Tr (0x8E) +#define MT6360_PMU_RGB2_Tf (0x8F) +#define MT6360_PMU_RGB2_TON_TOFF (0x90) +#define MT6360_PMU_RGB3_Tr (0x91) +#define MT6360_PMU_RGB3_Tf (0x92) +#define MT6360_PMU_RGB3_TON_TOFF (0x93) +#define MT6360_PMU_RGB_Hidden_CTRL1 (0x94) +#define MT6360_PMU_RGB_Hidden_CTRL2 (0x95) +#define MT6360_PMU_RESV6 (0x97) +#define MT6360_PMU_SPARE1 (0x9A) +#define MT6360_PMU_SPARE2 (0xA0) +#define MT6360_PMU_SPARE3 (0xB0) +#define MT6360_PMU_SPARE4 (0xC0) +#define MT6360_PMU_CHG_IRQ1 (0xD0) +#define MT6360_PMU_CHG_IRQ2 (0xD1) +#define MT6360_PMU_CHG_IRQ3 (0xD2) +#define MT6360_PMU_CHG_IRQ4 (0xD3) +#define MT6360_PMU_CHG_IRQ5 (0xD4) +#define MT6360_PMU_CHG_IRQ6 (0xD5) +#define MT6360_PMU_QC_IRQ (0xD6) +#define MT6360_PMU_FOD_IRQ (0xD7) +#define MT6360_PMU_BASE_IRQ (0xD8) +#define MT6360_PMU_FLED_IRQ1 (0xD9) +#define MT6360_PMU_FLED_IRQ2 (0xDA) +#define MT6360_PMU_RGB_IRQ (0xDB) +#define MT6360_PMU_BUCK1_IRQ (0xDC) +#define MT6360_PMU_BUCK2_IRQ (0xDD) +#define MT6360_PMU_LDO_IRQ1 (0xDE) +#define MT6360_PMU_LDO_IRQ2 (0xDF) +#define MT6360_PMU_CHG_STAT1 (0xE0) +#define MT6360_PMU_CHG_STAT2 (0xE1) +#define MT6360_PMU_CHG_STAT3 (0xE2) +#define MT6360_PMU_CHG_STAT4 (0xE3) +#define MT6360_PMU_CHG_STAT5 (0xE4) +#define MT6360_PMU_CHG_STAT6 (0xE5) +#define MT6360_PMU_QC_STAT (0xE6) +#define MT6360_PMU_FOD_STAT (0xE7) +#define MT6360_PMU_BASE_STAT (0xE8) +#define MT6360_PMU_FLED_STAT1 (0xE9) +#define MT6360_PMU_FLED_STAT2 (0xEA) +#define MT6360_PMU_RGB_STAT (0xEB) +#define MT6360_PMU_BUCK1_STAT (0xEC) +#define MT6360_PMU_BUCK2_STAT (0xED) +#define MT6360_PMU_LDO_STAT1 (0xEE) +#define MT6360_PMU_LDO_STAT2 (0xEF) +#define MT6360_PMU_CHG_MASK1 (0xF0) +#define MT6360_PMU_CHG_MASK2 (0xF1) +#define MT6360_PMU_CHG_MASK3 (0xF2) +#define MT6360_PMU_CHG_MASK4 (0xF3) +#define MT6360_PMU_CHG_MASK5 (0xF4) +#define MT6360_PMU_CHG_MASK6 (0xF5) +#define MT6360_PMU_QC_MASK (0xF6) +#define MT6360_PMU_FOD_MASK (0xF7) +#define MT6360_PMU_BASE_MASK (0xF8) +#define MT6360_PMU_FLED_MASK1 (0xF9) +#define MT6360_PMU_FLED_MASK2 (0xFA) +#define MT6360_PMU_FAULTB_MASK (0xFB) +#define MT6360_PMU_BUCK1_MASK (0xFC) +#define MT6360_PMU_BUCK2_MASK (0xFD) +#define MT6360_PMU_LDO_MASK1 (0xFE) +#define MT6360_PMU_LDO_MASK2 (0xFF) +#define MT6360_PMU_MAXREG (MT6360_PMU_LDO_MASK2) + +/* MT6360_PMU_IRQ_SET */ +#define MT6360_PMU_IRQ_REGNUM (MT6360_PMU_LDO_IRQ2 - MT6360_PMU_CHG_IRQ1 + 1) +#define MT6360_IRQ_RETRIG BIT(2) + +#define CHIP_VEN_MASK (0xF0) +#define CHIP_VEN_MT6360 (0x50) +#define CHIP_REV_MASK (0x0F) + +#endif /* __MT6360_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index fc88d315bdde..949268581b36 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -8,9 +8,11 @@ #define __MFD_MT6397_CORE_H__ #include <linux/mutex.h> +#include <linux/notifier.h> enum chip_id { MT6323_CHIP_ID = 0x23, + MT6358_CHIP_ID = 0x58, MT6391_CHIP_ID = 0x91, MT6397_CHIP_ID = 0x97, }; @@ -54,6 +56,7 @@ enum mt6397_irq_numbers { struct mt6397_chip { struct device *dev; struct regmap *regmap; + struct notifier_block pm_nb; int irq; struct irq_domain *irq_domain; struct mutex irqlock; @@ -63,8 +66,10 @@ struct mt6397_chip { u16 int_con[2]; u16 int_status[2]; u16 chip_id; + void *irq_data; }; +int mt6358_irq_init(struct mt6397_chip *chip); int mt6397_irq_init(struct mt6397_chip *chip); #endif /* __MFD_MT6397_CORE_H__ */ diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h index 7dfb63b81373..66989a16221a 100644 --- a/include/linux/mfd/mt6397/rtc.h +++ 
b/include/linux/mfd/mt6397/rtc.h @@ -18,7 +18,9 @@ #define RTC_BBPU_CBUSY BIT(6) #define RTC_BBPU_KEY (0x43 << 8) -#define RTC_WRTGR 0x003c +#define RTC_WRTGR_MT6358 0x003a +#define RTC_WRTGR_MT6397 0x003c +#define RTC_WRTGR_MT6323 RTC_WRTGR_MT6397 #define RTC_IRQ_STA 0x0002 #define RTC_IRQ_STA_AL BIT(0) @@ -65,6 +67,10 @@ #define MTK_RTC_POLL_DELAY_US 10 #define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ)) +struct mtk_rtc_data { + u32 wrtgr; +}; + struct mt6397_rtc { struct device *dev; struct rtc_device *rtc_dev; @@ -74,6 +80,7 @@ struct mt6397_rtc { struct regmap *regmap; int irq; u32 addr_base; + const struct mtk_rtc_data *data; }; #endif /* _LINUX_MFD_MT6397_RTC_H_ */ diff --git a/include/linux/mfd/sky81452.h b/include/linux/mfd/sky81452.h index d469aa481243..b08570ff34df 100644 --- a/include/linux/mfd/sky81452.h +++ b/include/linux/mfd/sky81452.h @@ -9,11 +9,9 @@ #ifndef _SKY81452_H #define _SKY81452_H -#include <linux/platform_data/sky81452-backlight.h> #include <linux/regulator/machine.h> struct sky81452_platform_data { - struct sky81452_bl_platform_data *bl_pdata; struct regulator_init_data *regulator_init_data; }; diff --git a/include/linux/mfd/smsc.h b/include/linux/mfd/smsc.h deleted file mode 100644 index 83944124e886..000000000000 --- a/include/linux/mfd/smsc.h +++ /dev/null @@ -1,104 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * SMSC ECE1099 - * - * Copyright 2012 Texas Instruments Inc. - * - * Author: Sourav Poddar <sourav.poddar@ti.com> - */ - -#ifndef __LINUX_MFD_SMSC_H -#define __LINUX_MFD_SMSC_H - -#include <linux/regmap.h> - -#define SMSC_ID_ECE1099 1 -#define SMSC_NUM_CLIENTS 2 - -#define SMSC_BASE_ADDR 0x38 -#define OMAP_GPIO_SMSC_IRQ 151 - -#define SMSC_MAXGPIO 32 -#define SMSC_BANK(offs) ((offs) >> 3) -#define SMSC_BIT(offs) (1u << ((offs) & 0x7)) - -struct smsc { - struct device *dev; - struct i2c_client *i2c_clients[SMSC_NUM_CLIENTS]; - struct regmap *regmap; - int clk; - /* Stored chip id */ - int id; -}; - -struct smsc_gpio; -struct smsc_keypad; - -static inline int smsc_read(struct device *child, unsigned int reg, - unsigned int *dest) -{ - struct smsc *smsc = dev_get_drvdata(child->parent); - - return regmap_read(smsc->regmap, reg, dest); -} - -static inline int smsc_write(struct device *child, unsigned int reg, - unsigned int value) -{ - struct smsc *smsc = dev_get_drvdata(child->parent); - - return regmap_write(smsc->regmap, reg, value); -} - -/* Registers for SMSC */ -#define SMSC_RESET 0xF5 -#define SMSC_GRP_INT 0xF9 -#define SMSC_CLK_CTRL 0xFA -#define SMSC_WKUP_CTRL 0xFB -#define SMSC_DEV_ID 0xFC -#define SMSC_DEV_REV 0xFD -#define SMSC_VEN_ID_L 0xFE -#define SMSC_VEN_ID_H 0xFF - -/* CLK VALUE */ -#define SMSC_CLK_VALUE 0x13 - -/* Registers for function GPIO INPUT */ -#define SMSC_GPIO_DATA_IN_START 0x00 - -/* Registers for function GPIO OUPUT */ -#define SMSC_GPIO_DATA_OUT_START 0x05 - -/* Definitions for SMSC GPIO CONFIGURATION REGISTER*/ -#define SMSC_GPIO_INPUT_LOW 0x01 -#define SMSC_GPIO_INPUT_RISING 0x09 -#define SMSC_GPIO_INPUT_FALLING 0x11 -#define SMSC_GPIO_INPUT_BOTH_EDGE 0x19 -#define SMSC_GPIO_OUTPUT_PP 0x21 -#define SMSC_GPIO_OUTPUT_OP 0x31 - -#define GRP_INT_STAT 0xf9 -#define SMSC_GPI_INT 0x0f -#define SMSC_CFG_START 0x0A - -/* Registers for SMSC GPIO INTERRUPT STATUS REGISTER*/ -#define SMSC_GPIO_INT_STAT_START 0x32 - -/* Registers for SMSC GPIO INTERRUPT MASK REGISTER*/ -#define SMSC_GPIO_INT_MASK_START 0x37 - -/* Registers for SMSC function KEYPAD*/ -#define SMSC_KP_OUT 0x40 -#define SMSC_KP_IN 0x41 -#define 
SMSC_KP_INT_STAT 0x42 -#define SMSC_KP_INT_MASK 0x43 - -/* Definitions for keypad */ -#define SMSC_KP_KSO 0x70 -#define SMSC_KP_KSI 0x51 -#define SMSC_KSO_ALL_LOW 0x20 -#define SMSC_KP_SET_LOW_PWR 0x0B -#define SMSC_KP_SET_HIGH 0xFF -#define SMSC_KSO_EVAL 0x00 - -#endif /* __LINUX_MFD_SMSC_H */ diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h index 605f62264825..90b20550c1c8 100644 --- a/include/linux/mfd/stm32-lptimer.h +++ b/include/linux/mfd/stm32-lptimer.h @@ -27,10 +27,15 @@ #define STM32_LPTIM_CMPOK BIT(3) /* STM32_LPTIM_ICR - bit fields */ +#define STM32_LPTIM_ARRMCF BIT(1) #define STM32_LPTIM_CMPOKCF_ARROKCF GENMASK(4, 3) +/* STM32_LPTIM_IER - bit fields */ +#define STM32_LPTIM_ARRMIE BIT(1) + /* STM32_LPTIM_CR - bit fields */ #define STM32_LPTIM_CNTSTRT BIT(2) +#define STM32_LPTIM_SNGSTRT BIT(1) #define STM32_LPTIM_ENABLE BIT(0) /* STM32_LPTIM_CFGR - bit fields */ diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h index 3c67983678ec..744dce63946e 100644 --- a/include/linux/mfd/stmfx.h +++ b/include/linux/mfd/stmfx.h @@ -109,6 +109,7 @@ struct stmfx { struct device *dev; struct regmap *map; struct regulator *vdd; + int irq; struct irq_domain *irq_domain; struct mutex lock; /* IRQ bus lock */ u8 irq_src; diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 483168403ae5..ffc091b77633 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -4,7 +4,7 @@ /* * TI Touch Screen / ADC MFD driver * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h index a228ae4c88d9..e0a417e53766 100644 --- a/include/linux/mfd/tps65086.h +++ b/include/linux/mfd/tps65086.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h index b5dd108421c8..db7091824ed0 100644 --- a/include/linux/mfd/tps65217.h +++ b/include/linux/mfd/tps65217.h @@ -3,7 +3,7 @@ * * Functions to access TPS65217 power management chip. * - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index b0470c35162d..f4ca367e3473 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h @@ -3,7 +3,7 @@ * * Functions to access TPS65219 power management chip. 
* - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 as diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index b25d0297ba88..7943e413deae 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 3d7c3c26eeb9..c4a940d98912 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -331,8 +331,6 @@ struct mhi_controller_config { * @wlock: Lock for protecting device wakeup * @mhi_link_info: Device bandwidth info * @st_worker: State transition worker - * @fw_worker: Firmware download worker - * @syserr_worker: System error worker * @state_event: State change event * @status_cb: CB function to notify power states of the device (required) * @wake_get: CB function to assert device wake (optional) @@ -412,8 +410,6 @@ struct mhi_controller { spinlock_t wlock; struct mhi_link_info mhi_link_info; struct work_struct st_worker; - struct work_struct fw_worker; - struct work_struct syserr_worker; wait_queue_head_t state_event; void (*status_cb)(struct mhi_controller *mhi_cntrl, @@ -573,6 +569,13 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state); /** + * mhi_notify - Notify the MHI client driver about client device status + * @mhi_dev: MHI device instance + * @cb_reason: MHI callback reason + */ +void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason); + +/** * mhi_prepare_for_power_up - Do pre-initialization before power up. * This is optional, call this before power up if * the controller does not want bus framework to @@ -609,6 +612,18 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl); /** + * mhi_pm_suspend - Move MHI into a suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_suspend(struct mhi_controller *mhi_cntrl); + +/** + * mhi_pm_resume - Resume MHI from suspended state + * @mhi_cntrl: MHI controller + */ +int mhi_pm_resume(struct mhi_controller *mhi_cntrl); + +/** * mhi_download_rddm_img - Download ramdump image from device for * debugging purpose. * @mhi_cntrl: MHI controller diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h index 491156a2359f..e99c789424e0 100644 --- a/include/linux/mic_bus.h +++ b/include/linux/mic_bus.h @@ -6,7 +6,7 @@ * * Intel MIC Bus driver. * - * This implementation is very similar to the the virtio bus driver + * This implementation is very similar to the virtio bus driver * implementation @ include/linux/virtio.h. 
*/ #ifndef _MIC_BUS_H_ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 3e546cbf03dd..0f8d1583fa8e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -10,6 +10,8 @@ typedef struct page *new_page_t(struct page *page, unsigned long private); typedef void free_page_t(struct page *page, unsigned long private); +struct migration_target_control; + /* * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; @@ -31,34 +33,6 @@ enum migrate_reason { /* In mm/debug.c; also keep sync with include/trace/events/migrate.h */ extern const char *migrate_reason_names[MR_TYPES]; -static inline struct page *new_page_nodemask(struct page *page, - int preferred_nid, nodemask_t *nodemask) -{ - gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; - unsigned int order = 0; - struct page *new_page = NULL; - - if (PageHuge(page)) - return alloc_huge_page_nodemask(page_hstate(compound_head(page)), - preferred_nid, nodemask); - - if (PageTransHuge(page)) { - gfp_mask |= GFP_TRANSHUGE; - order = HPAGE_PMD_ORDER; - } - - if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) - gfp_mask |= __GFP_HIGHMEM; - - new_page = __alloc_pages_nodemask(gfp_mask, order, - preferred_nid, nodemask); - - if (new_page && PageTransHuge(new_page)) - prep_transhuge_page(new_page); - - return new_page; -} - #ifdef CONFIG_MIGRATION extern void putback_movable_pages(struct list_head *l); @@ -67,6 +41,7 @@ extern int migrate_page(struct address_space *mapping, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason); +extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); extern void putback_movable_page(struct page *page); @@ -85,6 +60,9 @@ static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, int reason) { return -ENOSYS; } +static inline struct page *alloc_migration_target(struct page *page, + unsigned long private) + { return NULL; } static inline int isolate_movable_page(struct page *page, isolate_mode_t mode) { return -EBUSY; } @@ -180,6 +158,11 @@ static inline unsigned long migrate_pfn(unsigned long pfn) return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID; } +enum migrate_vma_direction { + MIGRATE_VMA_SELECT_SYSTEM = 1 << 0, + MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1, +}; + struct migrate_vma { struct vm_area_struct *vma; /* @@ -199,11 +182,14 @@ struct migrate_vma { /* * Set to the owner value also stored in page->pgmap->owner for - * migrating out of device private memory. If set only device - * private pages with this owner are migrated. If not set - * device private pages are not migrated at all. + * migrating out of device private memory. The flags also need to + * be set to MIGRATE_VMA_SELECT_DEVICE_PRIVATE. + * The caller should always set this field when using mmu notifier + * callbacks to avoid device MMU invalidations for device private + * pages that are not being migrated. 
*/ - void *src_owner; + void *pgmap_owner; + unsigned long flags; }; int migrate_vma_setup(struct migrate_vma *args); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 20372de0b587..06e066e04a4b 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -573,7 +573,6 @@ struct mlx4_caps { int reserved_eqs; int num_comp_vectors; int num_mpts; - int max_fmr_maps; int num_mtts; int fmr_reserved_mtts; int reserved_mtts; @@ -707,17 +706,6 @@ struct mlx4_mw { int enabled; }; -struct mlx4_fmr { - struct mlx4_mr mr; - struct mlx4_mpt_entry *mpt; - __be64 *mtts; - dma_addr_t dma_handle; - int max_pages; - int max_maps; - int maps; - u8 page_shift; -}; - struct mlx4_uar { unsigned long pfn; int index; @@ -1412,14 +1400,6 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); -int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, - int npages, u64 iova, u32 *lkey, u32 *rkey); -int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, - int max_maps, u8 page_shift, struct mlx4_fmr *fmr); -int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr); -void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, - u32 *lkey, u32 *rkey); -int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); int mlx4_SYNC_TPT(struct mlx4_dev *dev); int mlx4_test_interrupt(struct mlx4_dev *dev, int vector); int mlx4_test_async(struct mlx4_dev *dev); @@ -1522,6 +1502,8 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, int enable); + +struct mlx4_mpt_entry; int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, struct mlx4_mpt_entry ***mpt_entry); int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr, diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 8e2828d48d7f..9db93e487496 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -362,7 +362,7 @@ struct mlx4_wqe_datagram_seg { struct mlx4_wqe_lso_seg { __be32 mss_hdr_size; - __be32 header[0]; + __be32 header[]; }; enum mlx4_wqe_bind_seg_flags2 { diff --git a/include/linux/mlx5/accel.h b/include/linux/mlx5/accel.h index 5613e677a5f9..dacf69516002 100644 --- a/include/linux/mlx5/accel.h +++ b/include/linux/mlx5/accel.h @@ -76,7 +76,7 @@ struct aes_gcm_keymat { struct mlx5_accel_esp_xfrm_attrs { enum mlx5_accel_esp_action action; u32 esn; - u32 spi; + __be32 spi; u32 seq; u32 tfc_pad; u32 flags; @@ -92,6 +92,18 @@ struct mlx5_accel_esp_xfrm_attrs { union { struct aes_gcm_keymat aes_gcm; } keymat; + + union { + __be32 a4; + __be32 a6[4]; + } saddr; + + union { + __be32 a4; + __be32 a6[4]; + } daddr; + + u8 is_ipv6; }; struct mlx5_accel_esp_xfrm { @@ -114,7 +126,7 @@ enum mlx5_accel_ipsec_cap { MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN = 1 << 7, }; -#ifdef CONFIG_MLX5_FPGA_IPSEC +#ifdef CONFIG_MLX5_ACCEL u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev); @@ -140,5 +152,5 @@ static inline int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; } -#endif -#endif +#endif /* CONFIG_MLX5_ACCEL */ +#endif /* __MLX5_ACCEL_H__ */ diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h deleted file 
mode 100644 index 68cd08f02c2f..000000000000 --- a/include/linux/mlx5/cmd.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef MLX5_CMD_H -#define MLX5_CMD_H - -#include <linux/types.h> - -struct manage_pages_layout { - u64 ptr; - u32 reserved; - u16 num_entries; - u16 func_id; -}; - - -struct mlx5_cmd_alloc_uar_imm_out { - u32 rsvd[3]; - u32 uarn; -}; - -#endif /* MLX5_CMD_H */ diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 40748fc1b11b..7bfb67363434 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -33,7 +33,6 @@ #ifndef MLX5_CORE_CQ_H #define MLX5_CORE_CQ_H -#include <rdma/ib_verbs.h> #include <linux/mlx5/driver.h> #include <linux/refcount.h> @@ -188,7 +187,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen); int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq); int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, - u32 *out, int outlen); + u32 *out); int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen); int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev, diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 2b90097a6cf9..4d3376e20f5e 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -276,7 +276,9 @@ enum { MLX5_MKEY_MASK_RW = 1ull << 20, MLX5_MKEY_MASK_A = 1ull << 21, MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, - MLX5_MKEY_MASK_FREE = 1ull << 29, + MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25, + MLX5_MKEY_MASK_FREE = 1ull << 29, + MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47, }; enum { @@ -364,6 +366,7 @@ enum { enum { MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1, MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5, + MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8, }; enum { @@ -449,10 +452,21 @@ enum { enum { MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1, + MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2, }; enum { MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1, + MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2, +}; + +struct mlx5_wqe_tls_static_params_seg 
{ + u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)]; +}; + +struct mlx5_wqe_tls_progress_params_seg { + __be32 tis_tir_num; + u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)]; }; enum { @@ -689,6 +703,19 @@ struct mlx5_eqe_temp_warning { __be64 sensor_warning_lsb; } __packed; +#define SYNC_RST_STATE_MASK 0xf + +enum sync_rst_state_type { + MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0, + MLX5_SYNC_RST_STATE_RESET_NOW = 0x1, + MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2, +}; + +struct mlx5_eqe_sync_fw_update { + u8 reserved_at_0[3]; + u8 sync_rst_state; +}; + union ev_data { __be32 raw[7]; struct mlx5_eqe_cmd cmd; @@ -707,6 +734,7 @@ union ev_data { struct mlx5_eqe_dct dct; struct mlx5_eqe_temp_warning temp_warning; struct mlx5_eqe_xrq_err xrq_err; + struct mlx5_eqe_sync_fw_update sync_fw_update; } __packed; struct mlx5_eqe { @@ -749,7 +777,7 @@ struct mlx5_err_cqe { }; struct mlx5_cqe64 { - u8 outer_l3_tunneled; + u8 tls_outer_l3_tunneled; u8 rsvd0; __be16 wqe_id; u8 lro_tcppsh_abort_dupack; @@ -767,7 +795,12 @@ struct mlx5_cqe64 { u8 l4_l3_hdr_type; __be16 vlan_info; __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ - __be32 imm_inval_pkey; + union { + __be32 immediate; + __be32 inval_rkey; + __be32 pkey; + __be32 ft_metadata; + }; u8 rsvd40[4]; __be32 byte_cnt; __be32 timestamp_h; @@ -834,7 +867,12 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) { - return cqe->outer_l3_tunneled & 0x1; + return cqe->tls_outer_l3_tunneled & 0x1; +} + +static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe) +{ + return (cqe->tls_outer_l3_tunneled >> 3) & 0x3; } static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) @@ -922,6 +960,13 @@ enum { CQE_L4_OK = 1 << 2, }; +enum { + CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0, + CQE_TLS_OFFLOAD_DECRYPTED = 0x1, + CQE_TLS_OFFLOAD_RESYNC = 0x2, + CQE_TLS_OFFLOAD_ERROR = 0x3, +}; + struct mlx5_sig_err_cqe { u8 rsvd0[16]; __be32 expected_trans_sig; @@ -964,7 +1009,6 @@ enum { MLX5_MKEY_REMOTE_INVAL = 1 << 24, MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, MLX5_MKEY_BSF_EN = 1 << 30, - MLX5_MKEY_LEN64 = 1 << 31, }; struct mlx5_mkey_seg { @@ -1107,6 +1151,7 @@ enum mlx5_cap_type { MLX5_CAP_TLS, MLX5_CAP_VDPA_EMULATION = 0x13, MLX5_CAP_DEV_EVENT = 0x14, + MLX5_CAP_IPSEC, /* NUM OF CAP Types */ MLX5_CAP_NUM }; @@ -1317,13 +1362,16 @@ enum mlx5_qcam_feature_groups { MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ - MLX5_GET(device_virtio_emulation_cap, \ + MLX5_GET(virtio_emulation_cap, \ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ - MLX5_GET64(device_virtio_emulation_cap, \ + MLX5_GET64(virtio_emulation_cap, \ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) +#define MLX5_CAP_IPSEC(mdev, cap)\ + MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap) + enum { MLX5_CMD_STAT_OK = 0x0, MLX5_CMD_STAT_INT_ERR = 0x1, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 8397b6558dc7..372100c755e7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -130,6 +130,7 @@ enum { MLX5_REG_NODE_DESC = 0x6001, MLX5_REG_HOST_ENDIANNESS = 0x7004, MLX5_REG_MCIA = 0x9014, + MLX5_REG_MFRL = 0x9028, MLX5_REG_MLCR = 0x902b, MLX5_REG_MTRC_CAP = 0x9040, MLX5_REG_MTRC_CONF = 0x9041, @@ -146,6 +147,7 @@ enum { MLX5_REG_MCDA = 0x9063, MLX5_REG_MCAM = 0x907f, MLX5_REG_MIRC = 0x9162, + MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, }; @@ -200,7 +202,7 @@ 
struct mlx5_rsc_debug { void *object; enum dbg_rsc_type type; struct dentry *root; - struct mlx5_field_desc fields[0]; + struct mlx5_field_desc fields[]; }; enum mlx5_dev_event { @@ -297,7 +299,7 @@ struct mlx5_cmd { struct mlx5_cmd_debug dbg; struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; int checksum_disabled; - struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; + struct mlx5_cmd_stats *stats; }; struct mlx5_port_caps { @@ -539,7 +541,7 @@ struct mlx5_priv { /* pages stuff */ struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; - struct rb_root page_root; + struct xarray page_root_xa; int fw_pages; atomic_t reg_pages; struct list_head free_list; @@ -549,7 +551,6 @@ struct mlx5_priv { struct mlx5_core_health health; /* start: qp staff */ - struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; struct dentry *eq_debugfs; struct dentry *cq_debugfs; @@ -695,7 +696,6 @@ struct mlx5_core_dev { unsigned long intf_state; struct mlx5_priv priv; struct mlx5_profile *profile; - atomic_t num_qps; u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_dm *dm; @@ -708,6 +708,9 @@ struct mlx5_core_dev { #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif +#ifdef CONFIG_MLX5_ACCEL + const struct mlx5_accel_ipsec_ops *ipsec_ops; +#endif struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; struct mlx5_fw_tracer *tracer; @@ -764,6 +767,8 @@ struct mlx5_cmd_work_ent { u64 ts2; u16 op; bool polling; + /* Track the max comp handlers */ + refcount_t refcnt; }; struct mlx5_pas { @@ -833,11 +838,6 @@ static inline u16 fw_rev_sub(struct mlx5_core_dev *dev) return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff; } -static inline u16 cmdif_rev(struct mlx5_core_dev *dev) -{ - return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; -} - static inline u32 mlx5_base_mkey(const u32 key) { return key & 0xffffff00u; @@ -919,9 +919,23 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); + +#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \ + ({ \ + mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \ + MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \ + }) + +#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \ + ({ \ + u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \ + mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \ + }) + int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size); void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); +bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); @@ -964,6 +978,7 @@ void mlx5_register_debugfs(void); void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); +void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn); @@ -1016,11 +1031,6 @@ int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); -static inline int fw_initializing(struct mlx5_core_dev *dev) -{ - return ioread32be(&dev->iseg->initializing) >> 31; -} - static inline u32 mlx5_mkey_to_idx(u32 mkey) { return mkey >> 8; @@ -1051,6 +1061,7 @@ enum { enum { 
MLX5_INTERFACE_PROTOCOL_IB = 0, MLX5_INTERFACE_PROTOCOL_ETH = 1, + MLX5_INTERFACE_PROTOCOL_VDPA = 2, }; struct mlx5_interface { @@ -1078,6 +1089,8 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); +u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, + struct net_device *slave); int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, @@ -1085,7 +1098,8 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, - u64 length, u16 uid, phys_addr_t *addr, u32 *obj_id); + u64 length, u32 log_alignment, u16 uid, + phys_addr_t *addr, u32 *obj_id); int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t addr, u32 obj_id); diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index e2d13e074067..92d991d93757 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -42,6 +42,7 @@ enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18, + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19, }; enum { @@ -206,7 +207,10 @@ struct mlx5_flow_act { u32 action; struct mlx5_modify_hdr *modify_hdr; struct mlx5_pkt_reformat *pkt_reformat; - uintptr_t esp_id; + union { + u32 ipsec_obj_id; + uintptr_t esp_id; + }; u32 flags; struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; struct ib_counters *counters; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 69b27c7dfc3e..de1ffb4804d6 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -74,6 +74,7 @@ enum { MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, + MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4, }; enum { @@ -92,6 +93,7 @@ enum { enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, + MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, MLX5_OBJ_TYPE_PSV = 0xff03, @@ -415,7 +417,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 table_miss_action_domain[0x1]; u8 termination_table[0x1]; u8 reformat_and_fwd_to_table[0x1]; - u8 reserved_at_1a[0x6]; + u8 reserved_at_1a[0x2]; + u8 ipsec_encrypt[0x1]; + u8 ipsec_decrypt[0x1]; + u8 reserved_at_1e[0x2]; + u8 termination_table_raw_traffic[0x1]; u8 reserved_at_21[0x1]; u8 log_max_ft_size[0x6]; @@ -583,9 +589,7 @@ struct mlx5_ifc_fte_match_set_misc2_bits { u8 metadata_reg_a[0x20]; - u8 metadata_reg_b[0x20]; - - u8 reserved_at_1c0[0x40]; + u8 reserved_at_1a0[0x60]; }; struct mlx5_ifc_fte_match_set_misc3_bits { @@ -885,7 +889,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 tunnel_stateless_vxlan_gpe[0x1]; u8 tunnel_stateless_ipv4_over_vxlan[0x1]; u8 tunnel_stateless_ip_over_ip[0x1]; - u8 reserved_at_2a[0x6]; + u8 insert_trailer[0x1]; + u8 reserved_at_2b[0x5]; u8 max_vxlan_udp_ports[0x8]; u8 reserved_at_38[0x6]; u8 max_geneve_opt_len[0x1]; @@ -903,7 +908,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { struct mlx5_ifc_roce_cap_bits { u8 roce_apm[0x1]; - u8 reserved_at_1[0x1f]; + u8 reserved_at_1[0x3]; + u8 
sw_r_roce_src_udp_port[0x1]; + u8 reserved_at_5[0x1b]; u8 reserved_at_20[0x60]; @@ -979,17 +986,40 @@ struct mlx5_ifc_device_event_cap_bits { u8 user_unaffiliated_events[4][0x40]; }; -struct mlx5_ifc_device_virtio_emulation_cap_bits { - u8 reserved_at_0[0x20]; +struct mlx5_ifc_virtio_emulation_cap_bits { + u8 desc_tunnel_offload_type[0x1]; + u8 eth_frame_offload_type[0x1]; + u8 virtio_version_1_0[0x1]; + u8 device_features_bits_mask[0xd]; + u8 event_mode[0x8]; + u8 virtio_queue_type[0x8]; - u8 reserved_at_20[0x13]; + u8 max_tunnel_desc[0x10]; + u8 reserved_at_30[0x3]; u8 log_doorbell_stride[0x5]; u8 reserved_at_38[0x3]; u8 log_doorbell_bar_size[0x5]; u8 doorbell_bar_offset[0x40]; - u8 reserved_at_80[0x780]; + u8 max_emulated_devices[0x8]; + u8 max_num_virtio_queues[0x18]; + + u8 reserved_at_a0[0x60]; + + u8 umem_1_buffer_param_a[0x20]; + + u8 umem_1_buffer_param_b[0x20]; + + u8 umem_2_buffer_param_a[0x20]; + + u8 umem_2_buffer_param_b[0x20]; + + u8 umem_3_buffer_param_a[0x20]; + + u8 umem_3_buffer_param_b[0x20]; + + u8 reserved_at_1c0[0x640]; }; enum { @@ -1097,6 +1127,23 @@ struct mlx5_ifc_tls_cap_bits { u8 reserved_at_20[0x7e0]; }; +struct mlx5_ifc_ipsec_cap_bits { + u8 ipsec_full_offload[0x1]; + u8 ipsec_crypto_offload[0x1]; + u8 ipsec_esn[0x1]; + u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1]; + u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1]; + u8 reserved_at_7[0x4]; + u8 log_max_ipsec_offload[0x5]; + u8 reserved_at_10[0x10]; + + u8 min_log_ipsec_full_replay_window[0x8]; + u8 max_log_ipsec_full_replay_window[0x8]; + u8 reserved_at_30[0x7d0]; +}; + enum { MLX5_WQ_TYPE_LINKED_LIST = 0x0, MLX5_WQ_TYPE_CYCLIC = 0x1, @@ -1189,13 +1236,19 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_99[0x2]; u8 log_max_qp[0x5]; - u8 reserved_at_a0[0xb]; + u8 reserved_at_a0[0x3]; + u8 ece_support[0x1]; + u8 reserved_at_a4[0x7]; u8 log_max_srq[0x5]; u8 reserved_at_b0[0x10]; u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; - u8 reserved_at_d0[0xb]; + u8 relaxed_ordering_write_umr[0x1]; + u8 relaxed_ordering_read_umr[0x1]; + u8 reserved_at_d2[0x7]; + u8 virtio_net_device_emualtion_manager[0x1]; + u8 virtio_blk_device_emualtion_manager[0x1]; u8 log_max_cq[0x5]; u8 log_max_eq_sz[0x8]; @@ -1223,7 +1276,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_130[0xa]; u8 log_max_ra_res_dc[0x6]; - u8 reserved_at_140[0x9]; + u8 reserved_at_140[0x6]; + u8 release_all_pages[0x1]; + u8 reserved_at_147[0x2]; u8 roce_accl[0x1]; u8 log_max_ra_req_qp[0x6]; u8 reserved_at_150[0xa]; @@ -1296,7 +1351,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 wol_p[0x1]; u8 stat_rate_support[0x10]; - u8 reserved_at_1f0[0xc]; + u8 reserved_at_1f0[0x1]; + u8 pci_sync_for_fw_update_event[0x1]; + u8 reserved_at_1f2[0x6]; + u8 init2_lag_tx_port_affinity[0x1]; + u8 reserved_at_1fa[0x3]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; @@ -1365,7 +1424,10 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 bf[0x1]; u8 driver_version[0x1]; u8 pad_tx_eth_packet[0x1]; - u8 reserved_at_263[0x8]; + u8 reserved_at_263[0x3]; + u8 mkey_by_name[0x1]; + u8 reserved_at_267[0x4]; + u8 log_bf_reg_size[0x5]; u8 reserved_at_270[0x8]; @@ -1461,13 +1523,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_460[0x3]; u8 log_max_uctx[0x5]; - u8 reserved_at_468[0x3]; + u8 reserved_at_468[0x2]; + u8 ipsec_offload[0x1]; u8 log_max_umem[0x5]; u8 max_num_eqs[0x10]; u8 reserved_at_480[0x1]; u8 tls_tx[0x1]; - u8 reserved_at_482[0x1]; + u8 tls_rx[0x1]; u8 
log_max_l2_table[0x5]; u8 reserved_at_488[0x8]; u8 log_uar_page_sz[0x10]; @@ -1677,7 +1740,7 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_140[0x4c0]; - struct mlx5_ifc_cmd_pas_bits pas[0]; + struct mlx5_ifc_cmd_pas_bits pas[]; }; struct mlx5_ifc_rq_num_bits { @@ -1895,7 +1958,7 @@ struct mlx5_ifc_resource_dump_menu_segment_bits { u8 reserved_at_20[0x10]; u8 num_of_records[0x10]; - struct mlx5_ifc_resource_dump_menu_record_bits record[0]; + struct mlx5_ifc_resource_dump_menu_record_bits record[]; }; struct mlx5_ifc_resource_dump_resource_segment_bits { @@ -1907,7 +1970,7 @@ struct mlx5_ifc_resource_dump_resource_segment_bits { u8 index2[0x20]; - u8 payload[0][0x20]; + u8 payload[][0x20]; }; struct mlx5_ifc_resource_dump_terminate_segment_bits { @@ -2921,7 +2984,7 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_fpga_cap_bits fpga_cap; struct mlx5_ifc_tls_cap_bits tls_cap; struct mlx5_ifc_device_mem_cap_bits device_mem_cap; - struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap; + struct mlx5_ifc_virtio_emulation_cap_bits virtio_emulation_cap; u8 reserved_at_0[0x8000]; }; @@ -2937,6 +3000,8 @@ enum { MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, + MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000, + MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000, }; enum { @@ -2978,13 +3043,14 @@ struct mlx5_ifc_flow_context_bits { struct mlx5_ifc_vlan_bits push_vlan_2; - u8 reserved_at_120[0xe0]; + u8 ipsec_obj_id[0x20]; + u8 reserved_at_140[0xc0]; struct mlx5_ifc_fte_match_param_bits match_value; u8 reserved_at_1200[0x600]; - union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0]; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[]; }; enum { @@ -3112,7 +3178,8 @@ struct mlx5_ifc_tirc_bits { u8 reserved_at_0[0x20]; u8 disp_type[0x4]; - u8 reserved_at_24[0x1c]; + u8 tls_en[0x1]; + u8 reserved_at_25[0x1b]; u8 reserved_at_40[0x40]; @@ -3266,17 +3333,20 @@ struct mlx5_ifc_scheduling_context_bits { }; struct mlx5_ifc_rqtc_bits { - u8 reserved_at_0[0xa0]; + u8 reserved_at_0[0xa0]; - u8 reserved_at_a0[0x10]; - u8 rqt_max_size[0x10]; + u8 reserved_at_a0[0x5]; + u8 list_q_type[0x3]; + u8 reserved_at_a8[0x8]; + u8 rqt_max_size[0x10]; - u8 reserved_at_c0[0x10]; - u8 rqt_actual_size[0x10]; + u8 rq_vhca_id_format[0x1]; + u8 reserved_at_c1[0xf]; + u8 rqt_actual_size[0x10]; - u8 reserved_at_e0[0x6a0]; + u8 reserved_at_e0[0x6a0]; - struct mlx5_ifc_rq_num_bits rq_num[0]; + struct mlx5_ifc_rq_num_bits rq_num[]; }; enum { @@ -3388,7 +3458,7 @@ struct mlx5_ifc_nic_vport_context_bits { u8 reserved_at_7e0[0x20]; - u8 current_uc_mac_address[0][0x40]; + u8 current_uc_mac_address[][0x40]; }; enum { @@ -3661,7 +3731,8 @@ struct mlx5_ifc_dctc_bits { u8 ecn[0x2]; u8 dscp[0x6]; - u8 reserved_at_1c0[0x40]; + u8 reserved_at_1c0[0x20]; + u8 ece[0x20]; }; enum { @@ -4140,7 +4211,8 @@ enum { MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION = 0x0, MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG = 0x1, MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST = 0x2, - MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3 + MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS = 0x3, + MLX5_SET_FTE_MODIFY_ENABLE_MASK_IPSEC_OBJ_ID = 0x4 }; struct mlx5_ifc_set_fte_out_bits { @@ -4190,7 +4262,8 @@ struct mlx5_ifc_rts2rts_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_rts2rts_qp_in_bits { @@ -4207,7 +4280,7 @@ struct mlx5_ifc_rts2rts_qp_in_bits { u8 
opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4220,7 +4293,8 @@ struct mlx5_ifc_rtr2rts_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_rtr2rts_qp_in_bits { @@ -4237,7 +4311,7 @@ struct mlx5_ifc_rtr2rts_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4250,7 +4324,8 @@ struct mlx5_ifc_rst2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_rst2init_qp_in_bits { @@ -4267,7 +4342,7 @@ struct mlx5_ifc_rst2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -4310,7 +4385,7 @@ struct mlx5_ifc_query_xrc_srq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_xrc_srq_in_bits { @@ -4347,6 +4422,7 @@ struct mlx5_ifc_query_vport_state_out_bits { enum { MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, + MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2, }; struct mlx5_ifc_arm_monitor_counter_in_bits { @@ -4588,7 +4664,7 @@ struct mlx5_ifc_query_srq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_srq_in_bits { @@ -4789,7 +4865,8 @@ struct mlx5_ifc_query_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; u8 opt_param_mask[0x20]; @@ -4799,7 +4876,7 @@ struct mlx5_ifc_query_qp_out_bits { u8 reserved_at_800[0x80]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_qp_in_bits { @@ -5132,7 +5209,7 @@ struct mlx5_ifc_query_hca_vport_pkey_out_bits { u8 reserved_at_40[0x40]; - struct mlx5_ifc_pkey_bits pkey[0]; + struct mlx5_ifc_pkey_bits pkey[]; }; struct mlx5_ifc_query_hca_vport_pkey_in_bits { @@ -5168,7 +5245,7 @@ struct mlx5_ifc_query_hca_vport_gid_out_bits { u8 gids_num[0x10]; u8 reserved_at_70[0x10]; - struct mlx5_ifc_array128_auto_bits gid[0]; + struct mlx5_ifc_array128_auto_bits gid[]; }; struct mlx5_ifc_query_hca_vport_gid_in_bits { @@ -5436,7 +5513,7 @@ struct mlx5_ifc_query_flow_counter_out_bits { u8 reserved_at_40[0x40]; - struct mlx5_ifc_traffic_counter_bits flow_statistics[0]; + struct mlx5_ifc_traffic_counter_bits flow_statistics[]; }; struct mlx5_ifc_query_flow_counter_in_bits { @@ -5530,7 +5607,7 @@ struct mlx5_ifc_query_eq_out_bits { u8 reserved_at_300[0x580]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_eq_in_bits { @@ -5555,7 +5632,7 @@ struct mlx5_ifc_packet_reformat_context_in_bits { u8 reserved_at_20[0x10]; u8 reformat_data[2][0x8]; - u8 more_reformat_data[0][0x8]; + u8 more_reformat_data[][0x8]; }; struct mlx5_ifc_query_packet_reformat_context_out_bits { @@ -5566,7 +5643,7 @@ struct mlx5_ifc_query_packet_reformat_context_out_bits { u8 reserved_at_40[0xa0]; - struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[0]; + struct mlx5_ifc_packet_reformat_context_in_bits packet_reformat_context[]; }; struct mlx5_ifc_query_packet_reformat_context_in_bits { @@ -5667,9 +5744,9 @@ struct mlx5_ifc_copy_action_in_bits { u8 reserved_at_38[0x8]; }; -union mlx5_ifc_set_action_in_add_action_in_auto_bits { - struct mlx5_ifc_set_action_in_bits set_action_in; - struct mlx5_ifc_add_action_in_bits add_action_in; +union mlx5_ifc_set_add_copy_action_in_auto_bits { + struct mlx5_ifc_set_action_in_bits set_action_in; + struct mlx5_ifc_add_action_in_bits add_action_in; 
struct mlx5_ifc_copy_action_in_bits copy_action_in; u8 reserved_at_0[0x40]; }; @@ -5717,6 +5794,7 @@ enum { MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58, MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59, MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B, + MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME = 0x5D, }; struct mlx5_ifc_alloc_modify_header_context_out_bits { @@ -5743,7 +5821,7 @@ struct mlx5_ifc_alloc_modify_header_context_in_bits { u8 reserved_at_68[0x10]; u8 num_of_actions[0x8]; - union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0]; + union mlx5_ifc_set_add_copy_action_in_auto_bits actions[0]; }; struct mlx5_ifc_dealloc_modify_header_context_out_bits { @@ -5805,7 +5883,7 @@ struct mlx5_ifc_query_cq_out_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_query_cq_in_bits { @@ -6412,7 +6490,7 @@ struct mlx5_ifc_modify_cq_in_bits { u8 reserved_at_300[0x580]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_modify_cong_status_out_bits { @@ -6476,7 +6554,7 @@ struct mlx5_ifc_manage_pages_out_bits { u8 reserved_at_60[0x20]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; enum { @@ -6498,7 +6576,7 @@ struct mlx5_ifc_manage_pages_in_bits { u8 input_num_entries[0x20]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_mad_ifc_out_bits { @@ -6554,7 +6632,8 @@ struct mlx5_ifc_init2rtr_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_init2rtr_qp_in_bits { @@ -6571,7 +6650,7 @@ struct mlx5_ifc_init2rtr_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -6584,7 +6663,8 @@ struct mlx5_ifc_init2init_qp_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_init2init_qp_in_bits { @@ -6601,7 +6681,7 @@ struct mlx5_ifc_init2init_qp_in_bits { u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -7046,7 +7126,7 @@ struct mlx5_ifc_destroy_mkey_out_bits { struct mlx5_ifc_destroy_mkey_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7453,7 +7533,7 @@ struct mlx5_ifc_create_xrc_srq_in_bits { u8 reserved_at_300[0x580]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_tis_out_bits { @@ -7529,7 +7609,7 @@ struct mlx5_ifc_create_srq_in_bits { u8 reserved_at_280[0x600]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_sq_out_bits { @@ -7667,7 +7747,7 @@ struct mlx5_ifc_create_qp_out_bits { u8 reserved_at_40[0x8]; u8 qpn[0x18]; - u8 reserved_at_60[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_create_qp_in_bits { @@ -7677,11 +7757,13 @@ struct mlx5_ifc_create_qp_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x40]; + u8 reserved_at_40[0x8]; + u8 input_qpn[0x18]; + u8 reserved_at_60[0x20]; u8 opt_param_mask[0x20]; - u8 reserved_at_a0[0x20]; + u8 ece[0x20]; struct mlx5_ifc_qpc_bits qpc; @@ -7690,7 +7772,7 @@ struct mlx5_ifc_create_qp_in_bits { u8 wq_umem_valid[0x1]; u8 reserved_at_861[0x1f]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_psv_out_bits { @@ -7742,7 +7824,7 @@ struct mlx5_ifc_create_mkey_out_bits { struct mlx5_ifc_create_mkey_in_bits { u8 opcode[0x10]; - u8 reserved_at_10[0x10]; + u8 uid[0x10]; u8 reserved_at_20[0x10]; u8 op_mod[0x10]; @@ -7761,7 +7843,7 @@ struct mlx5_ifc_create_mkey_in_bits { u8 reserved_at_320[0x560]; - u8 klm_pas_mtt[0][0x20]; + u8 klm_pas_mtt[][0x20]; }; enum { @@ -7894,7 
+7976,7 @@ struct mlx5_ifc_create_eq_in_bits { u8 reserved_at_3c0[0x4c0]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_create_dct_out_bits { @@ -7906,7 +7988,7 @@ struct mlx5_ifc_create_dct_out_bits { u8 reserved_at_40[0x8]; u8 dctn[0x18]; - u8 reserved_at_60[0x20]; + u8 ece[0x20]; }; struct mlx5_ifc_create_dct_in_bits { @@ -7951,7 +8033,7 @@ struct mlx5_ifc_create_cq_in_bits { u8 cq_umem_valid[0x1]; u8 reserved_at_2e1[0x59f]; - u8 pas[0][0x40]; + u8 pas[][0x40]; }; struct mlx5_ifc_config_int_moderation_out_bits { @@ -8307,7 +8389,7 @@ struct mlx5_ifc_access_register_out_bits { u8 reserved_at_40[0x40]; - u8 register_data[0][0x20]; + u8 register_data[][0x20]; }; enum { @@ -8327,7 +8409,7 @@ struct mlx5_ifc_access_register_in_bits { u8 argument[0x20]; - u8 register_data[0][0x20]; + u8 register_data[][0x20]; }; struct mlx5_ifc_sltp_reg_bits { @@ -9344,7 +9426,7 @@ struct mlx5_ifc_cmd_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 command[0][0x20]; + u8 command[][0x20]; }; struct mlx5_ifc_cmd_if_box_bits { @@ -9638,7 +9720,7 @@ struct mlx5_ifc_mcqi_reg_bits { u8 reserved_at_a0[0x10]; u8 data_size[0x10]; - union mlx5_ifc_mcqi_reg_data_bits data[0]; + union mlx5_ifc_mcqi_reg_data_bits data[]; }; struct mlx5_ifc_mcc_reg_bits { @@ -9680,6 +9762,29 @@ struct mlx5_ifc_mcda_reg_bits { u8 data[0][0x20]; }; +enum { + MLX5_MFRL_REG_RESET_TYPE_FULL_CHIP = BIT(0), + MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE = BIT(1), +}; + +enum { + MLX5_MFRL_REG_RESET_LEVEL0 = BIT(0), + MLX5_MFRL_REG_RESET_LEVEL3 = BIT(3), + MLX5_MFRL_REG_RESET_LEVEL6 = BIT(6), +}; + +struct mlx5_ifc_mfrl_reg_bits { + u8 reserved_at_0[0x20]; + + u8 reserved_at_20[0x2]; + u8 pci_sync_for_fw_update_start[0x1]; + u8 pci_sync_for_fw_update_resp[0x2]; + u8 rst_type_sel[0x3]; + u8 reserved_at_28[0x8]; + u8 reset_type[0x8]; + u8 reset_level[0x8]; +}; + struct mlx5_ifc_mirc_reg_bits { u8 reserved_at_0[0x18]; u8 status_code[0x8]; @@ -9743,6 +9848,7 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mcc_reg_bits mcc_reg; struct mlx5_ifc_mcda_reg_bits mcda_reg; struct mlx5_ifc_mirc_reg_bits mirc_reg; + struct mlx5_ifc_mfrl_reg_bits mfrl_reg; u8 reserved_at_0[0x60e0]; }; @@ -9899,6 +10005,34 @@ struct mlx5_ifc_pptb_reg_bits { u8 untagged_buff[0x4]; }; +struct mlx5_ifc_sbcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + + u8 reserved_at_20[0x20]; + + u8 sb_access_reg_cap_mask[4][0x20]; + + u8 reserved_at_c0[0x80]; + + u8 sb_feature_cap_mask[4][0x20]; + + u8 reserved_at_1c0[0x40]; + + u8 cap_total_buffer_size[0x20]; + + u8 cap_cell_size[0x10]; + u8 cap_max_pg_buffers[0x8]; + u8 cap_num_pool_supported[0x8]; + + u8 reserved_at_240[0x8]; + u8 cap_sbsr_stat_size[0x8]; + u8 cap_max_tclass_data[0x8]; + u8 cap_max_cpu_ingress_tclass_sb[0x8]; +}; + struct mlx5_ifc_pbmc_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; @@ -10200,7 +10334,7 @@ struct mlx5_ifc_umem_bits { u8 num_of_mtt[0x40]; - struct mlx5_ifc_mtt_bits mtt[0]; + struct mlx5_ifc_mtt_bits mtt[]; }; struct mlx5_ifc_uctx_bits { @@ -10248,6 +10382,40 @@ struct mlx5_ifc_create_umem_in_bits { struct mlx5_ifc_umem_bits umem; }; +struct mlx5_ifc_create_umem_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x8]; + u8 umem_id[0x18]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_umem_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x8]; + u8 umem_id[0x18]; + + u8 
reserved_at_60[0x20]; +}; + +struct mlx5_ifc_destroy_umem_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_create_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -10260,6 +10428,18 @@ struct mlx5_ifc_create_uctx_in_bits { struct mlx5_ifc_uctx_bits uctx; }; +struct mlx5_ifc_create_uctx_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 uid[0x10]; + + u8 reserved_at_60[0x20]; +}; + struct mlx5_ifc_destroy_uctx_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -10273,6 +10453,15 @@ struct mlx5_ifc_destroy_uctx_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_destroy_uctx_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_create_sw_icm_in_bits { struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; struct mlx5_ifc_sw_icm_bits sw_icm; @@ -10325,7 +10514,7 @@ struct mlx5_ifc_mtrc_stdb_bits { u8 reserved_at_4[0x4]; u8 read_size[0x18]; u8 start_offset[0x20]; - u8 string_db_data[0]; + u8 string_db_data[]; }; struct mlx5_ifc_mtrc_ctrl_bits { @@ -10379,7 +10568,7 @@ struct mlx5_ifc_query_esw_functions_out_bits { struct mlx5_ifc_host_params_context_bits host_params_context; u8 reserved_at_280[0x180]; - u8 host_sf_enable[0][0x40]; + u8 host_sf_enable[][0x40]; }; struct mlx5_ifc_sf_partition_bits { @@ -10399,7 +10588,7 @@ struct mlx5_ifc_query_sf_partitions_out_bits { u8 reserved_at_60[0x20]; - struct mlx5_ifc_sf_partition_bits sf_partition[0]; + struct mlx5_ifc_sf_partition_bits sf_partition[]; }; struct mlx5_ifc_query_sf_partitions_in_bits { @@ -10465,10 +10654,62 @@ struct mlx5_ifc_affiliated_event_header_bits { enum { MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = BIT(0xc), + MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = BIT(0x13), }; enum { MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc, + MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13, +}; + +enum { + MLX5_IPSEC_OBJECT_ICV_LEN_16B, + MLX5_IPSEC_OBJECT_ICV_LEN_12B, + MLX5_IPSEC_OBJECT_ICV_LEN_8B, +}; + +struct mlx5_ifc_ipsec_obj_bits { + u8 modify_field_select[0x40]; + u8 full_offload[0x1]; + u8 reserved_at_41[0x1]; + u8 esn_en[0x1]; + u8 esn_overlap[0x1]; + u8 reserved_at_44[0x2]; + u8 icv_length[0x2]; + u8 reserved_at_48[0x4]; + u8 aso_return_reg[0x4]; + u8 reserved_at_50[0x10]; + + u8 esn_msb[0x20]; + + u8 reserved_at_80[0x8]; + u8 dekn[0x18]; + + u8 salt[0x20]; + + u8 implicit_iv[0x40]; + + u8 reserved_at_100[0x700]; +}; + +struct mlx5_ifc_create_ipsec_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; +}; + +enum { + MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = BIT(0), + MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = BIT(1), +}; + +struct mlx5_ifc_query_ipsec_obj_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; +}; + +struct mlx5_ifc_modify_ipsec_obj_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + struct mlx5_ifc_ipsec_obj_bits ipsec_object; }; struct mlx5_ifc_encryption_key_obj_bits { @@ -10527,17 +10768,20 @@ struct mlx5_ifc_tls_static_params_bits { }; struct mlx5_ifc_tls_progress_params_bits { - u8 reserved_at_0[0x8]; - u8 tisn[0x18]; - u8 next_record_tcp_sn[0x20]; u8 hw_resync_tcp_sn[0x20]; u8 record_tracker_state[0x2]; u8 auth_state[0x2]; - u8 reserved_at_64[0x4]; + u8 reserved_at_44[0x4]; u8 hw_offset_record_number[0x18]; }; +enum { + 
MLX5_MTT_PERM_READ = 1 << 0, + MLX5_MTT_PERM_WRITE = 1 << 1, + MLX5_MTT_PERM_RW = MLX5_MTT_PERM_READ | MLX5_MTT_PERM_WRITE, +}; + #endif /* MLX5_IFC_H */ diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index de9a272c9f3d..2d45a6af52a4 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -104,8 +104,11 @@ enum mlx5e_ext_link_mode { MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8, MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9, MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10, + MLX5E_100GAUI_1_100GBASE_CR_KR = 11, MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, + MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13, MLX5E_400GAUI_8 = 15, + MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16, MLX5E_EXT_LINK_MODES_NUMBER, }; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index ae63b1ae9004..36492a1342cf 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -66,6 +66,7 @@ enum mlx5_qp_optpar { MLX5_QP_OPTPAR_RETRY_COUNT = 1 << 12, MLX5_QP_OPTPAR_RNR_RETRY = 1 << 13, MLX5_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MLX5_QP_OPTPAR_LAG_TX_AFF = 1 << 15, MLX5_QP_OPTPAR_PRI_PORT = 1 << 16, MLX5_QP_OPTPAR_SRQN = 1 << 18, MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, @@ -208,7 +209,7 @@ struct mlx5_wqe_ctrl_seg { __be32 general_id; __be32 imm; __be32 umr_mkey; - __be32 tisn; + __be32 tis_tir_num; }; }; @@ -229,6 +230,11 @@ enum { enum { MLX5_ETH_WQE_SVLAN = 1 << 0, + MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC = 1 << 26, + MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC = 1 << 27, + MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC = 3 << 26, + MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC = 1 << 28, + MLX5_ETH_WQE_INSERT_TRAILER = 1 << 30, MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, }; @@ -257,6 +263,7 @@ struct mlx5_wqe_eth_seg { __be16 type; __be16 vlan_tci; } insert; + __be32 trailer; }; }; @@ -315,6 +322,7 @@ struct mlx5_av { struct mlx5_ib_ah { struct ib_ah ibah; struct mlx5_av av; + u8 xmit_port; }; static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah) @@ -402,7 +410,7 @@ struct mlx5_wqe_signature_seg { struct mlx5_wqe_inline_seg { __be32 byte_count; - __be32 data[0]; + __be32 data[]; }; enum mlx5_sig_type { @@ -487,123 +495,8 @@ struct mlx5_core_dct { struct completion drained; }; -struct mlx5_qp_path { - u8 fl_free_ar; - u8 rsvd3; - __be16 pkey_index; - u8 rsvd0; - u8 grh_mlid; - __be16 rlid; - u8 ackto_lt; - u8 mgid_index; - u8 static_rate; - u8 hop_limit; - __be32 tclass_flowlabel; - union { - u8 rgid[16]; - u8 rip[16]; - }; - u8 f_dscp_ecn_prio; - u8 ecn_dscp; - __be16 udp_sport; - u8 dci_cfi_prio_sl; - u8 port; - u8 rmac[6]; -}; - -/* FIXME: use mlx5_ifc.h qpc */ -struct mlx5_qp_context { - __be32 flags; - __be32 flags_pd; - u8 mtu_msgmax; - u8 rq_size_stride; - __be16 sq_crq_size; - __be32 qp_counter_set_usr_page; - __be32 wire_qpn; - __be32 log_pg_sz_remote_qpn; - struct mlx5_qp_path pri_path; - struct mlx5_qp_path alt_path; - __be32 params1; - u8 reserved2[4]; - __be32 next_send_psn; - __be32 cqn_send; - __be32 deth_sqpn; - u8 reserved3[4]; - __be32 last_acked_psn; - __be32 ssn; - __be32 params2; - __be32 rnr_nextrecvpsn; - __be32 xrcd; - __be32 cqn_recv; - __be64 db_rec_addr; - __be32 qkey; - __be32 rq_type_srqn; - __be32 rmsn; - __be16 hw_sq_wqe_counter; - __be16 sw_sq_wqe_counter; - __be16 hw_rcyclic_byte_counter; - __be16 hw_rq_counter; - __be16 sw_rcyclic_byte_counter; - __be16 sw_rq_counter; - u8 rsvd0[5]; - u8 cgs; - u8 cs_req; - u8 cs_res; - __be64 dc_access_key; - u8 rsvd1[24]; -}; - -static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) -{ - return 
radix_tree_lookup(&dev->priv.qp_table.tree, qpn); -} - -int mlx5_core_create_dct(struct mlx5_core_dev *dev, - struct mlx5_core_dct *qp, - u32 *in, int inlen, - u32 *out, int outlen); -int mlx5_core_create_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp, - u32 *in, - int inlen); -int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, - u32 opt_param_mask, void *qpc, - struct mlx5_core_qp *qp); -int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, - struct mlx5_core_qp *qp); -int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, - struct mlx5_core_dct *dct); -int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, - u32 *out, int outlen); -int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, - u32 *out, int outlen); - -int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, - u32 timeout_usec); - -int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn); -int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn); -void mlx5_init_qp_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); -int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, - struct mlx5_core_qp *rq); -void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev, - struct mlx5_core_qp *rq); -int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen, - struct mlx5_core_qp *sq); -void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, - struct mlx5_core_qp *sq); -int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id); -int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); -int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, - int reset, void *out, int out_size); - -struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, - int res_num, - enum mlx5_res_type res_type); -void mlx5_core_res_put(struct mlx5_core_rsc_common *res); static inline const char *mlx5_qp_type_str(int type) { diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h new file mode 100644 index 000000000000..d11c0b228620 --- /dev/null +++ b/include/linux/mlx5/rsc_dump.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies inc. 
*/ + +#include <linux/mlx5/driver.h> + +#ifndef __MLX5_RSC_DUMP +#define __MLX5_RSC_DUMP + +enum mlx5_sgmt_type { + MLX5_SGMT_TYPE_HW_CQPC, + MLX5_SGMT_TYPE_HW_SQPC, + MLX5_SGMT_TYPE_HW_RQPC, + MLX5_SGMT_TYPE_FULL_SRQC, + MLX5_SGMT_TYPE_FULL_CQC, + MLX5_SGMT_TYPE_FULL_EQC, + MLX5_SGMT_TYPE_FULL_QPC, + MLX5_SGMT_TYPE_SND_BUFF, + MLX5_SGMT_TYPE_RCV_BUFF, + MLX5_SGMT_TYPE_SRQ_BUFF, + MLX5_SGMT_TYPE_CQ_BUFF, + MLX5_SGMT_TYPE_EQ_BUFF, + MLX5_SGMT_TYPE_SX_SLICE, + MLX5_SGMT_TYPE_SX_SLICE_ALL, + MLX5_SGMT_TYPE_RDB, + MLX5_SGMT_TYPE_RX_SLICE_ALL, + MLX5_SGMT_TYPE_PRM_QUERY_QP, + MLX5_SGMT_TYPE_PRM_QUERY_CQ, + MLX5_SGMT_TYPE_PRM_QUERY_MKEY, + MLX5_SGMT_TYPE_MENU, + MLX5_SGMT_TYPE_TERMINATE, + + MLX5_SGMT_TYPE_NUM, /* Keep last */ +}; + +struct mlx5_rsc_key { + enum mlx5_sgmt_type rsc; + int index1; + int index2; + int num_of_obj1; + int num_of_obj2; + int size; +}; + +struct mlx5_rsc_dump_cmd; + +struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, + struct mlx5_rsc_key *key); +void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd); +int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, + struct page *page, int *size); +#endif /* __MLX5_RSC_DUMP */ diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index dc6b1e7cb8c4..028f442530cf 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -39,27 +39,20 @@ int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn); void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn); int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn); -int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen); +int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in); void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out); int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn); -int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen); +int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in); void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn); int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out); int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state); -int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tirn); -int mlx5_core_create_tir_out(struct mlx5_core_dev *dev, - u32 *in, int inlen, - u32 *out, int outlen); -int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in, - int inlen); +int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, u32 *tirn); +int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in); void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn); -int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *tisn); -int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, - int inlen); +int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, u32 *tisn); +int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 16060fb9b5e5..4db87bcfce7b 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -75,7 +75,7 @@ void 
mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline); int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev, u16 vport, u8 min_inline); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, - u16 vport, u8 *addr); + u16 vport, const u8 *addr); int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, @@ -127,8 +127,7 @@ int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, u8 other_vport, u64 *rx_discard_vport_down, u64 *tx_discard_vport_down); int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, - int vf, u8 port_num, void *out, - size_t out_sz); + int vf, u8 port_num, void *out); int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, u8 other_vport, u8 port_num, int vf, diff --git a/include/linux/mm.h b/include/linux/mm.h index f3fe7371855c..16b799a0522c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -15,6 +15,7 @@ #include <linux/atomic.h> #include <linux/debug_locks.h> #include <linux/mm_types.h> +#include <linux/mmap_lock.h> #include <linux/range.h> #include <linux/pfn.h> #include <linux/percpu-refcount.h> @@ -23,11 +24,13 @@ #include <linux/resource.h> #include <linux/page_ext.h> #include <linux/err.h> +#include <linux/page-flags.h> #include <linux/page_ref.h> #include <linux/memremap.h> #include <linux/overflow.h> #include <linux/sizes.h> #include <linux/sched.h> +#include <linux/pgtable.h> struct mempolicy; struct anon_vma; @@ -36,6 +39,9 @@ struct file_ra_state; struct user_struct; struct writeback_control; struct bdi_writeback; +struct pt_regs; + +extern int sysctl_page_lock_unfairness; void init_mm_internals(void); @@ -92,7 +98,6 @@ extern int mmap_rnd_compat_bits __read_mostly; #endif #include <asm/page.h> -#include <asm/pgtable.h> #include <asm/processor.h> /* @@ -154,11 +159,14 @@ static inline void __mm_zero_struct_page(struct page *page) switch (sizeof(struct page)) { case 80: - _pp[9] = 0; /* fallthrough */ + _pp[9] = 0; + fallthrough; case 72: - _pp[8] = 0; /* fallthrough */ + _pp[8] = 0; + fallthrough; case 64: - _pp[7] = 0; /* fallthrough */ + _pp[7] = 0; + fallthrough; case 56: _pp[6] = 0; _pp[5] = 0; @@ -201,10 +209,12 @@ extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern unsigned long sysctl_overcommit_kbytes; -extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *, - size_t *, loff_t *); -extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *, - size_t *, loff_t *); +int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int overcommit_policy_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) @@ -325,17 +335,13 @@ extern unsigned int kobjsize(const void *objp); #elif defined(CONFIG_SPARC64) # define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */ # define VM_ARCH_CLEAR VM_SPARC_ADI +#elif defined(CONFIG_ARM64) +# define VM_ARM64_BTI VM_ARCH_1 /* BTI guarded page, a.k.a. 
GP bit */ +# define VM_ARCH_CLEAR VM_ARM64_BTI #elif !defined(CONFIG_MMU) # define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ #endif -#if defined(CONFIG_X86_INTEL_MPX) -/* MPX specific bounds table or bounds directory */ -# define VM_MPX VM_HIGH_ARCH_4 -#else -# define VM_MPX VM_NONE -#endif - #ifndef VM_GROWSUP # define VM_GROWSUP VM_NONE #endif @@ -405,7 +411,7 @@ extern pgprot_t protection_map[16]; * @FAULT_FLAG_WRITE: Fault was a write fault. * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE. * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked. - * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_sem and wait when retrying. + * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying. * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region. * @FAULT_FLAG_TRIED: The fault has been tried once. * @FAULT_FLAG_USER: The fault originated in userspace. @@ -455,10 +461,10 @@ extern pgprot_t protection_map[16]; * fault_flag_allow_retry_first - check ALLOW_RETRY the first time * * This is mostly used for places where we want to try to avoid taking - * the mmap_sem for too long a time when waiting for another condition + * the mmap_lock for too long a time when waiting for another condition * to change, in which case we can try to be polite to release the - * mmap_sem in the first round to avoid potential starvation of other - * processes that would also want the mmap_sem. + * mmap_lock in the first round to avoid potential starvation of other + * processes that would also want the mmap_lock. * * Return: true if the page fault allows retry and this is the first * attempt of the fault handling; false otherwise. @@ -482,7 +488,7 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags) { FAULT_FLAG_INTERRUPTIBLE, "INTERRUPTIBLE" } /* - * vm_fault is filled by the the pagefault handler and passed to the vma's + * vm_fault is filled by the pagefault handler and passed to the vma's * ->fault function. The vma's ->fault is responsible for returning a bitmask * of VM_FAULT_xxx flags that give details about how the fault was handled. * @@ -505,7 +511,6 @@ struct vm_fault { pte_t orig_pte; /* Value of PTE at the time of fault */ struct page *cow_page; /* Page handler may use for COW fault */ - struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */ struct page *page; /* ->fault handlers should return a * page here, unless VM_FAULT_NOPAGE * is set (which is also implied by @@ -586,7 +591,7 @@ struct vm_operations_struct { * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure * in mm/mempolicy.c will do this automatically. * get_policy() must NOT add a ref if the policy at (vma,addr) is not - * marked as MPOL_SHARED. vma policies are protected by the mmap_sem. + * marked as MPOL_SHARED. vma policies are protected by the mmap_lock. * If no [shared/vma] mempolicy exists at the addr, get_policy() op * must return NULL--i.e., do not "fallback" to task or system default * policy. 
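The mm.h hunks above fold in the mmap_sem → mmap_lock rename reflected in these comment fixups (note also the new <linux/mmap_lock.h> include added to mm.h earlier in this diff). A minimal sketch of the wrapper API that header provides — count_vmas() is a hypothetical helper, but mmap_read_lock()/mmap_read_unlock() are the real replacements for open-coded down_read(&mm->mmap_sem)/up_read(&mm->mmap_sem):

```c
#include <linux/mm.h>
#include <linux/mmap_lock.h>    /* pulled in by mm.h as of this merge */

/* Hypothetical helper: walk a task's VMAs under the renamed lock. */
static unsigned long count_vmas(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long n = 0;

        mmap_read_lock(mm);     /* was: down_read(&mm->mmap_sem) */
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                n++;
        mmap_read_unlock(mm);   /* was: up_read(&mm->mmap_sem) */

        return n;
}
```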
@@ -671,11 +676,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma); struct mmu_gather; struct inode; -/* - * FIXME: take this include out, include page-flags.h in - * files which need it (119 of them) - */ -#include <linux/page-flags.h> #include <linux/huge_mm.h> /* @@ -781,6 +781,12 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) } extern void kvfree(const void *addr); +extern void kvfree_sensitive(const void *addr, size_t len); + +static inline int head_mapcount(struct page *head) +{ + return atomic_read(compound_mapcount_ptr(head)) + 1; +} /* * Mapcount of compound page as a whole, does not include mapped sub-pages. @@ -791,7 +797,7 @@ static inline int compound_mapcount(struct page *page) { VM_BUG_ON_PAGE(!PageCompound(page), page); page = compound_head(page); - return atomic_read(compound_mapcount_ptr(page)) + 1; + return head_mapcount(page); } /* @@ -871,7 +877,7 @@ enum compound_dtor_id { #endif NR_COMPOUND_DTORS, }; -extern compound_page_dtor * const compound_page_dtors[]; +extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; static inline void set_compound_page_dtor(struct page *page, enum compound_dtor_id compound_dtor) @@ -880,10 +886,10 @@ static inline void set_compound_page_dtor(struct page *page, page[1].compound_dtor = compound_dtor; } -static inline compound_page_dtor *get_compound_page_dtor(struct page *page) +static inline void destroy_compound_page(struct page *page) { VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page); - return compound_page_dtors[page[1].compound_dtor]; + compound_page_dtors[page[1].compound_dtor](page); } static inline unsigned int compound_order(struct page *page) @@ -904,22 +910,30 @@ static inline bool hpage_pincount_available(struct page *page) return PageCompound(page) && compound_order(page) > 1; } +static inline int head_pincount(struct page *head) +{ + return atomic_read(compound_pincount_ptr(head)); +} + static inline int compound_pincount(struct page *page) { VM_BUG_ON_PAGE(!hpage_pincount_available(page), page); page = compound_head(page); - return atomic_read(compound_pincount_ptr(page)); + return head_pincount(page); } static inline void set_compound_order(struct page *page, unsigned int order) { page[1].compound_order = order; + page[1].compound_nr = 1U << order; } /* Returns the number of pages in this potentially compound page. */ static inline unsigned long compound_nr(struct page *page) { - return 1UL << compound_order(page); + if (!PageHead(page)) + return 1; + return page[1].compound_nr; } /* Returns the number of bytes in this potentially compound page. */ @@ -950,8 +964,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) return pte; } -vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg, - struct page *page); +vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page); vm_fault_t finish_fault(struct vm_fault *vmf); vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); #endif @@ -1061,6 +1074,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); static inline enum zone_type page_zonenum(const struct page *page) { + ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } @@ -1230,7 +1244,7 @@ void unpin_user_pages(struct page **pages, unsigned long npages); * used to track the pincount (instead using of the GUP_PIN_COUNTING_BIAS * scheme). * - * For more information, please see Documentation/vm/pin_user_pages.rst. 
+ * For more information, please see Documentation/core-api/pin_user_pages.rst. * * @page: pointer to page to be queried. * @Return: True, if it is likely that the page has been "dma-pinned". @@ -1378,7 +1392,7 @@ static inline int cpu_pid_to_cpupid(int nid, int pid) static inline bool cpupid_pid_unset(int cpupid) { - return 1; + return true; } static inline void page_cpupid_reset_last(struct page *page) @@ -1588,6 +1602,7 @@ static inline void clear_page_pfmemalloc(struct page *page) extern void pagefault_out_of_memory(void); #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) +#define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) /* * Flags passed to show_mem() and show_free_areas() to suppress output in @@ -1631,7 +1646,7 @@ struct mmu_notifier_range; void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, - struct vm_area_struct *vma); + struct vm_area_struct *vma, struct vm_area_struct *new); int follow_pte_pmd(struct mm_struct *mm, unsigned long address, struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); @@ -1652,8 +1667,9 @@ int invalidate_inode_page(struct page *page); #ifdef CONFIG_MMU extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags); -extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, + unsigned long address, unsigned int flags, + struct pt_regs *regs); +extern int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked); void unmap_mapping_pages(struct address_space *mapping, @@ -1662,14 +1678,14 @@ void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); #else static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, - unsigned long address, unsigned int flags) + unsigned long address, unsigned int flags, + struct pt_regs *regs) { /* should never happen if there's no MMU */ BUG(); return VM_FAULT_SIGBUS; } -static inline int fixup_user_fault(struct task_struct *tsk, - struct mm_struct *mm, unsigned long address, +static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { /* should never happen if there's no MMU */ @@ -1695,11 +1711,11 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); -long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); -long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, +long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); @@ -1711,8 +1727,12 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, struct vm_area_struct **vmas); long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); +long pin_user_pages_locked(unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, int 
*locked); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); +long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, + struct page **pages, unsigned int gup_flags); int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); @@ -1729,7 +1749,7 @@ struct frame_vector { unsigned int nr_frames; /* Number of frames stored in ptrs array */ bool got_ref; /* Did we pin pages by getting page ref? */ bool is_pfns; /* Does array contain pages or pfns? */ - void *ptrs[0]; /* Array of pinned pfns / pages. Use + void *ptrs[]; /* Array of pinned pfns / pages. Use * pfns_vector_pages() or pfns_vector_pfns() * for access */ }; @@ -1827,8 +1847,16 @@ extern int mprotect_fixup(struct vm_area_struct *vma, /* * doesn't attempt to fault and will return short. */ -int __get_user_pages_fast(unsigned long start, int nr_pages, int write, - struct page **pages); +int get_user_pages_fast_only(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); +int pin_user_pages_fast_only(unsigned long start, int nr_pages, + unsigned int gup_flags, struct page **pages); + +static inline bool get_user_page_fast_only(unsigned long addr, + unsigned int gup_flags, struct page **pagep) +{ + return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1; +} /* * per-process(per-mm_struct) statistics. */ @@ -2071,11 +2099,6 @@ int __pte_alloc_kernel(pmd_t *pmd); #if defined(CONFIG_MMU) -/* - * The following ifdef needed to get the 5level-fixup.h header to work. - * Remove it when 5level-fixup.h has been removed. - */ -#ifndef __ARCH_HAS_5LEVEL_HACK static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) { @@ -2089,7 +2112,6 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d, return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ? NULL : pud_offset(p4d, address); } -#endif /* !__ARCH_HAS_5LEVEL_HACK */ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) { @@ -2283,9 +2305,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) } extern void __init pagecache_init(void); -extern void free_area_init(unsigned long * zones_size); -extern void __init free_area_init_node(int nid, unsigned long * zones_size, - unsigned long zone_start_pfn, unsigned long *zholes_size); +extern void __init free_area_init_memoryless_node(int nid); extern void free_initmem(void); /* @@ -2355,34 +2375,23 @@ static inline unsigned long get_num_physpages(void) return phys_pages; } -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP /* - * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its - * zones, allocate the backing mem_map and account for memory holes in a more - * architecture independent manner. This is a substitute for creating the - * zone_sizes[] and zholes_size[] arrays and passing them to - * free_area_init_node() + * Using memblock node mappings, an architecture may initialise its + * zones, allocate the backing mem_map and account for memory holes in an + * architecture independent manner. * * An architecture is expected to register range of page frames backed by * physical memory with memblock_add[_node]() before calling - * free_area_init_nodes() passing in the PFN each zone ends at. At a basic + * free_area_init() passing in the PFN each zone ends at. 
At a basic * usage, an architecture is expected to do something like * * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, * max_highmem_pfn}; * for_each_valid_physical_page_range() * memblock_add_node(base, size, nid) - * free_area_init_nodes(max_zone_pfns); - * - * free_bootmem_with_active_regions() calls free_bootmem_node() for each - * registered physical page range. Similarly - * sparse_memory_present_with_active_regions() calls memory_present() for - * each range when SPARSEMEM is enabled. - * - * See mm/page_alloc.c for more information on each function exposed by - * CONFIG_HAVE_MEMBLOCK_NODE_MAP. + * free_area_init(max_zone_pfns); */ -extern void free_area_init_nodes(unsigned long *max_zone_pfn); +void free_area_init(unsigned long *max_zone_pfn); unsigned long node_map_pfn_alignment(void); unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, unsigned long end_pfn); @@ -2391,16 +2400,9 @@ extern unsigned long absent_pages_in_range(unsigned long start_pfn, extern void get_pfn_range_for_nid(unsigned int nid, unsigned long *start_pfn, unsigned long *end_pfn); extern unsigned long find_min_pfn_with_active_regions(void); -extern void free_bootmem_with_active_regions(int nid, - unsigned long max_low_pfn); -extern void sparse_memory_present_with_active_regions(int nid); -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ - -#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ - !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) -static inline int __early_pfn_to_nid(unsigned long pfn, - struct mminit_pfnnid_cache *state) +#ifndef CONFIG_NEED_MULTIPLE_NODES +static inline int early_pfn_to_nid(unsigned long pfn) { return 0; } @@ -2414,7 +2416,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn, extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum memmap_context, struct vmem_altmap *); + enum meminit_context, struct vmem_altmap *); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); @@ -2436,6 +2438,7 @@ extern void setup_per_cpu_pageset(void); extern int min_free_kbytes; extern int watermark_boost_factor; extern int watermark_scale_factor; +extern bool arch_has_descending_max_zone_pfns(void); /* nommu.c */ extern atomic_long_t mmap_pages_allocated; @@ -2547,23 +2550,13 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr, struct list_head *uf); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, - vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, - struct list_head *uf); + unsigned long pgoff, unsigned long *populate, struct list_head *uf); extern int __do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf, bool downgrade); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); extern int do_madvise(unsigned long start, size_t len_in, int behavior); -static inline unsigned long -do_mmap_pgoff(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, unsigned long flags, - unsigned long pgoff, unsigned long *populate, - struct list_head *uf) -{ - return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf); -} - #ifdef CONFIG_MMU extern int __mm_populate(unsigned long addr, unsigned long len, int ignore_errors); @@ -2612,30 +2605,11 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); int 
__must_check write_one_page(struct page *page); void task_dirty_inc(struct task_struct *tsk); -/* readahead.c */ -#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) - -int force_page_cache_readahead(struct address_space *mapping, struct file *filp, - pgoff_t offset, unsigned long nr_to_read); - -void page_cache_sync_readahead(struct address_space *mapping, - struct file_ra_state *ra, - struct file *filp, - pgoff_t offset, - unsigned long size); - -void page_cache_async_readahead(struct address_space *mapping, - struct file_ra_state *ra, - struct file *filp, - struct page *pg, - pgoff_t offset, - unsigned long size); - extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ extern int expand_stack(struct vm_area_struct *vma, unsigned long address); -/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */ +/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ extern int expand_downwards(struct vm_area_struct *vma, unsigned long address); #if VM_GROWSUP @@ -2791,6 +2765,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */ #define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */ #define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */ +#define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */ /* * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each @@ -2845,7 +2820,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, * releasing pages: get_user_pages*() pages must be released via put_page(), * while pin_user_pages*() pages must be released via unpin_user_page(). * - * Please see Documentation/vm/pin_user_pages.rst for more information. + * Please see Documentation/core-api/pin_user_pages.rst for more information. 
*/ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) @@ -2968,8 +2943,8 @@ extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm); #ifdef CONFIG_SYSCTL extern int sysctl_drop_caches; -int drop_caches_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); +int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); #endif void drop_slab(void); @@ -2997,14 +2972,15 @@ pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); -pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); +pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, + struct vmem_altmap *altmap); void *vmemmap_alloc_block(unsigned long size, int node); struct vmem_altmap; -void *vmemmap_alloc_block_buf(unsigned long size, int node); -void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); +void *vmemmap_alloc_block_buf(unsigned long size, int node, + struct vmem_altmap *altmap); void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); int vmemmap_populate_basepages(unsigned long start, unsigned long end, - int node); + int node, struct vmem_altmap *altmap); int vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap); void vmemmap_populate_print_last(void); @@ -3023,6 +2999,7 @@ enum mf_flags { }; extern int memory_failure(unsigned long pfn, int flags); extern void memory_failure_queue(unsigned long pfn, int flags); +extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); extern int get_hwpoison_page(struct page *page); #define put_hwpoison_page(page) put_page(page) @@ -3151,5 +3128,7 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, pgoff_t first_index, pgoff_t nr); #endif +extern int sysctl_nr_trim_pages; + #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 219bef41d87c..8fc71e9d7bb0 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -48,14 +48,14 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, static __always_inline void add_page_to_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } static __always_inline void add_page_to_lru_list_tail(struct page *page, struct lruvec *lruvec, enum lru_list lru) { - update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add_tail(&page->lru, &lruvec->lists[lru]); } @@ -63,7 +63,7 @@ static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec, enum lru_list lru) { list_del(&page->lru); - update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page)); + update_lru_size(lruvec, lru, page_zonenum(page), -thp_nr_pages(page)); } /** diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4aba6c0c2ba8..ed028af3cb19 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -134,6 +134,7 @@ struct page { unsigned char compound_dtor; unsigned char 
compound_order; atomic_t compound_mapcount; + unsigned int compound_nr; /* 1 << compound_order */ }; struct { /* Second tail page of compound page */ unsigned long _compound_pad_1; /* compound_head */ @@ -198,7 +199,10 @@ struct page { atomic_t _refcount; #ifdef CONFIG_MEMCG - struct mem_cgroup *mem_cgroup; + union { + struct mem_cgroup *mem_cgroup; + struct obj_cgroup **obj_cgroups; + }; #endif /* @@ -240,7 +244,11 @@ static inline atomic_t *compound_pincount_ptr(struct page *page) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) #define page_private(page) ((page)->private) -#define set_page_private(page, v) ((page)->private = (v)) + +static inline void set_page_private(struct page *page, unsigned long private) +{ + page->private = private; +} struct page_frag_cache { void * va; @@ -340,7 +348,7 @@ struct vm_area_struct { * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack * or brk vma (with NULL file) can only be in an anon_vma list. */ - struct list_head anon_vma_chain; /* Serialized by mmap_sem & + struct list_head anon_vma_chain; /* Serialized by mmap_lock & * page_table_lock */ struct anon_vma *anon_vma; /* Serialized by page_table_lock */ @@ -428,6 +436,16 @@ struct mm_struct { */ atomic_t mm_count; + /** + * @has_pinned: Whether this mm has pinned any pages. This can + * be either replaced in the future by @pinned_vm when it + * becomes stable, or grow into a counter on its own. We're + * aggressive on this bit now - even if the pinned pages were + * unpinned later on, we'll still keep this bit set for the + * lifecycle of this mm just for simplicity. + */ + atomic_t has_pinned; + #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif @@ -436,7 +454,7 @@ struct mm_struct { spinlock_t page_table_lock; /* Protects page tables and some * counters */ - struct rw_semaphore mmap_sem; + struct rw_semaphore mmap_lock; struct list_head mmlist; /* List of maybe swapped mm's.
These * are globally strung together off diff --git a/include/linux/mman.h b/include/linux/mman.h index 4b08e9c9c538..6f34c33075f9 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -57,8 +57,12 @@ extern struct percpu_counter vm_committed_as; #ifdef CONFIG_SMP extern s32 vm_committed_as_batch; +extern void mm_compute_batch(int overcommit_policy); #else #define vm_committed_as_batch 0 +static inline void mm_compute_batch(int overcommit_policy) +{ +} #endif unsigned long vm_memory_committed(void); diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h new file mode 100644 index 000000000000..0707671851a8 --- /dev/null +++ b/include/linux/mmap_lock.h @@ -0,0 +1,90 @@ +#ifndef _LINUX_MMAP_LOCK_H +#define _LINUX_MMAP_LOCK_H + +#include <linux/mmdebug.h> + +#define MMAP_LOCK_INITIALIZER(name) \ + .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), + +static inline void mmap_init_lock(struct mm_struct *mm) +{ + init_rwsem(&mm->mmap_lock); +} + +static inline void mmap_write_lock(struct mm_struct *mm) +{ + down_write(&mm->mmap_lock); +} + +static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) +{ + down_write_nested(&mm->mmap_lock, subclass); +} + +static inline int mmap_write_lock_killable(struct mm_struct *mm) +{ + return down_write_killable(&mm->mmap_lock); +} + +static inline bool mmap_write_trylock(struct mm_struct *mm) +{ + return down_write_trylock(&mm->mmap_lock) != 0; +} + +static inline void mmap_write_unlock(struct mm_struct *mm) +{ + up_write(&mm->mmap_lock); +} + +static inline void mmap_write_downgrade(struct mm_struct *mm) +{ + downgrade_write(&mm->mmap_lock); +} + +static inline void mmap_read_lock(struct mm_struct *mm) +{ + down_read(&mm->mmap_lock); +} + +static inline int mmap_read_lock_killable(struct mm_struct *mm) +{ + return down_read_killable(&mm->mmap_lock); +} + +static inline bool mmap_read_trylock(struct mm_struct *mm) +{ + return down_read_trylock(&mm->mmap_lock) != 0; +} + +static inline void mmap_read_unlock(struct mm_struct *mm) +{ + up_read(&mm->mmap_lock); +} + +static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) +{ + if (down_read_trylock(&mm->mmap_lock)) { + rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_); + return true; + } + return false; +} + +static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) +{ + up_read_non_owner(&mm->mmap_lock); +} + +static inline void mmap_assert_locked(struct mm_struct *mm) +{ + lockdep_assert_held(&mm->mmap_lock); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +} + +static inline void mmap_assert_write_locked(struct mm_struct *mm) +{ + lockdep_assert_held_write(&mm->mmap_lock); + VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); +} + +#endif /* _LINUX_MMAP_LOCK_H */ diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index cf3780a6ccc4..7d46411ffaa2 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -48,6 +48,7 @@ struct mmc_ext_csd { u8 sec_feature_support; u8 rel_sectors; u8 rel_param; + bool enhanced_rpmb_supported; u8 part_config; u8 cache_ctrl; u8 rst_n_function; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index c318fb5b6a94..c5b6e97cb21a 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -92,6 +92,9 @@ struct mmc_host_ops { int err); void (*pre_req)(struct mmc_host *host, struct mmc_request *req); void (*request)(struct mmc_host *host, struct mmc_request *req); + /* Submit one request to host in atomic context. 
*/ + int (*request_atomic)(struct mmc_host *host, + struct mmc_request *req); /* * Avoid calling the next three functions too often or in a "fast @@ -284,6 +287,7 @@ struct mmc_host { #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; #endif + struct wakeup_source *ws; /* Enable consume of uevents */ u32 max_current_330; u32 max_current_300; u32 max_current_180; @@ -318,7 +322,6 @@ struct mmc_host { #define MMC_CAP_AGGRESSIVE_PM (1 << 7) /* Suspend (e)MMC/SD at idle */ #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ -#define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ #define MMC_CAP_3_3V_DDR (1 << 11) /* Host supports eMMC DDR 3.3V */ #define MMC_CAP_1_8V_DDR (1 << 12) /* Host supports eMMC DDR 1.8V */ #define MMC_CAP_1_2V_DDR (1 << 13) /* Host supports eMMC DDR 1.2V */ @@ -349,6 +352,7 @@ struct mmc_host { #define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ #define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ +#define MMC_CAP2_FULL_PWR_CYCLE_IN_SUSPEND (1 << 3) /* Can do full power cycle in suspend */ #define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ #define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ #define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 4b85ef05a906..d9a65c6a8816 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -325,6 +325,7 @@ static inline bool mmc_ready_for_data(u32 status) */ #define EXT_CSD_WR_REL_PARAM_EN (1<<2) +#define EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR (1<<4) #define EXT_CSD_BOOT_WP_B_PWR_WP_DIS (0x40) #define EXT_CSD_BOOT_WP_B_PERM_WP_DIS (0x10) diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 2e9a6e4634eb..12036619346c 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -24,59 +24,108 @@ /* * Vendors and devices. Sort key: vendor first, device next. 
*/ + +#define SDIO_VENDOR_ID_STE 0x0020 +#define SDIO_DEVICE_ID_STE_CW1200 0x2280 + +#define SDIO_VENDOR_ID_INTEL 0x0089 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 +#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 +#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 +#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 +#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 + +#define SDIO_VENDOR_ID_CGUYS 0x0092 +#define SDIO_DEVICE_ID_CGUYS_EW_CG1102GC 0x0004 + +#define SDIO_VENDOR_ID_TI 0x0097 +#define SDIO_DEVICE_ID_TI_WL1271 0x4076 + +#define SDIO_VENDOR_ID_ATHEROS 0x0271 +#define SDIO_DEVICE_ID_ATHEROS_AR6003_00 0x0300 +#define SDIO_DEVICE_ID_ATHEROS_AR6003_01 0x0301 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_00 0x0400 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_01 0x0401 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_02 0x0402 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_18 0x0418 +#define SDIO_DEVICE_ID_ATHEROS_AR6004_19 0x0419 +#define SDIO_DEVICE_ID_ATHEROS_AR6005 0x050A +#define SDIO_DEVICE_ID_ATHEROS_QCA9377 0x0701 + #define SDIO_VENDOR_ID_BROADCOM 0x02d0 -#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 +#define SDIO_DEVICE_ID_BROADCOM_NINTENDO_WII 0x044b #define SDIO_DEVICE_ID_BROADCOM_43241 0x4324 #define SDIO_DEVICE_ID_BROADCOM_4329 0x4329 #define SDIO_DEVICE_ID_BROADCOM_4330 0x4330 #define SDIO_DEVICE_ID_BROADCOM_4334 0x4334 -#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c -#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 #define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 -#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 -#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 -#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 -#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_89359 0x4355 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_DEVICE_ID_BROADCOM_4359 0x4359 -#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373 -#define SDIO_DEVICE_ID_CYPRESS_43012 43012 -#define SDIO_DEVICE_ID_CYPRESS_89359 0x4355 - -#define SDIO_VENDOR_ID_INTEL 0x0089 -#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 -#define SDIO_DEVICE_ID_INTEL_IWMC3200WIFI 0x1403 -#define SDIO_DEVICE_ID_INTEL_IWMC3200TOP 0x1404 -#define SDIO_DEVICE_ID_INTEL_IWMC3200GPS 0x1405 -#define SDIO_DEVICE_ID_INTEL_IWMC3200BT 0x1406 -#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5 0x1407 +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_4373 0x4373 +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43012 0xa804 +#define SDIO_DEVICE_ID_BROADCOM_43143 0xa887 +#define SDIO_DEVICE_ID_BROADCOM_43340 0xa94c +#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d +#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 +#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 +#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 +#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 -#define SDIO_DEVICE_ID_MARVELL_8688WLAN 0x9104 -#define SDIO_DEVICE_ID_MARVELL_8688BT 0x9105 +#define SDIO_DEVICE_ID_MARVELL_8688_WLAN 0x9104 +#define SDIO_DEVICE_ID_MARVELL_8688_BT 0x9105 +#define SDIO_DEVICE_ID_MARVELL_8786_WLAN 0x9116 +#define SDIO_DEVICE_ID_MARVELL_8787_WLAN 0x9119 +#define SDIO_DEVICE_ID_MARVELL_8787_BT 0x911a +#define SDIO_DEVICE_ID_MARVELL_8787_BT_AMP 0x911b #define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 -#define SDIO_DEVICE_ID_MARVELL_8887WLAN 0x9134 +#define SDIO_DEVICE_ID_MARVELL_8797_WLAN 0x9129 +#define SDIO_DEVICE_ID_MARVELL_8797_BT 0x912a +#define 
SDIO_DEVICE_ID_MARVELL_8897_WLAN 0x912d +#define SDIO_DEVICE_ID_MARVELL_8897_BT 0x912e +#define SDIO_DEVICE_ID_MARVELL_8887_F0 0x9134 +#define SDIO_DEVICE_ID_MARVELL_8887_WLAN 0x9135 +#define SDIO_DEVICE_ID_MARVELL_8887_BT 0x9136 +#define SDIO_DEVICE_ID_MARVELL_8801_WLAN 0x9139 +#define SDIO_DEVICE_ID_MARVELL_8997_F0 0x9140 +#define SDIO_DEVICE_ID_MARVELL_8997_WLAN 0x9141 +#define SDIO_DEVICE_ID_MARVELL_8997_BT 0x9142 +#define SDIO_DEVICE_ID_MARVELL_8977_WLAN 0x9145 +#define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146 +#define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149 +#define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a #define SDIO_VENDOR_ID_MEDIATEK 0x037a +#define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 +#define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668 + +#define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296 +#define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347 #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 #define SDIO_DEVICE_ID_SIANO_NICE 0x0202 #define SDIO_DEVICE_ID_SIANO_VEGA_A0 0x0300 #define SDIO_DEVICE_ID_SIANO_VENICE 0x0301 +#define SDIO_DEVICE_ID_SIANO_MING 0x0302 +#define SDIO_DEVICE_ID_SIANO_PELE 0x0500 +#define SDIO_DEVICE_ID_SIANO_RIO 0x0600 +#define SDIO_DEVICE_ID_SIANO_DENVER_2160 0x0700 +#define SDIO_DEVICE_ID_SIANO_DENVER_1530 0x0800 #define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100 #define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347 -#define SDIO_VENDOR_ID_TI 0x0097 -#define SDIO_DEVICE_ID_TI_WL1271 0x4076 +#define SDIO_VENDOR_ID_RSI 0x041b +#define SDIO_DEVICE_ID_RSI_9113 0x9330 +#define SDIO_DEVICE_ID_RSI_9116 0x9116 + #define SDIO_VENDOR_ID_TI_WL1251 0x104c #define SDIO_DEVICE_ID_TI_WL1251 0x9066 -#define SDIO_VENDOR_ID_STE 0x0020 -#define SDIO_DEVICE_ID_STE_CW1200 0x2280 - #endif /* LINUX_MMC_SDIO_IDS_H */ diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index d9a543a9e1cc..03dee12d2b61 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -3,15 +3,15 @@ #define _LINUX_MMU_CONTEXT_H #include <asm/mmu_context.h> - -struct mm_struct; - -void use_mm(struct mm_struct *mm); -void unuse_mm(struct mm_struct *mm); +#include <asm/mmu.h> /* Architectures that care about IRQ state in switch_mm can override this. */ #ifndef switch_mm_irqs_off # define switch_mm_irqs_off switch_mm #endif +#ifndef leave_mm +static inline void leave_mm(int cpu) { } +#endif + #endif diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 736f6918335e..b8200782dede 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -5,6 +5,7 @@ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/mm_types.h> +#include <linux/mmap_lock.h> #include <linux/srcu.h> #include <linux/interval_tree.h> @@ -37,6 +38,10 @@ struct mmu_interval_notifier; * * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal * that the mm refcount is zero and the range is no longer accessible. + * + * @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal + * a device driver to possibly ignore the invalidation if the + * migrate_pgmap_owner field matches the driver's device private pgmap owner. 
*/ enum mmu_notifier_event { MMU_NOTIFY_UNMAP = 0, @@ -45,6 +50,7 @@ enum mmu_notifier_event { MMU_NOTIFY_PROTECTION_PAGE, MMU_NOTIFY_SOFT_DIRTY, MMU_NOTIFY_RELEASE, + MMU_NOTIFY_MIGRATE, }; #define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) @@ -121,7 +127,7 @@ struct mmu_notifier_ops { /* * invalidate_range_start() and invalidate_range_end() must be - * paired and are called only when the mmap_sem and/or the + * paired and are called only when the mmap_lock and/or the * locks protecting the reverse maps are held. If the subsystem * can't guarantee that no additional references are taken to * the pages in the range, it has to implement the @@ -212,13 +218,13 @@ struct mmu_notifier_ops { }; /* - * The notifier chains are protected by mmap_sem and/or the reverse map + * The notifier chains are protected by mmap_lock and/or the reverse map * semaphores. Notifier chains are only changed when all reverse maps and - * the mmap_sem locks are taken. + * the mmap_lock locks are taken. * * Therefore notifier chains can only be traversed when either * - * 1. mmap_sem is held. + * 1. mmap_lock is held. * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem). * 3. No other concurrent thread can access the list (release) */ @@ -263,6 +269,7 @@ struct mmu_notifier_range { unsigned long end; unsigned flags; enum mmu_notifier_event event; + void *migrate_pgmap_owner; }; static inline int mm_has_notifiers(struct mm_struct *mm) @@ -277,9 +284,9 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm) { struct mmu_notifier *ret; - down_write(&mm->mmap_sem); + mmap_write_lock(mm); ret = mmu_notifier_get_locked(ops, mm); - up_write(&mm->mmap_sem); + mmap_write_unlock(mm); return ret; } void mmu_notifier_put(struct mmu_notifier *subscription); @@ -514,6 +521,16 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range, range->flags = flags; } +static inline void mmu_notifier_range_init_migrate( + struct mmu_notifier_range *range, unsigned int flags, + struct vm_area_struct *vma, struct mm_struct *mm, + unsigned long start, unsigned long end, void *pgmap) +{ + mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm, + start, end); + range->migrate_pgmap_owner = pgmap; +} + #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ ({ \ int __young; \ @@ -638,6 +655,9 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range, #define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \ _mmu_notifier_range_init(range, start, end) +#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \ + pgmap) \ + _mmu_notifier_range_init(range, start, end) static inline bool mmu_notifier_range_blockable(const struct mmu_notifier_range *range) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 1b9de7d220fb..0f7a4ff4b059 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -88,12 +88,10 @@ static inline bool is_migrate_movable(int mt) extern int page_group_by_mobility_disabled; -#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1) -#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1) +#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) #define get_pageblock_migratetype(page) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), \ - PB_migrate_end, MIGRATETYPE_MASK) + get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) struct free_area { struct list_head free_list[MIGRATE_TYPES]; @@ -155,7 +153,6 @@ enum zone_stat_item { 
NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ NR_PAGETABLE, /* used for pagetables */ - NR_KERNEL_STACK_KB, /* measured in KiB */ /* Second 128 byte cacheline */ NR_BOUNCE, #if IS_ENABLED(CONFIG_ZSMALLOC) @@ -171,14 +168,20 @@ enum node_stat_item { NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */ NR_UNEVICTABLE, /* " " " " " */ - NR_SLAB_RECLAIMABLE, - NR_SLAB_UNRECLAIMABLE, + NR_SLAB_RECLAIMABLE_B, + NR_SLAB_UNRECLAIMABLE_B, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, - WORKINGSET_REFAULT, - WORKINGSET_ACTIVATE, - WORKINGSET_RESTORE, + WORKINGSET_REFAULT_BASE, + WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, + WORKINGSET_REFAULT_FILE, + WORKINGSET_ACTIVATE_BASE, + WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, + WORKINGSET_ACTIVATE_FILE, + WORKINGSET_RESTORE_BASE, + WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, + WORKINGSET_RESTORE_FILE, WORKINGSET_NODERECLAIM, NR_ANON_MAPPED, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. @@ -193,7 +196,6 @@ enum node_stat_item { NR_FILE_THPS, NR_FILE_PMDMAPPED, NR_ANON_THPS, - NR_UNSTABLE_NFS, /* NFS unstable pages */ NR_VMSCAN_WRITE, NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ @@ -201,10 +203,34 @@ enum node_stat_item { NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ + NR_KERNEL_STACK_KB, /* measured in KiB */ +#if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) + NR_KERNEL_SCS_KB, /* measured in KiB */ +#endif NR_VM_NODE_STAT_ITEMS }; /* + * Returns true if the value is measured in bytes (most vmstat values are + * measured in pages). This defines the API part, the internal representation + * might be different. + */ +static __always_inline bool vmstat_item_in_bytes(int idx) +{ + /* + * Global and per-node slab counters track slab pages. + * It's expected that changes are multiples of PAGE_SIZE. + * Internally values are stored in pages. + * + * Per-memcg and per-lruvec counters track memory, consumed + * by individual slab objects. These counters are actually + * byte-precise. + */ + return (idx == NR_SLAB_RECLAIMABLE_B || + idx == NR_SLAB_UNRECLAIMABLE_B); +} + +/* * We do arithmetic on the LRU lists in various places in the code, * so it is important to keep the active lists LRU_ACTIVE higher in * the array than the corresponding inactive lists, and to keep @@ -240,19 +266,6 @@ static inline bool is_active_lru(enum lru_list lru) return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); } -struct zone_reclaim_stat { - /* - * The pageout code in vmscan.c keeps track of how many of the - * mem/swap backed and file backed pages are referenced. - * The higher the rotated/scanned ratio, the more valuable - * that cache is. 
- * - * The anon LRU stats live in [0], file LRU stats in [1] - */ - unsigned long recent_rotated[2]; - unsigned long recent_scanned[2]; -}; - enum lruvec_flags { LRUVEC_CONGESTED, /* lruvec has many dirty pages * backed by a congested BDI @@ -261,11 +274,17 @@ enum lruvec_flags { struct lruvec { struct list_head lists[NR_LRU_LISTS]; - struct zone_reclaim_stat reclaim_stat; - /* Evictions & activations on the inactive file list */ - atomic_long_t inactive_age; - /* Refaults at the time of last reclaim cycle */ - unsigned long refaults; + /* + * These track the cost of reclaiming one LRU - file or anon - + * over the other. As the observed cost of reclaiming one LRU + * increases, the reclaim scan balance tips toward the other. + */ + unsigned long anon_cost; + unsigned long file_cost; + /* Non-resident age, driven by LRU movement */ + atomic_long_t nonresident_age; + /* Refaults at the time of last reclaim cycle, anon=0, file=1 */ + unsigned long refaults[2]; /* Various lruvec state flags (enum lruvec_flags) */ unsigned long flags; #ifdef CONFIG_MEMCG @@ -517,6 +536,7 @@ struct zone { * On compaction failure, 1<<compact_defer_shift compactions * are skipped before trying again. The number attempted since * last failure is tracked with compact_considered. + * compact_order_failed is the minimum compaction failed order. */ unsigned int compact_considered; unsigned int compact_defer_shift; @@ -665,9 +685,21 @@ struct deferred_split { * per-zone basis. */ typedef struct pglist_data { + /* + * node_zones contains just the zones for THIS node. Not all of the + * zones may be populated, but it is the full list. It is referenced by + * this node's node_zonelists as well as other node's node_zonelists. + */ struct zone node_zones[MAX_NR_ZONES]; + + /* + * node_zonelists contains references to all zones in all nodes. + * Generally the first zones will be references to this node's + * node_zones. + */ struct zonelist node_zonelists[MAX_ZONELISTS]; - int nr_zones; + + int nr_zones; /* number of populated zones in this node */ #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; #ifdef CONFIG_PAGE_EXTENSION @@ -678,6 +710,8 @@ typedef struct pglist_data { /* * Must be held any time you expect node_start_pfn, * node_present_pages, node_spanned_pages or nr_zones to stay constant. + * Also synchronizes pgdat->first_deferred_pfn during deferred page + * init. 
* * pgdat_resize_lock() and pgdat_resize_unlock() are provided to * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG @@ -697,13 +731,13 @@ typedef struct pglist_data { struct task_struct *kswapd; /* Protected by mem_hotplug_begin/end() */ int kswapd_order; - enum zone_type kswapd_classzone_idx; + enum zone_type kswapd_highest_zoneidx; int kswapd_failures; /* Number of 'reclaimed == 0' runs */ #ifdef CONFIG_COMPACTION int kcompactd_max_order; - enum zone_type kcompactd_classzone_idx; + enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; #endif @@ -781,19 +815,24 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat) void build_all_zonelists(pg_data_t *pgdat); void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order, - enum zone_type classzone_idx); + enum zone_type highest_zoneidx); bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, - int classzone_idx, unsigned int alloc_flags, + int highest_zoneidx, unsigned int alloc_flags, long free_pages); bool zone_watermark_ok(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx, + unsigned long mark, int highest_zoneidx, unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, - unsigned long mark, int classzone_idx); -enum memmap_context { - MEMMAP_EARLY, - MEMMAP_HOTPLUG, + unsigned long mark, int highest_zoneidx); +/* + * Memory initialization context, use to differentiate memory added by + * the platform statically or via memory hotplug interface. + */ +enum meminit_context { + MEMINIT_EARLY, + MEMINIT_HOTPLUG, }; + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size); @@ -810,18 +849,6 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec) extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx); -#ifdef CONFIG_HAVE_MEMORY_PRESENT -void memory_present(int nid, unsigned long start, unsigned long end); -#else -static inline void memory_present(int nid, unsigned long start, unsigned long end) {} -#endif - -#if defined(CONFIG_SPARSEMEM) -void memblocks_present(void); -#else -static inline void memblocks_present(void) {} -#endif - #ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else @@ -874,7 +901,7 @@ extern int movable_zone; #ifdef CONFIG_HIGHMEM static inline int zone_movable_is_highmem(void) { -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP +#ifdef CONFIG_NEED_MULTIPLE_NODES return movable_zone == ZONE_HIGHMEM; #else return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM; @@ -909,24 +936,23 @@ static inline int is_highmem(struct zone *zone) /* These two functions are used to setup the per zone pages min values */ struct ctl_table; -int min_free_kbytes_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -int watermark_boost_factor_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + +int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, + loff_t *); +int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, + size_t *, loff_t *); extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); +int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, + size_t *, loff_t *); 
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *, size_t *, loff_t *); int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *, size_t *, loff_t *); int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); - -extern int numa_zonelist_order_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *, size_t *, loff_t *); +int numa_zonelist_order_handler(struct ctl_table *, int, + void *, size_t *, loff_t *); +extern int percpu_pagelist_fraction; extern char numa_zonelist_order[]; #define NUMA_ZONELIST_ORDER_LEN 16 @@ -1078,15 +1104,6 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #include <asm/sparsemem.h> #endif -#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \ - !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) -static inline unsigned long early_pfn_to_nid(unsigned long pfn) -{ - BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA)); - return 0; -} -#endif - #ifdef CONFIG_FLATMEM #define pfn_to_nid(pfn) (0) #endif @@ -1388,8 +1405,6 @@ struct mminit_pfnnid_cache { #define early_pfn_valid(pfn) (1) #endif -void memory_present(int nid, unsigned long start, unsigned long end); - /* * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we * need to check pfn validity within that MAX_ORDER_NR_PAGES block. diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 35942084cd40..8f882f5881e8 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -6,10 +6,12 @@ struct mnt_namespace; struct fs_struct; struct user_namespace; +struct ns_common; extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, struct user_namespace *, struct fs_struct *); extern void put_mnt_ns(struct mnt_namespace *ns); +extern struct ns_common *from_mnt_ns(struct mnt_namespace *); extern const struct file_operations proc_mounts_operations; extern const struct file_operations proc_mountinfo_operations; diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 498827b2c9f7..5b08a473cdba 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -251,6 +251,8 @@ struct hda_device_id { struct sdw_device_id { __u16 mfg_id; __u16 part_id; + __u8 sdw_version; + __u8 class_id; kernel_ulong_t driver_data; }; @@ -434,7 +436,7 @@ struct virtio_device_id { * For Hyper-V devices we use the device guid as the id. 
*/ struct hv_vmbus_device_id { - uuid_le guid; + guid_t guid; kernel_ulong_t driver_data; /* Data private to the driver */ }; @@ -532,6 +534,8 @@ enum dmi_field { DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_BIOS_DATE, + DMI_BIOS_RELEASE, + DMI_EC_FIRMWARE_RELEASE, DMI_SYS_VENDOR, DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, @@ -663,6 +667,7 @@ struct x86_cpu_id { __u16 vendor; __u16 family; __u16 model; + __u16 steppings; __u16 feature; /* bit index */ kernel_ulong_t driver_data; }; @@ -671,6 +676,7 @@ struct x86_cpu_id { #define X86_VENDOR_ANY 0xffff #define X86_FAMILY_ANY 0 #define X86_MODEL_ANY 0 +#define X86_STEPPING_ANY 0 #define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ /* diff --git a/include/linux/module.h b/include/linux/module.h index 1ad393e62bef..e30ed5fa33a7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -389,6 +389,7 @@ struct module { unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; + bool using_gplonly_symbols; #ifdef CONFIG_UNUSED_SYMBOLS /* unused exported symbols. */ @@ -458,6 +459,8 @@ struct module { void __percpu *percpu; unsigned int percpu_size; #endif + void *noinstr_text_start; + unsigned int noinstr_text_size; #ifdef CONFIG_TRACEPOINTS unsigned int num_tracepoints; @@ -489,6 +492,12 @@ struct module { unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; #endif +#ifdef CONFIG_KPROBES + void *kprobes_text_start; + unsigned int kprobes_text_size; + unsigned long *kprobe_blacklist; + unsigned int num_kprobe_blacklist; +#endif #ifdef CONFIG_LIVEPATCH bool klp; /* Is this a livepatch module? */ @@ -574,34 +583,14 @@ struct module *find_module(const char *name); struct symsearch { const struct kernel_symbol *start, *stop; const s32 *crcs; - enum { + enum mod_license { NOT_GPL_ONLY, GPL_ONLY, WILL_BE_GPL_ONLY, - } licence; + } license; bool unused; }; -/* - * Search for an exported symbol by name. - * - * Must be called with module_mutex held or preemption disabled. - */ -const struct kernel_symbol *find_symbol(const char *name, - struct module **owner, - const s32 **crc, - bool gplok, - bool warn); - -/* - * Walk the exported symbol table - * - * Must be called with module_mutex held or preemption disabled. - */ -bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - struct module *owner, - void *data), void *data); - /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if symnum out of range. 
*/ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, @@ -649,7 +638,6 @@ static inline void __module_get(struct module *module) #define symbol_put_addr(p) do { } while (0) #endif /* CONFIG_MODULE_UNLOAD */ -int ref_module(struct module *a, struct module *b); /* This is a #define so the string doesn't get put in every .o file */ #define module_name(mod) \ @@ -858,14 +846,6 @@ extern int module_sysfs_initialized; #define __MODULE_STRING(x) __stringify(x) -#ifdef CONFIG_STRICT_MODULE_RWX -extern void module_enable_ro(const struct module *mod, bool after_init); -extern void module_disable_ro(const struct module *mod); -#else -static inline void module_enable_ro(const struct module *mod, bool after_init) { } -static inline void module_disable_ro(const struct module *mod) { } -#endif - #ifdef CONFIG_GENERIC_BUG void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, struct module *); diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index ca92aea8a6bd..4fa67a8b2265 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -29,6 +29,11 @@ void *module_alloc(unsigned long size); /* Free memory returned from module_alloc. */ void module_memfree(void *module_region); +/* Determines if the section name is an init section (that is only used during + * module loading). + */ +bool module_init_section(const char *name); + /* Determines if the section name is an exit section (that is only used during * module unloading) */ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 3ef917ff0964..1ad5aa3b86d9 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -108,7 +108,7 @@ struct kparam_array * ".") the kernel commandline parameter. Note that - is changed to _, so * the user can use "foo-bar=1" even for variable "foo_bar". * - * @perm is 0 if the the variable is not to appear in sysfs, or 0444 + * @perm is 0 if the variable is not to appear in sysfs, or 0444 * for world-readable, 0644 for root-writable, etc. Note that if it * is writable, you may need to use kernel_param_lock() around * accesses (esp. charp, which can be kfreed when it changes). 
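As a quick illustration of the moduleparam @perm rule above: a writable charp parameter can be kfreed and replaced whenever userspace rewrites the sysfs file, so readers must hold the param lock across the access. A minimal sketch, assuming a hypothetical module parameter named example_name (none of these identifiers come from this patch):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical parameter; perm 0644 makes it root-writable via sysfs. */
static char *example_name = "default";
module_param(example_name, charp, 0644);
MODULE_PARM_DESC(example_name, "Example writable string parameter");

static void example_show_name(void)
{
	/*
	 * The charp value may be kfreed by a concurrent sysfs write,
	 * so take the param lock around the access.
	 */
	kernel_param_lock(THIS_MODULE);
	pr_info("example_name=%s\n", example_name);
	kernel_param_unlock(THIS_MODULE);
}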
diff --git a/include/linux/mount.h b/include/linux/mount.h index bf8cc4108b8f..de657bd211fa 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h @@ -50,7 +50,8 @@ struct fs_context; #define MNT_ATIME_MASK (MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME ) #define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \ - MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED) + MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED | \ + MNT_CURSOR) #define MNT_INTERNAL 0x4000 @@ -64,6 +65,7 @@ struct fs_context; #define MNT_SYNC_UMOUNT 0x2000000 #define MNT_MARKED 0x4000000 #define MNT_UMOUNT 0x8000000 +#define MNT_CURSOR 0x10000000 struct vfsmount { struct dentry *mnt_root; /* root of the mounted tree */ @@ -109,4 +111,6 @@ extern unsigned int sysctl_mount_max; extern bool path_is_mountpoint(const struct path *path); +extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num); + #endif /* _LINUX_MOUNT_H */ diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 001f1fcf9836..f4f5e90a6844 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h @@ -13,9 +13,9 @@ #ifdef CONFIG_BLOCK struct writeback_control; +struct readahead_control; -int mpage_readpages(struct address_space *mapping, struct list_head *pages, - unsigned nr_pages, get_block_t get_block); +void mpage_readahead(struct readahead_control *, get_block_t get_block); int mpage_readpage(struct page *page, get_block_t get_block); int mpage_writepages(struct address_space *mapping, struct writeback_control *wbc, get_block_t get_block); diff --git a/include/linux/mpi.h b/include/linux/mpi.h index 7bd6d8af0004..5d906dfbf3ed 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -63,6 +63,9 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); int mpi_cmp_ui(MPI u, ulong v); int mpi_cmp(MPI u, MPI v); +/*-- mpi-sub-ui.c --*/ +int mpi_sub_ui(MPI w, MPI u, unsigned long vval); + /*-- mpi-bit.c --*/ void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); diff --git a/include/linux/mroute.h b/include/linux/mroute.h index 9a36fad9e068..6cbbfe94348c 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h @@ -8,6 +8,7 @@ #include <net/fib_notifier.h> #include <uapi/linux/mroute.h> #include <linux/mroute_base.h> +#include <linux/sockptr.h> #ifdef CONFIG_IP_MROUTE static inline int ip_mroute_opt(int opt) @@ -15,7 +16,7 @@ static inline int ip_mroute_opt(int opt) return opt >= MRT_BASE && opt <= MRT_MAX; } -int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *); int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg); int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); @@ -23,7 +24,7 @@ int ip_mr_init(void); bool ipmr_rule_default(const struct fib_rule *rule); #else static inline int ip_mroute_setsockopt(struct sock *sock, int optname, - char __user *optval, unsigned int optlen) + sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h index c4a45859f586..bc351a85ce9b 100644 --- a/include/linux/mroute6.h +++ b/include/linux/mroute6.h @@ -8,6 +8,7 @@ #include <net/net_namespace.h> #include <uapi/linux/mroute6.h> #include <linux/mroute_base.h> +#include <linux/sockptr.h> #include <net/fib_rules.h> #ifdef CONFIG_IPV6_MROUTE @@ -25,7 +26,7 @@ static inline int ip6_mroute_opt(int opt) struct sock; #ifdef CONFIG_IPV6_MROUTE -extern int 
ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int); +extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int); extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *); extern int ip6_mr_input(struct sk_buff *skb); extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg); @@ -33,9 +34,8 @@ extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *ar extern int ip6_mr_init(void); extern void ip6_mr_cleanup(void); #else -static inline -int ip6_mroute_setsockopt(struct sock *sock, - int optname, char __user *optval, unsigned int optlen) +static inline int ip6_mroute_setsockopt(struct sock *sock, int optname, + sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 886e30441c90..d890805f5494 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h @@ -98,7 +98,7 @@ struct nand_bbt_descr { /* * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr - * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * was allocated dynamicaly and must be freed in nand_cleanup(). Has no meaning * in nand_chip.bbt_options. */ #define NAND_BBT_DYNAMICSTRUCT 0x80000000 diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index c98a21108688..fd1ecb821106 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -138,7 +138,7 @@ struct cfi_ident { uint16_t InterfaceDesc; uint16_t MaxBufWriteSize; uint8_t NumEraseRegions; - uint32_t EraseRegionInfo[0]; /* Not host ordered */ + uint32_t EraseRegionInfo[]; /* Not host ordered */ } __packed; /* Extended Query Structure for both PRI and ALT */ @@ -165,7 +165,7 @@ struct cfi_pri_intelext { uint16_t ProtRegAddr; uint8_t FactProtRegSize; uint8_t UserProtRegSize; - uint8_t extra[0]; + uint8_t extra[]; } __packed; struct cfi_intelext_otpinfo { @@ -286,7 +286,7 @@ struct cfi_private { map_word sector_erase_cmd; unsigned long chipshift; /* Because they're of the same type */ const char *im_name; /* inter_module name for cmdset_setup */ - struct flchip chips[0]; /* per-chip data structure for each chip */ + struct flchip chips[]; /* per-chip data structure for each chip */ }; uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h index 2dfe65964f6e..2129f7d3b6eb 100644 --- a/include/linux/mtd/hyperbus.h +++ b/include/linux/mtd/hyperbus.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __LINUX_MTD_HYPERBUS_H__ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 2d1f4a61f4ac..157357ec1441 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -200,6 +200,8 @@ struct mtd_debug_info { * * @node: list node used to add an MTD partition to the parent partition list * @offset: offset of the partition relatively to the parent offset + * @size: partition size. 
Should be equal to mtd->size unless + * MTD_SLC_ON_MLC_EMULATION is set * @flags: original flags (before the mtdpart logic decided to tweak them based * on flash constraints, like eraseblock/pagesize alignment) * @@ -209,6 +211,7 @@ struct mtd_debug_info { struct mtd_part { struct list_head node; u64 offset; + u64 size; u32 flags; }; @@ -622,7 +625,9 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) static inline int mtd_wunit_per_eb(struct mtd_info *mtd) { - return mtd->erasesize / mtd->writesize; + struct mtd_info *master = mtd_get_master(mtd); + + return master->erasesize / mtd->writesize; } static inline int mtd_offset_to_wunit(struct mtd_info *mtd, loff_t offs) diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 0c7483843a32..af99041ceaa9 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -12,6 +12,8 @@ #include <linux/mtd/mtd.h> +struct nand_device; + /** * struct nand_memory_organization - Memory organization structure * @bits_per_cell: number of bits per NAND cell @@ -114,11 +116,11 @@ struct nand_page_io_req { }; /** - * struct nand_ecc_req - NAND ECC requirements + * struct nand_ecc_props - NAND ECC properties * @strength: ECC strength - * @step_size: ECC step/block size + * @step_size: Number of bytes per step */ -struct nand_ecc_req { +struct nand_ecc_props { unsigned int strength; unsigned int step_size; }; @@ -133,8 +135,6 @@ struct nand_bbt { unsigned long *cache; }; -struct nand_device; - /** * struct nand_ops - NAND operations * @erase: erase a specific block. No need to check if the block is bad before @@ -179,7 +179,7 @@ struct nand_ops { struct nand_device { struct mtd_info mtd; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct nand_row_converter rowconv; struct nand_bbt bbt; const struct nand_ops *ops; diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index e545c050d3e8..b74a539ec581 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h @@ -37,6 +37,7 @@ * master MTD flag set for the corresponding MTD partition. * For example, to force a read-only partition, simply adding * MTD_WRITEABLE to the mask_flags will do the trick. + * add_flags: contains flags to add to the parent flags * * Note: writeable partitions require their size and offset be * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). 
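To make the @mask_flags/@add_flags semantics above concrete, here is a hedged sketch of a partition table: "boot" is forced read-only by masking MTD_WRITEABLE out of the parent flags, while "data" opts into SLC-on-MLC emulation via the new add_flags field. The names, sizes and layout are purely illustrative, not taken from this patch:

#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const struct mtd_partition example_parts[] = {
	{
		.name       = "boot",
		.offset     = 0,
		.size       = SZ_1M,
		/* Mask MTD_WRITEABLE out of the parent flags: read-only. */
		.mask_flags = MTD_WRITEABLE,
	},
	{
		.name       = "data",
		/* Keep the writable partition eraseblock-aligned. */
		.offset     = MTDPART_OFS_NXTBLK,
		.size       = MTDPART_SIZ_FULL,
		/* Add a flag on top of the parent flags (field added by this patch). */
		.add_flags  = MTD_SLC_ON_MLC_EMULATION,
	},
};

A driver would typically hand such an array to mtd_device_register(master, example_parts, ARRAY_SIZE(example_parts)).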
@@ -48,6 +49,7 @@ struct mtd_partition { uint64_t size; /* partition size */ uint64_t offset; /* offset within the master MTD space */ uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + uint32_t add_flags; /* flags to add to the partition */ struct device_node *of_node; }; diff --git a/include/linux/mtd/pfow.h b/include/linux/mtd/pfow.h index 122f3439e1af..6166e7c60869 100644 --- a/include/linux/mtd/pfow.h +++ b/include/linux/mtd/pfow.h @@ -19,7 +19,7 @@ /* Identification info for LPDDR chip */ #define PFOW_MANUFACTURER_ID 0x0020 #define PFOW_DEVICE_ID 0x0022 -/* Address in PFOW where prog buffer can can be found */ +/* Address in PFOW where prog buffer can be found */ #define PFOW_PROGRAM_BUFFER_OFFSET 0x0040 /* Size of program buffer in words */ #define PFOW_PROGRAM_BUFFER_SIZE 0x0042 diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h index df5b9fddea16..2e3f43788d48 100644 --- a/include/linux/mtd/qinfo.h +++ b/include/linux/mtd/qinfo.h @@ -24,7 +24,7 @@ struct lpddr_private { struct qinfo_chip *qinfo; int numchips; unsigned long chipshift; - struct flchip chips[0]; + struct flchip chips[]; }; /* qinfo_query_info structure contains request information for diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 1e76196f9829..a725b620aca2 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -83,14 +83,14 @@ struct nand_chip; /* * Constants for ECC_MODES */ -typedef enum { +enum nand_ecc_mode { + NAND_ECC_INVALID, NAND_ECC_NONE, NAND_ECC_SOFT, NAND_ECC_HW, NAND_ECC_HW_SYNDROME, - NAND_ECC_HW_OOB_FIRST, NAND_ECC_ON_DIE, -} nand_ecc_modes_t; +}; enum nand_ecc_algo { NAND_ECC_UNKNOWN, @@ -119,85 +119,73 @@ enum nand_ecc_algo { #define NAND_ECC_MAXIMIZE BIT(1) /* + * Option constants for bizarre disfunctionality and real + * features. + */ + +/* Buswidth is 16 bit */ +#define NAND_BUSWIDTH_16 BIT(1) + +/* * When using software implementation of Hamming, we can specify which byte * ordering should be used. */ #define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2) -/* - * Option constants for bizarre disfunctionality and real - * features. - */ -/* Buswidth is 16 bit */ -#define NAND_BUSWIDTH_16 0x00000002 /* Chip has cache program function */ -#define NAND_CACHEPRG 0x00000008 +#define NAND_CACHEPRG BIT(3) +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + /* * Chip requires ready check on read (for auto-incremented sequential read). * True only for small page devices; large page devices do not support * autoincrement. */ -#define NAND_NEED_READRDY 0x00000100 +#define NAND_NEED_READRDY BIT(8) /* Chip does not allow subpage writes */ -#define NAND_NO_SUBPAGE_WRITE 0x00000200 +#define NAND_NO_SUBPAGE_WRITE BIT(9) /* Device is one of 'new' xD cards that expose fake nand command set */ -#define NAND_BROKEN_XD 0x00000400 +#define NAND_BROKEN_XD BIT(10) /* Device behaves just like nand, but is readonly */ -#define NAND_ROM 0x00000800 +#define NAND_ROM BIT(11) /* Device supports subpage reads */ -#define NAND_SUBPAGE_READ 0x00001000 +#define NAND_SUBPAGE_READ BIT(12) +/* Macros to identify the above */ +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) /* * Some MLC NANDs need data scrambling to limit bitflips caused by repeated * patterns. 
*/ -#define NAND_NEED_SCRAMBLING 0x00002000 +#define NAND_NEED_SCRAMBLING BIT(13) /* Device needs 3rd row address cycle */ -#define NAND_ROW_ADDR_3 0x00004000 - -/* Options valid for Samsung large page devices */ -#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG - -/* Macros to identify the above */ -#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) - -/* - * There are different places where the manufacturer stores the factory bad - * block markers. - * - * Position within the block: Each of these pages needs to be checked for a - * bad block marking pattern. - */ -#define NAND_BBM_FIRSTPAGE 0x01000000 -#define NAND_BBM_SECONDPAGE 0x02000000 -#define NAND_BBM_LASTPAGE 0x04000000 - -/* Position within the OOB data of the page */ -#define NAND_BBM_POS_SMALL 5 -#define NAND_BBM_POS_LARGE 0 +#define NAND_ROW_ADDR_3 BIT(14) /* Non chip related options */ /* This option skips the bbt scan during initialization. */ -#define NAND_SKIP_BBTSCAN 0x00010000 +#define NAND_SKIP_BBTSCAN BIT(16) /* Chip may not exist, so silence any errors in scan */ -#define NAND_SCAN_SILENT_NODEV 0x00040000 +#define NAND_SCAN_SILENT_NODEV BIT(18) + /* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode * when calling nand_scan_ident, and update its configuration * before calling nand_scan_tail. */ -#define NAND_BUSWIDTH_AUTO 0x00080000 +#define NAND_BUSWIDTH_AUTO BIT(19) + /* * This option could be defined by controller drivers to protect against * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers */ -#define NAND_USE_BOUNCE_BUFFER 0x00100000 +#define NAND_USES_DMA BIT(20) /* * In case your controller is implementing ->legacy.cmd_ctrl() and is relying @@ -207,26 +195,49 @@ enum nand_ecc_algo { * If your controller already takes care of this delay, you don't need to set * this flag. */ -#define NAND_WAIT_TCCS 0x00200000 +#define NAND_WAIT_TCCS BIT(21) /* * Whether the NAND chip is a boot medium. Drivers might use this information * to select ECC algorithms supported by the boot ROM or similar restrictions. */ -#define NAND_IS_BOOT_MEDIUM 0x00400000 +#define NAND_IS_BOOT_MEDIUM BIT(22) /* * Do not try to tweak the timings at runtime. This is needed when the * controller initializes the timings on itself or when it relies on * configuration done by the bootloader. */ -#define NAND_KEEP_TIMINGS 0x00800000 +#define NAND_KEEP_TIMINGS BIT(23) + +/* + * There are different places where the manufacturer stores the factory bad + * block markers. + * + * Position within the block: Each of these pages needs to be checked for a + * bad block marking pattern. + */ +#define NAND_BBM_FIRSTPAGE BIT(24) +#define NAND_BBM_SECONDPAGE BIT(25) +#define NAND_BBM_LASTPAGE BIT(26) + +/* + * Some controllers with pipelined ECC engines override the BBM marker with + * data or ECC bytes, thus making bad block detection through bad block marker + * impossible. Let's flag those chips so the core knows it shouldn't check the + * BBM and consider all blocks good. 
+ */ +#define NAND_NO_BBM_QUIRK BIT(27) /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 #define NAND_CI_CELLTYPE_MSK 0x0C #define NAND_CI_CELLTYPE_SHIFT 2 +/* Position within the OOB data of the page */ +#define NAND_BBM_POS_SMALL 5 +#define NAND_BBM_POS_LARGE 0 + /** * struct nand_parameters - NAND generic parameters from the parameter page * @model: Model name @@ -351,7 +362,7 @@ static const struct nand_ecc_caps __name = { \ * @write_oob: function to write chip OOB data */ struct nand_ecc_ctrl { - nand_ecc_modes_t mode; + enum nand_ecc_mode mode; enum nand_ecc_algo algo; int steps; int size; @@ -481,23 +492,27 @@ struct nand_sdr_timings { }; /** - * enum nand_data_interface_type - NAND interface timing type + * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface */ -enum nand_data_interface_type { +enum nand_interface_type { NAND_SDR_IFACE, }; /** - * struct nand_data_interface - NAND interface timing + * struct nand_interface_config - NAND interface timing * @type: type of the timing - * @timings: The timing, type according to @type + * @timings: The timing information + * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. */ -struct nand_data_interface { - enum nand_data_interface_type type; - union { - struct nand_sdr_timings sdr; +struct nand_interface_config { + enum nand_interface_type type; + struct nand_timings { + unsigned int mode; + union { + struct nand_sdr_timings sdr; + }; } timings; }; @@ -506,7 +521,7 @@ struct nand_data_interface { * @conf: The data interface */ static inline const struct nand_sdr_timings * -nand_get_sdr_timings(const struct nand_data_interface *conf) +nand_get_sdr_timings(const struct nand_interface_config *conf) { if (conf->type != NAND_SDR_IFACE) return ERR_PTR(-EINVAL); @@ -694,6 +709,7 @@ struct nand_op_instr { /** * struct nand_subop - a sub operation + * @cs: the CS line to select for this NAND sub-operation * @instrs: array of instructions * @ninstrs: length of the @instrs array * @first_instr_start_off: offset to start from for the first instruction @@ -709,6 +725,7 @@ struct nand_op_instr { * controller driver. */ struct nand_subop { + unsigned int cs; const struct nand_op_instr *instrs; unsigned int ninstrs; unsigned int first_instr_start_off; @@ -927,11 +944,10 @@ static inline void nand_op_trace(const char *prefix, * This method replaces chip->legacy.cmdfunc(), * chip->legacy.{read,write}_{buf,byte,word}(), * chip->legacy.dev_ready() and chip->legacy.waifunc(). - * @setup_data_interface: setup the data interface and timing. If - * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this - * means the configuration should not be applied but - * only checked. - * This hook is optional. + * @setup_interface: setup the data interface and timing. If chipnr is set to + * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration + * should not be applied but only checked. + * This hook is optional. 
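A minimal sketch of a setup_interface() implementation honouring the check-only contract documented above (foo_* is a placeholder, not taken from the patch):

        static int foo_setup_interface(struct nand_chip *chip, int chipnr,
                                       const struct nand_interface_config *conf)
        {
                const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

                if (IS_ERR(sdr))
                        return PTR_ERR(sdr);

                if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
                        return 0;       /* validate only, do not apply */

                /* program the controller's timing registers from *sdr */
                return 0;
        }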
*/ struct nand_controller_ops { int (*attach_chip)(struct nand_chip *chip); @@ -939,8 +955,8 @@ struct nand_controller_ops { int (*exec_op)(struct nand_chip *chip, const struct nand_operation *op, bool check_only); - int (*setup_data_interface)(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf); }; /** @@ -1011,140 +1027,138 @@ struct nand_legacy { }; /** - * struct nand_chip - NAND Private Flash Chip Data - * @base: Inherit from the generic NAND device - * @legacy: All legacy fields/hooks. If you develop a new driver, - * don't even try to use any of these fields/hooks, and if - * you're modifying an existing driver that is using those - * fields/hooks, you should consider reworking the driver - * avoid using them. - * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for - * setting the read-retry mode. Mostly needed for MLC NAND. - * @ecc: [BOARDSPECIFIC] ECC control structure - * @buf_align: minimum buffer alignment required by a platform - * @oob_poi: "poison value buffer," used for laying out OOB data - * before writing - * @page_shift: [INTERN] number of address bits in a page (column - * address bits). - * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock - * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry - * @chip_shift: [INTERN] number of address bits in one chip - * @options: [BOARDSPECIFIC] various chip options. They can partly - * be set to inform nand_scan about special functionality. - * See the defines for further explanation. - * @bbt_options: [INTERN] bad block specific options. All options used - * here must come from bbm.h. By default, these options - * will be copied to the appropriate nand_bbt_descr's. - * @badblockpos: [INTERN] position of the bad block marker in the oob - * area. - * @badblockbits: [INTERN] minimum number of set bits in a good block's - * bad block marker position; i.e., BBM == 11110111b is - * not bad when badblockbits == 7 - * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is - * set to the actually used ONFI mode if the chip is - * ONFI compliant or deduced from the datasheet if - * the NAND chip is not ONFI compliant. - * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 - * @data_buf: [INTERN] buffer for data, size is (page size + oobsize). - * @pagecache: Structure containing page cache related fields - * @pagecache.bitflips: Number of bitflips of the cached page - * @pagecache.page: Page number currently in the cache. -1 means no page is - * currently cached - * @subpagesize: [INTERN] holds the subpagesize - * @id: [INTERN] holds NAND ID - * @parameters: [INTERN] holds generic parameters under an easily - * readable form. - * @data_interface: [INTERN] NAND interface timing information - * @cur_cs: currently selected target. -1 means no target selected, - * otherwise we should always have cur_cs >= 0 && - * cur_cs < nanddev_ntargets(). NAND Controller drivers - * should not modify this value, but they're allowed to - * read it. - * @read_retries: [INTERN] the number of read retry modes supported - * @lock: lock protecting the suspended field. Also used to - * serialize accesses to the NAND device. - * @suspended: set to 1 when the device is suspended, 0 when it's not. 
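For reference, a hedged sketch of how a controller driver would wire the renamed hook into the ops table defined above (foo_* names are placeholders):

        static const struct nand_controller_ops foo_controller_ops = {
                .attach_chip     = foo_attach_chip,
                .setup_interface = foo_setup_interface,
        };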
- * @suspend: [REPLACEABLE] specific NAND device suspend operation - * @resume: [REPLACEABLE] specific NAND device resume operation - * @bbt: [INTERN] bad block table pointer - * @bbt_td: [REPLACEABLE] bad block table descriptor for flash - * lookup. - * @bbt_md: [REPLACEABLE] bad block table mirror descriptor - * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial - * bad block scan. - * @controller: [REPLACEABLE] a pointer to a hardware controller - * structure which is shared among multiple independent - * devices. - * @priv: [OPTIONAL] pointer to private chip data - * @manufacturer: [INTERN] Contains manufacturer information - * @manufacturer.desc: [INTERN] Contains manufacturer's description - * @manufacturer.priv: [INTERN] Contains manufacturer private information - * @lock_area: [REPLACEABLE] specific NAND chip lock operation - * @unlock_area: [REPLACEABLE] specific NAND chip unlock operation + * struct nand_chip_ops - NAND chip operations + * @suspend: Suspend operation + * @resume: Resume operation + * @lock_area: Lock operation + * @unlock_area: Unlock operation + * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs) + * @choose_interface_config: Choose the best interface configuration + */ +struct nand_chip_ops { + int (*suspend)(struct nand_chip *chip); + void (*resume)(struct nand_chip *chip); + int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); + int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); + int (*choose_interface_config)(struct nand_chip *chip, + struct nand_interface_config *iface); +}; + +/** + * struct nand_manufacturer - NAND manufacturer structure + * @desc: The manufacturer description + * @priv: Private information for the manufacturer driver */ +struct nand_manufacturer { + const struct nand_manufacturer_desc *desc; + void *priv; +}; +/** + * struct nand_chip - NAND Private Flash Chip Data + * @base: Inherit from the generic NAND device + * @id: Holds NAND ID + * @parameters: Holds generic parameters under an easily readable form + * @manufacturer: Manufacturer information + * @ops: NAND chip operations + * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try + * to use any of these fields/hooks, and if you're modifying an + * existing driver that is using those fields/hooks, you should + * consider reworking the driver and avoid using them. + * @options: Various chip options. They can partly be set to inform nand_scan + * about special functionality. See the defines for further + * explanation. + * @current_interface_config: The currently used NAND interface configuration + * @best_interface_config: The best NAND interface configuration which fits both + * the NAND chip and NAND controller constraints. If + * unset, the default reset interface configuration must + * be used. + * @bbt_erase_shift: Number of address bits in a bbt entry + * @bbt_options: Bad block table specific options. All options used here must + * come from bbm.h. By default, these options will be copied to + * the appropriate nand_bbt_descr's. 
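The per-field chip hooks consolidated into struct nand_chip_ops above are now populated in one place; a hedged sketch of a vendor-specific init path (foo_* names are placeholders, not from the patch):

        static int foo_vendor_init(struct nand_chip *chip)
        {
                chip->ops.suspend          = foo_suspend;
                chip->ops.resume           = foo_resume;
                chip->ops.setup_read_retry = foo_setup_read_retry;
                return 0;
        }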
+ * @badblockpos: Bad block marker position in the oob area + * @badblockbits: Minimum number of set bits in a good block's bad block marker + * position; i.e., BBM = 11110111b is good when badblockbits = 7 + * @bbt_td: Bad block table descriptor for flash lookup + * @bbt_md: Bad block table mirror descriptor + * @badblock_pattern: Bad block scan pattern used for initial bad block scan + * @bbt: Bad block table pointer + * @page_shift: Number of address bits in a page (column address bits) + * @phys_erase_shift: Number of address bits in a physical eraseblock + * @chip_shift: Number of address bits in one chip + * @pagemask: Page number mask = number of (pages / chip) - 1 + * @subpagesize: Holds the subpagesize + * @data_buf: Buffer for data, size is (page size + oobsize) + * @oob_poi: pointer on the OOB area covered by data_buf + * @pagecache: Structure containing page cache related fields + * @pagecache.bitflips: Number of bitflips of the cached page + * @pagecache.page: Page number currently in the cache. -1 means no page is + * currently cached + * @buf_align: Minimum buffer alignment required by a platform + * @lock: Lock protecting the suspended field. Also used to serialize accesses + * to the NAND device + * @suspended: Set to 1 when the device is suspended, 0 when it's not + * @cur_cs: Currently selected target. -1 means no target selected, otherwise we + * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets(). + * NAND Controller drivers should not modify this value, but they're + * allowed to read it. + * @read_retries: The number of read retry modes supported + * @controller: The hardware controller structure which is shared among multiple + * independent devices + * @ecc: The ECC controller structure + * @priv: Chip private data + */ struct nand_chip { struct nand_device base; - + struct nand_id id; + struct nand_parameters parameters; + struct nand_manufacturer manufacturer; + struct nand_chip_ops ops; struct nand_legacy legacy; + unsigned int options; - int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); + /* Data interface */ + const struct nand_interface_config *current_interface_config; + struct nand_interface_config *best_interface_config; - unsigned int options; + /* Bad block information */ + unsigned int bbt_erase_shift; unsigned int bbt_options; + unsigned int badblockpos; + unsigned int badblockbits; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + struct nand_bbt_descr *badblock_pattern; + u8 *bbt; - int page_shift; - int phys_erase_shift; - int bbt_erase_shift; - int chip_shift; - int pagemask; - u8 *data_buf; + /* Device internal layout */ + unsigned int page_shift; + unsigned int phys_erase_shift; + unsigned int chip_shift; + unsigned int pagemask; + unsigned int subpagesize; + /* Buffers */ + u8 *data_buf; + u8 *oob_poi; struct { unsigned int bitflips; int page; } pagecache; + unsigned long buf_align; - int subpagesize; - int onfi_timing_mode_default; - unsigned int badblockpos; - int badblockbits; - - struct nand_id id; - struct nand_parameters parameters; - - struct nand_data_interface data_interface; - - int cur_cs; - - int read_retries; - + /* Internals */ struct mutex lock; unsigned int suspended : 1; - int (*suspend)(struct nand_chip *chip); - void (*resume)(struct nand_chip *chip); + int cur_cs; + int read_retries; - uint8_t *oob_poi; + /* Externals */ struct nand_controller *controller; - struct nand_ecc_ctrl ecc; - unsigned long buf_align; - - uint8_t *bbt; - struct nand_bbt_descr *bbt_td; - struct nand_bbt_descr 
*bbt_md; - - struct nand_bbt_descr *badblock_pattern; - void *priv; - - struct { - const struct nand_manufacturer *desc; - void *priv; - } manufacturer; - - int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); - int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len); }; extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; @@ -1192,6 +1206,17 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) return mtd_get_of_node(nand_to_mtd(chip)); } +/** + * nand_get_interface_config - Retrieve the current interface configuration + * of a NAND chip + * @chip: The NAND chip + */ +static inline const struct nand_interface_config * +nand_get_interface_config(struct nand_chip *chip) +{ + return chip->current_interface_config; +} + /* * A helper for defining older NAND chips where the second ID byte fully * defined the chip, including the geometry (chip size, eraseblock size, page @@ -1244,10 +1269,6 @@ static inline struct device_node *nand_get_flash_node(struct nand_chip *chip) * @ecc_step_ds in nand_chip{}, also from the datasheet. * For example, the "4bit ECC for each 512Byte" can be set with * NAND_ECC_INFO(4, 512). - * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND - * reset. Should be deduced from timings described - * in the datasheet. - * */ struct nand_flash_dev { char *name; @@ -1268,7 +1289,6 @@ struct nand_flash_dev { uint16_t strength_ds; uint16_t step_ds; } ecc; - int onfi_timing_mode_default; }; int nand_create_bbt(struct nand_chip *chip); @@ -1321,13 +1341,17 @@ int nand_read_oob_std(struct nand_chip *chip, int page); int nand_get_set_features_notsupp(struct nand_chip *chip, int addr, u8 *subfeature_param); -/* Default read_page_raw implementation */ +/* read_page_raw implementations */ int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required, int page); +int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf, + int oob_required, int page); -/* Default write_page_raw implementation */ +/* write_page_raw implementations */ int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page); +int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf, + int oob_required, int page); /* Reset and initialize a NAND device */ int nand_reset(struct nand_chip *chip, int chipnr); @@ -1356,7 +1380,7 @@ int nand_change_write_column_op(struct nand_chip *chip, unsigned int offset_in_page, const void *buf, unsigned int len, bool force_8bit); int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, - bool force_8bit); + bool force_8bit, bool check_only); int nand_write_data_op(struct nand_chip *chip, const void *buf, unsigned int len, bool force_8bit); @@ -1377,8 +1401,6 @@ void nand_wait_ready(struct nand_chip *chip); * sucessful nand_scan(). 
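With nand_release() removed just below, teardown paths now unregister the MTD device and call nand_cleanup() as two explicit steps. A common migration sketch, assuming a platform driver (not taken from the patch):

        static int foo_remove(struct platform_device *pdev)
        {
                struct nand_chip *chip = platform_get_drvdata(pdev);
                int ret;

                /* the old nand_release() combined these two calls */
                ret = mtd_device_unregister(nand_to_mtd(chip));
                WARN_ON(ret);
                nand_cleanup(chip);
                return 0;
        }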
*/ void nand_cleanup(struct nand_chip *chip); -/* Unregister the MTD device and calls nand_cleanup() */ -void nand_release(struct nand_chip *chip); /* * External helper for controller drivers that have to implement the WAITRDY @@ -1393,6 +1415,10 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, void nand_select_target(struct nand_chip *chip, unsigned int cs); void nand_deselect_target(struct nand_chip *chip); +/* Bitops */ +void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src, + unsigned int src_off, unsigned int nbits); + /** * nand_get_data_buf() - Get the internal page buffer * @chip: NAND chip object diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1e2af0ec1f03..60bac2c0ec45 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -20,6 +20,7 @@ */ /* Flash opcodes. */ +#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_WREN 0x06 /* Write enable */ #define SPINOR_OP_RDSR 0x05 /* Read status register */ #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ @@ -80,7 +81,6 @@ /* Used for SST flashes only. */ #define SPINOR_OP_BP 0x02 /* Byte program */ -#define SPINOR_OP_WRDI 0x04 /* Write disable */ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ /* Used for S3AN flashes only */ @@ -302,7 +302,7 @@ struct spi_nor; * @read: read data from the SPI NOR. * @write: write data to the SPI NOR. * @erase: erase a sector of the SPI NOR at the offset @offs; if - * not provided by the driver, spi-nor will send the erase + * not provided by the driver, SPI NOR will send the erase * opcode via write_reg(). */ struct spi_nor_controller_ops { @@ -327,16 +327,16 @@ struct spi_nor_manufacturer; struct spi_nor_flash_parameter; /** - * struct spi_nor - Structure for defining a the SPI NOR layer - * @mtd: point to a mtd_info structure + * struct spi_nor - Structure for defining the SPI NOR layer + * @mtd: an mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations - * @dev: point to a spi device, or a spi nor controller device. - * @spimem: point to the spi mem device + * @dev: pointer to an SPI device or an SPI NOR controller device + * @spimem: pointer to the SPI memory device * @bouncebuf: bounce buffer used when the buffer passed by the MTD * layer is not DMA-able * @bouncebuf_size: size of the bounce buffer - * @info: spi-nor part JDEC MFR id and other info - * @manufacturer: spi-nor manufacturer + * @info: SPI NOR part JEDEC MFR ID and other info + * @manufacturer: SPI NOR manufacturer * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -344,17 +344,17 @@ struct spi_nor_flash_parameter; * @read_dummy: the dummy needed by the read operation * @program_opcode: the program opcode * @sst_write_second: used by the SST write operation - * @flags: flag options for the current SPI-NOR (SNOR_F_*) + * @flags: flag options for the current SPI NOR (SNOR_F_*) * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations - * @reg_proto the SPI protocol for read_reg/write_reg/erase operations + * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations * @controller_ops: SPI NOR controller driver specific operations. - * @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings. + * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. 
* The structure includes legacy flash parameters and * settings that can be overwritten by the spi_nor_fixups * hooks, or dynamically when parsing the SFDP tables. * @dirmap: pointers to struct spi_mem_dirmap_desc for reads/writes. - * @priv: the private data + * @priv: pointer to the private data */ struct spi_nor { struct mtd_info mtd; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 1077c45721ff..7b78c4ba9b3e 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -309,7 +309,7 @@ struct spinand_info { struct spinand_devid devid; u32 flags; struct nand_memory_organization memorg; - struct nand_ecc_req eccreq; + struct nand_ecc_props eccreq; struct spinand_ecc_info eccinfo; struct { const struct spinand_op_variants *read_cache; diff --git a/include/linux/mutex.h b/include/linux/mutex.h index ae197cc00cc8..dcd185cbfe79 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -65,6 +65,17 @@ struct mutex { #endif }; +struct ww_class; +struct ww_acquire_ctx; + +struct ww_mutex { + struct mutex base; + struct ww_acquire_ctx *ctx; +#ifdef CONFIG_DEBUG_MUTEXES + struct ww_class *ww_class; +#endif +}; + /* * This is the control structure for tasks blocked on mutex, * which resides on the blocked task's kernel stack: diff --git a/include/linux/net.h b/include/linux/net.h index 6451425e828f..ae713c851342 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -21,6 +21,8 @@ #include <linux/rcupdate.h> #include <linux/once.h> #include <linux/fs.h> +#include <linux/mm.h> +#include <linux/sockptr.h> #include <uapi/linux/net.h> @@ -162,15 +164,10 @@ struct proto_ops { int (*listen) (struct socket *sock, int len); int (*shutdown) (struct socket *sock, int flags); int (*setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); + int optname, sockptr_t optval, + unsigned int optlen); int (*getsockopt)(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen); -#ifdef CONFIG_COMPAT - int (*compat_setsockopt)(struct socket *sock, int level, - int optname, char __user *optval, unsigned int optlen); - int (*compat_getsockopt)(struct socket *sock, int level, - int optname, char __user *optval, int __user *optlen); -#endif void (*show_fdinfo)(struct seq_file *m, struct socket *sock); int (*sendmsg) (struct socket *sock, struct msghdr *m, size_t total_len); @@ -264,7 +261,8 @@ do { \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #define net_info_ratelimited(fmt, ...) \ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define net_dbg_ratelimited(fmt, ...) \ do { \ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ @@ -289,6 +287,21 @@ do { \ #define net_get_random_once_wait(buf, nbytes) \ get_random_once_wait((buf), (nbytes)) +/* + * E.g. XFS meta- & log-data is in slab pages, or bcache meta + * data pages, or other high order pages allocated by + * __get_free_pages() without __GFP_COMP, which have a page_count + * of 0 and/or have PageSlab() set. We cannot use send_page for + * those, as that does get_page(); put_page(); and would cause + * either a VM_BUG directly, or __page_cache_release a page that + * would actually still be referenced by someone, leading to some + * obscure delayed Oops somewhere else. 
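A hedged usage sketch for the helper defined right below: gate kernel_sendpage() on it and fall back to a plain kernel_sendmsg() copy otherwise (foo_* is a placeholder; assumes a lowmem page so page_address() is valid):

        static int foo_send_page(struct socket *sock, struct page *page,
                                 int offset, size_t size, int flags)
        {
                struct kvec iov = {
                        .iov_base = page_address(page) + offset,
                        .iov_len  = size,
                };
                struct msghdr msg = { .msg_flags = flags };

                if (sendpage_ok(page))
                        return kernel_sendpage(sock, page, offset, size, flags);

                /* slab or zero-refcount pages: use the copying path */
                return kernel_sendmsg(sock, &msg, &iov, 1, size);
        }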
+ */ +static inline bool sendpage_ok(struct page *page) +{ + return !PageSlab(page) && page_count(page) >= 1; +} + int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, @@ -303,10 +316,6 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, int flags); int kernel_getsockname(struct socket *sock, struct sockaddr *addr); int kernel_getpeername(struct socket *sock, struct sockaddr *addr); -int kernel_getsockopt(struct socket *sock, int level, int optname, char *optval, - int *optlen); -int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, - unsigned int optlen); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, diff --git a/include/linux/net/intel/i40e_client.h b/include/linux/net/intel/i40e_client.h new file mode 100644 index 000000000000..f41387a8969f --- /dev/null +++ b/include/linux/net/intel/i40e_client.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_CLIENT_H_ +#define _I40E_CLIENT_H_ + +#define I40E_CLIENT_STR_LENGTH 10 + +/* Client interface version should be updated anytime there is a change in the + * existing APIs or data structures. + */ +#define I40E_CLIENT_VERSION_MAJOR 0 +#define I40E_CLIENT_VERSION_MINOR 01 +#define I40E_CLIENT_VERSION_BUILD 00 +#define I40E_CLIENT_VERSION_STR \ + __stringify(I40E_CLIENT_VERSION_MAJOR) "." \ + __stringify(I40E_CLIENT_VERSION_MINOR) "." \ + __stringify(I40E_CLIENT_VERSION_BUILD) + +struct i40e_client_version { + u8 major; + u8 minor; + u8 build; + u8 rsvd; +}; + +enum i40e_client_state { + __I40E_CLIENT_NULL, + __I40E_CLIENT_REGISTERED +}; + +enum i40e_client_instance_state { + __I40E_CLIENT_INSTANCE_NONE, + __I40E_CLIENT_INSTANCE_OPENED, +}; + +struct i40e_ops; +struct i40e_client; + +#define I40E_QUEUE_INVALID_IDX 0xFFFF + +struct i40e_qv_info { + u32 v_idx; /* msix_vector */ + u16 ceq_idx; + u16 aeq_idx; + u8 itr_idx; +}; + +struct i40e_qvlist_info { + u32 num_vectors; + struct i40e_qv_info qv_info[1]; +}; + + +/* set of LAN parameters useful for clients managed by LAN */ + +/* Struct to hold per priority info */ +struct i40e_prio_qos_params { + u16 qs_handle; /* qs handle for prio */ + u8 tc; /* TC mapped to prio */ + u8 reserved; +}; + +#define I40E_CLIENT_MAX_USER_PRIORITY 8 +/* Struct to hold Client QoS */ +struct i40e_qos_params { + struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY]; +}; + +struct i40e_params { + struct i40e_qos_params qos; + u16 mtu; +}; + +/* Structure to hold Lan device info for a client device */ +struct i40e_info { + struct i40e_client_version version; + u8 lanmac[6]; + struct net_device *netdev; + struct pci_dev *pcidev; + u8 __iomem *hw_addr; + u8 fid; /* function id, PF id or VF id */ +#define I40E_CLIENT_FTYPE_PF 0 + u8 ftype; /* function type, PF or VF */ + void *pf; + + /* All L2 params that could change during the life span of the PF + * and needs to be communicated to the client when they change + */ + struct i40e_qvlist_info *qvlist_info; + struct i40e_params params; + struct i40e_ops *ops; + + u16 msix_count; /* number of msix vectors*/ + /* Array down below will be dynamically allocated based on msix_count */ + struct msix_entry *msix_entries; + u16 itr_index; /* Which ITR index the PE driver is suppose to use */ + u16 
fw_maj_ver; /* firmware major version */ + u16 fw_min_ver; /* firmware minor version */ + u32 fw_build; /* firmware build number */ +}; + +#define I40E_CLIENT_RESET_LEVEL_PF 1 +#define I40E_CLIENT_RESET_LEVEL_CORE 2 +#define I40E_CLIENT_VSI_FLAG_TCP_ENABLE BIT(1) + +struct i40e_ops { + /* setup_q_vector_list enables queues with a particular vector */ + int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client, + struct i40e_qvlist_info *qv_info); + + int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client, + u32 vf_id, u8 *msg, u16 len); + + /* If the PE Engine is unresponsive, RDMA driver can request a reset. + * The level helps determine the level of reset being requested. + */ + void (*request_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 level); + + /* API for the RDMA driver to set certain VSI flags that control + * PE Engine. + */ + int (*update_vsi_ctxt)(struct i40e_info *ldev, + struct i40e_client *client, + bool is_vf, u32 vf_id, + u32 flag, u32 valid_flag); +}; + +struct i40e_client_ops { + /* Should be called from register_client() or whenever PF is ready + * to create a specific client instance. + */ + int (*open)(struct i40e_info *ldev, struct i40e_client *client); + + /* Should be called when netdev is unavailable or when unregister + * call comes in. If the close is happenening due to a reset being + * triggered set the reset bit to true. + */ + void (*close)(struct i40e_info *ldev, struct i40e_client *client, + bool reset); + + /* called when some l2 managed parameters changes - mtu */ + void (*l2_param_change)(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_params *params); + + int (*virtchnl_receive)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id, + u8 *msg, u16 len); + + /* called when a VF is reset by the PF */ + void (*vf_reset)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); + + /* called when the number of VFs changes */ + void (*vf_enable)(struct i40e_info *ldev, + struct i40e_client *client, u32 num_vfs); + + /* returns true if VF is capable of specified offload */ + int (*vf_capable)(struct i40e_info *ldev, + struct i40e_client *client, u32 vf_id); +}; + +/* Client device */ +struct i40e_client_instance { + struct list_head list; + struct i40e_info lan_info; + struct i40e_client *client; + unsigned long state; +}; + +struct i40e_client { + struct list_head list; /* list of registered clients */ + char name[I40E_CLIENT_STR_LENGTH]; + struct i40e_client_version version; + unsigned long state; /* client state */ + atomic_t ref_cnt; /* Count of all the client devices of this kind */ + u32 flags; + u8 type; +#define I40E_CLIENT_IWARP 0 + const struct i40e_client_ops *ops; /* client ops provided by the client */ +}; + +static inline bool i40e_client_is_registered(struct i40e_client *client) +{ + return test_bit(__I40E_CLIENT_REGISTERED, &client->state); +} + +/* used by clients */ +int i40e_register_client(struct i40e_client *client); +int i40e_unregister_client(struct i40e_client *client); + +#endif /* _I40E_CLIENT_H_ */ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 9d53c5ad272c..0b17c4322b09 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -89,7 +89,7 @@ enum { * Add your fresh new feature above and remember to update * netdev_features_strings[] in net/core/ethtool.c and maybe * some feature mask #defines below. Please also describe it - * in Documentation/networking/netdev-features.txt. 
+ * in Documentation/networking/netdev-features.rst. */ /**/NETDEV_FEATURE_COUNT @@ -193,7 +193,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) -/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be +/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- * this would be contradictory */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 130a668049ab..18dec08439f9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -53,6 +53,7 @@ struct netpoll_info; struct device; struct phy_device; struct dsa_port; +struct ip_tunnel_parm; struct macsec_context; struct macsec_ops; @@ -64,6 +65,8 @@ struct wpan_dev; struct mpls_dev; /* UDP Tunnel offloads */ struct udp_tunnel_info; +struct udp_tunnel_nic_info; +struct udp_tunnel_nic; struct bpf_prog; struct xdp_buff; @@ -288,6 +291,7 @@ enum netdev_state_t { __LINK_STATE_NOCARRIER, __LINK_STATE_LINKWATCH_PENDING, __LINK_STATE_DORMANT, + __LINK_STATE_TESTING, }; @@ -328,6 +332,7 @@ struct napi_struct { unsigned long state; int weight; + int defer_hard_irqs_count; unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL @@ -871,8 +876,6 @@ enum bpf_netdev_command { */ XDP_SETUP_PROG, XDP_SETUP_PROG_HW, - XDP_QUERY_PROG, - XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. */ BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE, @@ -883,6 +886,19 @@ struct bpf_prog_offload_ops; struct netlink_ext_ack; struct xdp_umem; struct xdp_dev_bulk_queue; +struct bpf_xdp_link; + +enum bpf_xdp_mode { + XDP_MODE_SKB = 0, + XDP_MODE_DRV = 1, + XDP_MODE_HW = 2, + __MAX_XDP_MODE +}; + +struct bpf_xdp_entity { + struct bpf_prog *prog; + struct bpf_xdp_link *link; +}; struct netdev_bpf { enum bpf_netdev_command command; @@ -893,12 +909,6 @@ struct netdev_bpf { struct bpf_prog *prog; struct netlink_ext_ack *extack; }; - /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */ - struct { - u32 prog_id; - /* flags with which program was installed */ - u32 prog_flags; - }; /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ struct { struct bpf_offloaded_map *offmap; @@ -1146,6 +1156,12 @@ struct netdev_net_notifier { * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); * Called to release previously enslaved netdev. * + * struct net_device *(*ndo_get_xmit_slave)(struct net_device *dev, + * struct sk_buff *skb, + * bool all_slaves); + * Get the xmit slave of master device. If all_slaves is true, function + * assume all the slaves can transmit. + * * Feature/offload setting functions. * netdev_features_t (*ndo_fix_features)(struct net_device *dev, * netdev_features_t features); @@ -1266,6 +1282,9 @@ struct netdev_net_notifier { * Get devlink port instance associated with a given netdev. * Called with a reference on the netdevice and devlink locks only, * rtnl_lock is not held. + * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, + * int cmd); + * Add, change, delete or get information on an IPv4 tunnel. 
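A hedged outline of the new ndo_get_xmit_slave() hook documented above, as a master (e.g. bonding-style) driver might implement it; foo_pick_slave() is hypothetical:

        static struct net_device *foo_get_xmit_slave(struct net_device *dev,
                                                     struct sk_buff *skb,
                                                     bool all_slaves)
        {
                /* when all_slaves is true, treat every slave as able to
                 * transmit, not just the currently active one */
                return foo_pick_slave(dev, skb, all_slaves);
        }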
*/ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1389,6 +1408,9 @@ struct net_device_ops { struct netlink_ext_ack *extack); int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); + struct net_device* (*ndo_get_xmit_slave)(struct net_device *dev, + struct sk_buff *skb, + bool all_slaves); netdev_features_t (*ndo_fix_features)(struct net_device *dev, netdev_features_t features); int (*ndo_set_features)(struct net_device *dev, @@ -1468,6 +1490,8 @@ struct net_device_ops { int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); + int (*ndo_tunnel_ctl)(struct net_device *dev, + struct ip_tunnel_parm *p, int cmd); }; /** @@ -1725,6 +1749,8 @@ enum netdev_priv_flags { * @real_num_rx_queues: Number of RX queues currently active in device * @xdp_prog: XDP sockets filter program pointer * @gro_flush_timeout: timeout for GRO layer in NAPI + * @napi_defer_hard_irqs: If not zero, provides a counter that would + * allow to avoid NIC hard IRQ, on busy queues. * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one @@ -1758,6 +1784,7 @@ enum netdev_priv_flags { * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers * + * @proto_down_reason: reason a netdev interface is held down * @pcpu_refcnt: Number of references to this device * @todo_list: Delayed register/unregister * @link_watch_list: XXX: need comments on this one @@ -1803,13 +1830,9 @@ enum netdev_priv_flags { * @phydev: Physical device may attach itself * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. - * @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock - * spinlock - * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount - * @qdisc_xmit_lock_key: lockdep class annotating - * netdev_queue->_xmit_lock spinlock - * @addr_list_lock_key: lockdep class annotating - * net_device->addr_list_lock spinlock + * + * @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock + * @qdisc_running_key: lockdep class annotating Qdisc->running seqcount * * @proto_down: protocol port state information can be sent to the * switch driver and used to set the phys state of the @@ -1823,6 +1846,16 @@ enum netdev_priv_flags { * * @macsec_ops: MACsec offloading ops * + * @udp_tunnel_nic_info: static structure describing the UDP tunnel + * offload capabilities of the device + * @udp_tunnel_nic: UDP tunnel offload state + * @xdp_state: stores info on attached XDP BPF programs + * + * @nested_level: Used as as a parameter of spin_lock_nested() of + * dev->addr_list_lock. + * @unlink_list: As netif_addr_lock() can be called recursively, + * keep a list of interfaces to be deleted. + * * FIXME: cleanup struct net_device such that network protocol info * moves out. 
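With the per-device lock_class_key members replaced by pointers (see @qdisc_tx_busylock above), drivers annotate their locks through the netdev_lockdep_set_classes() macro defined further below. A hedged sketch of the call site (foo_setup is a placeholder):

        static void foo_setup(struct net_device *dev)
        {
                /* one set of static keys per driver, shared by all of
                 * its net devices */
                netdev_lockdep_set_classes(dev);
        }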
*/ @@ -1927,6 +1960,7 @@ struct net_device { unsigned short type; unsigned short hard_header_len; unsigned char min_header_len; + unsigned char name_assign_type; unsigned short needed_headroom; unsigned short needed_tailroom; @@ -1937,12 +1971,12 @@ struct net_device { unsigned char addr_len; unsigned char upper_level; unsigned char lower_level; + unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; spinlock_t addr_list_lock; - unsigned char name_assign_type; - bool uc_promisc; + struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; struct netdev_hw_addr_list dev_addrs; @@ -1950,8 +1984,15 @@ struct net_device { #ifdef CONFIG_SYSFS struct kset *queues_kset; #endif +#ifdef CONFIG_LOCKDEP + struct list_head unlink_list; +#endif unsigned int promiscuity; unsigned int allmulti; + bool uc_promisc; +#ifdef CONFIG_LOCKDEP + unsigned char nested_level; +#endif /* Protocol-specific pointers */ @@ -1994,6 +2035,7 @@ struct net_device { struct bpf_prog __rcu *xdp_prog; unsigned long gro_flush_timeout; + int napi_defer_hard_irqs; rx_handler_func_t __rcu *rx_handler; void __rcu *rx_handler_data; @@ -2038,6 +2080,8 @@ struct net_device { struct timer_list watchdog_timer; int watchdog_timeo; + u32 proto_down_reason; + struct list_head todo_list; int __percpu *pcpu_refcnt; @@ -2109,10 +2153,8 @@ struct net_device { #endif struct phy_device *phydev; struct sfp_bus *sfp_bus; - struct lock_class_key qdisc_tx_busylock_key; - struct lock_class_key qdisc_running_key; - struct lock_class_key qdisc_xmit_lock_key; - struct lock_class_key addr_list_lock_key; + struct lock_class_key *qdisc_tx_busylock; + struct lock_class_key *qdisc_running_key; bool proto_down; unsigned wol_enabled:1; @@ -2122,6 +2164,11 @@ struct net_device { /* MACsec management functions */ const struct macsec_ops *macsec_ops; #endif + const struct udp_tunnel_nic_info *udp_tunnel_nic_info; + struct udp_tunnel_nic *udp_tunnel_nic; + + /* protected by rtnl_lock */ + struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2197,6 +2244,23 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, f(dev, &dev->_tx[i], arg); } +#define netdev_lockdep_set_classes(dev) \ +{ \ + static struct lock_class_key qdisc_tx_busylock_key; \ + static struct lock_class_key qdisc_running_key; \ + static struct lock_class_key qdisc_xmit_lock_key; \ + static struct lock_class_key dev_addr_list_lock_key; \ + unsigned int i; \ + \ + (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key; \ + (dev)->qdisc_running_key = &qdisc_running_key; \ + lockdep_set_class(&(dev)->addr_list_lock, \ + &dev_addr_list_lock_key); \ + for (i = 0; i < (dev)->num_tx_queues; i++) \ + lockdep_set_class(&(dev)->_tx[i]._xmit_lock, \ + &qdisc_xmit_lock_key); \ +} + u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); struct netdev_queue *netdev_core_pick_tx(struct net_device *dev, @@ -2731,6 +2795,9 @@ void netdev_freemem(struct net_device *dev); void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); +struct net_device *netdev_get_xmit_slave(struct net_device *dev, + struct sk_buff *skb, + bool all_slaves); struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); @@ -3125,7 +3192,7 @@ static inline int dev_recursion_level(void) return this_cpu_read(softnet_data.xmit.recursion); } -#define 
XMIT_RECURSION_LIMIT 10 +#define XMIT_RECURSION_LIMIT 8 static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > @@ -3221,7 +3288,6 @@ static inline void netif_stop_queue(struct net_device *dev) } void netif_tx_stop_all_queues(struct net_device *dev); -void netdev_update_lockdep_key(struct net_device *dev); static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) { @@ -3771,6 +3837,8 @@ int dev_get_port_parent_id(struct net_device *dev, bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); int dev_change_proto_down(struct net_device *dev, bool proto_down); int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); +void dev_change_proto_down_reason(struct net_device *dev, unsigned long mask, + u32 value); struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); @@ -3778,8 +3846,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); -u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, - enum bpf_netdev_command cmd); +int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); + int xdp_umem_query(struct net_device *dev, u16 queue_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); @@ -3908,6 +3977,46 @@ static inline bool netif_dormant(const struct net_device *dev) /** + * netif_testing_on - mark device as under test. + * @dev: network device + * + * Mark device as under test (as per RFC2863). + * + * The testing state indicates that some test(s) must be performed on + * the interface. After completion, of the test, the interface state + * will change to up, dormant, or down, as appropriate. + */ +static inline void netif_testing_on(struct net_device *dev) +{ + if (!test_and_set_bit(__LINK_STATE_TESTING, &dev->state)) + linkwatch_fire_event(dev); +} + +/** + * netif_testing_off - set device as not under test. + * @dev: network device + * + * Device is not in testing state. 
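A hedged sketch of a driver bracketing its ethtool self-test with the new RFC2863 testing-state helpers (foo_* names are placeholders, not from the patch):

        static void foo_self_test(struct net_device *dev,
                                  struct ethtool_test *etest, u64 *data)
        {
                netif_testing_on(dev);
                /* run the hardware tests and fill in data[] */
                netif_testing_off(dev);
        }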
+ */ +static inline void netif_testing_off(struct net_device *dev) +{ + if (test_and_clear_bit(__LINK_STATE_TESTING, &dev->state)) + linkwatch_fire_event(dev); +} + +/** + * netif_testing - test if device is under test + * @dev: network device + * + * Check if device is under test + */ +static inline bool netif_testing(const struct net_device *dev) +{ + return test_bit(__LINK_STATE_TESTING, &dev->state); +} + + +/** * netif_oper_up - test if device is operational * @dev: network device * @@ -4164,12 +4273,23 @@ static inline void netif_tx_disable(struct net_device *dev) static inline void netif_addr_lock(struct net_device *dev) { - spin_lock(&dev->addr_list_lock); + unsigned char nest_level = 0; + +#ifdef CONFIG_LOCKDEP + nest_level = dev->nested_level; +#endif + spin_lock_nested(&dev->addr_list_lock, nest_level); } static inline void netif_addr_lock_bh(struct net_device *dev) { - spin_lock_bh(&dev->addr_list_lock); + unsigned char nest_level = 0; + +#ifdef CONFIG_LOCKDEP + nest_level = dev->nested_level; +#endif + local_bh_disable(); + spin_lock_nested(&dev->addr_list_lock, nest_level); } static inline void netif_addr_unlock(struct net_device *dev) @@ -4208,6 +4328,8 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, int register_netdev(struct net_device *dev); void unregister_netdev(struct net_device *dev); +int devm_register_netdev(struct device *dev, struct net_device *ndev); + /* General hardware address lists handling functions */ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, struct netdev_hw_addr_list *from_list, int addr_len); @@ -4352,12 +4474,38 @@ extern int dev_rx_weight; extern int dev_tx_weight; extern int gro_normal_batch; +enum { + NESTED_SYNC_IMM_BIT, + NESTED_SYNC_TODO_BIT, +}; + +#define __NESTED_SYNC_BIT(bit) ((u32)1 << (bit)) +#define __NESTED_SYNC(name) __NESTED_SYNC_BIT(NESTED_SYNC_ ## name ## _BIT) + +#define NESTED_SYNC_IMM __NESTED_SYNC(IMM) +#define NESTED_SYNC_TODO __NESTED_SYNC(TODO) + +struct netdev_nested_priv { + unsigned char flags; + void *data; +}; + bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev); struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, struct list_head **iter); +#ifdef CONFIG_LOCKDEP +static LIST_HEAD(net_unlink_list); + +static inline void net_unlink_todo(struct net_device *dev) +{ + if (list_empty(&dev->unlink_list)) + list_add_tail(&dev->unlink_list, &net_unlink_list); +} +#endif + /* iterate through upper list, must be called under RCU read lock */ #define netdev_for_each_upper_dev_rcu(dev, updev, iter) \ for (iter = &(dev)->adj_list.upper, \ @@ -4367,8 +4515,8 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, int netdev_walk_all_upper_dev_rcu(struct net_device *dev, int (*fn)(struct net_device *upper_dev, - void *data), - void *data); + struct netdev_nested_priv *priv), + struct netdev_nested_priv *priv); bool netdev_has_upper_dev_all_rcu(struct net_device *dev, struct net_device *upper_dev); @@ -4405,12 +4553,12 @@ struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev, struct list_head **iter); int netdev_walk_all_lower_dev(struct net_device *dev, int (*fn)(struct net_device *lower_dev, - void *data), - void *data); + struct netdev_nested_priv *priv), + struct netdev_nested_priv *priv); int netdev_walk_all_lower_dev_rcu(struct net_device *dev, int (*fn)(struct net_device *lower_dev, - void *data), - void 
*data); + struct netdev_nested_priv *priv), + struct netdev_nested_priv *priv); void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); @@ -4741,6 +4889,11 @@ static inline bool netif_is_ovs_port(const struct net_device *dev) return dev->priv_flags & IFF_OVS_DATAPATH; } +static inline bool netif_is_any_bridge_port(const struct net_device *dev) +{ + return netif_is_bridge_port(dev) || netif_is_ovs_port(dev); +} + static inline bool netif_is_team_master(const struct net_device *dev) { return dev->priv_flags & IFF_TEAM; @@ -4868,7 +5021,8 @@ do { \ #define MODULE_ALIAS_NETDEV(device) \ MODULE_ALIAS("netdev-" device) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define netdev_dbg(__dev, format, args...) \ do { \ dynamic_netdev_dbg(__dev, format, ##args); \ @@ -4938,7 +5092,8 @@ do { \ #define netif_info(priv, type, dev, fmt, args...) \ netif_level(info, priv, type, dev, fmt, ##args) -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define netif_dbg(priv, type, netdev, format, args...) \ do { \ if (netif_msg_##type(priv)) \ diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index eb312e7ca36e..0101747de549 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -13,6 +13,7 @@ #include <linux/static_key.h> #include <linux/netfilter_defs.h> #include <linux/netdevice.h> +#include <linux/sockptr.h> #include <net/net_namespace.h> static inline int NF_DROP_GETERR(int verdict) @@ -163,18 +164,11 @@ struct nf_sockopt_ops { /* Non-inclusive ranges: use 0/0/NULL to never get called. 
*/ int set_optmin; int set_optmax; - int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len); -#ifdef CONFIG_COMPAT - int (*compat_set)(struct sock *sk, int optval, - void __user *user, unsigned int len); -#endif + int (*set)(struct sock *sk, int optval, sockptr_t arg, + unsigned int len); int get_optmin; int get_optmax; int (*get)(struct sock *sk, int optval, void __user *user, int *len); -#ifdef CONFIG_COMPAT - int (*compat_get)(struct sock *sk, int optval, - void __user *user, int *len); -#endif /* Use the module struct to lock set/get code in place */ struct module *owner; }; @@ -346,16 +340,10 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, } /* Call setsockopt() */ -int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, +int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt, unsigned int len); int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt, int *len); -#ifdef CONFIG_COMPAT -int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, - char __user *opt, unsigned int len); -int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, - char __user *opt, int *len); -#endif struct flowi; struct nf_queue_entry; diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h index 9a33f171aa82..625f491b95de 100644 --- a/include/linux/netfilter/nf_conntrack_sctp.h +++ b/include/linux/netfilter/nf_conntrack_sctp.h @@ -9,6 +9,8 @@ struct ip_ct_sctp { enum sctp_conntrack state; __be32 vtag[IP_CT_DIR_MAX]; + u8 last_dir; + u8 flags; }; #endif /* _NF_CONNTRACK_SCTP_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 851425c3178f..89016d08f6a2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group); int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid, unsigned int group, int echo, gfp_t flags); int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error); -int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid, - int flags); +int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid); static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type) { diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 5da88451853b..5deb099d156d 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -301,8 +301,8 @@ int xt_target_to_user(const struct xt_entry_target *t, int xt_data_to_user(void __user *dst, const void *src, int usersize, int size, int aligned_size); -void *xt_copy_counters_from_user(const void __user *user, unsigned int len, - struct xt_counters_info *info, bool compat); +void *xt_copy_counters(sockptr_t arg, unsigned int len, + struct xt_counters_info *info); struct xt_counters *xt_counters_alloc(unsigned int counters); struct xt_table *xt_register_table(struct net *net, diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index b394bd4f68a3..c4676d6feeff 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h @@ -25,6 +25,12 @@ int ipt_register_table(struct net *net, const struct xt_table *table, const struct ipt_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res); + +void ipt_unregister_table_pre_exit(struct net *net, struct 
xt_table *table, + const struct nf_hook_ops *ops); + +void ipt_unregister_table_exit(struct net *net, struct xt_table *table); + void ipt_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index aac42c28fe62..9b67394471e1 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -58,7 +58,6 @@ struct nf_ipv6_ops { int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) - int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user); int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, @@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, #include <net/netfilter/ipv6/nf_defrag_ipv6.h> -static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb, - u32 user) -{ -#if IS_MODULE(CONFIG_IPV6) - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); - - if (!v6_ops) - return 1; - - return v6_ops->br_defrag(net, skb, user); -#elif IS_BUILTIN(CONFIG_IPV6) - return nf_ct_frag6_gather(net, skb, user); -#else - return 1; -#endif -} - int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 8225f7821a29..1547d5f9ae06 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h @@ -29,6 +29,9 @@ int ip6t_register_table(struct net *net, const struct xt_table *table, const struct nf_hook_ops *ops, struct xt_table **res); void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops); +void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table, + const struct nf_hook_ops *ops); +void ip6t_unregister_table_exit(struct net *net, struct xt_table *table); extern unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table); diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 676f1ff161a9..e6a2d72e0dc7 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -63,15 +63,7 @@ int netpoll_setup(struct netpoll *np); void __netpoll_cleanup(struct netpoll *np); void __netpoll_free(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); -void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, - struct net_device *dev); -static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) -{ - unsigned long flags; - local_irq_save(flags); - netpoll_send_skb_on_dev(np, skb, np->dev); - local_irq_restore(flags); -} +netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); #ifdef CONFIG_NETPOLL static inline void *netpoll_poll_lock(struct napi_struct *napi) @@ -110,9 +102,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) static inline void netpoll_poll_unlock(void *have) { } -static inline void netpoll_netdev_init(struct net_device *dev) -{ -} static inline bool netpoll_tx_running(struct net_device *dev) { return false; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 82d8fb422092..b8360be141da 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -38,7 +38,7 @@ struct nfs4_ace { struct nfs4_acl { uint32_t naces; - struct 
nfs4_ace aces[0]; + struct nfs4_ace aces[]; }; #define NFS4_MAXLABELLEN 2048 @@ -150,6 +150,12 @@ enum nfs_opnum4 { OP_WRITE_SAME = 70, OP_CLONE = 71, + /* xattr support (RFC8726) */ + OP_GETXATTR = 72, + OP_SETXATTR = 73, + OP_LISTXATTRS = 74, + OP_REMOVEXATTR = 75, + OP_ILLEGAL = 10044, }; @@ -159,7 +165,7 @@ Needs to be updated if more operations are defined in future.*/ #define FIRST_NFS4_OP OP_ACCESS #define LAST_NFS40_OP OP_RELEASE_LOCKOWNER #define LAST_NFS41_OP OP_RECLAIM_COMPLETE -#define LAST_NFS42_OP OP_CLONE +#define LAST_NFS42_OP OP_REMOVEXATTR #define LAST_NFS4_OP LAST_NFS42_OP enum nfsstat4 { @@ -280,6 +286,10 @@ enum nfsstat4 { NFS4ERR_WRONG_LFS = 10092, NFS4ERR_BADLABEL = 10093, NFS4ERR_OFFLOAD_NO_REQS = 10094, + + /* xattr (RFC8276) */ + NFS4ERR_NOXATTR = 10095, + NFS4ERR_XATTR2BIG = 10096, }; static inline bool seqid_mutating_err(u32 err) @@ -295,7 +305,7 @@ static inline bool seqid_mutating_err(u32 err) case NFS4ERR_NOFILEHANDLE: case NFS4ERR_MOVED: return false; - }; + } return true; } @@ -452,6 +462,7 @@ enum change_attr_type4 { #define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15) #define FATTR4_WORD2_SECURITY_LABEL (1UL << 16) #define FATTR4_WORD2_MODE_UMASK (1UL << 17) +#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18) /* MDS threshold bitmap bits */ #define THRESHOLD_RD (1UL << 0) @@ -542,6 +553,11 @@ enum { NFSPROC4_CLNT_LAYOUTERROR, NFSPROC4_CLNT_COPY_NOTIFY, + + NFSPROC4_CLNT_GETXATTR, + NFSPROC4_CLNT_SETXATTR, + NFSPROC4_CLNT_LISTXATTRS, + NFSPROC4_CLNT_REMOVEXATTR, }; /* nfs41 types */ @@ -700,4 +716,13 @@ struct nl4_server { struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */ } u; }; + +/* + * Options for setxattr. These match the flags for setxattr(2). + */ +enum nfs4_setxattr_options { + SETXATTR4_EITHER = 0, + SETXATTR4_CREATE = 1, + SETXATTR4_REPLACE = 2, +}; #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 73eda45f1cfd..a2c6455ea3fa 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -102,6 +102,8 @@ struct nfs_delegation; struct posix_acl; +struct nfs4_xattr_cache; + /* * nfs fs inode data in memory */ @@ -188,6 +190,10 @@ struct nfs_inode { struct fscache_cookie *fscache; #endif struct inode vfs_inode; + +#ifdef CONFIG_NFS_V4_2 + struct nfs4_xattr_cache *xattr_cache; +#endif }; struct nfs4_copy_state { @@ -212,6 +218,9 @@ struct nfs4_copy_state { #define NFS_ACCESS_EXTEND 0x0008 #define NFS_ACCESS_DELETE 0x0010 #define NFS_ACCESS_EXECUTE 0x0020 +#define NFS_ACCESS_XAREAD 0x0040 +#define NFS_ACCESS_XAWRITE 0x0080 +#define NFS_ACCESS_XALIST 0x0100 /* * Cache validity bit flags @@ -230,6 +239,8 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_OTHER BIT(12) /* other attrs are invalid */ #define NFS_INO_DATA_INVAL_DEFER \ BIT(13) /* Deferred cache invalidation */ +#define NFS_INO_INVALID_BLOCKS BIT(14) /* cached blocks are invalid */ +#define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ @@ -489,6 +500,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); +extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, + bool may_block); /* * linux/fs/nfs/symlink.c diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 465fa98258a3..7eae72a8762e 100644 --- 
a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -163,6 +163,11 @@ struct nfs_server { unsigned int dtsize; /* readdir size */ unsigned short port; /* "port=" setting */ unsigned int bsize; /* server block size */ +#ifdef CONFIG_NFS_V4_2 + unsigned int gxasize; /* getxattr size */ + unsigned int sxasize; /* setxattr size */ + unsigned int lxasize; /* listxattr size */ +#endif unsigned int acregmin; /* attr cache timeouts */ unsigned int acregmax; unsigned int acdirmin; @@ -281,5 +286,6 @@ struct nfs_server { #define NFS_CAP_OFFLOAD_CANCEL (1U << 25) #define NFS_CAP_LAYOUTERROR (1U << 26) #define NFS_CAP_COPY_NOTIFY (1U << 27) +#define NFS_CAP_XATTR (1U << 28) #endif diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index e5f3e7d8d3d5..69cb46f7b8d2 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -150,6 +150,7 @@ struct nfs_fsinfo { __u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */ __u32 blksize; /* preferred pnfs io block size */ __u32 clone_blksize; /* granularity of a CLONE operation */ + __u32 xattr_support; /* User xattrs supported */ }; struct nfs_fsstat { @@ -1227,7 +1228,7 @@ struct nfs4_secinfo4 { struct nfs4_secinfo_flavors { unsigned int num_flavors; - struct nfs4_secinfo4 flavors[0]; + struct nfs4_secinfo4 flavors[]; }; struct nfs4_secinfo_arg { @@ -1497,7 +1498,64 @@ struct nfs42_seek_res { u32 sr_eof; u64 sr_offset; }; -#endif + +struct nfs42_setxattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; + u32 xattr_flags; + size_t xattr_len; + struct page **xattr_pages; +}; + +struct nfs42_setxattrres { + struct nfs4_sequence_res seq_res; + struct nfs4_change_info cinfo; +}; + +struct nfs42_getxattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; + size_t xattr_len; + struct page **xattr_pages; +}; + +struct nfs42_getxattrres { + struct nfs4_sequence_res seq_res; + size_t xattr_len; +}; + +struct nfs42_listxattrsargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + u32 count; + u64 cookie; + struct page **xattr_pages; +}; + +struct nfs42_listxattrsres { + struct nfs4_sequence_res seq_res; + struct page *scratch; + void *xattr_buf; + size_t xattr_len; + u64 cookie; + bool eof; + size_t copied; +}; + +struct nfs42_removexattrargs { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + const char *xattr_name; +}; + +struct nfs42_removexattrres { + struct nfs4_sequence_res seq_res; + struct nfs4_change_info cinfo; +}; + +#endif /* CONFIG_NFS_V4_2 */ struct nfs_page; @@ -1553,8 +1611,8 @@ struct nfs_pgio_header { __u64 mds_offset; /* Filelayout dense stripe */ struct nfs_page_array page_array; struct nfs_client *ds_clp; /* pNFS data server */ - int ds_commit_idx; /* ds index if ds_clp is set */ - int pgio_mirror_idx;/* mirror index in pgio layer */ + u32 ds_commit_idx; /* ds index if ds_clp is set */ + u32 pgio_mirror_idx;/* mirror index in pgio layer */ }; struct nfs_mds_commit_info { diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 9003e29cde46..750c7f395ca9 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -202,16 +202,11 @@ static inline void watchdog_update_hrtimer_threshold(u64 period) { } #endif struct ctl_table; -extern int proc_watchdog(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -extern int proc_nmi_watchdog(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -extern int proc_soft_watchdog(struct ctl_table *, int , - void __user *, size_t *, 
loff_t *); -extern int proc_watchdog_thresh(struct ctl_table *, int , - void __user *, size_t *, loff_t *); -extern int proc_watchdog_cpumask(struct ctl_table *, int, - void __user *, size_t *, loff_t *); +int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); +int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *); +int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *); +int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *); #ifdef CONFIG_HAVE_ACPI_APEI_NMI #include <asm/nmi.h> diff --git a/include/linux/node.h b/include/linux/node.h index 4866f32a02d8..014ba3ab2efd 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -99,11 +99,13 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) -extern int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn); +int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else static inline int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn) + unsigned long end_pfn, + enum meminit_context context) { return 0; } @@ -128,7 +130,8 @@ static inline int register_one_node(int nid) if (error) return error; /* link memory sections under this node */ - error = link_mem_sections(nid, start_pfn, end_pfn); + error = link_mem_sections(nid, start_pfn, end_pfn, + MEMINIT_EARLY); } return error; diff --git a/include/linux/nospec.h b/include/linux/nospec.h index 0c5ef54fd416..c1e79f72cd89 100644 --- a/include/linux/nospec.h +++ b/include/linux/nospec.h @@ -5,6 +5,8 @@ #ifndef _LINUX_NOSPEC_H #define _LINUX_NOSPEC_H + +#include <linux/compiler.h> #include <asm/barrier.h> struct task_struct; diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 074f395b9ad2..cdb171efc7cb 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h @@ -42,6 +42,30 @@ struct nsproxy { extern struct nsproxy init_nsproxy; /* + * A structure to encompass all bits needed to install + * a partial or complete new set of namespaces. + * + * If a new user namespace is requested cred will + * point to a modifiable set of credentials. If a pointer + * to a modifiable set is needed nsset_cred() must be + * used and tested. + */ +struct nsset { + unsigned flags; + struct nsproxy *nsproxy; + struct fs_struct *fs; + const struct cred *cred; +}; + +static inline struct cred *nsset_cred(struct nsset *set) +{ + if (set->flags & CLONE_NEWUSER) + return (struct cred *)set->cred; + + return NULL; +} + +/* * the namespaces access rules are: * * 1. only current task is allowed to change tsk->nsproxy pointer or diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 8c13538aeffe..191b524e5c0d 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -478,7 +478,7 @@ void ntb_unregister_client(struct ntb_client *client); int ntb_register_device(struct ntb_dev *ntb); /** - * ntb_register_device() - unregister a ntb device + * ntb_unregister_device() - unregister a ntb device * @ntb: NTB device context. * * The device will be removed from the list of ntb devices. If the ntb device @@ -1351,7 +1351,7 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int sidx, u32 val) * @sidx: Scratchpad index. * @spad_addr: OUT - The address of the peer scratchpad register. 
* - * Return the address of the peer doorbell register. This may be used, for + * Return the address of the peer scratchpad register. This may be used, for * example, by drivers that offload memory copy operations to a dma engine. * * Return: Zero on success, otherwise an error number. @@ -1373,7 +1373,7 @@ static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, * * Read the peer scratchpad register, and return the value. * - * Return: The value of the local scratchpad register. + * Return: The value of the peer scratchpad register. */ static inline u32 ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) { diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 10f81629b9ce..2a38f2b477a5 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h @@ -10,47 +10,26 @@ /* - * ********************** LLDD FC-NVME Host API ******************** + * ********************** FC-NVME LS API ******************** * - * For FC LLDD's that are the NVME Host role. + * Data structures used by both FC-NVME hosts and FC-NVME + * targets to perform FC-NVME LS requests or transmit + * responses. * - * ****************************************************************** + * *********************************************************** */ - - /** - * struct nvme_fc_port_info - port-specific ids and FC connection-specific - * data element used during NVME Host role - * registrations - * - * Static fields describing the port being registered: - * @node_name: FC WWNN for the port - * @port_name: FC WWPN for the port - * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) - * @dev_loss_tmo: maximum delay for reconnects to an association on - * this device. Used only on a remoteport. + * struct nvmefc_ls_req - Request structure passed from the transport + * to the LLDD to perform a NVME-FC LS request and obtain + * a response. + * Used by nvme-fc transport (host) to send LS's such as + * Create Association, Create Connection and Disconnect + * Association. + * Used by the nvmet-fc transport (controller) to send + * LS's such as Disconnect Association. * - * Initialization values for dynamic port fields: - * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must - * be set to 0. - */ -struct nvme_fc_port_info { - u64 node_name; - u64 port_name; - u32 port_role; - u32 port_id; - u32 dev_loss_tmo; -}; - - -/** - * struct nvmefc_ls_req - Request structure passed from NVME-FC transport - * to LLDD in order to perform a NVME FC-4 LS - * request and obtain a response. - * - * Values set by the NVME-FC layer prior to calling the LLDD ls_req - * entrypoint. + * Values set by the requestor prior to calling the LLDD ls_req entrypoint: * @rqstaddr: pointer to request buffer * @rqstdma: PCI DMA address of request buffer * @rqstlen: Length, in bytes, of request buffer @@ -63,8 +42,8 @@ struct nvme_fc_port_info { * @private: pointer to memory allocated alongside the ls request structure * that is specifically for the LLDD to use while processing the * request. The length of the buffer corresponds to the - * lsrqst_priv_sz value specified in the nvme_fc_port_template - * supplied by the LLDD. + * lsrqst_priv_sz value specified in the xxx_template supplied + * by the LLDD. * @done: The callback routine the LLDD is to invoke upon completion of * the LS request. req argument is the pointer to the original LS * request structure. 
Status argument must be 0 upon success, a @@ -86,6 +65,101 @@ struct nvmefc_ls_req { } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */ +/** + * struct nvmefc_ls_rsp - Structure passed from the transport to the LLDD + * to request the transmit of the NVME-FC LS response to a + * NVME-FC LS request. The structure originates in the LLDD + * and is given to the transport via the xxx_rcv_ls_req() + * transport routine. As such, the structure represents the + * FC exchange context for the NVME-FC LS request that was + * received and which the response is to be sent for. + * Used by the LLDD to pass the nvmet-fc transport (controller) + * received LS's such as Create Association, Create Connection + * and Disconnect Association. + * Used by the LLDD to pass the nvme-fc transport (host) + * received LS's such as Disconnect Association or Disconnect + * Connection. + * + * The structure is allocated by the LLDD whenever a LS Request is received + * from the FC link. The address of the structure is passed to the nvmet-fc + * or nvme-fc layer via the xxx_rcv_ls_req() transport routines. + * + * The address of the structure is to be passed back to the LLDD + * when the response is to be transmitted. The LLDD will use the address to + * map back to the LLDD exchange structure which maintains information such + * as the remote N_Port that sent the LS as well as any FC exchange context. + * Upon completion of the LS response transmit, the LLDD will pass the + * address of the structure back to the transport LS rsp done() routine, + * allowing the transport to release dma resources. Upon completion of + * the done() routine, no further access to the structure will be made by + * the transport and the LLDD can de-allocate the structure. + * + * Field initialization: + * At the time of the xxx_rcv_ls_req() call, there is no content that + * is valid in the structure. + * + * When the structure is used for the LLDD->xmt_ls_rsp() call, the + * transport layer will fully set the fields in order to specify the + * response payload buffer and its length as well as the done routine + * to be called upon completion of the transmit. The transport layer + * will also set a private pointer for its own use in the done routine. + * + * Values set by the transport layer prior to calling the LLDD xmt_ls_rsp + * entrypoint: + * @rspbuf: pointer to the LS response buffer + * @rspdma: PCI DMA address of the LS response buffer + * @rsplen: Length, in bytes, of the LS response buffer + * @done: The callback routine the LLDD is to invoke upon completion of + * transmitting the LS response. req argument is the pointer to + * the original ls request. + * @nvme_fc_private: pointer to an internal transport-specific structure + * used as part of the transport done() processing. The LLDD is + * not to access this pointer. + */ +struct nvmefc_ls_rsp { + void *rspbuf; + dma_addr_t rspdma; + u16 rsplen; + + void (*done)(struct nvmefc_ls_rsp *rsp); + void *nvme_fc_private; /* LLDD is not to access !! */ +}; + + + +/* + * ********************** LLDD FC-NVME Host API ******************** + * + * For FC LLDD's that are the NVME Host role.
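The lifecycle above implies an LLDD transmit handler shaped roughly like the sketch below. All lldd_* names, the embedding lldd_ls_exchange structure, and its aborted flag are hypothetical; only the nvmefc_ls_rsp handling follows the contract documented above.

/* Hypothetical LLDD ->xmt_ls_rsp() handler honoring the nvmefc_ls_rsp contract. */
static int lldd_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			   struct nvme_fc_remote_port *rport,
			   struct nvmefc_ls_rsp *ls_rsp)
{
	/* map back to the LLDD exchange that embeds the ls_rsp */
	struct lldd_ls_exchange *ex =
		container_of(ls_rsp, struct lldd_ls_exchange, ls_rsp);

	if (ex->aborted) {
		/* an ABTS was already seen: complete without transmitting */
		ls_rsp->done(ls_rsp);
		return 0;
	}

	/*
	 * Transmit rsplen bytes from rspdma on this exchange; the LLDD's
	 * completion path calls ls_rsp->done(ls_rsp), after which ex may
	 * be freed.
	 */
	return lldd_send_ls_rsp_frame(ex, ls_rsp->rspdma, ls_rsp->rsplen);
}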
+ * + * ****************************************************************** + */ + + +/** + * struct nvme_fc_port_info - port-specific ids and FC connection-specific + * data element used during NVME Host role + * registrations + * + * Static fields describing the port being registered: + * @node_name: FC WWNN for the port + * @port_name: FC WWPN for the port + * @port_role: What NVME roles are supported (see FC_PORT_ROLE_xxx) + * @dev_loss_tmo: maximum delay for reconnects to an association on + * this device. Used only on a remoteport. + * + * Initialization values for dynamic port fields: + * @port_id: FC N_Port_ID currently assigned the port. Upper 8 bits must + * be set to 0. + */ +struct nvme_fc_port_info { + u64 node_name; + u64 port_name; + u32 port_role; + u32 port_id; + u32 dev_loss_tmo; +}; + enum nvmefc_fcp_datadir { NVMEFC_FCP_NODATA, /* payload_length and sg_cnt will be zero */ NVMEFC_FCP_WRITE, @@ -337,6 +411,21 @@ struct nvme_fc_remote_port { * indicating an FC transport Aborted status. * Entrypoint is Mandatory. * + * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service. + * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange + * structure specified in the nvme_fc_rcv_ls_req() call made when + * the LS request was received. The structure will fully describe + * the buffers for the response payload and the dma address of the + * payload. The LLDD is to transmit the response (or return a + * non-zero errno status), and upon completion of the transmit, call + * the "done" routine specified in the nvmefc_ls_rsp structure + * (argument to done is the address of the nvmefc_ls_rsp structure + * itself). Upon the completion of the done routine, the LLDD shall + * consider the LS handling complete and the nvmefc_ls_rsp structure + * may be freed/released. + * Entrypoint is mandatory if the LLDD calls the nvme_fc_rcv_ls_req() + * entrypoint. + * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. @@ -371,7 +460,7 @@ struct nvme_fc_remote_port { * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional * memory that it would like fc nvme layer to allocate on the LLDD's * behalf whenever a ls request structure is allocated. The additional - * memory area solely for the of the LLDD and its location is + * memory area is solely for use by the LLDD and its location is * specified by the ls_request->private pointer. * Value is Mandatory. Allowed to be zero. * @@ -405,6 +494,9 @@ struct nvme_fc_port_template { struct nvme_fc_remote_port *, void *hw_queue_handle, struct nvmefc_fcp_req *); + int (*xmt_ls_rsp)(struct nvme_fc_local_port *localport, + struct nvme_fc_remote_port *rport, + struct nvmefc_ls_rsp *ls_rsp); u32 max_hw_queues; u16 max_sgl_segments; @@ -441,6 +533,34 @@ void nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport); int nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *remoteport, u32 dev_loss_tmo); +/* + * Routine called to pass a NVME-FC LS request, received by the lldd, + * to the nvme-fc transport. + * + * If the return value is zero: the LS was successfully accepted by the + * transport. + * If the return value is non-zero: the transport has not accepted the + * LS. The lldd should ABTS-LS the LS. 
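In practice, the zero/non-zero rule above maps to a receive path like this minimal sketch; the lldd_* types and helpers are hypothetical, and nvme_fc_rcv_ls_req() is the transport routine declared just below.

/* Hypothetical LLDD LS receive path for the NVME host role. */
static void lldd_recv_ls(struct lldd_rport *rp, void *lsreqbuf, u32 lsreqbuf_len)
{
	struct lldd_ls_exchange *ex = lldd_alloc_ls_exchange(rp);

	if (!ex) {
		lldd_abts_ls(rp);	/* no exchange resources: reject the LS */
		return;
	}
	if (nvme_fc_rcv_ls_req(rp->remoteport, &ex->ls_rsp,
			       lsreqbuf, lsreqbuf_len)) {
		/* transport did not accept the LS: ABTS-LS it */
		lldd_abts_ls(rp);
		lldd_free_ls_exchange(ex);
	}
}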
+ * + * Note: if the LLDD receives an ABTS for the LS prior to the transport + * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD + * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the + * response shall not be transmitted and the struct nvmefc_ls_rsp done + * routine shall be called. The LLDD may transmit the ABTS response as + * soon as the LS was marked or can delay until the xmt_ls_rsp() call is + * made. + * Note: if an RCV LS was successfully posted to the transport and the + * remoteport is then unregistered before xmt_ls_rsp() was called for + * the lsrsp structure, the transport will still call xmt_ls_rsp() + * afterward to clean up the outstanding lsrsp structure. The LLDD should + * noop the transmission of the rsp and call the lsrsp->done() routine + * to allow the lsrsp structure to be released. + */ +int nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *remoteport, + struct nvmefc_ls_rsp *lsrsp, + void *lsreqbuf, u32 lsreqbuf_len); + + /* + * *************** LLDD FC-NVME Target/Subsystem API *************** @@ -470,55 +590,6 @@ struct nvmet_fc_port_info { }; - -/** - * struct nvmefc_tgt_ls_req - Structure used between LLDD and NVMET-FC - * layer to represent the exchange context for - * a FC-NVME Link Service (LS). - * - * The structure is allocated by the LLDD whenever a LS Request is received - * from the FC link. The address of the structure is passed to the nvmet-fc - * layer via the nvmet_fc_rcv_ls_req() call. The address of the structure - * will be passed back to the LLDD when the response is to be transmit. - * The LLDD is to use the address to map back to the LLDD exchange structure - * which maintains information such as the targetport the LS was received - * on, the remote FC NVME initiator that sent the LS, and any FC exchange - * context. Upon completion of the LS response transmit, the address of the - * structure will be passed back to the LS rsp done() routine, allowing the - * nvmet-fc layer to release dma resources. Upon completion of the done() - * routine, no further access will be made by the nvmet-fc layer and the - * LLDD can de-allocate the structure. - * - * Field initialization: - * At the time of the nvmet_fc_rcv_ls_req() call, there is no content that - * is valid in the structure. - * - * When the structure is used for the LLDD->xmt_ls_rsp() call, the nvmet-fc - * layer will fully set the fields in order to specify the response - * payload buffer and its length as well as the done routine to be called - * upon compeletion of the transmit. The nvmet-fc layer will also set a - * private pointer for its own use in the done routine. - * - * Values set by the NVMET-FC layer prior to calling the LLDD xmt_ls_rsp - * entrypoint. - * @rspbuf: pointer to the LS response buffer - * @rspdma: PCI DMA address of the LS response buffer - * @rsplen: Length, in bytes, of the LS response buffer - * @done: The callback routine the LLDD is to invoke upon completion of - * transmitting the LS response. req argument is the pointer to - * the original ls request. - * @nvmet_fc_private: pointer to an internal NVMET-FC layer structure used - * as part of the NVMET-FC processing. The LLDD is not to access - * this pointer. - */ -struct nvmefc_tgt_ls_req { - void *rspbuf; - dma_addr_t rspdma; - u16 rsplen; - - void (*done)(struct nvmefc_tgt_ls_req *req); - void *nvmet_fc_private; /* LLDD is not to access !!
*/ -}; - /* Operations that NVME-FC layer may request the LLDD to perform for FCP */ enum { NVMET_FCOP_READDATA = 1, /* xmt data to initiator */ @@ -601,7 +672,7 @@ enum { * Values set by the LLDD indicating completion status of the FCP operation. * Must be set prior to calling the done() callback. * @transferred_length: amount of DATA_OUT payload data received by a - * a WRITEDATA operation. If not a WRITEDATA operation, value must + * WRITEDATA operation. If not a WRITEDATA operation, value must * be set to 0. Should equal transfer_length on success. * @fcp_error: status of the FCP operation. Must be 0 on success; on failure * must be a NVME_SC_FC_xxxx value. @@ -693,17 +764,19 @@ struct nvmet_fc_target_port { * Entrypoint is Mandatory. * * @xmt_ls_rsp: Called to transmit the response to a FC-NVME FC-4 LS service. - * The nvmefc_tgt_ls_req structure is the same LLDD-supplied exchange + * The nvmefc_ls_rsp structure is the same LLDD-supplied exchange * structure specified in the nvmet_fc_rcv_ls_req() call made when - * the LS request was received. The structure will fully describe + * the LS request was received. The structure will fully describe * the buffers for the response payload and the dma address of the - * payload. The LLDD is to transmit the response (or return a non-zero - * errno status), and upon completion of the transmit, call the - * "done" routine specified in the nvmefc_tgt_ls_req structure - * (argument to done is the ls reqwuest structure itself). - * After calling the done routine, the LLDD shall consider the - * LS handling complete and the nvmefc_tgt_ls_req structure may - * be freed/released. + * payload. The LLDD is to transmit the response (or return a + * non-zero errno status), and upon completion of the transmit, call + * the "done" routine specified in the nvmefc_ls_rsp structure + * (argument to done is the address of the nvmefc_ls_rsp structure + * itself). Upon the completion of the done() routine, the LLDD shall + * consider the LS handling complete and the nvmefc_ls_rsp structure + * may be freed/released. + * The transport will always call the xmt_ls_rsp() routine for any + * LS received. * Entrypoint is Mandatory. * * @fcp_op: Called to perform a data transfer or transmit a response. @@ -798,6 +871,39 @@ struct nvmet_fc_target_port { * should cause the initiator to rescan the discovery controller * on the targetport. * + * @ls_req: Called to issue a FC-NVME FC-4 LS service request. + * The nvme_fc_ls_req structure will fully describe the buffers for + * the request payload and where to place the response payload. + * The targetport that is to issue the LS request is identified by + * the targetport argument. The remote port that is to receive the + * LS request is identified by the hosthandle argument. The nvmet-fc + * transport is only allowed to issue FC-NVME LS's on behalf of an + * association that was created prior by a Create Association LS. + * The hosthandle will originate from the LLDD in the struct + * nvmefc_ls_rsp structure for the Create Association LS that + * was delivered to the transport. The transport will save the + * hosthandle as an attribute of the association. If the LLDD + * loses connectivity with the remote port, it must call the + * nvmet_fc_invalidate_host() routine to remove any references to + * the remote port in the transport. 
+ * The LLDD is to allocate an exchange, issue the LS request, obtain + * the LS response, and call the "done" routine specified in the + * request structure (argument to done is the ls request structure + * itself). + * Entrypoint is Optional - but highly recommended. + * + * @ls_abort: called to request the LLDD to abort the indicated ls request. + * The call may return before the abort has completed. After aborting + * the request, the LLDD must still call the ls request done routine + * indicating an FC transport Aborted status. + * Entrypoint is Mandatory if the ls_req entry point is specified. + * + * @host_release: called to inform the LLDD that the request to invalidate + * the host port indicated by the hosthandle has been fully completed. + * No associations exist with the host port and there will be no + * further references to hosthandle. + * Entrypoint is Mandatory if the lldd calls nvmet_fc_invalidate_host(). + * * @max_hw_queues: indicates the maximum number of hw queues the LLDD * supports for cpu affinitization. * Value is Mandatory. Must be at least 1. @@ -826,11 +932,19 @@ struct nvmet_fc_target_port { * area solely for the use of the LLDD and its location is specified by * the targetport->private pointer. * Value is Mandatory. Allowed to be zero. + * + * @lsrqst_priv_sz: The LLDD sets this field to the amount of additional + * memory that it would like nvmet-fc layer to allocate on the LLDD's + * behalf whenever a ls request structure is allocated. The additional + * memory area is solely for use by the LLDD and its location is + * specified by the ls_request->private pointer. + * Value is Mandatory. Allowed to be zero. + * */ struct nvmet_fc_target_template { void (*targetport_delete)(struct nvmet_fc_target_port *tgtport); int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport, - struct nvmefc_tgt_ls_req *tls_req); + struct nvmefc_ls_rsp *ls_rsp); int (*fcp_op)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); void (*fcp_abort)(struct nvmet_fc_target_port *tgtport, @@ -840,6 +954,11 @@ struct nvmet_fc_target_template { void (*defer_rcv)(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq); void (*discovery_event)(struct nvmet_fc_target_port *tgtport); + int (*ls_req)(struct nvmet_fc_target_port *targetport, + void *hosthandle, struct nvmefc_ls_req *lsreq); + void (*ls_abort)(struct nvmet_fc_target_port *targetport, + void *hosthandle, struct nvmefc_ls_req *lsreq); + void (*host_release)(void *hosthandle); u32 max_hw_queues; u16 max_sgl_segments; @@ -848,7 +967,9 @@ struct nvmet_fc_target_template { u32 target_features; + /* sizes of additional private data for data structures */ u32 target_priv_sz; + u32 lsrqst_priv_sz; }; @@ -859,10 +980,61 @@ int nvmet_fc_register_targetport(struct nvmet_fc_port_info *portinfo, int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *tgtport); +/* + * Routine called to pass a NVME-FC LS request, received by the lldd, + * to the nvmet-fc transport. + * + * If the return value is zero: the LS was successfully accepted by the + * transport. + * If the return value is non-zero: the transport has not accepted the + * LS. The lldd should ABTS-LS the LS. + * + * Note: if the LLDD receives an ABTS for the LS prior to the transport + * calling the ops->xmt_ls_rsp() routine to transmit a response, the LLDD + * shall mark the LS as aborted, and when the xmt_ls_rsp() is called: the + * response shall not be transmitted and the struct nvmefc_ls_rsp done + * routine shall be called.
The LLDD may transmit the ABTS response as + * soon as the LS was marked or can delay until the xmt_ls_rsp() call is + * made. + * Note: if an RCV LS was successfully posted to the transport and the + * targetport is then unregistered before xmt_ls_rsp() was called for + * the lsrsp structure, the transport will still call xmt_ls_rsp() + * afterward to clean up the outstanding lsrsp structure. The LLDD should + * noop the transmission of the rsp and call the lsrsp->done() routine + * to allow the lsrsp structure to be released. + */ int nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *tgtport, - struct nvmefc_tgt_ls_req *lsreq, + void *hosthandle, + struct nvmefc_ls_rsp *rsp, void *lsreqbuf, u32 lsreqbuf_len); +/* + * Routine called by the LLDD whenever it has a logout or loss of + * connectivity to a NVME-FC host port for which there had been + * active NVMe controllers. The host port is indicated by the + * hosthandle. The hosthandle is given to the nvmet-fc transport + * when a NVME LS was received, typically to create a new association. + * The nvmet-fc transport will cache the hostport value with the + * association for use in LS requests for the association. + * When the LLDD calls this routine, the nvmet-fc transport will + * immediately terminate all associations that were created with + * the hosthandle host port. + * The LLDD, after calling this routine and having control returned, + * must assume the transport may subsequently utilize hosthandle as + * part of sending LS's to terminate the association. The LLDD + * should reject the LS's if they are attempted. + * Once the last association has terminated for the hosthandle host + * port, the nvmet-fc transport will call the ops->host_release() + * callback. As of the callback, the nvmet-fc transport will no + * longer reference hosthandle. + */ +void nvmet_fc_invalidate_host(struct nvmet_fc_target_port *tgtport, + void *hosthandle); + +/* + * If nvmet_fc_rcv_fcp_req returns non-zero, the transport has not accepted + * the FCP cmd. The lldd should ABTS-LS the cmd. + */ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport, struct nvmefc_tgt_fcp_req *fcpreq, void *cmdiubuf, u32 cmdiubuf_len); diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h index e8c30b39bb27..51fe44e0328b 100644 --- a/include/linux/nvme-fc.h +++ b/include/linux/nvme-fc.h @@ -4,8 +4,8 @@ */ /* - * This file contains definitions relative to FC-NVME-2 r1.06 - * (T11-2019-00210-v001). + * This file contains definitions relative to FC-NVME-2 r1.08 + * (T11-2019-00210-v004).
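Taken together, the nvmet_fc_invalidate_host()/host_release() contract described above reduces to a sequence like the following on the LLDD side; every lldd_* name here is hypothetical, a sketch only.

/* Hypothetical LLDD reaction to logout/loss of connectivity to a host port. */
static void lldd_host_logout(struct lldd_tgtport *tp, struct lldd_host *h)
{
	/* the transport immediately terminates this host's associations */
	nvmet_fc_invalidate_host(tp->tgtport, h);
	/*
	 * Until ->host_release() is called, the transport may still attempt
	 * ->ls_req() with this hosthandle; the LLDD should reject those.
	 */
	h->invalidated = true;
}

static void lldd_host_release(void *hosthandle)
{
	/* last association gone: the transport holds no more references */
	lldd_free_host(hosthandle);
}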
*/ #ifndef _NVME_FC_H @@ -81,7 +81,8 @@ struct nvme_fc_ersp_iu { }; -#define FCNVME_NVME_SR_OPCODE 0x01 +#define FCNVME_NVME_SR_OPCODE 0x01 +#define FCNVME_NVME_SR_RSP_OPCODE 0x02 struct nvme_fc_nvme_sr_iu { __u8 fc_id; @@ -94,7 +95,7 @@ struct nvme_fc_nvme_sr_iu { enum { FCNVME_SRSTAT_ACC = 0x0, - FCNVME_SRSTAT_INV_FCID = 0x1, + /* reserved 0x1 */ /* reserved 0x2 */ FCNVME_SRSTAT_LOGICAL_ERR = 0x3, FCNVME_SRSTAT_INV_QUALIF = 0x4, @@ -397,7 +398,7 @@ struct fcnvme_ls_disconnect_conn_rqst { struct fcnvme_ls_rqst_w0 w0; __be32 desc_list_len; struct fcnvme_lsdesc_assoc_id associd; - struct fcnvme_lsdesc_disconn_cmd connectid; + struct fcnvme_lsdesc_conn_id connectid; }; struct fcnvme_ls_disconnect_conn_acc { diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 3d5189f46cb1..d92535997687 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -38,6 +38,8 @@ enum { NVMF_ADDR_FAMILY_IP6 = 2, /* IP6 */ NVMF_ADDR_FAMILY_IB = 3, /* InfiniBand */ NVMF_ADDR_FAMILY_FC = 4, /* Fibre Channel */ + NVMF_ADDR_FAMILY_LOOP = 254, /* Reserved for host usage */ + NVMF_ADDR_FAMILY_MAX, }; /* Transport Type codes for Discovery Log Page entry TRTYPE field */ @@ -130,6 +132,7 @@ enum { #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) #define NVME_CAP_NSSRC(cap) (((cap) >> 36) & 0x1) +#define NVME_CAP_CSS(cap) (((cap) >> 37) & 0xff) #define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) #define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf) @@ -160,7 +163,6 @@ enum { enum { NVME_CC_ENABLE = 1 << 0, - NVME_CC_CSS_NVM = 0 << 4, NVME_CC_EN_SHIFT = 0, NVME_CC_CSS_SHIFT = 4, NVME_CC_MPS_SHIFT = 7, @@ -168,6 +170,9 @@ enum { NVME_CC_SHN_SHIFT = 14, NVME_CC_IOSQES_SHIFT = 16, NVME_CC_IOCQES_SHIFT = 20, + NVME_CC_CSS_NVM = 0 << NVME_CC_CSS_SHIFT, + NVME_CC_CSS_CSI = 6 << NVME_CC_CSS_SHIFT, + NVME_CC_CSS_MASK = 7 << NVME_CC_CSS_SHIFT, NVME_CC_AMS_RR = 0 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_WRRU = 1 << NVME_CC_AMS_SHIFT, NVME_CC_AMS_VS = 7 << NVME_CC_AMS_SHIFT, @@ -177,6 +182,8 @@ enum { NVME_CC_SHN_MASK = 3 << NVME_CC_SHN_SHIFT, NVME_CC_IOSQES = NVME_NVM_IOSQES << NVME_CC_IOSQES_SHIFT, NVME_CC_IOCQES = NVME_NVM_IOCQES << NVME_CC_IOCQES_SHIFT, + NVME_CAP_CSS_NVM = 1 << 0, + NVME_CAP_CSS_CSI = 1 << 6, NVME_CSTS_RDY = 1 << 0, NVME_CSTS_CFS = 1 << 1, NVME_CSTS_NSSRO = 1 << 4, @@ -299,10 +306,13 @@ struct nvme_id_ctrl { }; enum { + NVME_CTRL_CMIC_MULTI_CTRL = 1 << 1, + NVME_CTRL_CMIC_ANA = 1 << 3, NVME_CTRL_ONCS_COMPARE = 1 << 0, NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1, NVME_CTRL_ONCS_DSM = 1 << 2, NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, + NVME_CTRL_ONCS_RESERVATIONS = 1 << 5, NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, @@ -365,11 +375,37 @@ struct nvme_id_ns { __u8 vs[3712]; }; +struct nvme_zns_lbafe { + __le64 zsze; + __u8 zdes; + __u8 rsvd9[7]; +}; + +struct nvme_id_ns_zns { + __le16 zoc; + __le16 ozcs; + __le32 mar; + __le32 mor; + __le32 rrl; + __le32 frl; + __u8 rsvd20[2796]; + struct nvme_zns_lbafe lbafe[16]; + __u8 rsvd3072[768]; + __u8 vs[256]; +}; + +struct nvme_id_ctrl_zns { + __u8 zasl; + __u8 rsvd1[4095]; +}; + enum { NVME_ID_CNS_NS = 0x00, NVME_ID_CNS_CTRL = 0x01, NVME_ID_CNS_NS_ACTIVE_LIST = 0x02, NVME_ID_CNS_NS_DESC_LIST = 0x03, + NVME_ID_CNS_CS_NS = 0x05, + NVME_ID_CNS_CS_CTRL = 0x06, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, NVME_ID_CNS_CTRL_NS_LIST = 0x12, @@ -380,6 +416,11 @@ enum { }; enum { + NVME_CSI_NVM = 0, + NVME_CSI_ZNS = 2, +}; + +enum { NVME_DIR_IDENTIFY = 
0x00, NVME_DIR_STREAMS = 0x01, NVME_DIR_SND_ID_OP_ENABLE = 0x01, @@ -394,8 +435,12 @@ enum { enum { NVME_NS_FEAT_THIN = 1 << 0, + NVME_NS_FEAT_ATOMICS = 1 << 1, + NVME_NS_FEAT_IO_OPT = 1 << 4, + NVME_NS_ATTR_RO = 1 << 0, NVME_NS_FLBAS_LBA_MASK = 0xf, NVME_NS_FLBAS_META_EXT = 0x10, + NVME_NS_NMIC_SHARED = 1 << 0, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, @@ -412,6 +457,12 @@ enum { NVME_NS_DPS_PI_TYPE3 = 3, }; +/* Identify Namespace Metadata Capabilities (MC): */ +enum { + NVME_MC_EXTENDED_LBA = (1 << 0), + NVME_MC_METADATA_PTR = (1 << 1), +}; + struct nvme_ns_id_desc { __u8 nidt; __u8 nidl; @@ -421,11 +472,13 @@ struct nvme_ns_id_desc { #define NVME_NIDT_EUI64_LEN 8 #define NVME_NIDT_NGUID_LEN 16 #define NVME_NIDT_UUID_LEN 16 +#define NVME_NIDT_CSI_LEN 1 enum { NVME_NIDT_EUI64 = 0x01, NVME_NIDT_NGUID = 0x02, NVME_NIDT_UUID = 0x03, + NVME_NIDT_CSI = 0x04, }; struct nvme_smart_log { @@ -505,6 +558,27 @@ struct nvme_ana_rsp_hdr { __le16 rsvd10[3]; }; +struct nvme_zone_descriptor { + __u8 zt; + __u8 zs; + __u8 za; + __u8 rsvd3[5]; + __le64 zcap; + __le64 zslba; + __le64 wp; + __u8 rsvd32[32]; +}; + +enum { + NVME_ZONE_TYPE_SEQWRITE_REQ = 0x2, +}; + +struct nvme_zone_report { + __le64 nr_zones; + __u8 resv8[56]; + struct nvme_zone_descriptor entries[]; +}; + enum { NVME_SMART_CRIT_SPARE = 1 << 0, NVME_SMART_CRIT_TEMPERATURE = 1 << 1, @@ -599,6 +673,9 @@ enum nvme_opcode { nvme_cmd_resv_report = 0x0e, nvme_cmd_resv_acquire = 0x11, nvme_cmd_resv_release = 0x15, + nvme_cmd_zone_mgmt_send = 0x79, + nvme_cmd_zone_mgmt_recv = 0x7a, + nvme_cmd_zone_append = 0x7d, }; #define nvme_opcode_name(opcode) { opcode, #opcode } @@ -737,6 +814,7 @@ struct nvme_rw_command { enum { NVME_RW_LR = 1 << 15, NVME_RW_FUA = 1 << 14, + NVME_RW_APPEND_PIREMAP = 1 << 9, NVME_RW_DSM_FREQ_UNSPEC = 0, NVME_RW_DSM_FREQ_TYPICAL = 1, NVME_RW_DSM_FREQ_RARE = 2, @@ -802,6 +880,53 @@ struct nvme_write_zeroes_cmd { __le16 appmask; }; +enum nvme_zone_mgmt_action { + NVME_ZONE_CLOSE = 0x1, + NVME_ZONE_FINISH = 0x2, + NVME_ZONE_OPEN = 0x3, + NVME_ZONE_RESET = 0x4, + NVME_ZONE_OFFLINE = 0x5, + NVME_ZONE_SET_DESC_EXT = 0x10, +}; + +struct nvme_zone_mgmt_send_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le32 cdw2[2]; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 slba; + __le32 cdw12; + __u8 zsa; + __u8 select_all; + __u8 rsvd13[2]; + __le32 cdw14[2]; +}; + +struct nvme_zone_mgmt_recv_cmd { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __le64 rsvd2[2]; + union nvme_data_ptr dptr; + __le64 slba; + __le32 numd; + __u8 zra; + __u8 zrasf; + __u8 pr; + __u8 rsvd13; + __le32 cdw14[2]; +}; + +enum { + NVME_ZRA_ZONE_REPORT = 0, + NVME_ZRASF_ZONE_REPORT_ALL = 0, + NVME_REPORT_ZONE_PARTIAL = 1, +}; + /* Features */ enum { @@ -858,6 +983,7 @@ enum nvme_admin_opcode { nvme_admin_security_recv = 0x82, nvme_admin_sanitize_nvm = 0x84, nvme_admin_get_lba_status = 0x86, + nvme_admin_vendor_start = 0xC0, }; #define nvme_admin_opcode_name(opcode) { opcode, #opcode } @@ -921,6 +1047,8 @@ enum { NVME_FEAT_RESV_MASK = 0x82, NVME_FEAT_RESV_PERSIST = 0x83, NVME_FEAT_WRITE_PROTECT = 0x84, + NVME_FEAT_VENDOR_START = 0xC0, + NVME_FEAT_VENDOR_END = 0xFF, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, @@ -958,7 +1086,9 @@ struct nvme_identify { __u8 cns; __u8 rsvd3; __le16 ctrlid; - __u32 rsvd11[5]; + __u8 rsvd11[3]; + __u8 csi; + __u32 rsvd12[4]; }; #define NVME_IDENTIFY_DATA_SIZE 4096 @@ -1072,7 +1202,9 @@ struct nvme_get_log_page_command { }; __le64 lpo; 
}; - __u32 rsvd14[2]; + __u8 rsvd14[3]; + __u8 csi; + __u32 rsvd15; }; struct nvme_directive_cmd { @@ -1177,7 +1309,7 @@ struct nvmf_disc_rsp_page_hdr { __le64 numrec; __le16 recfmt; __u8 resv14[1006]; - struct nvmf_disc_rsp_page_entry entries[0]; + struct nvmf_disc_rsp_page_entry entries[]; }; enum { @@ -1269,6 +1401,8 @@ struct nvme_command { struct nvme_format_cmd format; struct nvme_dsm_cmd dsm; struct nvme_write_zeroes_cmd write_zeroes; + struct nvme_zone_mgmt_send_cmd zms; + struct nvme_zone_mgmt_recv_cmd zmr; struct nvme_abort_cmd abort; struct nvme_get_log_page_command get_log_page; struct nvmf_common_command fabrics; @@ -1403,6 +1537,18 @@ enum { NVME_SC_AUTH_REQUIRED = 0x191, /* + * I/O Command Set Specific - Zoned commands: + */ + NVME_SC_ZONE_BOUNDARY_ERROR = 0x1b8, + NVME_SC_ZONE_FULL = 0x1b9, + NVME_SC_ZONE_READ_ONLY = 0x1ba, + NVME_SC_ZONE_OFFLINE = 0x1bb, + NVME_SC_ZONE_INVALID_WRITE = 0x1bc, + NVME_SC_ZONE_TOO_MANY_ACTIVE = 0x1bd, + NVME_SC_ZONE_TOO_MANY_OPEN = 0x1be, + NVME_SC_ZONE_INVALID_TRANSITION = 0x1bf, + + /* * Media and Data Integrity Errors: */ NVME_SC_WRITE_FAULT = 0x280, diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 1b311d27c9b8..052293f4cbdb 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -61,6 +61,7 @@ void nvmem_cell_put(struct nvmem_cell *cell); void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell); void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len); int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len); +int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val); int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val); int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val); int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val); diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 6d6f8e5d24c9..06409a6c40bc 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -27,6 +27,9 @@ enum nvmem_type { NVMEM_TYPE_BATTERY_BACKED, }; +#define NVMEM_DEVID_NONE (-1) +#define NVMEM_DEVID_AUTO (-2) + /** * struct nvmem_config - NVMEM device configuration * diff --git a/include/linux/of.h b/include/linux/of.h index c669c0a4732f..5cf7ae0465d1 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -554,7 +554,7 @@ bool of_console_check(struct device_node *dn, char *name, int index); extern int of_cpu_node_to_id(struct device_node *np); -int of_map_rid(struct device_node *np, u32 rid, +int of_map_id(struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); @@ -630,6 +630,11 @@ static inline struct device_node *of_get_parent(const struct device_node *node) return NULL; } +static inline struct device_node *of_get_next_parent(struct device_node *node) +{ + return NULL; +} + static inline struct device_node *of_get_next_child( const struct device_node *node, struct device_node *prev) { @@ -978,7 +983,7 @@ static inline int of_cpu_node_to_id(struct device_node *np) return -ENODEV; } -static inline int of_map_rid(struct device_node *np, u32 rid, +static inline int of_map_id(struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out) { diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 763022ed3456..88bc943405cd 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ 
-6,8 +6,11 @@ #include <linux/of.h> #include <linux/io.h> +struct of_bus; + struct of_pci_range_parser { struct device_node *node; + struct of_bus *bus; const __be32 *range; const __be32 *end; int na; @@ -119,6 +122,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) return NULL; } #endif +#define of_range_parser_init of_pci_range_parser_init #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 8d31e39dd564..07ca187fc5e4 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -55,9 +55,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) return of_node_get(cpu_dev->of_node); } -int of_dma_configure(struct device *dev, +int of_dma_configure_id(struct device *dev, struct device_node *np, - bool force_dma); + bool force_dma, const u32 *id); +static inline int of_dma_configure(struct device *dev, + struct device_node *np, + bool force_dma) +{ + return of_dma_configure_id(dev, np, force_dma, NULL); +} #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -106,6 +112,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) return NULL; } +static inline int of_dma_configure_id(struct device *dev, + struct device_node *np, + bool force_dma, const u32 *id) +{ + return 0; +} static inline int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma) diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index 01038a6aade0..4d7756087b6b 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -38,6 +38,7 @@ struct of_endpoint { child = of_graph_get_next_endpoint(parent, child)) #ifdef CONFIG_OF +bool of_graph_is_present(const struct device_node *node); int of_graph_parse_endpoint(const struct device_node *node, struct of_endpoint *endpoint); int of_graph_get_endpoint_count(const struct device_node *np); @@ -56,6 +57,11 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, u32 port, u32 endpoint); #else +static inline bool of_graph_is_present(const struct device_node *node) +{ + return false; +} + static inline int of_graph_parse_endpoint(const struct device_node *node, struct of_endpoint *endpoint) { diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index f3d40dd7bb66..16f4b3e87f20 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h @@ -13,7 +13,8 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix, size_t *size); extern const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np); + struct device_node *master_np, + const u32 *id); #else @@ -25,7 +26,8 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, } static inline const struct iommu_ops *of_iommu_configure(struct device *dev, - struct device_node *master_np) + struct device_node *master_np, + const u32 *id) { return NULL; } diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index 1214cabb2247..e8b78139f78c 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -52,9 +52,10 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev, struct device_node *np, enum irq_domain_bus_token token); extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, - u32 rid); + u32 id, + u32 bus_token); extern void of_msi_configure(struct device *dev, struct device_node *np); -u32
of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in); +u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else static inline int of_irq_count(struct device_node *dev) { @@ -85,17 +86,17 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev, return NULL; } static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev, - u32 rid) + u32 id, u32 bus_token) { return NULL; } static inline void of_msi_configure(struct device *dev, struct device_node *np) { } -static inline u32 of_msi_map_rid(struct device *dev, - struct device_node *msi_np, u32 rid_in) +static inline u32 of_msi_map_id(struct device *dev, + struct device_node *msi_np, u32 id_in) { - return rid_in; + return id_in; } #endif diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h index 491a2b7e77c1..1efb88d9f892 100644 --- a/include/linux/of_mdio.h +++ b/include/linux/of_mdio.h @@ -8,29 +8,33 @@ #ifndef __LINUX_OF_MDIO_H #define __LINUX_OF_MDIO_H +#include <linux/device.h> #include <linux/phy.h> #include <linux/of.h> #if IS_ENABLED(CONFIG_OF_MDIO) -extern bool of_mdiobus_child_is_phy(struct device_node *child); -extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); -extern struct phy_device *of_phy_find_device(struct device_node *phy_np); -extern struct phy_device *of_phy_connect(struct net_device *dev, - struct device_node *phy_np, - void (*hndlr)(struct net_device *), - u32 flags, phy_interface_t iface); -extern struct phy_device * +bool of_mdiobus_child_is_phy(struct device_node *child); +int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np); +int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio, + struct device_node *np); +struct phy_device *of_phy_find_device(struct device_node *phy_np); +struct phy_device * +of_phy_connect(struct net_device *dev, struct device_node *phy_np, + void (*hndlr)(struct net_device *), u32 flags, + phy_interface_t iface); +struct phy_device * of_phy_get_and_connect(struct net_device *dev, struct device_node *np, void (*hndlr)(struct net_device *)); -struct phy_device *of_phy_attach(struct net_device *dev, - struct device_node *phy_np, u32 flags, - phy_interface_t iface); - -extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); -extern int of_phy_register_fixed_link(struct device_node *np); -extern void of_phy_deregister_fixed_link(struct device_node *np); -extern bool of_phy_is_fixed_link(struct device_node *np); +struct phy_device * +of_phy_attach(struct net_device *dev, struct device_node *phy_np, + u32 flags, phy_interface_t iface); +struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); +int of_phy_register_fixed_link(struct device_node *np); +void of_phy_deregister_fixed_link(struct device_node *np); +bool of_phy_is_fixed_link(struct device_node *np); +int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, + struct device_node *child, u32 addr); static inline int of_mdio_parse_addr(struct device *dev, const struct device_node *np) @@ -118,6 +122,13 @@ static inline bool of_phy_is_fixed_link(struct device_node *np) { return false; } + +static inline int of_mdiobus_phy_device_register(struct mii_bus *mdio, + struct phy_device *phy, + struct device_node *child, u32 addr) +{ + return -ENOSYS; +} #endif diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 60f541912ccf..8216a4156263 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -3,6 
+3,7 @@ #define __OF_RESERVED_MEM_H #include <linux/device.h> +#include <linux/of.h> struct of_phandle_args; struct reserved_mem_ops; @@ -33,6 +34,9 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); int of_reserved_mem_device_init_by_idx(struct device *dev, struct device_node *np, int idx); +int of_reserved_mem_device_init_by_name(struct device *dev, + struct device_node *np, + const char *name); void of_reserved_mem_device_release(struct device *dev); void fdt_init_reserved_mem(void); @@ -45,6 +49,14 @@ static inline int of_reserved_mem_device_init_by_idx(struct device *dev, { return -ENOSYS; } + +static inline int of_reserved_mem_device_init_by_name(struct device *dev, + struct device_node *np, + const char *name) +{ + return -ENOSYS; +} + static inline void of_reserved_mem_device_release(struct device *pdev) { } static inline void fdt_init_reserved_mem(void) { } diff --git a/include/linux/oom.h b/include/linux/oom.h index c696c265f019..f022f581ac29 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -48,7 +48,7 @@ struct oom_control { /* Used by oom implementation, do not set */ unsigned long totalpages; struct task_struct *chosen; - unsigned long chosen_points; + long chosen_points; /* Used to print the constraint info. */ enum oom_constraint constraint; @@ -107,7 +107,7 @@ static inline vm_fault_t check_stable_address_space(struct mm_struct *mm) bool __oom_reap_task_mm(struct mm_struct *mm); -extern unsigned long oom_badness(struct task_struct *p, +long oom_badness(struct task_struct *p, unsigned long totalpages); extern bool out_of_memory(struct oom_control *oc); diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 659045046468..93fcef105061 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -304,16 +304,33 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) * struct_size() - Calculate size of structure with trailing array. * @p: Pointer to the structure. * @member: Name of the array member. - * @n: Number of elements in the array. + * @count: Number of elements in the array. * * Calculates size of memory needed for structure @p followed by an - * array of @n @member elements. + * array of @count number of @member elements. * * Return: number of bytes needed or SIZE_MAX on overflow. */ -#define struct_size(p, member, n) \ - __ab_c_size(n, \ +#define struct_size(p, member, count) \ + __ab_c_size(count, \ sizeof(*(p)->member) + __must_be_array((p)->member),\ sizeof(*(p))) +/** + * flex_array_size() - Calculate size of a flexible array member + * within an enclosing structure. + * + * @p: Pointer to the structure. + * @member: Name of the flexible array member. + * @count: Number of elements in the array. + * + * Calculates size of a flexible array of @count number of @member + * elements, at the end of structure @p. + * + * Return: number of bytes needed or SIZE_MAX on overflow. + */ +#define flex_array_size(p, member, count) \ + array_size(count, \ + sizeof(*(p)->member) + __must_be_array((p)->member)) + #endif /* __LINUX_OVERFLOW_H */ diff --git a/include/linux/padata.h b/include/linux/padata.h index a0d8b41850b2..a433f13fc4bf 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -4,6 +4,9 @@ * * Copyright (C) 2008, 2009 secunet Security Networks AG * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com> + * + * Copyright (c) 2020 Oracle and/or its affiliates. 
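As a usage sketch for the struct_size() and flex_array_size() helpers above: the struct sample type and sample_dup() helper are hypothetical, and <linux/slab.h>, <linux/string.h> and <linux/overflow.h> are assumed to be included.

struct sample {
	size_t count;
	u32 item[];			/* flexible array member */
};

static struct sample *sample_dup(const u32 *src, size_t n)
{
	/* header plus n trailing elements; returns SIZE_MAX on overflow */
	struct sample *s = kmalloc(struct_size(s, item, n), GFP_KERNEL);

	if (!s)
		return NULL;
	s->count = n;
	/* byte size of just the n-element flexible array */
	memcpy(s->item, src, flex_array_size(s, item, n));
	return s;
}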
+ * + * Author: Daniel Jordan <daniel.m.jordan@oracle.com> */ #ifndef PADATA_H @@ -24,7 +27,6 @@ * @list: List entry, to attach to the padata lists. * @pd: Pointer to the internal control structure. * @cb_cpu: Callback cpu for serialization. - * @cpu: Cpu for parallelization. * @seq_nr: Sequence number of the parallelized data object. * @info: Used to pass information from the parallel to the serial function. * @parallel: Parallel execution function. @@ -34,7 +36,6 @@ struct padata_priv { struct list_head list; struct parallel_data *pd; int cb_cpu; - int cpu; unsigned int seq_nr; int info; void (*parallel)(struct padata_priv *padata); @@ -66,21 +67,6 @@ struct padata_serial_queue { }; /** - * struct padata_parallel_queue - The percpu padata parallel queue - * - * @parallel: List to wait for parallelization. - * @reorder: List to wait for reordering after parallel processing. - * @work: work struct for parallelization. - * @num_obj: Number of objects that are processed by this cpu. - */ -struct padata_parallel_queue { - struct padata_list parallel; - struct padata_list reorder; - struct work_struct work; - atomic_t num_obj; -}; - -/** * struct padata_cpumask - The cpumasks for the parallel/serial workers * * @pcpu: cpumask for the parallel workers. @@ -96,7 +82,7 @@ struct padata_cpumask { * that depends on the cpumask in use. * * @ps: padata_shell object. - * @pqueue: percpu padata queues used for parallelization. + * @reorder_list: percpu reorder lists * @squeue: percpu padata queues used for serialization. * @refcnt: Number of objects holding a reference on this parallel_data. * @seq_nr: Sequence number of the parallelized data object. @@ -108,10 +94,10 @@ struct padata_cpumask { */ struct parallel_data { struct padata_shell *ps; - struct padata_parallel_queue __percpu *pqueue; + struct padata_list __percpu *reorder_list; struct padata_serial_queue __percpu *squeue; atomic_t refcnt; - atomic_t seq_nr; + unsigned int seq_nr; unsigned int processed; int cpu; struct padata_cpumask cpumask; @@ -137,25 +123,50 @@ struct padata_shell { }; /** + * struct padata_mt_job - represents one multithreaded job + * + * @thread_fn: Called for each chunk of work that a padata thread does. + * @fn_arg: The thread function argument. + * @start: The start of the job (units are job-specific). + * @size: size of this node's work (units are job-specific). + * @align: Ranges passed to the thread function fall on this boundary, with the + * possible exceptions of the beginning and end of the job. + * @min_chunk: The minimum chunk size in job-specific units. This allows + * the client to communicate the minimum amount of work that's + * appropriate for one worker thread to do at once. + * @max_threads: Max threads to use for the job, actual number may be less + * depending on task size and minimum chunk size. + */ +struct padata_mt_job { + void (*thread_fn)(unsigned long start, unsigned long end, void *arg); + void *fn_arg; + unsigned long start; + unsigned long size; + unsigned long align; + unsigned long min_chunk; + int max_threads; +}; + +/** * struct padata_instance - The overall control structure. * - * @node: Used by CPU hotplug. + * @cpu_online_node: Linkage for CPU online callback. + * @cpu_dead_node: Linkage for CPU offline callback. * @parallel_wq: The workqueue used for parallel work. * @serial_wq: The workqueue used for serial work. * @pslist: List of padata_shell objects attached to this instance. * @cpumask: User supplied cpumasks for parallel and serial works.
- * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask. * @kobj: padata instance kernel object. * @lock: padata instance lock. * @flags: padata flags. */ struct padata_instance { - struct hlist_node node; + struct hlist_node cpu_online_node; + struct hlist_node cpu_dead_node; struct workqueue_struct *parallel_wq; struct workqueue_struct *serial_wq; struct list_head pslist; struct padata_cpumask cpumask; - struct padata_cpumask rcpumask; struct kobject kobj; struct mutex lock; u8 flags; @@ -164,15 +175,20 @@ struct padata_instance { #define PADATA_INVALID 4 }; -extern struct padata_instance *padata_alloc_possible(const char *name); +#ifdef CONFIG_PADATA +extern void __init padata_init(void); +#else +static inline void __init padata_init(void) {} +#endif + +extern struct padata_instance *padata_alloc(const char *name); extern void padata_free(struct padata_instance *pinst); extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst); extern void padata_free_shell(struct padata_shell *ps); extern int padata_do_parallel(struct padata_shell *ps, struct padata_priv *padata, int *cb_cpu); extern void padata_do_serial(struct padata_priv *padata); +extern void __init padata_do_multithreaded(struct padata_mt_job *job); extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type, cpumask_var_t cpumask); -extern int padata_start(struct padata_instance *pinst); -extern void padata_stop(struct padata_instance *pinst); #endif diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 71283739ffd2..e200eef6a7fd 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -98,9 +98,11 @@ /* * We are going to use the flags for the page to node mapping if it's in * there. This includes the case where there is no node, so it is implicit. + * Note that this #define MUST have a value so that it can be tested with + * the IS_ENABLED() macro. */ #if !(NODES_WIDTH > 0 || NODES_SHIFT == 0) -#define NODE_NOT_IN_PAGE_FLAGS +#define NODE_NOT_IN_PAGE_FLAGS 1 #endif #if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 222f6f7b2bb3..6be1aa559b1e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -777,6 +777,16 @@ PAGE_TYPE_OPS(Buddy, buddy) * not onlined when onlining the section). * The content of these pages is effectively stale. Such pages should not * be touched (read/write/dump/save) except by their owner. + * + * If a driver wants to allow offlining of unmovable PageOffline() pages without + * putting them back to the buddy, it can do so via the memory notifier by + * decrementing the reference count in MEM_GOING_OFFLINE and incrementing the + * reference count in MEM_CANCEL_OFFLINE. When offlining, the PageOffline() + * pages (now with a reference count of zero) are treated like free pages, + * allowing the containing memory block to get offlined. A driver that + * relies on this feature is aware that re-onlining the memory block will + * require it to re-set the pages PageOffline() and not give them to the + * buddy via online_page_callback_t.
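A hypothetical memory-notifier sketch of the reference-count protocol just described; the drv_* helpers are assumptions, and virtio-mem is the kind of driver this is aimed at.

static int drv_memory_cb(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct memory_notify *mhp = arg;

	switch (action) {
	case MEM_GOING_OFFLINE:
		/* drop our references: the PageOffline() pages now look free */
		drv_put_offline_pages(mhp->start_pfn, mhp->nr_pages);
		break;
	case MEM_CANCEL_OFFLINE:
		/* offlining was aborted: take the references back */
		drv_get_offline_pages(mhp->start_pfn, mhp->nr_pages);
		break;
	}
	return NOTIFY_OK;
}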
*/ PAGE_TYPE_OPS(Offline, offline) diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index bab7e57f659b..85bd413e784e 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -10,6 +10,7 @@ struct page_counter { atomic_long_t usage; unsigned long min; unsigned long low; + unsigned long high; unsigned long max; struct page_counter *parent; @@ -55,6 +56,13 @@ bool page_counter_try_charge(struct page_counter *counter, void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages); void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages); + +static inline void page_counter_set_high(struct page_counter *counter, + unsigned long nr_pages) +{ + WRITE_ONCE(counter->high, nr_pages); +} + int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages); int page_counter_memparse(const char *buf, const char *max, unsigned long *nr_pages); diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index c066fec5b74b..fff52ad370c1 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -56,35 +56,25 @@ struct page; unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn, - unsigned long end_bitidx, unsigned long mask); void set_pfnblock_flags_mask(struct page *page, unsigned long flags, unsigned long pfn, - unsigned long end_bitidx, unsigned long mask); /* Declarations for getting and setting flags. See mm/page_alloc.c */ -#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), \ - end_bitidx, \ - (1 << (end_bitidx - start_bitidx + 1)) - 1) -#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \ - set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \ - end_bitidx, \ - (1 << (end_bitidx - start_bitidx + 1)) - 1) - #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ - get_pageblock_flags_group(page, PB_migrate_skip, \ - PB_migrate_skip) + get_pfnblock_flags_mask(page, page_to_pfn(page), \ + (1 << (PB_migrate_skip))) #define clear_pageblock_skip(page) \ - set_pageblock_flags_group(page, 0, PB_migrate_skip, \ - PB_migrate_skip) + set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \ + (1 << PB_migrate_skip)) #define set_pageblock_skip(page) \ - set_pageblock_flags_group(page, 1, PB_migrate_skip, \ - PB_migrate_skip) + set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \ + page_to_pfn(page), \ + (1 << PB_migrate_skip)) #else static inline bool get_pageblock_skip(struct page *page) { diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a8f7bd8ea1c6..434c9c34aeb6 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -51,7 +51,11 @@ static inline void mapping_set_error(struct address_space *mapping, int error) return; /* Record in wb_err for checkers using errseq_t based tracking */ - filemap_set_wb_err(mapping, error); + __filemap_set_wb_err(mapping, error); + + /* Record it in superblock */ + if (mapping->host) + errseq_set(&mapping->host->i_sb->s_wb_err, error); /* Record it in flags for now, for legacy callers */ if (error == -ENOSPC) @@ -205,6 +209,43 @@ static inline int page_cache_add_speculative(struct page *page, int count) return __page_cache_add_speculative(page, count); } +/** + * attach_page_private - Attach private data to a page. + * @page: Page to attach data to. + * @data: Data to attach to page. 
+ * + * Attaching private data to a page increments the page's reference count. + * The data must be detached before the page will be freed. + */ +static inline void attach_page_private(struct page *page, void *data) +{ + get_page(page); + set_page_private(page, (unsigned long)data); + SetPagePrivate(page); +} + +/** + * detach_page_private - Detach private data from a page. + * @page: Page to detach data from. + * + * Removes the data that was previously attached to the page and decrements + * the refcount on the page. + * + * Return: Data that was attached to the page. + */ +static inline void *detach_page_private(struct page *page) +{ + void *data = (void *)page_private(page); + + if (!PagePrivate(page)) + return NULL; + ClearPagePrivate(page); + set_page_private(page, 0); + put_page(page); + + return data; +} + #ifdef CONFIG_NUMA extern struct page *__page_cache_alloc(gfp_t gfp); #else @@ -341,7 +382,7 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index) if (PageHuge(head)) return head; - return head + (index & (hpage_nr_pages(head) - 1)); + return head + (index & (thp_nr_pages(head) - 1)); } struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); @@ -456,8 +497,35 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, return pgoff; } +/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */ +struct wait_page_key { + struct page *page; + int bit_nr; + int page_match; +}; + +struct wait_page_queue { + struct page *page; + int bit_nr; + wait_queue_entry_t wait; +}; + +static inline bool wake_page_match(struct wait_page_queue *wait_page, + struct wait_page_key *key) +{ + if (wait_page->page != key->page) + return false; + key->page_match = 1; + + if (wait_page->bit_nr != key->bit_nr) + return false; + + return true; +} + extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); +extern int __lock_page_async(struct page *page, struct wait_page_queue *wait); extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, unsigned int flags); extern void unlock_page(struct page *page); @@ -495,10 +563,26 @@ static inline int lock_page_killable(struct page *page) } /* + * lock_page_async - Lock the page, unless this would block. If the page + * is already locked, then queue a callback when the page becomes unlocked. + * This callback can then retry the operation. + * + * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page + * was already locked and the callback defined in 'wait' was queued. + */ +static inline int lock_page_async(struct page *page, + struct wait_page_queue *wait) +{ + if (!trylock_page(page)) + return __lock_page_async(page, wait); + return 0; +} + +/* * lock_page_or_retry - Lock the page, unless this would block and the * caller indicated that it can handle a retry. * - * Return value and mmap_sem implications depend on flags; see + * Return value and mmap_lock implications depend on flags; see * __lock_page_or_retry(). 
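A small usage sketch for the attach_page_private()/detach_page_private() helpers above: a filesystem ties a per-page tracking structure (hypothetical here) to a page, and frees it on release.

struct fs_page_info {			/* hypothetical per-page state */
	unsigned long state;
};

static void fs_init_page(struct page *page, struct fs_page_info *info)
{
	attach_page_private(page, info);	/* takes a page reference */
}

static void fs_release_page(struct page *page)
{
	struct fs_page_info *info = detach_page_private(page);

	kfree(info);	/* NULL-safe; page ref already dropped by detach */
}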
*/ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, @@ -615,6 +699,17 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); void delete_from_page_cache_batch(struct address_space *mapping, struct pagevec *pvec); +#define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) + +void page_cache_sync_readahead(struct address_space *, struct file_ra_state *, + struct file *, pgoff_t index, unsigned long req_count); +void page_cache_async_readahead(struct address_space *, struct file_ra_state *, + struct file *, struct page *, pgoff_t index, + unsigned long req_count); +void page_cache_readahead_unbounded(struct address_space *, struct file *, + pgoff_t index, unsigned long nr_to_read, + unsigned long lookahead_count); + /* * Like add_to_page_cache_locked, but used to add newly allocated pages: * the page is new, so we can just run __SetPageLocked() against it. @@ -631,6 +726,146 @@ static inline int add_to_page_cache(struct page *page, return error; } +/** + * struct readahead_control - Describes a readahead request. + * + * A readahead request is for consecutive pages. Filesystems which + * implement the ->readahead method should call readahead_page() or + * readahead_page_batch() in a loop and attempt to start I/O against + * each page in the request. + * + * Most of the fields in this struct are private and should be accessed + * by the functions below. + * + * @file: The file, used primarily by network filesystems for authentication. + * May be NULL if invoked internally by the filesystem. + * @mapping: Readahead this filesystem object. + */ +struct readahead_control { + struct file *file; + struct address_space *mapping; +/* private: use the readahead_* accessors instead */ + pgoff_t _index; + unsigned int _nr_pages; + unsigned int _batch_count; +}; + +/** + * readahead_page - Get the next page to read. + * @rac: The current readahead request. + * + * Context: The page is locked and has an elevated refcount. The caller + * should decrease the refcount once the page has been submitted for I/O + * and unlock the page once all I/O to that page has completed. + * Return: A pointer to the next page, or %NULL if we are done. + */ +static inline struct page *readahead_page(struct readahead_control *rac) +{ + struct page *page; + + BUG_ON(rac->_batch_count > rac->_nr_pages); + rac->_nr_pages -= rac->_batch_count; + rac->_index += rac->_batch_count; + + if (!rac->_nr_pages) { + rac->_batch_count = 0; + return NULL; + } + + page = xa_load(&rac->mapping->i_pages, rac->_index); + VM_BUG_ON_PAGE(!PageLocked(page), page); + rac->_batch_count = thp_nr_pages(page); + + return page; +} + +static inline unsigned int __readahead_batch(struct readahead_control *rac, + struct page **array, unsigned int array_sz) +{ + unsigned int i = 0; + XA_STATE(xas, &rac->mapping->i_pages, 0); + struct page *page; + + BUG_ON(rac->_batch_count > rac->_nr_pages); + rac->_nr_pages -= rac->_batch_count; + rac->_index += rac->_batch_count; + rac->_batch_count = 0; + + xas_set(&xas, rac->_index); + rcu_read_lock(); + xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { + VM_BUG_ON_PAGE(!PageLocked(page), page); + VM_BUG_ON_PAGE(PageTail(page), page); + array[i++] = page; + rac->_batch_count += thp_nr_pages(page); + + /* + * The page cache isn't using multi-index entries yet, + * so the xas cursor needs to be manually moved to the + * next index. This can be removed once the page cache + * is converted.
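A hedged sketch of the loop the readahead_control comment above prescribes for a filesystem's ->readahead method; fs_submit_read() is a hypothetical helper that unlocks the page and drops its reference once the read completes.

static void fs_readahead(struct readahead_control *rac)
{
	struct page *page;

	while ((page = readahead_page(rac))) {
		/* page is locked and holds an elevated refcount */
		fs_submit_read(rac->file, page, page_offset(page));
	}
}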
+ */ + if (PageHead(page)) + xas_set(&xas, rac->_index + rac->_batch_count); + + if (i == array_sz) + break; + } + rcu_read_unlock(); + + return i; +} + +/** + * readahead_page_batch - Get a batch of pages to read. + * @rac: The current readahead request. + * @array: An array of pointers to struct page. + * + * Context: The pages are locked and have an elevated refcount. The caller + * should decrease the refcount once the page has been submitted for I/O + * and unlock the page once all I/O to that page has completed. + * Return: The number of pages placed in the array. 0 indicates the request + * is complete. + */ +#define readahead_page_batch(rac, array) \ + __readahead_batch(rac, array, ARRAY_SIZE(array)) + +/** + * readahead_pos - The byte offset into the file of this readahead request. + * @rac: The readahead request. + */ +static inline loff_t readahead_pos(struct readahead_control *rac) +{ + return (loff_t)rac->_index * PAGE_SIZE; +} + +/** + * readahead_length - The number of bytes in this readahead request. + * @rac: The readahead request. + */ +static inline loff_t readahead_length(struct readahead_control *rac) +{ + return (loff_t)rac->_nr_pages * PAGE_SIZE; +} + +/** + * readahead_index - The index of the first page in this readahead request. + * @rac: The readahead request. + */ +static inline pgoff_t readahead_index(struct readahead_control *rac) +{ + return rac->_index; +} + +/** + * readahead_count - The number of pages in this readahead request. + * @rac: The readahead request. + */ +static inline unsigned int readahead_count(struct readahead_control *rac) +{ + return rac->_nr_pages; +} + static inline unsigned long dir_pages(struct inode *inode) { return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> diff --git a/include/linux/parport.h b/include/linux/parport.h index 13932ce8b37b..1fb508c19e83 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -325,18 +325,10 @@ struct pardev_cb { unsigned int flags; }; -/* parport_register_device declares that a device is connected to a - port, and tells the kernel all it needs to know. - - pf is the preemption function (may be NULL for no callback) - - kf is the wake-up function (may be NULL for no callback) - - irq_func is the interrupt handler (may be NULL for no interrupts) - - handle is a user pointer that gets handed to callback functions. */ -struct pardevice *parport_register_device(struct parport *port, - const char *name, - int (*pf)(void *), void (*kf)(void *), - void (*irq_func)(void *), - int flags, void *handle); - +/* + * parport_register_dev_model declares that a device is connected to a + * port, and tells the kernel all it needs to know. + */ struct pardevice * parport_register_dev_model(struct parport *port, const char *name, const struct pardev_cb *par_dev_cb, int cnt); diff --git a/include/linux/parser.h b/include/linux/parser.h index 12fc3482f5fc..89e2b23fb888 100644 --- a/include/linux/parser.h +++ b/include/linux/parser.h @@ -7,7 +7,8 @@ * but could potentially be used anywhere else that simple option=arg * parsing is required. */ - +#ifndef _LINUX_PARSER_H +#define _LINUX_PARSER_H /* associates an integer enumerator with a pattern string.
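The classic consumption pattern for the option parser declared below, as a sketch: a token table maps pattern strings to enumerators, then match_token()/match_int() handle each option. The Opt_* names and fs_parse_one() are illustrative; match_table_t and MAX_OPT_ARGS are assumed from the rest of this header.

enum { Opt_uid, Opt_ro, Opt_err };

static const match_table_t tokens = {
	{ Opt_uid, "uid=%u" },
	{ Opt_ro,  "ro" },
	{ Opt_err, NULL },
};

static int fs_parse_one(char *p, unsigned int *uid, bool *ro)
{
	substring_t args[MAX_OPT_ARGS];
	int val;

	switch (match_token(p, tokens, args)) {
	case Opt_uid:
		if (match_int(&args[0], &val))
			return -EINVAL;
		*uid = val;
		return 0;
	case Opt_ro:
		*ro = true;
		return 0;
	}
	return -EINVAL;
}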
*/ struct match_token { @@ -34,3 +35,5 @@ int match_hex(substring_t *, int *result); bool match_wildcard(const char *pattern, const char *str); size_t match_strlcpy(char *, const substring_t *, size_t); char *match_strdup(const substring_t *); + +#endif /* _LINUX_PARSER_H */ diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index ece607607a86..24125778ef3e 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -4,21 +4,25 @@ #include <linux/genhd.h> +struct disk_stats { + u64 nsecs[NR_STAT_GROUPS]; + unsigned long sectors[NR_STAT_GROUPS]; + unsigned long ios[NR_STAT_GROUPS]; + unsigned long merges[NR_STAT_GROUPS]; + unsigned long io_ticks; + local_t in_flight[2]; +}; + /* * Macros to operate on percpu disk statistics: * - * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters - * and should be called between disk_stat_lock() and - * disk_stat_unlock(). + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters and should + * be called between disk_stat_lock() and disk_stat_unlock(). * * part_stat_read() can be called at any time. - * - * part_stat_{add|set_all}() and {init|free}_part_stats are for - * internal use only. */ -#ifdef CONFIG_SMP -#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); }) -#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) +#define part_stat_lock() preempt_disable() +#define part_stat_unlock() preempt_enable() #define part_stat_get_cpu(part, field, cpu) \ (per_cpu_ptr((part)->dkstats, (cpu))->field) @@ -44,50 +48,13 @@ static inline void part_stat_set_all(struct hd_struct *part, int value) sizeof(struct disk_stats)); } -static inline int init_part_stats(struct hd_struct *part) -{ - part->dkstats = alloc_percpu(struct disk_stats); - if (!part->dkstats) - return 0; - return 1; -} - -static inline void free_part_stats(struct hd_struct *part) -{ - free_percpu(part->dkstats); -} - -#else /* !CONFIG_SMP */ -#define part_stat_lock() ({ rcu_read_lock(); 0; }) -#define part_stat_unlock() rcu_read_unlock() - -#define part_stat_get(part, field) ((part)->dkstats.field) -#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field) -#define part_stat_read(part, field) part_stat_get(part, field) - -static inline void part_stat_set_all(struct hd_struct *part, int value) -{ - memset(&part->dkstats, value, sizeof(struct disk_stats)); -} - -static inline int init_part_stats(struct hd_struct *part) -{ - return 1; -} - -static inline void free_part_stats(struct hd_struct *part) -{ -} - -#endif /* CONFIG_SMP */ - #define part_stat_read_accum(part, field) \ (part_stat_read(part, field[STAT_READ]) + \ part_stat_read(part, field[STAT_WRITE]) + \ part_stat_read(part, field[STAT_DISCARD])) #define __part_stat_add(part, field, addnd) \ - (part_stat_get(part, field) += (addnd)) + __this_cpu_add((part)->dkstats->field, addnd) #define part_stat_add(part, field, addnd) do { \ __part_stat_add((part), field, addnd); \ diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 2d155bfb8fbf..5ba475ca9078 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -27,7 +27,7 @@ extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); struct pci_ecam_ops; extern int pci_mcfg_lookup(struct acpi_pci_root *root, struct resource *cfgres, - struct pci_ecam_ops **ecam_ops); + const struct pci_ecam_ops **ecam_ops); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) { @@ -107,10 +107,12 @@ static inline void acpiphp_check_host_bridge(struct acpi_device 
*adev) { } #endif extern const guid_t pci_acpi_dsm_guid; -#define IGNORE_PCI_BOOT_CONFIG_DSM 0x05 -#define DEVICE_LABEL_DSM 0x07 -#define RESET_DELAY_DSM 0x08 -#define FUNCTION_DELAY_DSM 0x09 + +/* _DSM Definitions for PCI */ +#define DSM_PCI_PRESERVE_BOOT_CONFIG 0x05 +#define DSM_PCI_DEVICE_NAME 0x07 +#define DSM_PCI_POWER_ON_RESET_DELAY 0x08 +#define DSM_PCI_DEVICE_READINESS_DURATIONS 0x09 #ifdef CONFIG_PCIE_EDR void pci_acpi_add_edr_notifier(struct pci_dev *pdev); @@ -125,10 +127,4 @@ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } #endif /* CONFIG_ACPI */ -#ifdef CONFIG_ACPI_APEI -extern bool aer_acpi_firmware_first(void); -#else -static inline bool aer_acpi_firmware_first(void) { return false; } -#endif - #endif /* _PCI_ACPI_H_ */ diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index d08f0869f121..df54cd5b15db 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -6,11 +6,14 @@ #ifdef CONFIG_PCI_ATS /* Address Translation Service */ +bool pci_ats_supported(struct pci_dev *dev); int pci_enable_ats(struct pci_dev *dev, int ps); void pci_disable_ats(struct pci_dev *dev); int pci_ats_queue_depth(struct pci_dev *dev); int pci_ats_page_aligned(struct pci_dev *dev); #else /* CONFIG_PCI_ATS */ +static inline bool pci_ats_supported(struct pci_dev *d) +{ return false; } static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } static inline void pci_disable_ats(struct pci_dev *d) { } @@ -25,6 +28,10 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs); void pci_disable_pri(struct pci_dev *pdev); int pci_reset_pri(struct pci_dev *pdev); int pci_prg_resp_pasid_required(struct pci_dev *pdev); +bool pci_pri_supported(struct pci_dev *pdev); +#else +static inline bool pci_pri_supported(struct pci_dev *pdev) +{ return false; } #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index a73164c85e78..1af5cb02ef7f 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -29,7 +29,7 @@ struct pci_config_window { struct resource res; struct resource busr; void *priv; - struct pci_ecam_ops *ops; + const struct pci_ecam_ops *ops; union { void __iomem *win; /* 64-bit single mapping */ void __iomem **winp; /* 32-bit per-bus mapping */ @@ -40,29 +40,28 @@ struct pci_config_window { /* create and free pci_config_window */ struct pci_config_window *pci_ecam_create(struct device *dev, struct resource *cfgres, struct resource *busr, - struct pci_ecam_ops *ops); + const struct pci_ecam_ops *ops); void pci_ecam_free(struct pci_config_window *cfg); /* map_bus when ->sysdata is an instance of pci_config_window */ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where); /* default ECAM ops */ -extern struct pci_ecam_ops pci_generic_ecam_ops; +extern const struct pci_ecam_ops pci_generic_ecam_ops; #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) -extern struct pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ -extern struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ -extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ -extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ -extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ -extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ -extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ +extern const struct 
pci_ecam_ops pci_32b_ops; /* 32-bit accesses only */ +extern const struct pci_ecam_ops hisi_pcie_ops; /* HiSilicon */ +extern const struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ +extern const struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ +extern const struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ +extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ +extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ #endif -#ifdef CONFIG_PCI_HOST_COMMON +#if IS_ENABLED(CONFIG_PCI_HOST_COMMON) /* for DT-based PCI controllers that support ECAM */ -int pci_host_common_probe(struct platform_device *pdev, - struct pci_ecam_ops *ops); +int pci_host_common_probe(struct platform_device *pdev); int pci_host_common_remove(struct platform_device *pdev); #endif #endif diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index e0ed9d01f6e5..cc66bec8be90 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -66,19 +66,27 @@ struct pci_epc_ops { }; /** + * struct pci_epc_mem_window - address window of the endpoint controller + * @phys_base: physical base address of the PCI address window + * @size: the size of the PCI address window + * @page_size: size of each page + */ +struct pci_epc_mem_window { + phys_addr_t phys_base; + size_t size; + size_t page_size; +}; + +/** * struct pci_epc_mem - address space of the endpoint controller - * @phys_base: physical base address of the PCI address space - * @size: the size of the PCI address space + * @window: address window of the endpoint controller * @bitmap: bitmap to manage the PCI address space * @pages: number of bits representing the address region - * @page_size: size of each page * @lock: mutex to protect bitmap */ struct pci_epc_mem { - phys_addr_t phys_base; - size_t size; + struct pci_epc_mem_window window; unsigned long *bitmap; - size_t page_size; int pages; /* mutex to protect against concurrent access for memory allocation*/ struct mutex lock; @@ -89,7 +97,11 @@ struct pci_epc_mem { * @dev: PCI EPC device * @pci_epf: list of endpoint functions present in this EPC device * @ops: function pointers for performing endpoint operations - * @mem: address space of the endpoint controller + * @windows: array of address space of the endpoint controller + * @mem: first window of the endpoint controller, which corresponds to + * default address space of the endpoint controller supporting + * single window. 
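A sketch of how an endpoint controller driver might register several outbound windows with the pci_epc_multi_mem_init() interface declared below; the base addresses and sizes are invented for illustration.

static int epc_drv_init_windows(struct pci_epc *epc)
{
	static struct pci_epc_mem_window windows[] = {
		{ .phys_base = 0x80000000, .size = SZ_1M,  .page_size = SZ_4K },
		{ .phys_base = 0x90000000, .size = SZ_64K, .page_size = SZ_4K },
	};

	return pci_epc_multi_mem_init(epc, windows, ARRAY_SIZE(windows));
}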
+ * @num_windows: number of windows supported by device * @max_functions: max number of functions that can be configured in this EPC * @group: configfs group representing the PCI EPC device * @lock: mutex to protect pci_epc ops @@ -100,7 +112,9 @@ struct pci_epc { struct device dev; struct list_head pci_epf; const struct pci_epc_ops *ops; + struct pci_epc_mem **windows; struct pci_epc_mem *mem; + unsigned int num_windows; u8 max_functions; struct config_group *group; /* mutex to protect against concurrent access of EP controller */ @@ -137,9 +151,6 @@ struct pci_epc_features { #define devm_pci_epc_create(dev, ops) \ __devm_pci_epc_create((dev), (ops), THIS_MODULE) -#define pci_epc_mem_init(epc, phys_addr, size) \ - __pci_epc_mem_init((epc), (phys_addr), (size), PAGE_SIZE) - static inline void epc_set_drvdata(struct pci_epc *epc, void *data) { dev_set_drvdata(&epc->dev, data); @@ -195,8 +206,11 @@ unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features struct pci_epc *pci_epc_get(const char *epc_name); void pci_epc_put(struct pci_epc *epc); -int __pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size, - size_t page_size); +int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base, + size_t size, size_t page_size); +int pci_epc_multi_mem_init(struct pci_epc *epc, + struct pci_epc_mem_window *window, + unsigned int num_windows); void pci_epc_mem_exit(struct pci_epc *epc); void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size); diff --git a/include/linux/pci.h b/include/linux/pci.h index 83ce1cdf5676..835530605c0d 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -100,9 +100,21 @@ enum { PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, #endif - /* Resources assigned to buses behind the bridge */ +/* PCI-to-PCI (P2P) bridge windows */ +#define PCI_BRIDGE_IO_WINDOW (PCI_BRIDGE_RESOURCES + 0) +#define PCI_BRIDGE_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 1) +#define PCI_BRIDGE_PREF_MEM_WINDOW (PCI_BRIDGE_RESOURCES + 2) + +/* CardBus bridge windows */ +#define PCI_CB_BRIDGE_IO_0_WINDOW (PCI_BRIDGE_RESOURCES + 0) +#define PCI_CB_BRIDGE_IO_1_WINDOW (PCI_BRIDGE_RESOURCES + 1) +#define PCI_CB_BRIDGE_MEM_0_WINDOW (PCI_BRIDGE_RESOURCES + 2) +#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3) + +/* Total number of bridge resources for P2P and CardBus */ #define PCI_BRIDGE_RESOURCE_NUM 4 + /* Resources assigned to buses behind the bridge */ PCI_BRIDGE_RESOURCES, PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + PCI_BRIDGE_RESOURCE_NUM - 1, @@ -167,7 +179,7 @@ static inline const char *pci_power_name(pci_power_t state) */ typedef unsigned int __bitwise pci_channel_state_t; -enum pci_channel_state { +enum { /* I/O channel is in normal state */ pci_channel_io_normal = (__force pci_channel_state_t) 1, @@ -279,7 +291,7 @@ struct pci_cap_saved_data { u16 cap_nr; bool cap_extended; unsigned int size; - u32 data[0]; + u32 data[]; }; struct pci_cap_saved_state { @@ -420,8 +432,12 @@ struct pci_dev { * mappings to make sure they cannot access arbitrary memory. */ unsigned int untrusted:1; - unsigned int __aer_firmware_first_valid:1; - unsigned int __aer_firmware_first:1; + /* + * Info from the platform, e.g., ACPI or device tree, may mark a + * device as "external-facing". An external-facing device is + * itself internal but devices downstream from it are external. 
+ */ + unsigned int external_facing:1; unsigned int broken_intx_masking:1; /* INTx masking can't be used */ unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ unsigned int irq_managed:1; @@ -476,6 +492,7 @@ struct pci_dev { #ifdef CONFIG_PCI_P2PDMA struct pci_p2pdma *p2pdma; #endif + u16 acs_cap; /* ACS Capability offset */ phys_addr_t rom; /* Physical address if not from BAR */ size_t romlen; /* Length if not from BAR */ char *driver_override; /* Driver name to force a match */ @@ -532,7 +549,7 @@ struct pci_host_bridge { resource_size_t start, resource_size_t size, resource_size_t align); - unsigned long private[0] ____cacheline_aligned; + unsigned long private[] ____cacheline_aligned; }; #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) @@ -775,7 +792,7 @@ enum pci_ers_result { struct pci_error_handlers { /* PCI bus error detected on this device */ pci_ers_result_t (*error_detected)(struct pci_dev *dev, - enum pci_channel_state error); + pci_channel_state_t error); /* MMIO has been re-enabled, but not DMA */ pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); @@ -1025,7 +1042,6 @@ void pci_bus_add_device(struct pci_dev *dev); void pci_read_bridge_bases(struct pci_bus *child); struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); -struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev); u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin); int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); @@ -1044,13 +1060,6 @@ void pci_sort_breadthfirst(void); /* Generic PCI functions exported to card drivers */ -enum pci_lost_interrupt_reason { - PCI_LOST_IRQ_NO_INFORMATION = 0, - PCI_LOST_IRQ_DISABLE_MSI, - PCI_LOST_IRQ_DISABLE_MSIX, - PCI_LOST_IRQ_DISABLE_ACPI, -}; -enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev); int pci_find_capability(struct pci_dev *dev, int cap); int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); int pci_find_ext_capability(struct pci_dev *dev, int cap); @@ -2048,6 +2057,8 @@ int pci_iov_virtfn_devfn(struct pci_dev *dev, int id); int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); void pci_disable_sriov(struct pci_dev *dev); + +int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id); int pci_iov_add_virtfn(struct pci_dev *dev, int id); void pci_iov_remove_virtfn(struct pci_dev *dev, int id); int pci_num_vf(struct pci_dev *dev); @@ -2073,6 +2084,12 @@ static inline int pci_iov_virtfn_devfn(struct pci_dev *dev, int id) } static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { return -ENODEV; } + +static inline int pci_iov_sysfs_link(struct pci_dev *dev, + struct pci_dev *virtfn, int id) +{ + return -ENODEV; +} static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id) { return -ENOSYS; @@ -2143,17 +2160,22 @@ static inline int pci_pcie_type(const struct pci_dev *dev) return (pcie_caps_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4; } +/** + * pcie_find_root_port - Get the PCIe root port device + * @dev: PCI device + * + * Traverse up the parent chain and return the PCIe Root Port PCI Device + * for a given PCI/PCIe Device. 
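A usage sketch for the helper documented above: walk up to the Root Port and test a port-level extended capability. The AER check is purely illustrative; pcie_find_root_port() and pci_find_ext_capability() are the interfaces from this header.

static bool dev_root_port_has_aer(struct pci_dev *pdev)
{
	struct pci_dev *rp = pcie_find_root_port(pdev);

	return rp && pci_find_ext_capability(rp, PCI_EXT_CAP_ID_ERR);
}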
+ */ static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev) { - while (1) { - if (!pci_is_pcie(dev)) - break; - if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + while (dev) { + if (pci_is_pcie(dev) && + pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) return dev; - if (!dev->bus->self) - break; - dev = dev->bus->self; + dev = pci_upstream_bridge(dev); } + return NULL; } @@ -2280,10 +2302,6 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, struct device_node; struct irq_domain; struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); -int pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range); /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); @@ -2291,14 +2309,6 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); #else /* CONFIG_OF */ static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } -static inline int -pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range) -{ - return -EINVAL; -} #endif /* CONFIG_OF */ static inline struct device_node * diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 1dfc4e1dcb94..1ab1e24bcbce 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -550,6 +550,7 @@ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 #define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 +#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 @@ -1832,6 +1833,12 @@ #define PCI_VENDOR_ID_NVIDIA_SGS 0x12d2 #define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018 +#define PCI_VENDOR_ID_PERICOM 0x12D8 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7952 0x7952 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7954 0x7954 +#define PCI_DEVICE_ID_PERICOM_PI7C9X7958 0x7958 + #define PCI_SUBVENDOR_ID_CHASE_PCIFAST 0x12E0 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4 0x0031 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST8 0x0021 @@ -2578,6 +2585,8 @@ #define PCI_VENDOR_ID_ASMEDIA 0x1b21 +#define PCI_VENDOR_ID_REDHAT 0x1b36 + #define PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS 0x1c36 #define PCI_VENDOR_ID_CIRCUITCO 0x1cc8 @@ -2652,6 +2661,8 @@ #define PCI_DEVICE_ID_INTEL_80332_1 0x0332 #define PCI_DEVICE_ID_INTEL_80333_0 0x0370 #define PCI_DEVICE_ID_INTEL_80333_1 0x0372 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435 +#define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443 #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 @@ -2701,6 +2712,8 @@ #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577 #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578 #define PCI_DEVICE_ID_INTEL_80960_RP 0x1960 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2 +#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3 #define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21 #define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30 #define PCI_DEVICE_ID_INTEL_IOAT 0x1a38 @@ -2917,6 +2930,8 @@ #define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 #define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 #define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 +#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8 +#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 
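For illustration only: how device ID constants such as the Pericom entries added above are normally consumed, in a driver's match table. The driver itself is hypothetical.

static const struct pci_device_id demo_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_ids);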
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 22d9d183950d..87d8a38bdea1 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -155,7 +155,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, * between contaminating the pointer value, meaning that * READ_ONCE() is required when fetching it. * - * The smp_read_barrier_depends() implied by READ_ONCE() pairs + * The dependency ordering from the READ_ONCE() pairs * with smp_store_release() in __percpu_ref_switch_to_percpu(). */ percpu_ptr = READ_ONCE(ref->percpu_count_ptr); diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 5e033fe1ff4e..5fda40f97fe9 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem) * anything we did within this RCU-sched read-side critical section. */ if (likely(rcu_sync_is_idle(&sem->rss))) - __this_cpu_inc(*sem->read_count); + this_cpu_inc(*sem->read_count); else __percpu_down_read(sem, false); /* Unconditional memory barrier */ /* @@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem) * Same as in percpu_down_read(). */ if (likely(rcu_sync_is_idle(&sem->rss))) - __this_cpu_inc(*sem->read_count); + this_cpu_inc(*sem->read_count); else ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */ preempt_enable(); @@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem) * Same as in percpu_down_read(). */ if (likely(rcu_sync_is_idle(&sem->rss))) { - __this_cpu_dec(*sem->read_count); + this_cpu_dec(*sem->read_count); } else { /* * slowpath; reader will only ever wake a single blocked @@ -115,7 +115,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem) * aggregate zero, as that is the only time it matters) they * will also see our critical section.
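A minimal reader-side sketch of the fast path discussed above: when no writer is pending, percpu_down_read()/percpu_up_read() reduce to a per-CPU increment and decrement. The semaphore is assumed to have been set up elsewhere with percpu_init_rwsem().

static struct percpu_rw_semaphore demo_sem;

static void demo_reader(void)
{
	percpu_down_read(&demo_sem);	/* usually just this_cpu_inc() */
	/* ... read-side critical section ... */
	percpu_up_read(&demo_sem);	/* usually just this_cpu_dec() */
}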
*/ - __this_cpu_dec(*sem->read_count); + this_cpu_dec(*sem->read_count); rcuwait_wake_up(&sem->writer); } preempt_enable(); diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 0a4f54dd4737..01861eebed79 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h @@ -44,6 +44,7 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch); s64 __percpu_counter_sum(struct percpu_counter *fbc); int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch); +void percpu_counter_sync(struct percpu_counter *fbc); static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) { @@ -172,6 +173,9 @@ static inline bool percpu_counter_initialized(struct percpu_counter *fbc) return true; } +static inline void percpu_counter_sync(struct percpu_counter *fbc) +{ +} #endif /* CONFIG_SMP */ static inline void percpu_counter_inc(struct percpu_counter *fbc) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9c3e7619c929..04a49ccc7beb 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -61,7 +61,7 @@ struct perf_guest_info_callbacks { struct perf_callchain_entry { __u64 nr; - __u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */ + __u64 ip[]; /* /proc/sys/kernel/perf_event_max_stack */ }; struct perf_callchain_entry_ctx { @@ -113,7 +113,7 @@ struct perf_raw_record { struct perf_branch_stack { __u64 nr; __u64 hw_idx; - struct perf_branch_entry entries[0]; + struct perf_branch_entry entries[]; }; struct task_struct; @@ -366,7 +366,7 @@ struct pmu { * ->stop() with PERF_EF_UPDATE will read the counter and update * period/count values like ->read() would. * - * ->start() with PERF_EF_RELOAD will reprogram the the counter + * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. */ void (*start) (struct perf_event *event, int flags); @@ -419,10 +419,11 @@ struct pmu { */ void (*sched_task) (struct perf_event_context *ctx, bool sched_in); + /* - * PMU specific data size + * Kmem cache of PMU specific data */ - size_t task_ctx_size; + struct kmem_cache *task_ctx_cache; /* * PMU specific parts of task perf event context (i.e. 
ctx->task_ctx_data) @@ -1232,6 +1233,9 @@ extern void perf_event_exec(void); extern void perf_event_comm(struct task_struct *tsk, bool exec); extern void perf_event_namespaces(struct task_struct *tsk); extern void perf_event_fork(struct task_struct *tsk); +extern void perf_event_text_poke(const void *addr, + const void *old_bytes, size_t old_len, + const void *new_bytes, size_t new_len); /* Callchains */ DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); @@ -1244,6 +1248,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(int max_stack); extern void put_callchain_buffers(void); +extern struct perf_callchain_entry *get_callchain_entry(int *rctx); +extern void put_callchain_entry(int rctx); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; @@ -1280,15 +1286,12 @@ extern int sysctl_perf_cpu_time_max_percent; extern void perf_sample_event_took(u64 sample_len_ns); -extern int perf_proc_update_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - +int perf_proc_update_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); int perf_event_max_stack_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); /* Access to perf_event_open(2) syscall. 
*/ #define PERF_SECURITY_OPEN 0 @@ -1305,7 +1308,7 @@ static inline int perf_is_paranoid(void) static inline int perf_allow_kernel(struct perf_event_attr *attr) { - if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN)) + if (sysctl_perf_event_paranoid > 1 && !perfmon_capable()) return -EACCES; return security_perf_event_open(attr, PERF_SECURITY_KERNEL); @@ -1313,7 +1316,7 @@ static inline int perf_allow_kernel(struct perf_event_attr *attr) static inline int perf_allow_cpu(struct perf_event_attr *attr) { - if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN)) + if (sysctl_perf_event_paranoid > 0 && !perfmon_capable()) return -EACCES; return security_perf_event_open(attr, PERF_SECURITY_CPU); @@ -1321,7 +1324,7 @@ static inline int perf_allow_cpu(struct perf_event_attr *attr) static inline int perf_allow_tracepoint(struct perf_event_attr *attr) { - if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN)) + if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) return -EPERM; return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT); @@ -1482,6 +1485,11 @@ static inline void perf_event_exec(void) { } static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } static inline void perf_event_namespaces(struct task_struct *tsk) { } static inline void perf_event_fork(struct task_struct *tsk) { } +static inline void perf_event_text_poke(const void *addr, + const void *old_bytes, + size_t old_len, + const void *new_bytes, + size_t new_len) { } static inline void perf_event_init(void) { } static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h new file mode 100644 index 000000000000..90654cb63e9e --- /dev/null +++ b/include/linux/pgtable.h @@ -0,0 +1,1460 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_PGTABLE_H +#define _LINUX_PGTABLE_H + +#include <linux/pfn.h> +#include <asm/pgtable.h> + +#ifndef __ASSEMBLY__ +#ifdef CONFIG_MMU + +#include <linux/mm_types.h> +#include <linux/bug.h> +#include <linux/errno.h> +#include <asm-generic/pgtable_uffd.h> + +#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \ + defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS +#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED +#endif + +/* + * On almost all architectures and configurations, 0 can be used as the + * upper ceiling to free_pgtables(): on many architectures it has the same + * effect as using TASK_SIZE. However, there is one configuration which + * must impose a more careful limit, to avoid freeing kernel pgtables. + */ +#ifndef USER_PGTABLES_CEILING +#define USER_PGTABLES_CEILING 0UL +#endif + +/* + * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD] + * + * The pXx_index() functions return the index of the entry in the page + * table page which would control the given virtual address. + * + * As these functions may be used by the same code for different levels of + * the page table folding, they are always available, regardless of + * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0 + * because in such cases PTRS_PER_PxD equals 1.
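A worked sketch of the index helpers described above. With x86-64's 4 KiB pages and 9 bits per level, an address such as 0x7f0000201000 gives pte_index() == 1 and pmd_index() == 1, since bits 12-20 and 21-29 select the PTE and PMD slots respectively. The printing helper is illustrative only.

static void show_indices(unsigned long addr)
{
	pr_info("pte %lu pmd %lu pud %lu pgd %lu\n",
		pte_index(addr), pmd_index(addr),
		pud_index(addr), pgd_index(addr));
}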
+ */ + +static inline unsigned long pte_index(unsigned long address) +{ + return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); +} + +#ifndef pmd_index +static inline unsigned long pmd_index(unsigned long address) +{ + return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); +} +#define pmd_index pmd_index +#endif + +#ifndef pud_index +static inline unsigned long pud_index(unsigned long address) +{ + return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); +} +#define pud_index pud_index +#endif + +#ifndef pgd_index +/* Must be a compile-time constant, so implement it as a macro */ +#define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) +#endif + +#ifndef pte_offset_kernel +static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) +{ + return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); +} +#define pte_offset_kernel pte_offset_kernel +#endif + +#if defined(CONFIG_HIGHPTE) +#define pte_offset_map(dir, address) \ + ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \ + pte_index((address))) +#define pte_unmap(pte) kunmap_atomic((pte)) +#else +#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) +#define pte_unmap(pte) ((void)(pte)) /* NOP */ +#endif + +/* Find an entry in the second-level page table.. */ +#ifndef pmd_offset +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); +} +#define pmd_offset pmd_offset +#endif + +#ifndef pud_offset +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +{ + return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address); +} +#define pud_offset pud_offset +#endif + +static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) +{ + return (pgd + pgd_index(address)); +}; + +/* + * a shortcut to get a pgd_t in a given mm + */ +#ifndef pgd_offset +#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address)) +#endif + +/* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's + */ +#ifndef pgd_offset_k +#define pgd_offset_k(address) pgd_offset(&init_mm, (address)) +#endif + +/* + * In many cases it is known that a virtual address is mapped at PMD or PTE + * level, so instead of traversing all the page table levels, we can get a + * pointer to the PMD entry in user or kernel page table or translate a virtual + * address to the pointer in the PTE in the kernel page tables with simple + * helpers. + */ +static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va) +{ + return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va); +} + +static inline pmd_t *pmd_off_k(unsigned long va) +{ + return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va); +} + +static inline pte_t *virt_to_kpte(unsigned long vaddr) +{ + pmd_t *pmd = pmd_off_k(vaddr); + + return pmd_none(*pmd) ? 
NULL : pte_offset_kernel(pmd, vaddr); +} + +#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty); +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); +extern int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty); +#else +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +static inline int pudp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pud_t *pudp, + pud_t entry, int dirty) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + int r = 1; + if (!pte_young(pte)) + r = 0; + else + set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); + return r; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + int r = 1; + if (!pmd_young(pmd)) + r = 0; + else + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd)); + return r; +} +#else +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +/* + * Although relevant to THP only, this API is called from generic rmap code + * under PageTransHuge(), hence it needs a dummy implementation for !THP + */ +static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); + return 0; +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, + pte_t *ptep) +{ + pte_t pte = *ptep; + pte_clear(mm, address, ptep); + return pte; +} +#endif + +#ifndef __HAVE_ARCH_PTEP_GET +static inline pte_t ptep_get(pte_t *ptep) +{ + return READ_ONCE(*ptep); +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pmd_t *pmdp) +{ + pmd_t pmd = *pmdp; + pmd_clear(pmdp); + return pmd; +} +#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */ +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR +static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, + pud_t *pudp) +{ + pud_t pud = *pudp; + + pud_clear(pudp); + return pud; +} +#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL +static inline pmd_t
pmdp_huge_get_and_clear_full(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + int full) +{ + return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); +} +#endif + +#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL +static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pud_t *pudp, + int full) +{ + return pudp_huge_get_and_clear(mm, address, pudp); +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL +static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, + unsigned long address, pte_t *ptep, + int full) +{ + pte_t pte; + pte = ptep_get_and_clear(mm, address, ptep); + return pte; +} +#endif + + +/* + * If two threads concurrently fault at the same page, the thread that + * won the race updates the PTE and its local TLB/Cache. The other thread + * gives up, simply does nothing, and continues; on architectures where + * software can update TLB, the local TLB can be updated here to avoid the next + * page fault. This function updates the TLB only; it does nothing with the + * cache or anything else, which is what distinguishes it from update_mmu_cache(). + */ +#ifndef __HAVE_ARCH_UPDATE_MMU_TLB +static inline void update_mmu_tlb(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ +} +#define __HAVE_ARCH_UPDATE_MMU_TLB +#endif + +/* + * Some architectures may be able to avoid expensive synchronization + * primitives when modifications are made to PTE's which are already + * not present, or in the process of an address space destruction. + */ +#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL +static inline void pte_clear_not_present_full(struct mm_struct *mm, + unsigned long address, + pte_t *ptep, + int full) +{ + pte_clear(mm, address, ptep); +} +#endif + +#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH +extern pte_t ptep_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep); +#endif + +#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH +extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp); +extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, + unsigned long address, + pud_t *pudp); +#endif + +#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT +struct mm_struct; +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) +{ + pte_t old_pte = *ptep; + set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); +} +#endif + +/* + * On some architectures the hardware does not set the page access bit when + * accessing a memory page; it is the responsibility of software to set it. + * This incurs an extra page fault penalty to track the page access bit, so + * as an optimization the access bit can be set during the page fault flow + * on these arches. To differentiate it from the pte_mkyoung macro, this + * macro is used on platforms where software maintains the page access bit.
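A sketch of the fault-path pattern the comment above describes: on architectures where software maintains the access bit, mark a freshly built PTE young so the first access does not fault again. mk_pte() and vma->vm_page_prot follow the common fault path; the helper name is invented.

static pte_t make_young_pte(struct page *page, struct vm_area_struct *vma)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	return pte_sw_mkyoung(entry);	/* no-op unless the arch overrides it */
}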
+ */ +#ifndef pte_sw_mkyoung +static inline pte_t pte_sw_mkyoung(pte_t pte) +{ + return pte; +} +#define pte_sw_mkyoung pte_sw_mkyoung +#endif + +#ifndef pte_savedwrite +#define pte_savedwrite pte_write +#endif + +#ifndef pte_mk_savedwrite +#define pte_mk_savedwrite pte_mkwrite +#endif + +#ifndef pte_clear_savedwrite +#define pte_clear_savedwrite pte_wrprotect +#endif + +#ifndef pmd_savedwrite +#define pmd_savedwrite pmd_write +#endif + +#ifndef pmd_mk_savedwrite +#define pmd_mk_savedwrite pmd_mkwrite +#endif + +#ifndef pmd_clear_savedwrite +#define pmd_clear_savedwrite pmd_wrprotect +#endif + +#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd)); +} +#else +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif +#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + pud_t old_pud = *pudp; + + set_pud_at(mm, address, pudp, pud_wrprotect(old_pud)); +} +#else +static inline void pudp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pud_t *pudp) +{ + BUILD_BUG(); +} +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ +#endif + +#ifndef pmdp_collapse_flush +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); +#else +static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, + unsigned long address, + pmd_t *pmdp) +{ + BUILD_BUG(); + return *pmdp; +} +#define pmdp_collapse_flush pmdp_collapse_flush +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +#endif + +#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT +extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pgtable); +#endif + +#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW +extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * This is an implementation of pmdp_establish() that is only suitable for an + * architecture that doesn't have hardware dirty/accessed bits. In this case we + * can't race with the CPU setting these bits, so a non-atomic approach is fine. + */ +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + pmd_t old_pmd = *pmdp; + set_pmd_at(vma->vm_mm, address, pmdp, pmd); + return old_pmd; +} +#endif + +#ifndef __HAVE_ARCH_PMDP_INVALIDATE +extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, + pmd_t *pmdp); +#endif + +#ifndef __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t pte_a, pte_t pte_b) +{ + return pte_val(pte_a) == pte_val(pte_b); +} +#endif + +#ifndef __HAVE_ARCH_PTE_UNUSED +/* + * Some architectures provide facilities to virtualization guests + * so that they can flag allocated pages as unused. This allows the + * host to transparently reclaim unused pages. This function returns + * whether the pte's page is unused.
+ */ +static inline int pte_unused(pte_t pte) +{ + return 0; +} +#endif + +#ifndef pte_access_permitted +#define pte_access_permitted(pte, write) \ + (pte_present(pte) && (!(write) || pte_write(pte))) +#endif + +#ifndef pmd_access_permitted +#define pmd_access_permitted(pmd, write) \ + (pmd_present(pmd) && (!(write) || pmd_write(pmd))) +#endif + +#ifndef pud_access_permitted +#define pud_access_permitted(pud, write) \ + (pud_present(pud) && (!(write) || pud_write(pud))) +#endif + +#ifndef p4d_access_permitted +#define p4d_access_permitted(p4d, write) \ + (p4d_present(p4d) && (!(write) || p4d_write(p4d))) +#endif + +#ifndef pgd_access_permitted +#define pgd_access_permitted(pgd, write) \ + (pgd_present(pgd) && (!(write) || pgd_write(pgd))) +#endif + +#ifndef __HAVE_ARCH_PMD_SAME +static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b) +{ + return pmd_val(pmd_a) == pmd_val(pmd_b); +} + +static inline int pud_same(pud_t pud_a, pud_t pud_b) +{ + return pud_val(pud_a) == pud_val(pud_b); +} +#endif + +#ifndef __HAVE_ARCH_P4D_SAME +static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b) +{ + return p4d_val(p4d_a) == p4d_val(p4d_b); +} +#endif + +#ifndef __HAVE_ARCH_PGD_SAME +static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) +{ + return pgd_val(pgd_a) == pgd_val(pgd_b); +} +#endif + +/* + * Use set_p*_safe(), and elide TLB flushing, when confident that *no* + * TLB flush will be required as a result of the "set". For example, use + * in scenarios where it is known ahead of time that the routine is + * setting non-present entries, or re-setting an existing entry to the + * same value. Otherwise, use the typical "set" helpers and flush the + * TLB. + */ +#define set_pte_safe(ptep, pte) \ +({ \ + WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ + set_pte(ptep, pte); \ +}) + +#define set_pmd_safe(pmdp, pmd) \ +({ \ + WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ + set_pmd(pmdp, pmd); \ +}) + +#define set_pud_safe(pudp, pud) \ +({ \ + WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ + set_pud(pudp, pud); \ +}) + +#define set_p4d_safe(p4dp, p4d) \ +({ \ + WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ + set_p4d(p4dp, p4d); \ +}) + +#define set_pgd_safe(pgdp, pgd) \ +({ \ + WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ + set_pgd(pgdp, pgd); \ +}) + +#ifndef __HAVE_ARCH_DO_SWAP_PAGE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_do_swap_page() can restore this + * metadata when a page is swapped back in. + */ +static inline void arch_do_swap_page(struct mm_struct *mm, + struct vm_area_struct *vma, + unsigned long addr, + pte_t pte, pte_t oldpte) +{ + +} +#endif + +#ifndef __HAVE_ARCH_UNMAP_ONE +/* + * Some architectures support metadata associated with a page. When a + * page is being swapped out, this metadata must be saved so it can be + * restored when the page is swapped back in. SPARC M7 and newer + * processors support an ADI (Application Data Integrity) tag for the + * page as metadata for the page. arch_unmap_one() can save this + * metadata on a swap-out of a page. 
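A sketch of the set_p*_safe() pattern described above: populating a brand-new, not-present kernel mapping during early init, where by construction no TLB flush can be needed. pfn_pte() and PAGE_KERNEL are the usual ingredients; the function itself is illustrative.

static void __init map_one_kernel_page(pte_t *ptep, phys_addr_t phys)
{
	/* WARNs if the entry was already present with a different value */
	set_pte_safe(ptep, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
}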
+ */
+static inline int arch_unmap_one(struct mm_struct *mm,
+				 struct vm_area_struct *vma,
+				 unsigned long addr,
+				 pte_t orig_pte)
+{
+	return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr)	(pte)
+#endif
+
+#ifndef pte_accessible
+# define pte_accessible(mm, pte)	((void)(pte), 1)
+#endif
+
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
+#ifndef p4d_addr_end
+#define p4d_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
+void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d)        do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud)        do { } while (0)
+#endif
+
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+	if (pgd_none(*pgd))
+		return 1;
+	if (unlikely(pgd_bad(*pgd))) {
+		pgd_clear_bad(pgd);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int p4d_none_or_clear_bad(p4d_t *p4d)
+{
+	if (p4d_none(*p4d))
+		return 1;
+	if (unlikely(p4d_bad(*p4d))) {
+		p4d_clear_bad(p4d);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+	if (pud_none(*pud))
+		return 1;
+	if (unlikely(pud_bad(*pud))) {
+		pud_clear_bad(pud);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+	if (pmd_none(*pmd))
+		return 1;
+	if (unlikely(pmd_bad(*pmd))) {
+		pmd_clear_bad(pmd);
+		return 1;
+	}
+	return 0;
+}
+
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     pte_t *ptep)
+{
+	/*
+	 * Get the current pte state, but zero it out to make it
+	 * non-present, preventing the hardware from asynchronously
+	 * updating it.
+	 */
+	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
+					     unsigned long addr,
+					     pte_t *ptep, pte_t pte)
+{
+	/*
+	 * The pte is non-present, so there's no hardware state to
+	 * preserve.
+	 */
+	set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+					   unsigned long addr,
+					   pte_t *ptep)
+{
+	return __ptep_modify_prot_start(vma, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
+					   unsigned long addr,
+					   pte_t *ptep, pte_t old_pte, pte_t pte)
+{
+	__ptep_modify_prot_commit(vma, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+#endif /* CONFIG_MMU */
+
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+
+#ifndef pgprot_nx
+#define pgprot_nx(prot)	(prot)
+#endif
+
+#ifndef pgprot_noncached
+#define pgprot_noncached(prot)	(prot)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
+#ifndef pgprot_device
+#define pgprot_device pgprot_noncached
+#endif
+
+#ifdef CONFIG_MMU
+#ifndef pgprot_modify
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
+		newprot = pgprot_noncached(newprot);
+	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
+		newprot = pgprot_writecombine(newprot);
+	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
+		newprot = pgprot_device(newprot);
+	return newprot;
+}
+#endif
+#endif /* CONFIG_MMU */
+
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot)	(prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot)	(prot)
+#endif
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
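Returning to the ptep_modify_prot_start()/ptep_modify_prot_commit() pair defined above, a minimal sketch of the intended calling pattern may help. This is not from the patch: the helper name is invented, and the caller is assumed to already hold the pte lock, as the transaction comment requires.

/* Illustration only: write-protect one pte without losing hardware
 * dirty/accessed updates that race with the modification. The caller
 * holds the page table lock covering @ptep.
 */
static void wrprotect_one_pte(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	pte_t old_pte, new_pte;

	old_pte = ptep_modify_prot_start(vma, addr, ptep);
	new_pte = pte_wrprotect(old_pte);
	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
}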
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
+ */
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev)	do {} while (0)
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+#endif
+#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
+static inline int pte_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline int pmd_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte;
+}
+
+static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+
+static inline int pmd_swp_soft_dirty(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
+{
+	return pmd;
+}
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interfaces that can be used by architecture code to keep track of
+ * the memory type of pfn mappings specified by remap_pfn_range() and
+ * vmf_insert_pfn().
+ */
+
+/*
+ * track_pfn_remap is called when a _new_ pfn mapping is being established
+ * by remap_pfn_range() for the physical range indicated by pfn and size.
+ */
+static inline int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
+				  unsigned long pfn, unsigned long addr,
+				  unsigned long size)
+{
+	return 0;
+}
+
+/*
+ * track_pfn_insert is called when a _new_ single pfn is established
+ * by vmf_insert_pfn().
+ */
+static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
+				    pfn_t pfn)
+{
+}
+
+/*
+ * track_pfn_copy is called when a vma covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_copy(struct vm_area_struct *vma)
+{
+	return 0;
+}
+
+/*
+ * untrack_pfn is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size
+ * or for the entire vma (in which case pfn and size are zero).
+ */
+static inline void untrack_pfn(struct vm_area_struct *vma,
+			       unsigned long pfn, unsigned long size)
+{
+}
+
+/*
+ * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
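These tracking hooks are invoked by the core mm, not by drivers directly; a driver only calls remap_pfn_range() and the rest follows. A hypothetical sketch, with the device address and all names assumed purely for illustration:

/* Hypothetical driver mmap: remap_pfn_range() calls track_pfn_remap()
 * internally on architectures that implement it, and untrack_pfn()
 * runs later when the mapping is torn down.
 */
#define EXAMPLE_DEV_PHYS	0xfe000000UL	/* assumed device address */

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = EXAMPLE_DEV_PHYS >> PAGE_SHIFT;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}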
+ */ +static inline void untrack_pfn_moved(struct vm_area_struct *vma) +{ +} +#else +extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot, + unsigned long pfn, unsigned long addr, + unsigned long size); +extern void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot, + pfn_t pfn); +extern int track_pfn_copy(struct vm_area_struct *vma); +extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + unsigned long size); +extern void untrack_pfn_moved(struct vm_area_struct *vma); +#endif + +#ifdef __HAVE_COLOR_ZERO_PAGE +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; + return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); +} + +#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) + +#else +static inline int is_zero_pfn(unsigned long pfn) +{ + extern unsigned long zero_pfn; + return pfn == zero_pfn; +} + +static inline unsigned long my_zero_pfn(unsigned long addr) +{ + extern unsigned long zero_pfn; + return zero_pfn; +} +#endif + +#ifdef CONFIG_MMU + +#ifndef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return 0; +} +#ifndef pmd_write +static inline int pmd_write(pmd_t pmd) +{ + BUG(); + return 0; +} +#endif /* pmd_write */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#ifndef pud_write +static inline int pud_write(pud_t pud) +{ + BUG(); + return 0; +} +#endif /* pud_write */ + +#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) +static inline int pmd_devmap(pmd_t pmd) +{ + return 0; +} +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif + +#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ + (defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) +static inline int pud_trans_huge(pud_t pud) +{ + return 0; +} +#endif + +/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */ +static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud) +{ + pud_t pudval = READ_ONCE(*pud); + + if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval)) + return 1; + if (unlikely(pud_bad(pudval))) { + pud_clear_bad(pud); + return 1; + } + return 0; +} + +/* See pmd_trans_unstable for discussion. */ +static inline int pud_trans_unstable(pud_t *pud) +{ +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + return pud_none_or_trans_huge_or_dev_or_clear_bad(pud); +#else + return 0; +#endif +} + +#ifndef pmd_read_atomic +static inline pmd_t pmd_read_atomic(pmd_t *pmdp) +{ + /* + * Depend on compiler for an atomic pmd read. NOTE: this is + * only going to work, if the pmdval_t isn't larger than + * an unsigned long. + */ + return *pmdp; +} +#endif + +#ifndef arch_needs_pgtable_deposit +#define arch_needs_pgtable_deposit() (false) +#endif +/* + * This function is meant to be used by sites walking pagetables with + * the mmap_lock held in read mode to protect against MADV_DONTNEED and + * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd + * into a null pmd and the transhuge page fault can convert a null pmd + * into an hugepmd or into a regular pmd (if the hugepage allocation + * fails). While holding the mmap_lock in read mode the pmd becomes + * stable and stops changing under us only if it's not null and not a + * transhuge pmd. 
When those races occur and this function makes a
+ * difference vs the standard pmd_none_or_clear_bad, the result is
+ * undefined, so behaving as if the pmd was none is safe (because it
+ * can return none anyway). The compiler level barrier() is critically
+ * important to compute the two checks atomically on the same pmdval.
+ *
+ * For 32bit kernels with a 64bit large pmd_t this automatically takes
+ * care of reading the pmd atomically to avoid SMP race conditions
+ * against pmd_populate() when the mmap_lock is held for reading by the
+ * caller (a special atomic read not done by "gcc" as in the generic
+ * version above, is also needed when THP is disabled because the page
+ * fault can populate the pmd from under us).
+ */
+static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
+{
+	pmd_t pmdval = pmd_read_atomic(pmd);
+	/*
+	 * The barrier will stabilize the pmdval in a register or on
+	 * the stack so that it will stop changing under the code.
+	 *
+	 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
+	 * pmd_read_atomic is allowed to return a not atomic pmdval
+	 * (for example pointing to an hugepage that has never been
+	 * mapped in the pmd). The below checks will only care about
+	 * the low part of the pmd with 32bit PAE x86 anyway, with the
+	 * exception of pmd_none(). So the important thing is that if
+	 * the low part of the pmd is found null, the high part will
+	 * be also null or the pmd_none() check below would be
+	 * confused.
+	 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	barrier();
+#endif
+	/*
+	 * !pmd_present() checks for pmd migration entries
+	 *
+	 * The complete check uses is_pmd_migration_entry() in linux/swapops.h
+	 * But using that requires moving current function and pmd_trans_unstable()
+	 * to linux/swapops.h to resolve the dependency, which would be too much
+	 * code movement.
+	 *
+	 * !pmd_present() is equivalent to is_pmd_migration_entry() currently,
+	 * because !pmd_present() pages can only be under migration not swapped
+	 * out.
+	 *
+	 * pmd_none() is preserved for future condition checks on pmd migration
+	 * entries and to avoid confusion with this function's name, although it
+	 * is redundant with !pmd_present().
+	 */
+	if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
+		(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
+		return 1;
+	if (unlikely(pmd_bad(pmdval))) {
+		pmd_clear_bad(pmd);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * This is a noop if Transparent Hugepage Support is not built into
+ * the kernel. Otherwise it is equivalent to
+ * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in
+ * places that already verified the pmd is not none and they want to
+ * walk ptes while holding the mmap sem in read mode (write mode doesn't
+ * need this). If THP is not enabled, the pmd can't go away under the
+ * code even if MADV_DONTNEED runs, but if THP is enabled we need to
+ * run a pmd_trans_unstable before walking the ptes after
+ * split_huge_pmd returns (because it may have run when the pmd became
+ * null, but then a page fault can map in a THP and not a regular page).
+ */
+static inline int pmd_trans_unstable(pmd_t *pmd)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	return pmd_none_or_trans_huge_or_clear_bad(pmd);
+#else
+	return 0;
+#endif
+}
+
+#ifndef CONFIG_NUMA_BALANCING
+/*
+ * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
+ * the only case the kernel cares about is NUMA balancing, and the bit is
+ * only ever set when the VMA is accessible.
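To make the pmd_trans_unstable() rule concrete, here is a hypothetical pte-range walker (not from the patch; the function name and the assumption that end > addr are illustrative). It runs with mmap_lock held for read and re-checks the pmd before mapping the pte table:

/* Illustration only: skip pmds that are none, THP, under migration or
 * bad; only a stable pte-table pmd may be mapped and walked.
 */
static int example_walk_ptes(struct vm_area_struct *vma, pmd_t *pmd,
			     unsigned long addr, unsigned long end)
{
	spinlock_t *ptl;
	pte_t *pte;

	if (pmd_trans_unstable(pmd))
		return 0;	/* not a stable pte table; skip or retry */

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... inspect *pte here ... */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}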
For PROT_NONE VMAs, the PTEs are not marked + * _PAGE_PROTNONE so by default, implement the helper as "always no". It + * is the responsibility of the caller to distinguish between PROT_NONE + * protections and NUMA hinting fault protections. + */ +static inline int pte_protnone(pte_t pte) +{ + return 0; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return 0; +} +#endif /* CONFIG_NUMA_BALANCING */ + +#endif /* CONFIG_MMU */ + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#ifndef __PAGETABLE_P4D_FOLDED +int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot); +int p4d_clear_huge(p4d_t *p4d); +#else +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +#endif /* !__PAGETABLE_P4D_FOLDED */ + +int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot); +int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); +int pud_clear_huge(pud_t *pud); +int pmd_clear_huge(pmd_t *pmd); +int p4d_free_pud_page(p4d_t *p4d, unsigned long addr); +int pud_free_pmd_page(pud_t *pud, unsigned long addr); +int pmd_free_pte_page(pmd_t *pmd, unsigned long addr); +#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ +static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) +{ + return 0; +} +static inline int p4d_clear_huge(p4d_t *p4d) +{ + return 0; +} +static inline int pud_clear_huge(pud_t *pud) +{ + return 0; +} +static inline int pmd_clear_huge(pmd_t *pmd) +{ + return 0; +} +static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) +{ + return 0; +} +static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr) +{ + return 0; +} +static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) +{ + return 0; +} +#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ + +#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * ARCHes with special requirements for evicting THP backing TLB entries can + * implement this. Otherwise also, it can help optimize normal TLB flush in + * THP regime. Stock flush_tlb_range() typically has optimization to nuke the + * entire TLB if flush span is greater than a threshold, which will + * likely be true for a single huge page. Thus a single THP flush will + * invalidate the entire TLB which is not desirable. + * e.g. see arch/arc: flush_pmd_tlb_range + */ +#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) +#else +#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG() +#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG() +#endif +#endif + +struct file; +int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t *vma_prot); + +#ifndef CONFIG_X86_ESPFIX64 +static inline void init_espfix_bsp(void) { } +#endif + +extern void __init pgtable_cache_init(void); + +#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED +static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot) +{ + return true; +} + +static inline bool arch_has_pfn_modify_check(void) +{ + return false; +} +#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */ + +/* + * Architecture PAGE_KERNEL_* fallbacks + * + * Some architectures don't define certain PAGE_KERNEL_* flags. 
This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe
+ * fallbacks, provided as a best effort, which we can count on until the
+ * architectures define them on their own.
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
+/*
+ * Page Table Modification bits for pgtbl_mod_mask.
+ *
+ * These are used by the p?d_alloc_track*() set of functions and in the generic
+ * vmalloc/ioremap code to track at which page-table levels entries have been
+ * modified. Based on that the code can better decide when vmalloc and ioremap
+ * mapping changes need to be synchronized to other page-tables in the system.
+ */
+#define		__PGTBL_PGD_MODIFIED	0
+#define		__PGTBL_P4D_MODIFIED	1
+#define		__PGTBL_PUD_MODIFIED	2
+#define		__PGTBL_PMD_MODIFIED	3
+#define		__PGTBL_PTE_MODIFIED	4
+
+#define		PGTBL_PGD_MODIFIED	BIT(__PGTBL_PGD_MODIFIED)
+#define		PGTBL_P4D_MODIFIED	BIT(__PGTBL_P4D_MODIFIED)
+#define		PGTBL_PUD_MODIFIED	BIT(__PGTBL_PUD_MODIFIED)
+#define		PGTBL_PMD_MODIFIED	BIT(__PGTBL_PMD_MODIFIED)
+#define		PGTBL_PTE_MODIFIED	BIT(__PGTBL_PTE_MODIFIED)
+
+/* Page-Table Modification Mask */
+typedef unsigned int pgtbl_mod_mask;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef io_remap_pfn_range
+#define io_remap_pfn_range remap_pfn_range
+#endif
+
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm)	__is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm)	__is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm)	__is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
+#ifndef p4d_offset_lockless
+#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
+#endif
+#ifndef pud_offset_lockless
+#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
+#endif
+#ifndef pmd_offset_lockless
+#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
+#endif
+
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() by the fact that they are always available (if
+ * the architecture supports large pages at the appropriate level) even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
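A hypothetical use of these predicates (not part of the patch; the function name is invented): a lookup that stops descending as soon as it meets a leaf entry, checking p?d_none() before dereferencing the next level.

/* Illustration only: report whether @addr is mapped by a leaf entry at
 * any level in @mm. The caller is assumed to serialize against table
 * teardown.
 */
static bool example_addr_maps_leaf(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return false;
	if (pgd_leaf(*pgd))
		return true;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	if (p4d_leaf(*p4d))
		return true;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;
	if (pud_leaf(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	return !pmd_none(*pmd) && pmd_leaf(*pmd);
}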
+ */ +#ifndef pgd_leaf +#define pgd_leaf(x) 0 +#endif +#ifndef p4d_leaf +#define p4d_leaf(x) 0 +#endif +#ifndef pud_leaf +#define pud_leaf(x) 0 +#endif +#ifndef pmd_leaf +#define pmd_leaf(x) 0 +#endif + +#endif /* _LINUX_PGTABLE_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 2432ca463ddc..3a09d2bf69ea 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -15,6 +15,7 @@ #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/linkmode.h> +#include <linux/netlink.h> #include <linux/mdio.h> #include <linux/mii.h> #include <linux/mii_timestamper.h> @@ -25,6 +26,7 @@ #include <linux/u64_stats_sync.h> #include <linux/irqreturn.h> #include <linux/iopoll.h> +#include <linux/refcount.h> #include <linux/atomic.h> @@ -77,6 +79,7 @@ extern const int phy_10gbit_features_array[1]; #define PHY_IS_INTERNAL 0x00000001 #define PHY_RST_AFTER_CLK_EN 0x00000002 +#define PHY_POLL_CABLE_TEST 0x00000004 #define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ @@ -206,12 +209,6 @@ static inline const char *phy_modes(phy_interface_t interface) #define MII_BUS_ID_SIZE 61 -/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit - IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */ -#define MII_ADDR_C45 (1<<30) -#define MII_DEVADDR_C45_SHIFT 16 -#define MII_REGADDR_C45_MASK GENMASK(15, 0) - struct device; struct phylink; struct sfp_bus; @@ -227,6 +224,29 @@ struct mdio_bus_stats { struct u64_stats_sync syncp; }; +/* Represents a shared structure between different phydev's in the same + * package, for example a quad PHY. See phy_package_join() and + * phy_package_leave(). + */ +struct phy_package_shared { + int addr; + refcount_t refcnt; + unsigned long flags; + size_t priv_size; + + /* private data pointer */ + /* note that this pointer is shared between different phydevs and + * the user has to take care of appropriate locking. It is allocated + * and freed automatically by phy_package_join() and + * phy_package_leave(). + */ + void *priv; +}; + +/* used as bit number in atomic bitops */ +#define PHY_SHARED_F_INIT_DONE 0 +#define PHY_SHARED_F_PROBE_DONE 1 + /* * The Bus class for PHYs. 
Devices which provide access to
 * PHYs should register using this structure
@@ -273,8 +293,24 @@ struct mii_bus {
 	/* GPIO reset pulse width in microseconds */
 	int reset_delay_us;
+	/* GPIO reset deassert delay in microseconds */
+	int reset_post_delay_us;
 	/* RESET GPIO descriptor pointer */
 	struct gpio_desc *reset_gpiod;
+
+	/* bus capabilities, used for probing */
+	enum {
+		MDIOBUS_NO_CAP = 0,
+		MDIOBUS_C22,
+		MDIOBUS_C45,
+		MDIOBUS_C22_C45,
+	} probe_capabilities;
+
+	/* protect access to the shared element */
+	struct mutex shared_lock;
+
+	/* shared state across different PHYs */
+	struct phy_package_shared *shared[PHY_MAX_ADDR];
 };
 #define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -285,7 +321,12 @@ static inline struct mii_bus *mdiobus_alloc(void)
 }
 
 int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus,
+			    struct module *owner);
 #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
+#define devm_mdiobus_register(dev, bus) \
+		__devm_mdiobus_register(dev, bus, THIS_MODULE)
+
 void mdiobus_unregister(struct mii_bus *bus);
 void mdiobus_free(struct mii_bus *bus);
 struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv);
@@ -295,7 +336,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
 }
 
 struct mii_bus *mdio_find_bus(const char *mdio_name);
-void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
 
 #define PHY_INTERRUPT_DISABLED	false
@@ -326,6 +366,12 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
  * - irq or timer will set NOLINK if link goes down
  * - phy_stop moves to HALTED
  *
+ * CABLETEST: PHY is performing a cable test. Packet reception/sending
+ * is not expected to work, and the carrier will be indicated as down.
+ * The PHY will be polled once per second, or on interrupt, for its current
+ * state. Once complete, move to UP to restart the PHY.
+ * - phy_stop aborts the running test and moves to HALTED
+ *
  * HALTED: PHY is up, but no polling or interrupts are done. Or
  * PHY is in an error state.
  * - phy_start moves to UP
@@ -337,16 +383,21 @@ enum phy_state {
 	PHY_UP,
 	PHY_RUNNING,
 	PHY_NOLINK,
+	PHY_CABLETEST,
 };
 
+#define MDIO_MMD_NUM 32
+
 /**
  * struct phy_c45_device_ids - 802.3-c45 Device Identifiers
- * @devices_in_package: Bit vector of devices present.
+ * @devices_in_package: IEEE 802.3 devices in package register value.
+ * @mmds_present: bit vector of MMDs present.
  * @device_ids: The device identifier for each present device.
  */
 struct phy_c45_device_ids {
 	u32 devices_in_package;
-	u32 device_ids[8];
+	u32 mmds_present;
+	u32 device_ids[MDIO_MMD_NUM];
 };
 
 struct macsec_context;
@@ -431,6 +482,9 @@ struct phy_device {
 	int duplex;
 	int pause;
 	int asym_pause;
+	u8 master_slave_get;
+	u8 master_slave_set;
+	u8 master_slave_state;
 
 	/* Union of PHY and Attached devices' supported link modes */
 	/* See ethtool.h for more info */
@@ -461,6 +515,15 @@ struct phy_device {
 	/* For use by PHYs to maintain extra state */
 	void *priv;
 
+	/* shared data pointer */
+	/* For use by PHYs inside the same package that need a shared state.
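The shared-state machinery is easiest to see in a sketch. The following hypothetical quad-PHY probe is not from the patch: the register, value and private struct are invented; only phy_package_join(), phy_package_probe_once(), phy_package_write() and phy_package_leave() are interfaces this patch actually adds.

/* Illustration only: all four PHYs join a package anchored at the
 * address of PHY 0; the first one through runs the global setup.
 */
struct example_quad_priv {
	int chip_rev;	/* invented shared field */
};

#define EXAMPLE_GLOBAL_CFG	0x1f	/* assumed package-level register */

static int example_quad_probe(struct phy_device *phydev)
{
	int ret;

	ret = phy_package_join(phydev, 0, sizeof(struct example_quad_priv));
	if (ret)
		return ret;

	if (phy_package_probe_once(phydev)) {
		ret = phy_package_write(phydev, EXAMPLE_GLOBAL_CFG, 0x1);
		if (ret) {
			phy_package_leave(phydev);
			return ret;
		}
	}
	return 0;
}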
*/ + struct phy_package_shared *shared; + + /* Reporting cable test results */ + struct sk_buff *skb; + void *ehdr; + struct nlattr *nest; + /* Interrupt and Polling infrastructure */ struct delayed_work state_queue; @@ -476,7 +539,7 @@ struct phy_device { u8 mdix; u8 mdix_ctrl; - void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier); + void (*phy_link_change)(struct phy_device *phydev, bool up); void (*adjust_link)(struct net_device *dev); #if IS_ENABLED(CONFIG_MACSEC) @@ -487,6 +550,18 @@ struct phy_device { #define to_phy_device(d) container_of(to_mdio_device(d), \ struct phy_device, mdio) +/* A structure containing possible configuration parameters + * for a TDR cable test. The driver does not need to implement + * all the parameters, but should report what is actually used. + */ +struct phy_tdr_config { + u32 first; + u32 last; + u32 step; + s8 pair; +}; +#define PHY_PAIR_ALL -1 + /* struct phy_driver: Driver structure for a particular PHY type * * driver_data: static driver data @@ -636,6 +711,18 @@ struct phy_driver { int (*module_eeprom)(struct phy_device *dev, struct ethtool_eeprom *ee, u8 *data); + /* Start a cable test */ + int (*cable_test_start)(struct phy_device *dev); + + /* Start a raw TDR cable test */ + int (*cable_test_tdr_start)(struct phy_device *dev, + const struct phy_tdr_config *config); + + /* Once per second, or on interrupt, request the status of the + * test. + */ + int (*cable_test_get_status)(struct phy_device *dev, bool *finished); + /* Get statistics from the phy using ethtool */ int (*get_sset_count)(struct phy_device *dev); void (*get_strings)(struct phy_device *dev, u8 *data); @@ -649,6 +736,8 @@ struct phy_driver { struct ethtool_tunable *tuna, const void *data); int (*set_loopback)(struct phy_device *dev, bool enable); + int (*get_sqi)(struct phy_device *dev); + int (*get_sqi_max)(struct phy_device *dev); }; #define to_phy_driver(d) container_of(to_mdio_common_driver(d), \ struct phy_driver, mdiodrv) @@ -993,6 +1082,10 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev) */ static inline bool phy_polling_mode(struct phy_device *phydev) { + if (phydev->state == PHY_CABLETEST) + if (phydev->drv->flags & PHY_POLL_CABLE_TEST) + return true; + return phydev->irq == PHY_POLL; } @@ -1174,6 +1267,34 @@ int phy_speed_up(struct phy_device *phydev); int phy_restart_aneg(struct phy_device *phydev); int phy_reset_after_clk_enable(struct phy_device *phydev); +#if IS_ENABLED(CONFIG_PHYLIB) +int phy_start_cable_test(struct phy_device *phydev, + struct netlink_ext_ack *extack); +int phy_start_cable_test_tdr(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config); +#else +static inline +int phy_start_cable_test(struct phy_device *phydev, + struct netlink_ext_ack *extack) +{ + NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); + return -EOPNOTSUPP; +} +static inline +int phy_start_cable_test_tdr(struct phy_device *phydev, + struct netlink_ext_ack *extack, + const struct phy_tdr_config *config) +{ + NL_SET_ERR_MSG(extack, "Kernel not compiled with PHYLIB support"); + return -EOPNOTSUPP; +} +#endif + +int phy_cable_test_result(struct phy_device *phydev, u8 pair, u16 result); +int phy_cable_test_fault_length(struct phy_device *phydev, u8 pair, + u16 cm); + static inline void phy_device_reset(struct phy_device *phydev, int value) { mdio_device_reset(&phydev->mdio, value); @@ -1234,10 +1355,6 @@ static inline int genphy_config_aneg(struct phy_device *phydev) return 
__genphy_config_aneg(phydev, false); } -static inline int genphy_no_soft_reset(struct phy_device *phydev) -{ - return 0; -} static inline int genphy_no_ack_interrupt(struct phy_device *phydev) { return 0; @@ -1270,6 +1387,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev); int genphy_c45_read_status(struct phy_device *phydev); int genphy_c45_config_aneg(struct phy_device *phydev); +/* Generic C45 PHY driver */ +extern struct phy_driver genphy_c45_driver; + /* The gen10g_* functions are the old Clause 45 stub */ int gen10g_config_aneg(struct phy_device *phydev); @@ -1301,6 +1421,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd); +int phy_disable_interrupts(struct phy_device *phydev); void phy_request_interrupt(struct phy_device *phydev); void phy_free_interrupt(struct phy_device *phydev); void phy_print_status(struct phy_device *phydev); @@ -1315,6 +1436,10 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp); void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause); + +s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev, + const int *delay_values, int size, bool is_rx); + void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv, bool *tx_pause, bool *rx_pause); @@ -1341,56 +1466,82 @@ int phy_ethtool_get_link_ksettings(struct net_device *ndev, int phy_ethtool_set_link_ksettings(struct net_device *ndev, const struct ethtool_link_ksettings *cmd); int phy_ethtool_nway_reset(struct net_device *ndev); +int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size); +void phy_package_leave(struct phy_device *phydev); +int devm_phy_package_join(struct device *dev, struct phy_device *phydev, + int addr, size_t priv_size); #if IS_ENABLED(CONFIG_PHYLIB) int __init mdio_bus_init(void); void mdio_bus_exit(void); #endif -/* Inline function for use within net/core/ethtool.c (built-in) */ -static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data) +int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data); +int phy_ethtool_get_sset_count(struct phy_device *phydev); +int phy_ethtool_get_stats(struct phy_device *phydev, + struct ethtool_stats *stats, u64 *data); + +static inline int phy_package_read(struct phy_device *phydev, u32 regnum) { - if (!phydev->drv) - return -EIO; + struct phy_package_shared *shared = phydev->shared; - mutex_lock(&phydev->lock); - phydev->drv->get_strings(phydev, data); - mutex_unlock(&phydev->lock); + if (!shared) + return -EIO; - return 0; + return mdiobus_read(phydev->mdio.bus, shared->addr, regnum); } -static inline int phy_ethtool_get_sset_count(struct phy_device *phydev) +static inline int __phy_package_read(struct phy_device *phydev, u32 regnum) { - int ret; + struct phy_package_shared *shared = phydev->shared; - if (!phydev->drv) + if (!shared) return -EIO; - if (phydev->drv->get_sset_count && - phydev->drv->get_strings && - phydev->drv->get_stats) { - mutex_lock(&phydev->lock); - ret = phydev->drv->get_sset_count(phydev); - mutex_unlock(&phydev->lock); + return __mdiobus_read(phydev->mdio.bus, shared->addr, regnum); +} - return ret; - } +static inline int phy_package_write(struct phy_device *phydev, + u32 regnum, u16 val) +{ + 
struct phy_package_shared *shared = phydev->shared; - return -EOPNOTSUPP; + if (!shared) + return -EIO; + + return mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); } -static inline int phy_ethtool_get_stats(struct phy_device *phydev, - struct ethtool_stats *stats, u64 *data) +static inline int __phy_package_write(struct phy_device *phydev, + u32 regnum, u16 val) { - if (!phydev->drv) + struct phy_package_shared *shared = phydev->shared; + + if (!shared) return -EIO; - mutex_lock(&phydev->lock); - phydev->drv->get_stats(phydev, stats, data); - mutex_unlock(&phydev->lock); + return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val); +} - return 0; +static inline bool __phy_package_set_once(struct phy_device *phydev, + unsigned int b) +{ + struct phy_package_shared *shared = phydev->shared; + + if (!shared) + return false; + + return !test_and_set_bit(b, &shared->flags); +} + +static inline bool phy_package_init_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE); +} + +static inline bool phy_package_probe_once(struct phy_device *phydev) +{ + return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE); } extern struct bus_type mdio_bus_type; diff --git a/include/linux/phy/omap_usb.h b/include/linux/phy/omap_usb.h index 5973a6313529..e23b52df93ec 100644 --- a/include/linux/phy/omap_usb.h +++ b/include/linux/phy/omap_usb.h @@ -2,68 +2,14 @@ /* * omap_usb.h -- omap usb2 phy header file * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012-2020 Texas Instruments Incorporated - http://www.ti.com * Author: Kishon Vijay Abraham I <kishon@ti.com> */ #ifndef __DRIVERS_OMAP_USB2_H #define __DRIVERS_OMAP_USB2_H -#include <linux/io.h> -#include <linux/usb/otg.h> - -struct usb_dpll_params { - u16 m; - u8 n; - u8 freq:3; - u8 sd; - u32 mf; -}; - -enum omap_usb_phy_type { - TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ - TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ - TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ -}; - -struct omap_usb { - struct usb_phy phy; - struct phy_companion *comparator; - void __iomem *pll_ctrl_base; - void __iomem *phy_base; - struct device *dev; - struct device *control_dev; - struct clk *wkupclk; - struct clk *optclk; - u8 flags; - enum omap_usb_phy_type type; - struct regmap *syscon_phy_power; /* ctrl. reg. acces */ - unsigned int power_reg; /* power reg. 
index within syscon */ - u32 mask; - u32 power_on; - u32 power_off; -}; - -struct usb_phy_data { - const char *label; - u8 flags; - u32 mask; - u32 power_on; - u32 power_off; -}; - -/* Driver Flags */ -#define OMAP_USB2_HAS_START_SRP (1 << 0) -#define OMAP_USB2_HAS_SET_VBUS (1 << 1) -#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT (1 << 2) - -#define OMAP_DEV_PHY_PD BIT(0) -#define OMAP_USB2_PHY_PD BIT(28) - -#define AM437X_USB2_PHY_PD BIT(0) -#define AM437X_USB2_OTG_PD BIT(1) -#define AM437X_USB2_OTGVDET_EN BIT(19) -#define AM437X_USB2_OTGSESSEND_EN BIT(20) +#include <linux/usb/phy_companion.h> #define phy_to_omapusb(x) container_of((x), struct omap_usb, phy) @@ -76,15 +22,4 @@ static inline int omap_usb2_set_comparator(struct phy_companion *comparator) } #endif -static inline u32 omap_usb_readl(void __iomem *addr, unsigned offset) -{ - return __raw_readl(addr + offset); -} - -static inline void omap_usb_writel(void __iomem *addr, unsigned offset, - u32 data) -{ - __raw_writel(data, addr + offset); -} - #endif /* __DRIVERS_OMAP_USB_H */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 3f8d37ec5503..c36fb41a7d90 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -62,18 +62,27 @@ enum phylink_op_type { * @dev: a pointer to a struct device associated with the MAC * @type: operation type of PHYLINK instance * @pcs_poll: MAC PCS cannot provide link change interrupt + * @poll_fixed_state: if true, starts link_poll, + * if MAC link is at %MLO_AN_FIXED mode. + * @get_fixed_state: callback to execute to determine the fixed link state, + * if MAC link is at %MLO_AN_FIXED mode. */ struct phylink_config { struct device *dev; enum phylink_op_type type; bool pcs_poll; + bool poll_fixed_state; + void (*get_fixed_state)(struct phylink_config *config, + struct phylink_link_state *state); }; /** * struct phylink_mac_ops - MAC operations structure. * @validate: Validate and update the link configuration. * @mac_pcs_get_state: Read the current link state from the hardware. + * @mac_prepare: prepare for a major reconfiguration of the interface. * @mac_config: configure the MAC for the selected mode and state. + * @mac_finish: finish a major reconfiguration of the interface. * @mac_an_restart: restart 802.3z BaseX autonegotiation. * @mac_link_down: take the link down. * @mac_link_up: allow the link to come up. @@ -86,8 +95,12 @@ struct phylink_mac_ops { struct phylink_link_state *state); void (*mac_pcs_get_state)(struct phylink_config *config, struct phylink_link_state *state); + int (*mac_prepare)(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); void (*mac_config)(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state); + int (*mac_finish)(struct phylink_config *config, unsigned int mode, + phy_interface_t iface); void (*mac_an_restart)(struct phylink_config *config); void (*mac_link_down)(struct phylink_config *config, unsigned int mode, phy_interface_t interface); @@ -143,6 +156,31 @@ void mac_pcs_get_state(struct phylink_config *config, struct phylink_link_state *state); /** + * mac_prepare() - prepare to change the PHY interface mode + * @config: a pointer to a &struct phylink_config. + * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. + * @iface: interface mode to switch to + * + * phylink will call this method at the beginning of a full initialisation + * of the link, which includes changing the interface mode or at initial + * startup time. It may be called for the current mode. 
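As a sketch of how a MAC driver might implement the prepare/finish pair documented here (not from the patch; struct example_mac and the serdes helpers are assumptions):

/* Illustration only: quiesce the serdes while phylink changes the
 * interface mode, and bring it back up afterwards in mac_finish().
 */
static int example_mac_prepare(struct phylink_config *config,
			       unsigned int mode, phy_interface_t iface)
{
	struct example_mac *mac = container_of(config, struct example_mac,
					       phylink_config);

	example_serdes_power_down(mac);		/* assumed helper */
	return 0;
}

static int example_mac_finish(struct phylink_config *config,
			      unsigned int mode, phy_interface_t iface)
{
	struct example_mac *mac = container_of(config, struct example_mac,
					       phylink_config);

	example_serdes_power_up(mac, iface);	/* assumed helper */
	return 0;
}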
The MAC driver
+ * should perform whatever actions are required, e.g. disabling the
+ * Serdes PHY.
+ *
+ * This will be the first call in the sequence:
+ * - mac_prepare()
+ * - mac_config()
+ * - pcs_config()
+ * - possible pcs_an_restart()
+ * - mac_finish()
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_prepare(struct phylink_config *config, unsigned int mode,
+		phy_interface_t iface);
+
+/**
  * mac_config() - configure the MAC for the selected mode and state
  * @config: a pointer to a &struct phylink_config.
  * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
@@ -218,6 +256,23 @@ void mac_config(struct phylink_config *config, unsigned int mode,
 	       const struct phylink_link_state *state);
 
 /**
+ * mac_finish() - finish a change to the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this if it called mac_prepare() to allow the MAC to
+ * complete any necessary steps after the MAC and PCS have been configured
+ * for the @mode and @iface. E.g. a MAC driver may wish to re-enable the
+ * Serdes PHY here if it was previously disabled by mac_prepare().
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_finish(struct phylink_config *config, unsigned int mode,
+	       phy_interface_t iface);
+
+/**
  * mac_an_restart() - restart 802.3z BaseX autonegotiation
  * @config: a pointer to a &struct phylink_config.
  */
@@ -270,6 +325,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy,
 		 int speed, int duplex, bool tx_pause, bool rx_pause);
 #endif
 
+struct phylink_pcs_ops;
+
+/**
+ * struct phylink_pcs - PHYLINK PCS instance
+ * @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @poll: poll the PCS for link changes
+ *
+ * This structure is designed to be embedded within the PCS private data,
+ * and will be passed between phylink and the PCS.
+ */
+struct phylink_pcs {
+	const struct phylink_pcs_ops *ops;
+	bool poll;
+};
+
 /**
  * struct phylink_pcs_ops - MAC PCS operations structure.
  * @pcs_get_state: read the current MAC PCS link state from the hardware.
@@ -279,20 +349,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy,
  * (where necessary).
  */
 struct phylink_pcs_ops {
-	void (*pcs_get_state)(struct phylink_config *config,
+	void (*pcs_get_state)(struct phylink_pcs *pcs,
 			      struct phylink_link_state *state);
-	int (*pcs_config)(struct phylink_config *config, unsigned int mode,
+	int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
 			  phy_interface_t interface,
-			  const unsigned long *advertising);
-	void (*pcs_an_restart)(struct phylink_config *config);
-	void (*pcs_link_up)(struct phylink_config *config, unsigned int mode,
+			  const unsigned long *advertising,
+			  bool permit_pause_to_mac);
+	void (*pcs_an_restart)(struct phylink_pcs *pcs);
+	void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
 			    phy_interface_t interface,
 			    int speed, int duplex);
 };
 
 #if 0 /* For kernel-doc purposes only. */
 /**
  * pcs_get_state() - Read the current inband link state from the hardware
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
 * @state: a pointer to a &struct phylink_link_state.
*
 * Read the current inband link state from the MAC PCS, reporting the
@@ -305,18 +376,20 @@ struct phylink_pcs_ops {
 * When present, this overrides mac_pcs_get_state() in &struct
 * phylink_mac_ops.
 */
-void pcs_get_state(struct phylink_config *config,
+void pcs_get_state(struct phylink_pcs *pcs,
		   struct phylink_link_state *state);

/**
 * pcs_config() - Configure the PCS mode and advertisement
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
 * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
 * @interface: interface mode to be used
 * @advertising: advertisement ethtool link mode mask
+ * @permit_pause_to_mac: permit forwarding pause resolution to MAC
 *
 * Configure the PCS for the operating mode, the interface mode, and set
- * the advertisement mask.
+ * the advertisement mask. @permit_pause_to_mac indicates whether the
+ * hardware may forward the pause mode resolution to the MAC.
 *
 * When operating in %MLO_AN_INBAND, inband should always be enabled,
 * otherwise inband should be disabled.
@@ -328,21 +401,22 @@ void pcs_get_state(struct phylink_config *config,
 *
 * For most 10GBASE-R, there is no advertisement.
 */
-int (*pcs_config)(struct phylink_config *config, unsigned int mode,
-		  phy_interface_t interface, const unsigned long *advertising);
+int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+	       phy_interface_t interface, const unsigned long *advertising,
+	       bool permit_pause_to_mac);

/**
 * pcs_an_restart() - restart 802.3z BaseX autonegotiation
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
 *
 * When PCS ops are present, this overrides mac_an_restart() in &struct
 * phylink_mac_ops.
 */
-void (*pcs_an_restart)(struct phylink_config *config);
+void pcs_an_restart(struct phylink_pcs *pcs);

/**
 * pcs_link_up() - program the PCS for the resolved link configuration
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
 * @mode: link autonegotiation mode
 * @interface: link &typedef phy_interface_t mode
 * @speed: link speed
@@ -353,22 +427,19 @@ void (*pcs_an_restart)(struct phylink_config *config);
 * mode without in-band AN needs to be manually configured for the link
 * and duplex setting. Otherwise, this should be a no-op.
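Pulling the new PCS pieces together, a hypothetical skeleton (not from the patch; every example_* name is invented) embeds struct phylink_pcs in its private data, fills in the ops, and is later attached with phylink_set_pcs():

/* Illustration only: minimal PCS driver scaffolding. */
struct example_pcs {
	struct phylink_pcs pcs;
	void __iomem *base;
};

static void example_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	/* read link/speed/duplex from the hardware into *state */
	state->link = 0;
}

static int example_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	/* program the PCS for @mode and @interface here */
	return 0;
}

static void example_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* kick 802.3z autonegotiation here */
}

static void example_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
				phy_interface_t interface, int speed,
				int duplex)
{
	/* manual configuration for fixed/non-inband modes, if needed */
}

static const struct phylink_pcs_ops example_pcs_ops = {
	.pcs_get_state	= example_pcs_get_state,
	.pcs_config	= example_pcs_config,
	.pcs_an_restart	= example_pcs_an_restart,
	.pcs_link_up	= example_pcs_link_up,
};

Setup would then be roughly: epcs->pcs.ops = &example_pcs_ops; epcs->pcs.poll = true; phylink_set_pcs(pl, &epcs->pcs);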
*/ -void (*pcs_link_up)(struct phylink_config *config, unsigned int mode, - phy_interface_t interface, int speed, int duplex); +void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex); #endif struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *mac_ops); -void phylink_add_pcs(struct phylink *, const struct phylink_pcs_ops *ops); +void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs); void phylink_destroy(struct phylink *); int phylink_connect_phy(struct phylink *, struct phy_device *); int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); void phylink_disconnect_phy(struct phylink *); -int phylink_fixed_state_cb(struct phylink *, - void (*cb)(struct net_device *dev, - struct phylink_link_state *)); void phylink_mac_change(struct phylink *, bool up); @@ -392,6 +463,8 @@ int phylink_init_eee(struct phylink *, bool); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); +int phylink_speed_down(struct phylink *pl, bool sync); +int phylink_speed_up(struct phylink *pl); #define phylink_zero(bm) \ bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS) @@ -410,6 +483,9 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs, int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs, phy_interface_t interface, const unsigned long *advertising); +int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising); void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs); void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs, diff --git a/include/linux/pid.h b/include/linux/pid.h index cc896f0fc4e3..176d6cf80e7c 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -102,12 +102,16 @@ extern void attach_pid(struct task_struct *task, enum pid_type); extern void detach_pid(struct task_struct *task, enum pid_type); extern void change_pid(struct task_struct *task, enum pid_type, struct pid *pid); +extern void exchange_tids(struct task_struct *task, struct task_struct *old); extern void transfer_pid(struct task_struct *old, struct task_struct *new, enum pid_type); struct pid_namespace; extern struct pid_namespace init_pid_ns; +extern int pid_max; +extern int pid_max_min, pid_max_max; + /* * look up a PID in the hash table. Must be called with the tasklist_lock * or rcu_read_lock() held. 
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index 4956e362e55e..5a5cb45ac57e 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -17,12 +17,6 @@ struct fs_pin; -enum { /* definitions for pid_namespace's hide_pid field */ - HIDEPID_OFF = 0, - HIDEPID_NO_ACCESS = 1, - HIDEPID_INVISIBLE = 2, -}; - struct pid_namespace { struct kref kref; struct idr idr; @@ -32,17 +26,11 @@ struct pid_namespace { struct kmem_cache *pid_cachep; unsigned int level; struct pid_namespace *parent; -#ifdef CONFIG_PROC_FS - struct dentry *proc_self; - struct dentry *proc_thread_self; -#endif #ifdef CONFIG_BSD_PROCESS_ACCT struct fs_pin *bacct; #endif struct user_namespace *user_ns; struct ucounts *ucounts; - kgid_t pid_gid; - int hide_pid; int reboot; /* group exit code if this pidns was rebooted */ struct ns_common ns; } __randomize_layout; diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index ae58fad7f1e0..5d2705f1d01c 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -8,6 +8,11 @@ #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ #define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ +#define PIPE_BUF_FLAG_CAN_MERGE 0x10 /* can merge buffers */ +#define PIPE_BUF_FLAG_WHOLE 0x20 /* read() must return entire buffer or error */ +#ifdef CONFIG_WATCH_QUEUE +#define PIPE_BUF_FLAG_LOSS 0x40 /* Message loss happened after this buffer */ +#endif /** * struct pipe_buffer - a linux kernel pipe buffer @@ -33,8 +38,10 @@ struct pipe_buffer { * @wr_wait: writer wait point in case of full pipe * @head: The point of buffer production * @tail: The point of buffer consumption + * @note_loss: The next read() should insert a data-lost message * @max_usage: The maximum number of slots that may be used in the ring * @ring_size: total number of buffers (should be a power of 2) + * @nr_accounted: The amount this pipe accounts for in user->pipe_bufs * @tmp_page: cached released page * @readers: number of current readers of this pipe * @writers: number of current writers of this pipe @@ -45,6 +52,7 @@ struct pipe_buffer { * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers * @user: the user who created this pipe + * @watch_queue: If this pipe is a watch_queue, this is the stuff for that **/ struct pipe_inode_info { struct mutex mutex; @@ -53,6 +61,10 @@ struct pipe_inode_info { unsigned int tail; unsigned int max_usage; unsigned int ring_size; +#ifdef CONFIG_WATCH_QUEUE + bool note_loss; +#endif + unsigned int nr_accounted; unsigned int readers; unsigned int writers; unsigned int files; @@ -63,17 +75,20 @@ struct pipe_inode_info { struct fasync_struct *fasync_writers; struct pipe_buffer *bufs; struct user_struct *user; +#ifdef CONFIG_WATCH_QUEUE + struct watch_queue *watch_queue; +#endif }; /* * Note on the nesting of these functions: * * ->confirm() - * ->steal() + * ->try_steal() * - * That is, ->steal() must be called on a confirmed buffer. - * See below for the meaning of each operation. Also see kerneldoc - * in fs/pipe.c for the pipe and generic variants of these hooks. + * That is, ->try_steal() must be called on a confirmed buffer. See below for + * the meaning of each operation. Also see the kerneldoc in fs/pipe.c for the + * pipe and generic variants of these hooks. */ struct pipe_buf_operations { /* @@ -81,7 +96,7 @@ struct pipe_buf_operations { * and that the contents are good. 
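To illustrate the ->confirm()/->try_steal() nesting described here, a hypothetical consumer (not from the patch; the function name is invented) that tries to take ownership of a buffer's page via the wrappers added below:

/* Illustration only: on success the stolen page is owned by the
 * caller; on failure the caller should fall back to copying.
 */
static struct page *example_grab_pipe_page(struct pipe_inode_info *pipe,
					   struct pipe_buffer *buf)
{
	/* ->confirm() (when present) must succeed before ->try_steal() */
	if (pipe_buf_confirm(pipe, buf))
		return NULL;

	if (!pipe_buf_try_steal(pipe, buf))
		return NULL;

	return buf->page;
}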
If the pages in the pipe belong * to a file system, we may need to wait for IO completion in this * hook. Returns 0 for good, or a negative error value in case of - * error. + * error. If not present all pages are considered good. */ int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *); @@ -93,13 +108,13 @@ struct pipe_buf_operations { /* * Attempt to take ownership of the pipe buffer and its contents. - * ->steal() returns 0 for success, in which case the contents - * of the pipe (the buf->page) is locked and now completely owned - * by the caller. The page may then be transferred to a different - * mapping, the most often used case is insertion into different - * file address space cache. + * ->try_steal() returns %true for success, in which case the contents + * of the pipe (the buf->page) is locked and now completely owned by the + * caller. The page may then be transferred to a different mapping, the + * most often used case is insertion into different file address space + * cache. */ - int (*steal)(struct pipe_inode_info *, struct pipe_buffer *); + bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *); /* * Get a reference to the pipe buffer. @@ -194,18 +209,22 @@ static inline void pipe_buf_release(struct pipe_inode_info *pipe, static inline int pipe_buf_confirm(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { + if (!buf->ops->confirm) + return 0; return buf->ops->confirm(pipe, buf); } /** - * pipe_buf_steal - attempt to take ownership of a pipe_buffer + * pipe_buf_try_steal - attempt to take ownership of a pipe_buffer * @pipe: the pipe that the buffer belongs to * @buf: the buffer to attempt to steal */ -static inline int pipe_buf_steal(struct pipe_inode_info *pipe, - struct pipe_buffer *buf) +static inline bool pipe_buf_try_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) { - return buf->ops->steal(pipe, buf); + if (!buf->ops->try_steal) + return false; + return buf->ops->try_steal(pipe, buf); } /* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual @@ -221,25 +240,34 @@ extern unsigned int pipe_max_size; extern unsigned long pipe_user_pages_hard; extern unsigned long pipe_user_pages_soft; -/* Drop the inode semaphore and wait for a pipe event, atomically */ -void pipe_wait(struct pipe_inode_info *pipe); +/* Wait for a pipe to be readable/writable while dropping the pipe lock */ +void pipe_wait_readable(struct pipe_inode_info *); +void pipe_wait_writable(struct pipe_inode_info *); struct pipe_inode_info *alloc_pipe_info(void); void free_pipe_info(struct pipe_inode_info *); /* Generic pipe buffer ops functions */ bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); -int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); -int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); -int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); +bool generic_pipe_buf_try_steal(struct pipe_inode_info *, struct pipe_buffer *); void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); -void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); extern const struct pipe_buf_operations nosteal_pipe_buf_ops; +#ifdef CONFIG_WATCH_QUEUE +unsigned long account_pipe_buffers(struct user_struct *user, + unsigned long old, unsigned long new); +bool too_many_pipe_buffers_soft(unsigned long user_bufs); +bool too_many_pipe_buffers_hard(unsigned long user_bufs); +bool pipe_is_unprivileged_user(void); +#endif + /* for F_SETPIPE_SZ and F_GETPIPE_SZ */ 
+#ifdef CONFIG_WATCH_QUEUE +int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots); +#endif long pipe_fcntl(struct file *, unsigned int, unsigned long arg); -struct pipe_inode_info *get_pipe_info(struct file *file); +struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice); int create_pipe_files(struct file **, int); unsigned int round_pipe_size(unsigned long size); diff --git a/include/linux/platform_data/ad5761.h b/include/linux/platform_data/ad5761.h index 02bef5177ff5..69e261e2ca14 100644 --- a/include/linux/platform_data/ad5761.h +++ b/include/linux/platform_data/ad5761.h @@ -3,7 +3,7 @@ * AD5721, AD5721R, AD5761, AD5761R, Voltage Output Digital to Analog Converter * * Copyright 2016 Qtechnology A/S - * 2016 Ricardo Ribalda <ricardo.ribalda@gmail.com> + * 2016 Ricardo Ribalda <ribalda@kernel.org> */ #ifndef __LINUX_PLATFORM_DATA_AD5761_H__ #define __LINUX_PLATFORM_DATA_AD5761_H__ diff --git a/include/linux/platform_data/clk-st.h b/include/linux/platform_data/clk-fch.h index 7cdb6a402b35..b9f682459f08 100644 --- a/include/linux/platform_data/clk-st.h +++ b/include/linux/platform_data/clk-fch.h @@ -1,17 +1,18 @@ /* SPDX-License-Identifier: MIT */ /* - * clock framework for AMD Stoney based clock + * clock framework for AMD misc clocks * * Copyright 2018 Advanced Micro Devices, Inc. */ -#ifndef __CLK_ST_H -#define __CLK_ST_H +#ifndef __CLK_FCH_H +#define __CLK_FCH_H #include <linux/compiler.h> -struct st_clk_data { +struct fch_clk_data { void __iomem *base; + u32 is_rv; }; -#endif /* __CLK_ST_H */ +#endif /* __CLK_FCH_H */ diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h deleted file mode 100644 index addd48cac625..000000000000 --- a/include/linux/platform_data/clk-integrator.h +++ /dev/null @@ -1,2 +0,0 @@ -void integrator_impd1_clk_init(void __iomem *base, unsigned int id); -void integrator_impd1_clk_exit(unsigned int id); diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 69210881ebac..1fcfe9e63cb9 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4917,15 +4917,26 @@ struct ec_response_usb_pd_control_v1 { #define USBC_PD_CC_UFP_ATTACHED 4 /* UFP attached to usbc */ #define USBC_PD_CC_DFP_ATTACHED 5 /* DFP attached to usbc */ +/* Active/Passive Cable */ +#define USB_PD_CTRL_ACTIVE_CABLE BIT(0) +/* Optical/Non-optical cable */ +#define USB_PD_CTRL_OPTICAL_CABLE BIT(1) +/* 3rd Gen TBT device (or AMA)/2nd gen TBT adapter */ +#define USB_PD_CTRL_TBT_LEGACY_ADAPTER BIT(2) +/* Active Link Uni-Direction */ +#define USB_PD_CTRL_ACTIVE_LINK_UNIDIR BIT(3) + struct ec_response_usb_pd_control_v2 { uint8_t enabled; uint8_t role; uint8_t polarity; char state[32]; - uint8_t cc_state; /* USBC_PD_CC_*Encoded cc state */ - uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ - /* CL:1500994 Current cable type */ - uint8_t reserved_cable_type; + uint8_t cc_state; /* enum pd_cc_states representing cc state */ + uint8_t dp_mode; /* Current DP pin mode (MODE_DP_PIN_[A-E]) */ + uint8_t reserved; /* Reserved for future use */ + uint8_t control_flags; /* USB_PD_CTRL_* flags */ + uint8_t cable_speed; /* TBT_SS_* cable speed */ + uint8_t cable_gen; /* TBT_GEN3_* cable rounded support */ } __ec_align1; #define EC_CMD_USB_PD_PORTS 0x0102 @@ -5207,11 +5218,15 @@ struct ec_params_usb_pd_mux_info { } __ec_align1; /* Flags representing mux state */ -#define USB_PD_MUX_USB_ENABLED BIT(0) /* 
USB connected */ -#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ -#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ -#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ -#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_NONE 0 /* Open switch */ +#define USB_PD_MUX_USB_ENABLED BIT(0) /* USB connected */ +#define USB_PD_MUX_DP_ENABLED BIT(1) /* DP connected */ +#define USB_PD_MUX_POLARITY_INVERTED BIT(2) /* CC line Polarity inverted */ +#define USB_PD_MUX_HPD_IRQ BIT(3) /* HPD IRQ is asserted */ +#define USB_PD_MUX_HPD_LVL BIT(4) /* HPD level is asserted */ +#define USB_PD_MUX_SAFE_MODE BIT(5) /* DP is in safe mode */ +#define USB_PD_MUX_TBT_COMPAT_ENABLED BIT(6) /* TBT compat enabled */ +#define USB_PD_MUX_USB4_ENABLED BIT(7) /* USB4 enabled */ struct ec_response_usb_pd_mux_info { uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */ @@ -5431,6 +5446,89 @@ struct ec_response_rollback_info { #define EC_CMD_AP_RESET 0x0125 /*****************************************************************************/ +/* Voltage regulator controls */ + +/* + * Get basic info of voltage regulator for given index. + * + * Returns the regulator name and supported voltage list in mV. + */ +#define EC_CMD_REGULATOR_GET_INFO 0x012C + +/* Maximum length of regulator name */ +#define EC_REGULATOR_NAME_MAX_LEN 16 + +/* Maximum length of the supported voltage list. */ +#define EC_REGULATOR_VOLTAGE_MAX_COUNT 16 + +struct ec_params_regulator_get_info { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_get_info { + char name[EC_REGULATOR_NAME_MAX_LEN]; + uint16_t num_voltages; + uint16_t voltages_mv[EC_REGULATOR_VOLTAGE_MAX_COUNT]; +} __ec_align2; + +/* + * Configure the regulator as enabled / disabled. + */ +#define EC_CMD_REGULATOR_ENABLE 0x012D + +struct ec_params_regulator_enable { + uint32_t index; + uint8_t enable; +} __ec_align4; + +/* + * Query if the regulator is enabled. + * + * Returns 1 if the regulator is enabled, 0 if not. + */ +#define EC_CMD_REGULATOR_IS_ENABLED 0x012E + +struct ec_params_regulator_is_enabled { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_is_enabled { + uint8_t enabled; +} __ec_align1; + +/* + * Set voltage for the voltage regulator within the range specified. + * + * The driver should select the voltage in range closest to min_mv. + * + * Also note that this might be called before the regulator is enabled, and the + * setting should be in effect after the regulator is enabled. + */ +#define EC_CMD_REGULATOR_SET_VOLTAGE 0x012F + +struct ec_params_regulator_set_voltage { + uint32_t index; + uint32_t min_mv; + uint32_t max_mv; +} __ec_align4; + +/* + * Get the currently configured voltage for the voltage regulator. + * + * Note that this might be called before the regulator is enabled, and this + * should return the configured output voltage if the regulator is enabled. + */ +#define EC_CMD_REGULATOR_GET_VOLTAGE 0x0130 + +struct ec_params_regulator_get_voltage { + uint32_t index; +} __ec_align4; + +struct ec_response_regulator_get_voltage { + uint32_t voltage_mv; +} __ec_align4; + +/*****************************************************************************/ /* The command range 0x200-0x2FF is reserved for Rotor. 
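
For a sense of how the new regulator host commands are driven, here is a sketch of a kernel-side caller using the usual cros_ec host-command framing; the function name and the minimal error handling are assumptions, not code from this patch:

/* Sketch: program a regulator voltage range via the new EC command. */
static int example_ec_regulator_set_voltage(struct cros_ec_device *ec_dev,
					    u32 index, u32 min_mv, u32 max_mv)
{
	struct {
		struct cros_ec_command msg;
		struct ec_params_regulator_set_voltage param;
	} __packed buf = {
		.msg = {
			.command = EC_CMD_REGULATOR_SET_VOLTAGE,
			.outsize = sizeof(buf.param),
		},
		.param = {
			.index = index,
			.min_mv = min_mv,
			.max_mv = max_mv,
		},
	};

	return cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
}
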
*/ /*****************************************************************************/ diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 383243326676..4a415ae851ef 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -216,9 +216,6 @@ int cros_ec_prepare_tx(struct cros_ec_device *ec_dev, int cros_ec_check_result(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg); - int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg); diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h index 3fbf9f2793b5..bc208c64e3d7 100644 --- a/include/linux/platform_data/davinci-cpufreq.h +++ b/include/linux/platform_data/davinci-cpufreq.h @@ -2,7 +2,7 @@ /* * TI DaVinci CPUFreq platform support. * - * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/ + * Copyright (C) 2009 Texas Instruments, Inc. https://www.ti.com/ */ #ifndef _MACH_DAVINCI_CPUFREQ_H diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 7fe80f1c7e08..5d1fb0d78a22 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -1,7 +1,7 @@ /* * TI DaVinci Audio Serial Port support * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index f3eaf9ec00a1..fbbeb2f6189b 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -8,10 +8,15 @@ #ifndef _PLATFORM_DATA_DMA_DW_H #define _PLATFORM_DATA_DMA_DW_H -#include <linux/device.h> +#include <linux/bits.h> +#include <linux/types.h> #define DW_DMA_MAX_NR_MASTERS 4 #define DW_DMA_MAX_NR_CHANNELS 8 +#define DW_DMA_MIN_BURST 1 +#define DW_DMA_MAX_BURST 256 + +struct device; /** * struct dw_dma_slave - Controller-specific information about a slave @@ -42,6 +47,8 @@ struct dw_dma_slave { * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. + * @max_burst: Maximum value of burst transaction size supported by hardware + * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH). * @protctl: Protection control signals setting per channel. 
*/ struct dw_dma_platform_data { @@ -56,6 +63,7 @@ struct dw_dma_platform_data { unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; + u32 max_burst[DW_DMA_MAX_NR_CHANNELS]; #define CHAN_PROTCTL_PRIVILEGED BIT(0) #define CHAN_PROTCTL_BUFFERABLE BIT(1) #define CHAN_PROTCTL_CACHEABLE BIT(2) diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h index 0f491d8abfdd..3cc78f0447b1 100644 --- a/include/linux/platform_data/elm.h +++ b/include/linux/platform_data/elm.h @@ -2,7 +2,7 @@ /* * BCH Error Location Module * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ */ #ifndef __ELM_H diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index a93841bfb9f7..e182a46e609f 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -1,7 +1,7 @@ /* * DaVinci GPIO Platform Related Defines * - * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h index 3c606c450d05..ff1be737bad6 100644 --- a/include/linux/platform_data/gpio-dwapb.h +++ b/include/linux/platform_data/gpio-dwapb.h @@ -12,7 +12,6 @@ struct dwapb_port_property { unsigned int ngpio; unsigned int gpio_base; int irq[32]; - bool has_irq; bool irq_shared; }; diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h index 9e46678edb2a..255d51c9d36d 100644 --- a/include/linux/platform_data/gpio/gpio-amd-fch.h +++ b/include/linux/platform_data/gpio/gpio-amd-fch.h @@ -19,7 +19,7 @@ #define AMD_FCH_GPIO_REG_GPIO49 0x40 #define AMD_FCH_GPIO_REG_GPIO50 0x41 #define AMD_FCH_GPIO_REG_GPIO51 0x42 -#define AMD_FCH_GPIO_REG_GPIO59_DEVSLP0 0x43 +#define AMD_FCH_GPIO_REG_GPIO55_DEVSLP0 0x43 #define AMD_FCH_GPIO_REG_GPIO57 0x44 #define AMD_FCH_GPIO_REG_GPIO58 0x45 #define AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 0x46 diff --git a/include/linux/platform_data/gpmc-omap.h b/include/linux/platform_data/gpmc-omap.h index ef663e570552..c9cc4e32435d 100644 --- a/include/linux/platform_data/gpmc-omap.h +++ b/include/linux/platform_data/gpmc-omap.h @@ -2,7 +2,7 @@ /* * OMAP GPMC Platform data * - * Copyright (C) 2014 Texas Instruments, Inc. - http://www.ti.com + * Copyright (C) 2014 Texas Instruments, Inc. 
- https://www.ti.com * Roger Quadros <rogerq@ti.com> */ diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h new file mode 100644 index 000000000000..37a8f554da00 --- /dev/null +++ b/include/linux/platform_data/gsc_hwmon.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _GSC_HWMON_H +#define _GSC_HWMON_H + +enum gsc_hwmon_mode { + mode_temperature, + mode_voltage_24bit, + mode_voltage_raw, + mode_voltage_16bit, + mode_max, +}; + +/** + * struct gsc_hwmon_channel - configuration parameters + * @reg: I2C register offset + * @mode: channel mode + * @name: channel name + * @mvoffset: voltage offset + * @vdiv: voltage divider array (2 resistor values in milli-ohms) + */ +struct gsc_hwmon_channel { + unsigned int reg; + unsigned int mode; + const char *name; + unsigned int mvoffset; + unsigned int vdiv[2]; +}; + +/** + * struct gsc_hwmon_platform_data - platform data for gsc_hwmon driver + * @channels: pointer to array of gsc_hwmon_channel structures + * describing channels + * @nchannels: number of elements in @channels array + * @vreference: voltage reference (mV) + * @resolution: ADC bit resolution + * @fan_base: register base for FAN controller + */ +struct gsc_hwmon_platform_data { + const struct gsc_hwmon_channel *channels; + int nchannels; + unsigned int resolution; + unsigned int vreference; + unsigned int fan_base; +}; +#endif diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h index 6a9b28399b39..24953981bd9f 100644 --- a/include/linux/platform_data/i2c-pxa.h +++ b/include/linux/platform_data/i2c-pxa.h @@ -7,54 +7,6 @@ #ifndef _I2C_PXA_H_ #define _I2C_PXA_H_ -#if 0 -#define DEF_TIMEOUT 3 -#else -/* need a longer timeout if we're dealing with the fact we may well be - * looking at a multi-master environment -*/ -#define DEF_TIMEOUT 32 -#endif - -#define BUS_ERROR (-EREMOTEIO) -#define XFER_NAKED (-ECONNREFUSED) -#define I2C_RETRY (-2000) /* an error has occurred retry transmit */ - -/* ICR initialize bit values -* -* 15. FM 0 (100 Khz operation) -* 14. UR 0 (No unit reset) -* 13. SADIE 0 (Disables the unit from interrupting on slave addresses -* matching its slave address) -* 12. ALDIE 0 (Disables the unit from interrupt when it loses arbitration -* in master mode) -* 11. SSDIE 0 (Disables interrupts from a slave stop detected, in slave mode) -* 10. BEIE 1 (Enable interrupts from detected bus errors, no ACK sent) -* 9. IRFIE 1 (Enable interrupts from full buffer received) -* 8. ITEIE 1 (Enables the I2C unit to interrupt when transmit buffer empty) -* 7. GCD 1 (Disables i2c unit response to general call messages as a slave) -* 6. IUE 0 (Disable unit until we change settings) -* 5. SCLE 1 (Enables the i2c clock output for master mode (drives SCL) -* 4. MA 0 (Only send stop with the ICR stop bit) -* 3. TB 0 (We are not transmitting a byte initially) -* 2. ACKNAK 0 (Send an ACK after the unit receives a byte) -* 1. STOP 0 (Do not send a STOP) -* 0. START 0 (Do not send a START) -* -*/ -#define I2C_ICR_INIT (ICR_BEIE | ICR_IRFIE | ICR_ITEIE | ICR_GCD | ICR_SCLE) - -/* I2C status register init values - * - * 10. BED 1 (Clear bus error detected) - * 9. SAD 1 (Clear slave address detected) - * 7. IRF 1 (Clear IDBR Receive Full) - * 6. ITE 1 (Clear IDBR Transmit Empty) - * 5. ALD 1 (Clear Arbitration Loss Detected) - * 4. 
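
To make the new gsc_hwmon platform data concrete, a board file might describe its channels roughly as below; every register offset, name and value here is invented for illustration:

/* Hypothetical gsc_hwmon board description; values are made up. */
static const struct gsc_hwmon_channel example_gsc_channels[] = {
	{ .reg = 0x00, .mode = mode_temperature, .name = "cpu_temp" },
	{ .reg = 0x02, .mode = mode_voltage_24bit, .name = "vdd_3p3",
	  .vdiv = { 10000, 10000 } },	/* divider resistors, milli-ohms */
};

static const struct gsc_hwmon_platform_data example_gsc_pdata = {
	.channels	= example_gsc_channels,
	.nchannels	= ARRAY_SIZE(example_gsc_channels),
	.resolution	= 12,
	.vreference	= 2500,	/* mV */
};
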
SSD 1 (Clear Slave Stop Detected) - */ -#define I2C_ISR_INIT 0x7FF /* status register init */ - struct i2c_pxa_platform_data { unsigned int class; unsigned int use_pio :1; diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h index 2ccdce6a4e27..45d860cac2b0 100644 --- a/include/linux/platform_data/itco_wdt.h +++ b/include/linux/platform_data/itco_wdt.h @@ -12,13 +12,16 @@ #define ICH_RES_MEM_OFF 2 #define ICH_RES_MEM_GCS_PMC 0 +/** + * struct itco_wdt_platform_data - iTCO_wdt platform data + * @name: Name of the platform + * @version: iTCO version + * @no_reboot_use_pmc: Use PMC BXT API to set and clear NO_REBOOT bit + */ struct itco_wdt_platform_data { char name[32]; unsigned int version; - /* private data to be passed to update_no_reboot_bit API */ - void *no_reboot_priv; - /* pointer for platform specific no reboot update function */ - int (*update_no_reboot_bit)(void *priv, bool set); + bool no_reboot_use_pmc; }; #endif /* _ITCO_WDT_H_ */ diff --git a/include/linux/platform_data/leds-lp55xx.h b/include/linux/platform_data/leds-lp55xx.h index 96a787100fda..3441064713a3 100644 --- a/include/linux/platform_data/leds-lp55xx.h +++ b/include/linux/platform_data/leds-lp55xx.h @@ -12,17 +12,26 @@ #ifndef _LEDS_LP55XX_H #define _LEDS_LP55XX_H +#include <linux/gpio/consumer.h> +#include <linux/led-class-multicolor.h> + /* Clock configuration */ #define LP55XX_CLOCK_AUTO 0 #define LP55XX_CLOCK_INT 1 #define LP55XX_CLOCK_EXT 2 +#define LP55XX_MAX_GROUPED_CHAN 4 + struct lp55xx_led_config { const char *name; const char *default_trigger; u8 chan_nr; u8 led_current; /* mA x10, 0 if led is not connected */ u8 max_current; + int num_colors; + unsigned int max_channel; + int color_id[LED_COLOR_ID_MAX]; + int output_num[LED_COLOR_ID_MAX]; }; struct lp55xx_predef_pattern { @@ -49,7 +58,7 @@ enum lp8501_pwr_sel { * @clock_mode : Input clock mode. 
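
The slimmed-down iTCO_wdt platform data above reduces board code to a plain description; a hypothetical instance (values invented):

/* Hypothetical platform data for the simplified iTCO_wdt interface. */
static const struct itco_wdt_platform_data example_itco_pdata = {
	.name			= "Example PMC",
	.version		= 4,
	.no_reboot_use_pmc	= true,
};
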
LP55XX_CLOCK_AUTO or _INT or _EXT * @setup_resources : Platform specific function before enabling the chip * @release_resources : Platform specific function after disabling the chip - * @enable : EN pin control by platform side + * @enable_gpiod : enable GPIO descriptor * @patterns : Predefined pattern data for RGB channels * @num_patterns : Number of patterns * @update_config : Value of CONFIG register @@ -65,7 +74,7 @@ struct lp55xx_platform_data { u8 clock_mode; /* optional enable GPIO */ - int enable_gpio; + struct gpio_desc *enable_gpiod; /* Predefined pattern data */ struct lp55xx_predef_pattern *patterns; diff --git a/include/linux/platform_data/leds-s3c24xx.h b/include/linux/platform_data/leds-s3c24xx.h index 5bbae85811e2..64f8d14876e0 100644 --- a/include/linux/platform_data/leds-s3c24xx.h +++ b/include/linux/platform_data/leds-s3c24xx.h @@ -10,13 +10,7 @@ #ifndef __LEDS_S3C24XX_H #define __LEDS_S3C24XX_H -#define S3C24XX_LEDF_ACTLOW (1<<0) /* LED is on when GPIO low */ -#define S3C24XX_LEDF_TRISTATE (1<<1) /* tristate to turn off */ - struct s3c24xx_led_platdata { - unsigned int gpio; - unsigned int flags; - char *name; char *def_trigger; }; diff --git a/include/linux/platform_data/media/omap1_camera.h b/include/linux/platform_data/media/omap1_camera.h deleted file mode 100644 index 386439db68de..000000000000 --- a/include/linux/platform_data/media/omap1_camera.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header for V4L2 SoC Camera driver for OMAP1 Camera Interface - * - * Copyright (C) 2010, Janusz Krzysztofik <jkrzyszt@tis.icnet.pl> - */ - -#ifndef __MEDIA_OMAP1_CAMERA_H_ -#define __MEDIA_OMAP1_CAMERA_H_ - -#include <linux/bitops.h> - -#define OMAP1_CAMERA_IOSIZE 0x1c - -enum omap1_cam_vb_mode { - OMAP1_CAM_DMA_CONTIG = 0, - OMAP1_CAM_DMA_SG, -}; - -#define OMAP1_CAMERA_MIN_BUF_COUNT(x) ((x) == OMAP1_CAM_DMA_CONTIG ? 3 : 2) - -struct omap1_cam_platform_data { - unsigned long camexclk_khz; - unsigned long lclk_khz_max; - unsigned long flags; -}; - -#define OMAP1_CAMERA_LCLK_RISING BIT(0) -#define OMAP1_CAMERA_RST_LOW BIT(1) -#define OMAP1_CAMERA_RST_HIGH BIT(2) - -#endif /* __MEDIA_OMAP1_CAMERA_H_ */ diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index b8da8aef2446..1af9c01563f9 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -43,10 +43,13 @@ * * TYPE1 HW watchdog implementation exist in old systems. * All new systems have TYPE2 HW watchdog. + * TYPE3 HW watchdog can exist on all systems with new CPLD. + * TYPE3 is selected by WD capability bit. 
*/ enum mlxreg_wdt_type { MLX_WDT_TYPE1, MLX_WDT_TYPE2, + MLX_WDT_TYPE3, }; /** @@ -75,11 +78,13 @@ struct mlxreg_hotplug_device { * @mask: attribute access mask; * @bit: attribute effective bit; * @capability: attribute capability register; + * @reg_prsnt: attribute presence register; * @mode: access mode; * @np - pointer to node platform associated with attribute; * @hpdev - hotplug device data; * @health_cntr: dynamic device health indication counter; * @attached: true if device has been attached after good health indication; + * @regnum: number of registers occupied by multi-register attribute; */ struct mlxreg_core_data { char label[MLXREG_CORE_LABEL_MAX_SIZE]; @@ -87,11 +92,13 @@ struct mlxreg_core_data { u32 mask; u32 bit; u32 capability; + u32 reg_prsnt; umode_t mode; struct device_node *np; struct mlxreg_hotplug_device hpdev; - u8 health_cntr; + u32 health_cntr; bool attached; + u8 regnum; }; /** diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 0434f68eda86..cba1184b364c 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright 2010 Wolfram Sang <w.sang@pengutronix.de> + * Copyright 2010 Wolfram Sang <kernel@pengutronix.de> */ #ifndef __ASM_ARCH_IMX_ESDHC_H diff --git a/include/linux/platform_data/mmc-esdhc-mcf.h b/include/linux/platform_data/mmc-esdhc-mcf.h new file mode 100644 index 000000000000..85cb786a62fe --- /dev/null +++ b/include/linux/platform_data/mmc-esdhc-mcf.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ +#define __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ + +enum cd_types { + ESDHC_CD_NONE, /* no CD, neither controller nor gpio */ + ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ + ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ +}; + +struct mcf_esdhc_platform_data { + int max_bus_width; + int cd_type; +}; + +#endif /* __LINUX_PLATFORM_DATA_MCF_ESDHC_H__ */ diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h index 9acf0e87aa9b..f0b8947e6b07 100644 --- a/include/linux/platform_data/mmc-omap.h +++ b/include/linux/platform_data/mmc-omap.h @@ -116,3 +116,6 @@ struct omap_mmc_platform_data { } slots[OMAP_MMC_MAX_SLOTS]; }; + +extern void omap_mmc_notify_cover_event(struct device *dev, int slot, + int is_closed); diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h index a403dd51dacc..a49826214a39 100644 --- a/include/linux/platform_data/mtd-davinci-aemif.h +++ b/include/linux/platform_data/mtd-davinci-aemif.h @@ -1,7 +1,7 @@ /* * TI DaVinci AEMIF support * - * Copyright 2010 (C) Texas Instruments, Inc. http://www.ti.com/ + * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/ * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h index 08e639e047e5..03e92c71b3fa 100644 --- a/include/linux/platform_data/mtd-davinci.h +++ b/include/linux/platform_data/mtd-davinci.h @@ -68,7 +68,7 @@ struct davinci_nand_pdata { /* platform_data */ * Newer ones also support 4-bit ECC, but are awkward * using it with large page chips. */ - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; u8 ecc_bits; /* e.g. 
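
A ColdFire board could instantiate the new mcf_esdhc_platform_data along these lines (a sketch, values assumed):

/* Hypothetical board data for the new ColdFire eSDHC header. */
static const struct mcf_esdhc_platform_data example_esdhc_pdata = {
	.max_bus_width	= 4,
	.cd_type	= ESDHC_CD_CONTROLLER,
};
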
NAND_BUSWIDTH_16 */ diff --git a/include/linux/platform_data/mtd-nand-s3c2410.h b/include/linux/platform_data/mtd-nand-s3c2410.h index deb849bcf0ec..08675b16f9e1 100644 --- a/include/linux/platform_data/mtd-nand-s3c2410.h +++ b/include/linux/platform_data/mtd-nand-s3c2410.h @@ -49,7 +49,7 @@ struct s3c2410_platform_nand { unsigned int ignore_unset_ecc:1; - nand_ecc_modes_t ecc_mode; + enum nand_ecc_mode ecc_mode; int nr_sets; struct s3c2410_nand_set *sets; diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h index 8419c8caf54e..0dd851ea1c72 100644 --- a/include/linux/platform_data/omap-twl4030.h +++ b/include/linux/platform_data/omap-twl4030.h @@ -3,7 +3,7 @@ * omap-twl4030.h - ASoC machine driver for TI SoC based boards with twl4030 * codec, header. * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * All rights reserved. * * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> diff --git a/include/linux/platform_data/sky81452-backlight.h b/include/linux/platform_data/sky81452-backlight.h deleted file mode 100644 index 02653d92d84f..000000000000 --- a/include/linux/platform_data/sky81452-backlight.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * sky81452.h SKY81452 backlight driver - * - * Copyright 2014 Skyworks Solutions Inc. - * Author : Gyungoh Yoo <jack.yoo@skyworksinc.com> - */ - -#ifndef _SKY81452_BACKLIGHT_H -#define _SKY81452_BACKLIGHT_H - -/** - * struct sky81452_platform_data - * @name: backlight driver name. - If it is not defined, default name is lcd-backlight. - * @gpio_enable:GPIO number which control EN pin - * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. - * @ignore_pwm: true if DPWMI should be ignored. - * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. - * @phase_shift:true is phase shift mode. - * @short_detecion_threshold: It should be one of 4, 5, 6 and 7V. - * @boost_current_limit: It should be one of 2300, 2750mA. - */ -struct sky81452_bl_platform_data { - const char *name; - int gpio_enable; - unsigned int enable; - bool ignore_pwm; - bool dpwm_mode; - bool phase_shift; - unsigned int short_detection_threshold; - unsigned int boost_current_limit; -}; - -#endif diff --git a/include/linux/platform_data/spi-imx.h b/include/linux/platform_data/spi-imx.h deleted file mode 100644 index 328f670d10bd..000000000000 --- a/include/linux/platform_data/spi-imx.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef __MACH_SPI_H_ -#define __MACH_SPI_H_ - -/* - * struct spi_imx_master - device.platform_data for SPI controller devices. - * @chipselect: Array of chipselects for this master or NULL. Numbers >= 0 - * mean GPIO pins, -ENOENT means internal CSPI chipselect - * matching the position in the array. E.g., if chipselect[1] = - * -ENOENT then a SPI slave using chip select 1 will use the - * native SS1 line of the CSPI. Omitting the array will use - * all native chip selects. - - * Normally you want to use gpio based chip selects as the CSPI - * module tries to be intelligent about when to assert the - * chipselect: The CSPI module deasserts the chipselect once it - * runs out of input data. The other problem is that it is not - * possible to mix between high active and low active chipselects - * on one single bus using the internal chipselects. 
- * Unfortunately, on some SoCs, Freescale decided to put some - * chipselects on dedicated pins which are not usable as gpios, - * so we have to support the internal chipselects. - * - * @num_chipselect: If @chipselect is specified, ARRAY_SIZE(chipselect), - * otherwise the number of native chip selects. - */ -struct spi_imx_master { - int *chipselect; - int num_chipselect; -}; - -#endif /* __MACH_SPI_H_*/ diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h index 3d47d219827f..31f2e22661bc 100644 --- a/include/linux/platform_data/uio_pruss.h +++ b/include/linux/platform_data/uio_pruss.h @@ -3,7 +3,7 @@ * * Platform data for uio_pruss driver * - * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h index fa579b4c666b..5e70d667031c 100644 --- a/include/linux/platform_data/usb-omap.h +++ b/include/linux/platform_data/usb-omap.h @@ -1,7 +1,7 @@ /* * usb-omap.h - Platform data for the various OMAP USB IPs * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index d39fc658c320..897b8332a39f 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -85,6 +85,9 @@ /* Maximum charging percentage */ #define ASUS_WMI_DEVID_RSOC 0x00120057 +/* Keyboard dock */ +#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 diff --git a/include/linux/pldmfw.h b/include/linux/pldmfw.h new file mode 100644 index 000000000000..0fc831338226 --- /dev/null +++ b/include/linux/pldmfw.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018-2019, Intel Corporation. 
*/ + +#ifndef _PLDMFW_H_ +#define _PLDMFW_H_ + +#include <linux/list.h> +#include <linux/firmware.h> + +#define PLDM_DEVICE_UPDATE_CONTINUE_AFTER_FAIL BIT(0) + +#define PLDM_STRING_TYPE_UNKNOWN 0 +#define PLDM_STRING_TYPE_ASCII 1 +#define PLDM_STRING_TYPE_UTF8 2 +#define PLDM_STRING_TYPE_UTF16 3 +#define PLDM_STRING_TYPE_UTF16LE 4 +#define PLDM_STRING_TYPE_UTF16BE 5 + +struct pldmfw_record { + struct list_head entry; + + /* List of descriptor TLVs */ + struct list_head descs; + + /* Component Set version string*/ + const u8 *version_string; + u8 version_type; + u8 version_len; + + /* Package Data length */ + u16 package_data_len; + + /* Bitfield of Device Update Flags */ + u32 device_update_flags; + + /* Package Data block */ + const u8 *package_data; + + /* Bitmap of components applicable to this record */ + unsigned long *component_bitmap; + u16 component_bitmap_len; +}; + +/* Standard descriptor TLV identifiers */ +#define PLDM_DESC_ID_PCI_VENDOR_ID 0x0000 +#define PLDM_DESC_ID_IANA_ENTERPRISE_ID 0x0001 +#define PLDM_DESC_ID_UUID 0x0002 +#define PLDM_DESC_ID_PNP_VENDOR_ID 0x0003 +#define PLDM_DESC_ID_ACPI_VENDOR_ID 0x0004 +#define PLDM_DESC_ID_PCI_DEVICE_ID 0x0100 +#define PLDM_DESC_ID_PCI_SUBVENDOR_ID 0x0101 +#define PLDM_DESC_ID_PCI_SUBDEV_ID 0x0102 +#define PLDM_DESC_ID_PCI_REVISION_ID 0x0103 +#define PLDM_DESC_ID_PNP_PRODUCT_ID 0x0104 +#define PLDM_DESC_ID_ACPI_PRODUCT_ID 0x0105 +#define PLDM_DESC_ID_VENDOR_DEFINED 0xFFFF + +struct pldmfw_desc_tlv { + struct list_head entry; + + const u8 *data; + u16 type; + u16 size; +}; + +#define PLDM_CLASSIFICATION_UNKNOWN 0x0000 +#define PLDM_CLASSIFICATION_OTHER 0x0001 +#define PLDM_CLASSIFICATION_DRIVER 0x0002 +#define PLDM_CLASSIFICATION_CONFIG_SW 0x0003 +#define PLDM_CLASSIFICATION_APP_SW 0x0004 +#define PLDM_CLASSIFICATION_INSTRUMENTATION 0x0005 +#define PLDM_CLASSIFICATION_BIOS 0x0006 +#define PLDM_CLASSIFICATION_DIAGNOSTIC_SW 0x0007 +#define PLDM_CLASSIFICATION_OS 0x0008 +#define PLDM_CLASSIFICATION_MIDDLEWARE 0x0009 +#define PLDM_CLASSIFICATION_FIRMWARE 0x000A +#define PLDM_CLASSIFICATION_CODE 0x000B +#define PLDM_CLASSIFICATION_SERVICE_PACK 0x000C +#define PLDM_CLASSIFICATION_SOFTWARE_BUNDLE 0x000D + +#define PLDM_ACTIVATION_METHOD_AUTO BIT(0) +#define PLDM_ACTIVATION_METHOD_SELF_CONTAINED BIT(1) +#define PLDM_ACTIVATION_METHOD_MEDIUM_SPECIFIC BIT(2) +#define PLDM_ACTIVATION_METHOD_REBOOT BIT(3) +#define PLDM_ACTIVATION_METHOD_DC_CYCLE BIT(4) +#define PLDM_ACTIVATION_METHOD_AC_CYCLE BIT(5) + +#define PLDMFW_COMPONENT_OPTION_FORCE_UPDATE BIT(0) +#define PLDMFW_COMPONENT_OPTION_USE_COMPARISON_STAMP BIT(1) + +struct pldmfw_component { + struct list_head entry; + + /* component identifier */ + u16 classification; + u16 identifier; + + u16 options; + u16 activation_method; + + u32 comparison_stamp; + + u32 component_size; + const u8 *component_data; + + /* Component version string */ + const u8 *version_string; + u8 version_type; + u8 version_len; + + /* component index */ + u8 index; + +}; + +/* Transfer flag used for sending components to the firmware */ +#define PLDM_TRANSFER_FLAG_START BIT(0) +#define PLDM_TRANSFER_FLAG_MIDDLE BIT(1) +#define PLDM_TRANSFER_FLAG_END BIT(2) + +struct pldmfw_ops; + +/* Main entry point to the PLDM firmware update engine. Device drivers + * should embed this in a private structure and use container_of to obtain + * a pointer to their own data, used to implement the device specific + * operations. 
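
The embedding pattern described in the comment above can be sketched as follows; the driver type and callback are hypothetical, and PCI devices may simply delegate matching to the stock pldmfw_op_pci_match_record() helper:

/* Hypothetical driver embedding struct pldmfw, as the comment suggests. */
struct example_fw_priv {
	struct pldmfw context;
	/* device-specific state would live here */
};

static bool example_match_record(struct pldmfw *context,
				 struct pldmfw_record *record)
{
	struct example_fw_priv *priv =
		container_of(context, struct example_fw_priv, context);

	/* a PCI device can reuse the stock matcher on its own context */
	return pldmfw_op_pci_match_record(&priv->context, record);
}
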
+ */ +struct pldmfw { + const struct pldmfw_ops *ops; + struct device *dev; +}; + +bool pldmfw_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record); + +/* Operations invoked by the generic PLDM firmware update engine. Used to + * implement device specific logic. + * + * @match_record: check if the device matches the given record. For + * convenience, a standard implementation is provided for PCI devices. + * + * @send_package_data: send the package data associated with the matching + * record to firmware. + * + * @send_component_table: send the component data associated with a given + * component to firmware. Called once for each applicable component. + * + * @flash_component: Flash the data for a given component to the device. + * Called once for each applicable component, after all component tables have + * been sent. + * + * @finalize_update: (optional) Finish the update. Called after all components + * have been flashed. + */ +struct pldmfw_ops { + bool (*match_record)(struct pldmfw *context, struct pldmfw_record *record); + int (*send_package_data)(struct pldmfw *context, const u8 *data, u16 length); + int (*send_component_table)(struct pldmfw *context, struct pldmfw_component *component, + u8 transfer_flag); + int (*flash_component)(struct pldmfw *context, struct pldmfw_component *component); + int (*finalize_update)(struct pldmfw *context); +}; + +int pldmfw_flash_image(struct pldmfw *context, const struct firmware *fw); + +#endif diff --git a/include/linux/pm.h b/include/linux/pm.h index e057d1fa2469..a30a4b54df52 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -351,7 +351,7 @@ struct dev_pm_ops { * to RAM and hibernation. */ #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ -const struct dev_pm_ops name = { \ +const struct dev_pm_ops __maybe_unused name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ } @@ -369,11 +369,17 @@ const struct dev_pm_ops name = { \ * .runtime_resume(), respectively (and analogously for hibernation). */ #define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \ -const struct dev_pm_ops name = { \ +const struct dev_pm_ops __maybe_unused name = { \ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \ SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ } +#ifdef CONFIG_PM +#define pm_ptr(_ptr) (_ptr) +#else +#define pm_ptr(_ptr) NULL +#endif + /* * PM_EVENT_ messages * @@ -544,31 +550,17 @@ struct pm_subsys_data { * These flags can be set by device drivers at the probe time. They need not be * cleared by the drivers as the driver core will take care of that. * - * NEVER_SKIP: Do not skip all system suspend/resume callbacks for the device. - * SMART_PREPARE: Check the return value of the driver's ->prepare callback. - * SMART_SUSPEND: No need to resume the device from runtime suspend. - * LEAVE_SUSPENDED: Avoid resuming the device during system resume if possible. - * - * Setting SMART_PREPARE instructs bus types and PM domains which may want - * system suspend/resume callbacks to be skipped for the device to return 0 from - * their ->prepare callbacks if the driver's ->prepare callback returns 0 (in - * other words, the system suspend/resume callbacks can only be skipped for the - * device if its driver doesn't object against that). This flag has no effect - * if NEVER_SKIP is set. - * - * Setting SMART_SUSPEND instructs bus types and PM domains which may want to - * runtime resume the device upfront during system suspend that doing so is not - * necessary from the driver's perspective. 
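
One way the new pm_ptr() helper pairs with the now __maybe_unused-tagged ops macros, sketched with assumed driver names: when CONFIG_PM is disabled the dev_pm_ops object is discarded by the compiler and the .pm pointer becomes NULL, with no #ifdef needed in the driver:

/* Sketch: referencing dev_pm_ops through pm_ptr(); names illustrative. */
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= pm_ptr(&example_pm_ops),
	},
};
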
It also may cause them to skip - * invocations of the ->suspend_late and ->suspend_noirq callbacks provided by - * the driver if they decide to leave the device in runtime suspend. - * - * Setting LEAVE_SUSPENDED informs the PM core and middle-layer code that the - * driver prefers the device to be left in suspend after system resume. + * NO_DIRECT_COMPLETE: Do not apply direct-complete optimization to the device. + * SMART_PREPARE: Take the driver ->prepare callback return value into account. + * SMART_SUSPEND: Avoid resuming the device from runtime suspend. + * MAY_SKIP_RESUME: Allow driver "noirq" and "early" callbacks to be skipped. + * + * See Documentation/driver-api/pm/devices.rst for details. */ -#define DPM_FLAG_NEVER_SKIP BIT(0) +#define DPM_FLAG_NO_DIRECT_COMPLETE BIT(0) #define DPM_FLAG_SMART_PREPARE BIT(1) #define DPM_FLAG_SMART_SUSPEND BIT(2) -#define DPM_FLAG_LEAVE_SUSPENDED BIT(3) +#define DPM_FLAG_MAY_SKIP_RESUME BIT(3) struct dev_pm_info { pm_message_t power_state; @@ -758,8 +750,8 @@ extern int pm_generic_poweroff_late(struct device *dev); extern int pm_generic_poweroff(struct device *dev); extern void pm_generic_complete(struct device *dev); -extern bool dev_pm_may_skip_resume(struct device *dev); -extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); +extern bool dev_pm_skip_resume(struct device *dev); +extern bool dev_pm_skip_suspend(struct device *dev); #else /* !CONFIG_PM_SLEEP */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 9ec78ee53652..ee11502a575b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -95,8 +95,8 @@ struct generic_pm_domain { struct device dev; struct dev_pm_domain domain; /* PM domain operations */ struct list_head gpd_list_node; /* Node in the global PM domains list */ - struct list_head master_links; /* Links with PM domain as a master */ - struct list_head slave_links; /* Links with PM domain as a slave */ + struct list_head parent_links; /* Links with PM domain as a parent */ + struct list_head child_links; /* Links with PM domain as a child */ struct list_head dev_list; /* List of devices */ struct dev_power_governor *gov; struct work_struct power_off_work; @@ -151,10 +151,10 @@ static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) } struct gpd_link { - struct generic_pm_domain *master; - struct list_head master_node; - struct generic_pm_domain *slave; - struct list_head slave_node; + struct generic_pm_domain *parent; + struct list_head parent_node; + struct generic_pm_domain *child; + struct list_head child_node; /* Sub-domain's per-parent domain performance state */ unsigned int performance_state; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 747861816f4f..dbb484524f82 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -11,6 +11,7 @@ #ifndef __LINUX_OPP_H__ #define __LINUX_OPP_H__ +#include <linux/energy_model.h> #include <linux/err.h> #include <linux/notifier.h> @@ -42,6 +43,18 @@ struct dev_pm_opp_supply { }; /** + * struct dev_pm_opp_icc_bw - Interconnect bandwidth values + * @avg: Average bandwidth corresponding to this OPP (in icc units) + * @peak: Peak bandwidth corresponding to this OPP (in icc units) + * + * This structure stores the bandwidth values for a single interconnect path. 
+ */ +struct dev_pm_opp_icc_bw { + u32 avg; + u32 peak; +}; + +/** * struct dev_pm_opp_info - OPP freq/voltage/current values * @rate: Target clk rate in hz * @supplies: Array of voltage/current values for all power supplies @@ -139,6 +152,7 @@ struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names void dev_pm_opp_detach_genpd(struct opp_table *opp_table); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); void dev_pm_opp_remove_table(struct device *dev); @@ -330,6 +344,11 @@ static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_f return -ENOTSUPP; } +static inline int dev_pm_opp_set_bw(struct device *dev, struct dev_pm_opp *opp) +{ + return -EOPNOTSUPP; +} + static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) { return -ENOTSUPP; @@ -360,7 +379,12 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); -void dev_pm_opp_of_register_em(struct cpumask *cpus); +int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); +int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus); +static inline void dev_pm_opp_of_unregister_em(struct device *dev) +{ + em_dev_unregister_perf_domain(dev); +} #else static inline int dev_pm_opp_of_add_table(struct device *dev) { @@ -400,7 +424,13 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) return NULL; } -static inline void dev_pm_opp_of_register_em(struct cpumask *cpus) +static inline int dev_pm_opp_of_register_em(struct device *dev, + struct cpumask *cpus) +{ + return -ENOTSUPP; +} + +static inline void dev_pm_opp_of_unregister_em(struct device *dev) { } @@ -408,6 +438,11 @@ static inline int of_get_required_opp_performance_state(struct device_node *np, { return -ENOTSUPP; } + +static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table) +{ + return -ENOTSUPP; +} #endif #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 3bdcbce8141a..6245caa18034 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -60,58 +60,151 @@ extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device *dev); +/** + * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. + * @dev: Target device. + * + * Increment the runtime PM usage counter of @dev if its runtime PM status is + * %RPM_ACTIVE and its runtime PM usage counter is greater than 0. + */ static inline int pm_runtime_get_if_in_use(struct device *dev) { return pm_runtime_get_if_active(dev, false); } +/** + * pm_suspend_ignore_children - Set runtime PM behavior regarding children. + * @dev: Target device. + * @enable: Whether or not to ignore possible dependencies on children. 
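
As an illustration of the new OPP entry points, a devfreq-style driver might pick an OPP and program its interconnect bandwidth roughly like this (names assumed, error handling abbreviated):

/* Sketch: select an OPP for a target rate and apply its bandwidth. */
static int example_set_target(struct device *dev, unsigned long *freq)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_bw(dev, opp);	/* new in this release */
	dev_pm_opp_put(opp);
	return ret;
}
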
+ * + * The dependencies of @dev on its children will not be taken into account by + * the runtime PM framework going forward if @enable is %true, or they will + * be taken into account otherwise. + */ static inline void pm_suspend_ignore_children(struct device *dev, bool enable) { dev->power.ignore_children = enable; } +/** + * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device. + * @dev: Target device. + */ static inline void pm_runtime_get_noresume(struct device *dev) { atomic_inc(&dev->power.usage_count); } +/** + * pm_runtime_put_noidle - Drop runtime PM usage counter of a device. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev unless it is 0 already. + */ static inline void pm_runtime_put_noidle(struct device *dev) { atomic_add_unless(&dev->power.usage_count, -1, 0); } +/** + * pm_runtime_suspended - Check whether or not a device is runtime-suspended. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * %RPM_SUSPENDED, or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev and its runtime PM + * status cannot change. + */ static inline bool pm_runtime_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED && !dev->power.disable_depth; } +/** + * pm_runtime_active - Check whether or not a device is runtime-active. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev and its runtime PM status is + * %RPM_ACTIVE, or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev and its runtime PM + * status cannot change. + */ static inline bool pm_runtime_active(struct device *dev) { return dev->power.runtime_status == RPM_ACTIVE || dev->power.disable_depth; } +/** + * pm_runtime_status_suspended - Check if runtime PM status is "suspended". + * @dev: Target device. + * + * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false + * otherwise, regardless of whether or not runtime PM has been enabled for @dev. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which the + * runtime PM status of @dev cannot change. + */ static inline bool pm_runtime_status_suspended(struct device *dev) { return dev->power.runtime_status == RPM_SUSPENDED; } +/** + * pm_runtime_enabled - Check if runtime PM is enabled. + * @dev: Target device. + * + * Return %true if runtime PM is enabled for @dev or %false otherwise. + * + * Note that the return value of this function can only be trusted if it is + * called under the runtime PM lock of @dev or under conditions in which + * runtime PM cannot be either disabled or enabled for @dev. + */ static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; } -static inline bool pm_runtime_callbacks_present(struct device *dev) +/** + * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present. + * @dev: Target device. + * + * Return %true if @dev is a special device without runtime PM callbacks or + * %false otherwise. 
+ */ +static inline bool pm_runtime_has_no_callbacks(struct device *dev) { - return !dev->power.no_callbacks; + return dev->power.no_callbacks; } +/** + * pm_runtime_mark_last_busy - Update the last access time of a device. + * @dev: Target device. + * + * Update the last access time of @dev used by the runtime PM autosuspend + * mechanism to the current time as returned by ktime_get_mono_fast_ns(). + */ static inline void pm_runtime_mark_last_busy(struct device *dev) { WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); } +/** + * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context. + * @dev: Target device. + * + * Return %true if @dev has been marked as an "IRQ-safe" device (with respect + * to runtime PM), in which case its runtime PM callbacks can be expected to + * work correctly when invoked from interrupt handlers. + */ static inline bool pm_runtime_is_irq_safe(struct device *dev) { return dev->power.irq_safe; @@ -191,97 +284,250 @@ static inline void pm_runtime_drop_link(struct device *dev) {} #endif /* !CONFIG_PM */ +/** + * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it. + * @dev: Target device. + * + * Invoke the "idle check" callback of @dev and, depending on its return value, + * set up autosuspend of @dev or suspend it (depending on whether or not + * autosuspend has been enabled for it). + */ static inline int pm_runtime_idle(struct device *dev) { return __pm_runtime_idle(dev, 0); } +/** + * pm_runtime_suspend - Suspend a device synchronously. + * @dev: Target device. + */ static inline int pm_runtime_suspend(struct device *dev) { return __pm_runtime_suspend(dev, 0); } +/** + * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it. + * @dev: Target device. + * + * Set up autosuspend of @dev or suspend it (depending on whether or not + * autosuspend is enabled for it) without engaging its "idle check" callback. + */ static inline int pm_runtime_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_AUTO); } +/** + * pm_runtime_resume - Resume a device synchronously. + * @dev: Target device. + */ static inline int pm_runtime_resume(struct device *dev) { return __pm_runtime_resume(dev, 0); } +/** + * pm_request_idle - Queue up "idle check" execution for a device. + * @dev: Target device. + * + * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev + * asynchronously. + */ static inline int pm_request_idle(struct device *dev) { return __pm_runtime_idle(dev, RPM_ASYNC); } +/** + * pm_request_resume - Queue up runtime-resume of a device. + * @dev: Target device. + */ static inline int pm_request_resume(struct device *dev) { return __pm_runtime_resume(dev, RPM_ASYNC); } +/** + * pm_request_autosuspend - Queue up autosuspend of a device. + * @dev: Target device. + * + * Queue up a work item to run an equivalent pm_runtime_autosuspend() for @dev + * asynchronously. + */ static inline int pm_request_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); } +/** + * pm_runtime_get - Bump up usage counter and queue up resume of a device. + * @dev: Target device. + * + * Bump up the runtime PM usage counter of @dev and queue up a work item to + * carry out runtime-resume of it. + */ static inline int pm_runtime_get(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); } +/** + * pm_runtime_get_sync - Bump up usage counter of a device and resume it. + * @dev: Target device. 
+ * + * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of + * it synchronously. + * + * The possible return values of this function are the same as for + * pm_runtime_resume() and the runtime PM usage counter of @dev remains + * incremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_get_sync(struct device *dev) { return __pm_runtime_resume(dev, RPM_GET_PUT); } +/** + * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, queue up a work item for @dev like in pm_request_idle(). + */ static inline int pm_runtime_put(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); } +/** + * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, queue up a work item for @dev like in pm_request_autosuspend(). + */ static inline int pm_runtime_put_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); } +/** + * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, invoke the "idle check" callback of @dev and, depending on its + * return value, set up autosuspend of @dev or suspend it (depending on whether + * or not autosuspend has been enabled for it). + * + * The possible return values of this function are the same as for + * pm_runtime_idle() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_put_sync(struct device *dev) { return __pm_runtime_idle(dev, RPM_GET_PUT); } +/** + * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, carry out runtime-suspend of @dev synchronously. + * + * The possible return values of this function are the same as for + * pm_runtime_suspend() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_put_sync_suspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT); } +/** + * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0. + * @dev: Target device. + * + * Decrement the runtime PM usage counter of @dev and if it turns out to be + * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending + * on whether or not autosuspend has been enabled for it). + * + * The possible return values of this function are the same as for + * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains + * decremented in all cases, even if it returns an error code. + */ static inline int pm_runtime_put_sync_autosuspend(struct device *dev) { return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); } +/** + * pm_runtime_set_active - Set runtime PM status to "active". + * @dev: Target device. + * + * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies + * of it will be taken into account. + * + * It is not valid to call this function for devices with runtime PM enabled. 
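
The counter semantics spelled out in these comments imply the usual get/put pairing; a minimal sketch (hardware access elided):

/*
 * Sketch: pm_runtime_get_sync() leaves the usage counter incremented
 * even on failure, so the error path must still drop it.
 */
static int example_runtime_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... talk to the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}
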
+ */ static inline int pm_runtime_set_active(struct device *dev) { return __pm_runtime_set_status(dev, RPM_ACTIVE); } +/** + * pm_runtime_set_suspended - Set runtime PM status to "suspended". + * @dev: Target device. + * + * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that + * dependencies of it will be taken into account. + * + * It is not valid to call this function for devices with runtime PM enabled. + */ static inline int pm_runtime_set_suspended(struct device *dev) { return __pm_runtime_set_status(dev, RPM_SUSPENDED); } +/** + * pm_runtime_disable - Disable runtime PM for a device. + * @dev: Target device. + * + * Prevent the runtime PM framework from working with @dev (by incrementing its + * "blocking" counter). + * + * For each invocation of this function for @dev there must be a matching + * pm_runtime_enable() call in order for runtime PM to be enabled for it. + */ static inline void pm_runtime_disable(struct device *dev) { __pm_runtime_disable(dev, true); } +/** + * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device. + * @dev: Target device. + * + * Allow the runtime PM autosuspend mechanism to be used for @dev whenever + * requested (or "autosuspend" will be handled as direct runtime-suspend for + * it). + */ static inline void pm_runtime_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, true); } +/** + * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used. + * @dev: Target device. + * + * Prevent the runtime PM autosuspend mechanism from being used for @dev which + * means that "autosuspend" will be handled as direct runtime-suspend for it + * going forward. + */ static inline void pm_runtime_dont_use_autosuspend(struct device *dev) { __pm_runtime_use_autosuspend(dev, false); diff --git a/include/linux/poison.h b/include/linux/poison.h index df34330b4e34..dc8ae5d8db03 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h @@ -24,10 +24,6 @@ #define LIST_POISON2 ((void *) 0x122 + POISON_POINTER_DELTA) /********** include/linux/timer.h **********/ -/* - * Magic number "tsta" to indicate a static timer initializer - * for the object debugging code. 
- */ #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) /********** mm/page_poison.c **********/ diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index e3f0f8585da4..896c16d2c5fb 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -6,6 +6,7 @@ #include <linux/list.h> #include <linux/alarmtimer.h> #include <linux/timerqueue.h> +#include <linux/task_work.h> struct kernel_siginfo; struct task_struct; @@ -125,6 +126,16 @@ struct posix_cputimers { unsigned int expiry_active; }; +/** + * posix_cputimers_work - Container for task work based posix CPU timer expiry + * @work: The task work to be scheduled + * @scheduled: @work has been scheduled already, no further processing + */ +struct posix_cputimers_work { + struct callback_head work; + unsigned int scheduled; +}; + static inline void posix_cputimers_init(struct posix_cputimers *pct) { memset(pct, 0, sizeof(*pct)); @@ -165,6 +176,12 @@ static inline void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) { } #endif +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK +void posix_cputimers_init_work(void); +#else +static inline void posix_cputimers_init_work(void) { } +#endif + #define REQUEUE_PENDING 1 /** diff --git a/include/linux/power/bq2415x_charger.h b/include/linux/power/bq2415x_charger.h index 4ca08321e251..f3c267f2a467 100644 --- a/include/linux/power/bq2415x_charger.h +++ b/include/linux/power/bq2415x_charger.h @@ -14,8 +14,8 @@ * value is -1 then default chip value (specified in datasheet) will be * used. * - * Value resistor_sense is needed for for configuring charge and - * termination current. It it is less or equal to zero, configuring charge + * Value resistor_sense is needed for configuring charge and + * termination current. If it is less or equal to zero, configuring charge * and termination current will not be possible. * * For automode support is needed to provide name of power supply device diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 507c5e214c42..987d9652aa4e 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -30,6 +30,8 @@ enum bq27xxx_chip { BQ27426, BQ27441, BQ27621, + BQ27Z561, + BQ28Z610, }; struct bq27xxx_device_info; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index dcd5a71e6c67..97cc4b85bf61 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -48,6 +48,7 @@ enum { POWER_SUPPLY_CHARGE_TYPE_STANDARD, /* normal speed */ POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE, /* dynamically adjusted speed */ POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */ + POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */ }; enum { @@ -61,6 +62,10 @@ enum { POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE, POWER_SUPPLY_HEALTH_OVERCURRENT, + POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED, + POWER_SUPPLY_HEALTH_WARM, + POWER_SUPPLY_HEALTH_COOL, + POWER_SUPPLY_HEALTH_HOT, }; enum { @@ -139,6 +144,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CAPACITY, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, /* in percents! */ POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! */ + POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN, /* in percents! 
*/ POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, @@ -158,6 +164,9 @@ enum power_supply_property { POWER_SUPPLY_PROP_PRECHARGE_CURRENT, POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, POWER_SUPPLY_PROP_CALIBRATE, + POWER_SUPPLY_PROP_MANUFACTURE_YEAR, + POWER_SUPPLY_PROP_MANUFACTURE_MONTH, + POWER_SUPPLY_PROP_MANUFACTURE_DAY, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, @@ -223,9 +232,9 @@ struct power_supply_config { struct power_supply_desc { const char *name; enum power_supply_type type; - enum power_supply_usb_type *usb_types; + const enum power_supply_usb_type *usb_types; size_t num_usb_types; - enum power_supply_property *properties; + const enum power_supply_property *properties; size_t num_properties; /* @@ -346,8 +355,12 @@ struct power_supply_battery_info { int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ int voltage_max_design_uv; /* microVolts */ + int tricklecharge_current_ua; /* microAmps */ int precharge_current_ua; /* microAmps */ + int precharge_voltage_max_uv; /* microVolts */ int charge_term_current_ua; /* microAmps */ + int charge_restart_voltage_uv; /* microVolts */ + int overvoltage_limit_uv; /* microVolts */ int constant_charge_current_max_ua; /* microAmps */ int constant_charge_voltage_max_uv; /* microVolts */ int factory_internal_resistance_uohm; /* microOhms */ diff --git a/include/linux/powercap.h b/include/linux/powercap.h index 4537f57f9e42..3d557bbcd2c7 100644 --- a/include/linux/powercap.h +++ b/include/linux/powercap.h @@ -44,19 +44,18 @@ struct powercap_control_type_ops { }; /** - * struct powercap_control_type- Defines a powercap control_type - * @name: name of control_type + * struct powercap_control_type - Defines a powercap control_type * @dev: device for this control_type * @idr: idr to have unique id for its child - * @root_node: Root holding power zones for this control_type + * @nr_zones: counter for number of zones of this type * @ops: Pointer to callback struct - * @node_lock: mutex for control type + * @lock: mutex for control type * @allocated: This is possible that client owns the memory * used by this structure. In this case * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. - * @ctrl_inst: link to the control_type list + * @node: linked-list node * * Defines powercap control_type. This acts as a container for power * zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc. @@ -129,7 +128,7 @@ struct powercap_zone_ops { * this flag is set to false by framework to * prevent deallocation during release process. * Otherwise this flag is set to true. - * @constraint_ptr: List of constraints for this zone. + * @constraints: List of constraints for this zone. * * This defines a power zone instance. The fields of this structure are * private, and should not be used by client drivers. diff --git a/include/linux/prandom.h b/include/linux/prandom.h new file mode 100644 index 000000000000..aa16e6468f91 --- /dev/null +++ b/include/linux/prandom.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/linux/prandom.h + * + * Include file for the fast pseudo-random 32-bit + * generation. 
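[Editor's example] Constifying power_supply_desc::properties (and usb_types) lets drivers keep their property tables in rodata. A minimal sketch of a descriptor built that way, assuming a hypothetical foo battery driver; the get_property callback is elided:

#include <linux/kernel.h>
#include <linux/power_supply.h>

static const enum power_supply_property foo_battery_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_CAPACITY_ERROR_MARGIN,
};

static const struct power_supply_desc foo_battery_desc = {
	.name		= "foo-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= foo_battery_props,
	.num_properties	= ARRAY_SIZE(foo_battery_props),
	/* .get_property = foo_get_property, */
};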
+ */ +#ifndef _LINUX_PRANDOM_H +#define _LINUX_PRANDOM_H + +#include <linux/types.h> +#include <linux/percpu.h> + +u32 prandom_u32(void); +void prandom_bytes(void *buf, size_t nbytes); +void prandom_seed(u32 seed); +void prandom_reseed_late(void); + +struct rnd_state { + __u32 s1, s2, s3, s4; +}; + +DECLARE_PER_CPU(struct rnd_state, net_rand_state); + +u32 prandom_u32_state(struct rnd_state *state); +void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); +void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); + +#define prandom_init_once(pcpu_state) \ + DO_ONCE(prandom_seed_full_state, (pcpu_state)) + +/** + * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) + * @ep_ro: right open interval endpoint + * + * Returns a pseudo-random number that is in interval [0, ep_ro). Note + * that the result depends on PRNG being well distributed in [0, ~0U] + * u32 space. Here we use maximally equidistributed combined Tausworthe + * generator, that is, prandom_u32(). This is useful when requesting a + * random index of an array containing ep_ro elements, for example. + * + * Returns: pseudo-random number in interval [0, ep_ro) + */ +static inline u32 prandom_u32_max(u32 ep_ro) +{ + return (u32)(((u64) prandom_u32() * ep_ro) >> 32); +} + +/* + * Handle minimum values for seeds + */ +static inline u32 __seed(u32 x, u32 m) +{ + return (x < m) ? x + m : x; +} + +/** + * prandom_seed_state - set seed for prandom_u32_state(). + * @state: pointer to state structure to receive the seed. + * @seed: arbitrary 64-bit value to use as a seed. + */ +static inline void prandom_seed_state(struct rnd_state *state, u64 seed) +{ + u32 i = (seed >> 32) ^ (seed << 10) ^ seed; + + state->s1 = __seed(i, 2U); + state->s2 = __seed(i, 8U); + state->s3 = __seed(i, 16U); + state->s4 = __seed(i, 128U); +} + +/* Pseudo random number generator from numerical recipes. */ +static inline u32 next_pseudo_random32(u32 seed) +{ + return seed * 1664525 + 1013904223; +} + +#endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h index bc3f1aecaa19..7d9c1c0e149c 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -26,13 +26,13 @@ * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 * HARDIRQ_MASK: 0x000f0000 - * NMI_MASK: 0x00100000 + * NMI_MASK: 0x00f00000 * PREEMPT_NEED_RESCHED: 0x80000000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 #define HARDIRQ_BITS 4 -#define NMI_BITS 1 +#define NMI_BITS 4 #define PREEMPT_SHIFT 0 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) diff --git a/include/linux/printk.h b/include/linux/printk.h index e061635e0409..34c1a7be3e01 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -7,6 +7,7 @@ #include <linux/kern_levels.h> #include <linux/linkage.h> #include <linux/cache.h> +#include <linux/ratelimit_types.h> extern const char linux_banner[]; extern const char linux_proc_banner[]; @@ -189,7 +190,7 @@ extern int printk_delay_msec; extern int dmesg_restrict; extern int -devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf, +devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf, size_t *lenp, loff_t *ppos); extern void wake_up_klogd(void); @@ -279,39 +280,116 @@ static inline void printk_safe_flush_on_panic(void) extern int kptr_restrict; +/** + * pr_fmt - used by the pr_*() macros to generate the printk format string + * @fmt: format string passed from a pr_*() macro + * + * This macro can be used to generate a unified format string for pr_*() + * macros. 
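[Editor's example] prandom_u32_max() above avoids the modulo bias and the division of prandom_u32() % n by keeping only the high 32 bits of a 64-bit product. A usage sketch picking a uniform random array index; the foo_ helper is hypothetical:

#include <linux/kernel.h>
#include <linux/prandom.h>

static const char *const foo_greetings[] = {
	"hello", "hi", "hey",
};

static const char *foo_pick_greeting(void)
{
	/* Maps prandom_u32() into [0, ARRAY_SIZE) without division. */
	return foo_greetings[prandom_u32_max(ARRAY_SIZE(foo_greetings))];
}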
A common use is to prefix all pr_*() messages in a file with a common + * string. For example, defining this at the top of a source file: + * + * #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + * + * would prefix all pr_info, pr_emerg... messages in the file with the module + * name. + */ #ifndef pr_fmt #define pr_fmt(fmt) fmt #endif -/* - * These can be used to print at the various log levels. - * All of these will print unconditionally, although note that pr_debug() - * and other debug macros are compiled out unless either DEBUG is defined - * or CONFIG_DYNAMIC_DEBUG is set. +/** + * pr_emerg - Print an emergency-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_EMERG loglevel. It uses pr_fmt() to + * generate the format string. */ #define pr_emerg(fmt, ...) \ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_alert - Print an alert-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_ALERT loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_alert(fmt, ...) \ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_crit - Print a critical-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_CRIT loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_crit(fmt, ...) \ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_err - Print an error-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_ERR loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_err(fmt, ...) \ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_warn - Print a warning-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_WARNING loglevel. It uses pr_fmt() + * to generate the format string. + */ #define pr_warn(fmt, ...) \ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_notice - Print a notice-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_NOTICE loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_notice(fmt, ...) \ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) +/** + * pr_info - Print an info-level message + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_INFO loglevel. It uses pr_fmt() to + * generate the format string. + */ #define pr_info(fmt, ...) \ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -/* - * Like KERN_CONT, pr_cont() should only be used when continuing - * a line with no newline ('\n') enclosed. Otherwise it defaults - * back to KERN_DEFAULT. + +/** + * pr_cont - Continues a previous log message in the same line. + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_CONT loglevel. It should only be + * used when continuing a log message with no newline ('\n') enclosed. Otherwise + * it defaults back to KERN_DEFAULT loglevel. */ #define pr_cont(fmt, ...) 
\ printk(KERN_CONT fmt, ##__VA_ARGS__) -/* pr_devel() should produce zero code unless DEBUG is defined */ +/** + * pr_devel - Print a debug-level message conditionally + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to a printk with KERN_DEBUG loglevel if DEBUG is + * defined. Otherwise it does nothing. + * + * It uses pr_fmt() to generate the format string. + */ #ifdef DEBUG #define pr_devel(fmt, ...) \ printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) @@ -322,11 +400,23 @@ extern int kptr_restrict; /* If you are writing a driver, please use dev_dbg instead */ -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #include <linux/dynamic_debug.h> -/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ -#define pr_debug(fmt, ...) \ +/** + * pr_debug - Print a debug-level message conditionally + * @fmt: format string + * @...: arguments for the format string + * + * This macro expands to dynamic_pr_debug() if CONFIG_DYNAMIC_DEBUG is + * set. Otherwise, if DEBUG is defined, it's equivalent to a printk with + * KERN_DEBUG loglevel. If DEBUG is not defined it does nothing. + * + * It uses pr_fmt() to generate the format string (dynamic_pr_debug() uses + * pr_fmt() internally). + */ +#define pr_debug(fmt, ...) \ dynamic_pr_debug(fmt, ##__VA_ARGS__) #elif defined(DEBUG) #define pr_debug(fmt, ...) \ @@ -384,8 +474,7 @@ extern int kptr_restrict; printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) #define pr_info_once(fmt, ...) \ printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) -#define pr_cont_once(fmt, ...) \ - printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) +/* no pr_cont_once, don't do that... */ #if defined(DEBUG) #define pr_devel_once(fmt, ...) \ @@ -448,7 +537,8 @@ extern int kptr_restrict; #endif /* If you are writing a driver, please use dev_dbg instead */ -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* descriptor check is first to prevent flooding with "callbacks suppressed" */ #define pr_debug_ratelimited(fmt, ...) 
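[Editor's example] Taken together, pr_fmt() and the pr_*() wrappers give per-file message prefixes with no changes at the call sites. A usage sketch, assuming a hypothetical module "foo"; pr_debug() compiles out or routes through dynamic debug exactly as described above:

/* Must come before any printk-related include in the source file. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/printk.h>

static int __init foo_init(void)
{
	pr_info("loaded\n");		/* logs e.g. "foo: loaded" */
	pr_debug("debug path taken\n");	/* no-op unless DEBUG/dyndbg */
	return 0;
}
module_init(foo_init);
MODULE_LICENSE("GPL");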
\ do { \ @@ -495,7 +585,8 @@ static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type, #endif -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) #define print_hex_dump_debug(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 45c05fd9c99d..2df965cd0974 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -42,6 +42,34 @@ struct proc_ops { unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); } __randomize_layout; +/* definitions for hide_pid field */ +enum proc_hidepid { + HIDEPID_OFF = 0, + HIDEPID_NO_ACCESS = 1, + HIDEPID_INVISIBLE = 2, + HIDEPID_NOT_PTRACEABLE = 4, /* Limit pids to only ptraceable pids */ +}; + +/* definitions for proc mount option pidonly */ +enum proc_pidonly { + PROC_PIDONLY_OFF = 0, + PROC_PIDONLY_ON = 1, +}; + +struct proc_fs_info { + struct pid_namespace *pid_ns; + struct dentry *proc_self; /* For /proc/self */ + struct dentry *proc_thread_self; /* For /proc/thread-self */ + kgid_t pid_gid; + enum proc_hidepid hide_pid; + enum proc_pidonly pidonly; +}; + +static inline struct proc_fs_info *proc_sb_info(struct super_block *sb) +{ + return sb->s_fs_info; +} + #ifdef CONFIG_PROC_FS typedef int (*proc_write_t)(struct file *, char *, size_t); @@ -105,6 +133,10 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo void *data); extern struct pid *tgid_pidfd_to_pid(const struct file *file); +struct bpf_iter_aux_info; +extern int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux); +extern void bpf_iter_fini_seq_net(void *priv_data); + #ifdef CONFIG_PROC_PID_ARCH_STATUS /* * The architecture which selects CONFIG_PROC_PID_ARCH_STATUS must @@ -174,9 +206,11 @@ int open_related_ns(struct ns_common *ns, struct ns_common *(*get_ns)(struct ns_common *ns)); /* get the associated pid namespace for a file in procfs */ -static inline struct pid_namespace *proc_pid_ns(const struct inode *inode) +static inline struct pid_namespace *proc_pid_ns(struct super_block *sb) { - return inode->i_sb->s_fs_info; + return proc_sb_info(sb)->pid_ns; } +bool proc_ns_file(const struct file *file); + #endif /* _LINUX_PROC_FS_H */ diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 6abe85c34681..75807ecef880 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h @@ -8,7 +8,7 @@ #include <linux/ns_common.h> struct pid_namespace; -struct nsproxy; +struct nsset; struct path; struct task_struct; struct inode; @@ -19,7 +19,7 @@ struct proc_ns_operations { int type; struct ns_common *(*get)(struct task_struct *task); void (*put)(struct ns_common *ns); - int (*install)(struct nsproxy *nsproxy, struct ns_common *ns); + int (*install)(struct nsset *nsset, struct ns_common *ns); struct user_namespace *(*owner)(struct ns_common *ns); struct ns_common *(*get_parent)(struct ns_common *ns); } __randomize_layout; diff --git a/include/linux/property.h b/include/linux/property.h index d86de017c689..9f805c442819 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -389,6 +389,11 @@ struct fwnode_handle * fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, u32 endpoint); +static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode) +{ + return 
fwnode_property_present(fwnode, "remote-endpoint"); +} + /* * Fwnode lookup flags * @@ -440,7 +445,11 @@ software_node_find_by_name(const struct software_node *parent, int software_node_register_nodes(const struct software_node *nodes); void software_node_unregister_nodes(const struct software_node *nodes); +int software_node_register_node_group(const struct software_node **node_group); +void software_node_unregister_node_group(const struct software_node **node_group); + int software_node_register(const struct software_node *node); +void software_node_unregister(const struct software_node *node); int software_node_notify(struct device *dev, unsigned long action); diff --git a/include/linux/psci.h b/include/linux/psci.h index a67712b73b6c..14ad9b9ebcd6 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -21,11 +21,6 @@ bool psci_power_state_is_valid(u32 state); int psci_set_osi_mode(void); bool psci_has_osi_support(void); -enum smccc_version { - SMCCC_VERSION_1_0, - SMCCC_VERSION_1_1, -}; - struct psci_operations { u32 (*get_version)(void); int (*cpu_suspend)(u32 state, unsigned long entry_point); @@ -35,8 +30,6 @@ struct psci_operations { int (*affinity_info)(unsigned long target_affinity, unsigned long lowest_affinity_level); int (*migrate_info_type)(void); - enum arm_smccc_conduit conduit; - enum smccc_version smccc_version; }; extern struct psci_operations psci_ops; diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h index 4b7258495a04..b95f3211566a 100644 --- a/include/linux/psi_types.h +++ b/include/linux/psi_types.h @@ -153,9 +153,10 @@ struct psi_group { unsigned long avg[NR_PSI_STATES - 1][3]; /* Monitor work control */ - atomic_t poll_scheduled; - struct kthread_worker __rcu *poll_kworker; - struct kthread_delayed_work poll_work; + struct task_struct __rcu *poll_task; + struct timer_list poll_timer; + wait_queue_head_t poll_wait; + atomic_t poll_wakeup; /* Protects data used by the monitor */ struct mutex trigger_lock; diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 5167bf2bfc75..49d155cd2dfe 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -100,6 +100,8 @@ struct sev_data_init { u32 tmr_len; /* In */ } __packed; +#define SEV_INIT_FLAGS_SEV_ES 0x01 + /** * struct sev_data_pek_csr - PEK_CSR command parameters * @@ -595,7 +597,7 @@ int sev_guest_df_flush(int *error); */ int sev_guest_decommission(struct sev_data_decommission *data, int *error); -void *psp_copy_user_blob(u64 __user uaddr, u32 len); +void *psp_copy_user_blob(u64 uaddr, u32 len); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ diff --git a/include/linux/pstore.h b/include/linux/pstore.h index e779441e6d26..eb93a54cff31 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -96,6 +96,12 @@ struct pstore_record { * * @read_mutex: serializes @open, @read, @close, and @erase callbacks * @flags: bitfield of frontends the backend can accept writes for + * @max_reason: Used when PSTORE_FLAGS_DMESG is set. Contains the + * kmsg_dump_reason enum value. KMSG_DUMP_UNDEF means + * "use existing kmsg_dump() filtering, based on the + * printk.always_kmsg_dump boot param" (which is either + * KMSG_DUMP_OOPS when false, or KMSG_DUMP_MAX when + * true); see printk.always_kmsg_dump for more details. 
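[Editor's example] software_node_register_node_group() takes a NULL-terminated array of node pointers and registers them in array order, so parents must precede children. A minimal sketch, assuming hypothetical foo nodes:

#include <linux/property.h>

static const struct software_node foo_parent_node = {
	.name = "foo-parent",
};

static const struct software_node foo_child_node = {
	.name	= "foo-child",
	.parent	= &foo_parent_node,
};

static const struct software_node *foo_node_group[] = {
	&foo_parent_node,
	&foo_child_node,
	NULL
};

static int foo_register_nodes(void)
{
	/* Registers nodes in array order; parent comes first. */
	return software_node_register_node_group(foo_node_group);
}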
 * @data: backend-private pointer passed back during callbacks * * Callbacks: @@ -170,7 +176,7 @@ struct pstore_record { */ struct pstore_info { struct module *owner; - char *name; + const char *name; struct semaphore buf_lock; char *buf; @@ -179,6 +185,7 @@ struct pstore_record { struct mutex read_mutex; int flags; + int max_reason; void *data; int (*open)(struct pstore_info *psi); diff --git a/include/linux/pstore_blk.h b/include/linux/pstore_blk.h new file mode 100644 index 000000000000..61e914522b01 --- /dev/null +++ b/include/linux/pstore_blk.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PSTORE_BLK_H_ +#define __PSTORE_BLK_H_ + +#include <linux/types.h> +#include <linux/pstore.h> +#include <linux/pstore_zone.h> + +/** + * typedef pstore_blk_panic_write_op - panic write operation to block device + * + * @buf: the data to write + * @start_sect: start sector to block device + * @sects: sectors count on buf + * + * Return: On success, zero should be returned. Others excluding -ENOMSG + * mean error. -ENOMSG means to try next zone. + * + * Panic write to block device must be aligned to SECTOR_SIZE. + */ +typedef int (*pstore_blk_panic_write_op)(const char *buf, sector_t start_sect, + sector_t sects); + +/** + * struct pstore_blk_info - pstore/blk registration details + * + * @major: Which major device number to support with pstore/blk + * @flags: The supported PSTORE_FLAGS_* from linux/pstore.h. + * @panic_write:The write operation only used for the panic case. + * This can be NULL, but is recommended to avoid losing + * crash data if the kernel's IO path or work queues are + * broken during a panic. + * @devt: The dev_t that pstore/blk has attached to. + * @nr_sects: Number of sectors on @devt. + * @start_sect: Starting sector on @devt. + */ +struct pstore_blk_info { + unsigned int major; + unsigned int flags; + pstore_blk_panic_write_op panic_write; + + /* Filled in by pstore/blk after registration. */ + dev_t devt; + sector_t nr_sects; + sector_t start_sect; +}; + +int register_pstore_blk(struct pstore_blk_info *info); +void unregister_pstore_blk(unsigned int major); + +/** + * struct pstore_device_info - back-end pstore/blk driver structure. + * + * @total_size: The total size in bytes pstore/blk can use. It must be greater + * than 4096 and be multiple of 4096. + * @flags: Refer to macro starting with PSTORE_FLAGS defined in + * linux/pstore.h. It means which front-ends this device supports. + * Zero means all front-ends, for compatibility. + * @read: The general read operation. Both of the function parameters + * @size and @offset are relative value to block device (not the + * whole disk). + * On success, the number of bytes should be returned, others + * mean error. + * @write: The same as @read, but the following error number: + * -EBUSY means try to write again later. + * -ENOMSG means to try next zone. + * @erase: The general erase operation for device with special removing + * job. Both of the function parameters @size and @offset are + * relative value to storage. + * Return 0 on success and others on failure. + * @panic_write:The write operation only used for panic case. It's optional + * if you do not care panic log. The parameters are relative + * value to storage. + * On success, the number of bytes should be returned, others + * excluding -ENOMSG mean error. -ENOMSG means to try next zone.
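[Editor's example] A minimal sketch of a block driver hooking into pstore/blk, assuming a hypothetical major number and a hypothetical foo_panic_write_sectors() that bypasses the normal I/O path, as the @panic_write kerneldoc recommends:

#include <linux/pstore_blk.h>

/* Hypothetical: polls the controller directly, safe in panic context. */
extern int foo_panic_write_sectors(const char *buf, sector_t start_sect,
				   sector_t sects);

static struct pstore_blk_info foo_pstore_info = {
	.major	     = 242,			/* hypothetical major */
	.flags	     = PSTORE_FLAGS_DMESG,
	.panic_write = foo_panic_write_sectors,
};

static int foo_pstore_register(void)
{
	/* pstore/blk fills in devt/nr_sects/start_sect on success. */
	return register_pstore_blk(&foo_pstore_info);
}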
+ */ +struct pstore_device_info { + unsigned long total_size; + unsigned int flags; + pstore_zone_read_op read; + pstore_zone_write_op write; + pstore_zone_erase_op erase; + pstore_zone_write_op panic_write; +}; + +int register_pstore_device(struct pstore_device_info *dev); +void unregister_pstore_device(struct pstore_device_info *dev); + +/** + * struct pstore_blk_config - the pstore_blk backend configuration + * + * @device: Name of the desired block device + * @max_reason: Maximum kmsg dump reason to store to block device + * @kmsg_size: Total size for kmsg dumps + * @pmsg_size: Total size of the pmsg storage area + * @console_size: Total size of the console storage area + * @ftrace_size: Total size for ftrace logging data (for all CPUs) + */ +struct pstore_blk_config { + char device[80]; + enum kmsg_dump_reason max_reason; + unsigned long kmsg_size; + unsigned long pmsg_size; + unsigned long console_size; + unsigned long ftrace_size; +}; + +/** + * pstore_blk_get_config - get a copy of the pstore_blk backend configuration + * + * @info: The struct pstore_blk_config to be filled in + * + * Failure returns negative error code, and success returns 0. + */ +int pstore_blk_get_config(struct pstore_blk_config *info); + +#endif diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 9cb9b9067298..9f16afec7290 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -133,7 +133,7 @@ struct ramoops_platform_data { unsigned long console_size; unsigned long ftrace_size; unsigned long pmsg_size; - int dump_oops; + int max_reason; u32 flags; struct persistent_ram_ecc_info ecc_info; }; diff --git a/include/linux/pstore_zone.h b/include/linux/pstore_zone.h new file mode 100644 index 000000000000..1e35eaa33e5e --- /dev/null +++ b/include/linux/pstore_zone.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __PSTORE_ZONE_H_ +#define __PSTORE_ZONE_H_ + +#include <linux/types.h> + +typedef ssize_t (*pstore_zone_read_op)(char *, size_t, loff_t); +typedef ssize_t (*pstore_zone_write_op)(const char *, size_t, loff_t); +typedef ssize_t (*pstore_zone_erase_op)(size_t, loff_t); +/** + * struct pstore_zone_info - pstore/zone back-end driver structure + * + * @owner: Module which is responsible for this back-end driver. + * @name: Name of the back-end driver. + * @total_size: The total size in bytes pstore/zone can use. It must be greater + * than 4096 and be multiple of 4096. + * @kmsg_size: The size of oops/panic zone. Zero means disabled, otherwise, + * it must be multiple of SECTOR_SIZE(512 Bytes). + * @max_reason: Maximum kmsg dump reason to store. + * @pmsg_size: The size of pmsg zone which is the same as @kmsg_size. + * @console_size:The size of console zone which is the same as @kmsg_size. + * @ftrace_size:The size of ftrace zone which is the same as @kmsg_size. + * @read: The general read operation. Both of the function parameters + * @size and @offset are relative value to storage. + * On success, the number of bytes should be returned, others + * mean error. + * @write: The same as @read, but the following error number: + * -EBUSY means try to write again later. + * -ENOMSG means to try next zone. + * @erase: The general erase operation for device with special removing + * job. Both of the function parameters @size and @offset are + * relative value to storage. + * Return 0 on success and others on failure. + * @panic_write:The write operation only used for panic case. It's optional + * if you do not care panic log.
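[Editor's example] pstore_blk_get_config() lets a backend sanity-check the user-configured zone sizes against the attached device. A rough capacity-check sketch under that assumption; the exact zone layout is up to pstore/zone, and foo_check_capacity() is hypothetical:

#include <linux/blkdev.h>
#include <linux/pstore_blk.h>

static int foo_check_capacity(sector_t nr_sects)
{
	struct pstore_blk_config cfg;
	unsigned long need;
	int ret;

	ret = pstore_blk_get_config(&cfg);
	if (ret)
		return ret;

	/* Crude lower bound: the device must hold all configured zones. */
	need = cfg.kmsg_size + cfg.pmsg_size + cfg.console_size +
	       cfg.ftrace_size;
	if ((u64)nr_sects << SECTOR_SHIFT < need)
		return -ENOSPC;

	return 0;
}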
The parameters are relative + * value to storage. + * On success, the number of bytes should be returned, others + * excluding -ENOMSG mean error. -ENOMSG means to try next zone. + */ +struct pstore_zone_info { + struct module *owner; + const char *name; + + unsigned long total_size; + unsigned long kmsg_size; + int max_reason; + unsigned long pmsg_size; + unsigned long console_size; + unsigned long ftrace_size; + pstore_zone_read_op read; + pstore_zone_write_op write; + pstore_zone_erase_op erase; + pstore_zone_write_op panic_write; +}; + +extern int register_pstore_zone(struct pstore_zone_info *info); +extern void unregister_pstore_zone(struct pstore_zone_info *info); + +#endif diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h index a67065c403c3..2a3a95586425 100644 --- a/include/linux/ptdump.h +++ b/include/linux/ptdump.h @@ -13,7 +13,8 @@ struct ptdump_range { struct ptdump_state { /* level is 0:PGD to 4:PTE, or -1 if unknown */ void (*note_page)(struct ptdump_state *st, unsigned long addr, - int level, unsigned long val); + int level, u64 val); + void (*effective_prot)(struct ptdump_state *st, int level, u64 val); const struct ptdump_range *range; }; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index c602670bbffb..d3e8ba5c7125 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -36,7 +36,7 @@ struct ptp_system_timestamp { }; /** - * struct ptp_clock_info - decribes a PTP hardware clock + * struct ptp_clock_info - describes a PTP hardware clock * * @owner: The clock driver should set to THIS_MODULE. * @name: A short "friendly name" to identify the clock and to @@ -65,6 +65,9 @@ struct ptp_system_timestamp { * parameter delta: Desired frequency offset from nominal frequency * in parts per billion * + * @adjphase: Adjusts the phase offset of the hardware clock. + * parameter delta: Desired change in nanoseconds. + * * @adjtime: Shifts the time of the hardware clock. * parameter delta: Desired change in nanoseconds. * @@ -128,6 +131,7 @@ struct ptp_clock_info { struct ptp_pin_desc *pin_config; int (*adjfine)(struct ptp_clock_info *ptp, long scaled_ppm); int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); + int (*adjphase)(struct ptp_clock_info *ptp, s32 phase); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts, diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 417db0a79a62..808f9d3ee546 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -107,7 +107,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) return -ENOSPC; /* Make sure the pointer we are storing points to a valid data. */ - /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ + /* Pairs with the dependency ordering in __ptr_ring_consume. */ smp_wmb(); WRITE_ONCE(r->queue[r->producer++], ptr); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 2635b2a55090..a13ff383fa1d 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -39,7 +39,7 @@ enum pwm_polarity { * current PWM hardware state. 
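[Editor's example] A minimal sketch of registering directly with pstore/zone, assuming an in-memory foo_storage buffer standing in for real media; the sizes follow the constraints in the kerneldoc (total a multiple of 4096, kmsg_size a multiple of SECTOR_SIZE):

#include <linux/kmsg_dump.h>
#include <linux/module.h>
#include <linux/pstore_zone.h>
#include <linux/string.h>

static char foo_storage[64 * 1024];

static ssize_t foo_zone_read(char *buf, size_t bytes, loff_t pos)
{
	/* pstore/zone is expected to stay within @total_size. */
	memcpy(buf, foo_storage + pos, bytes);
	return bytes;
}

static ssize_t foo_zone_write(const char *buf, size_t bytes, loff_t pos)
{
	memcpy(foo_storage + pos, buf, bytes);
	return bytes;
}

static struct pstore_zone_info foo_zone_info = {
	.owner		= THIS_MODULE,
	.name		= "foo",
	.total_size	= sizeof(foo_storage),	/* multiple of 4096 */
	.kmsg_size	= 16 * 1024,		/* multiple of SECTOR_SIZE */
	.max_reason	= KMSG_DUMP_OOPS,
	.read		= foo_zone_read,
	.write		= foo_zone_write,
};

static int __init foo_init(void)
{
	return register_pstore_zone(&foo_zone_info);
}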
*/ struct pwm_args { - unsigned int period; + u64 period; enum pwm_polarity polarity; }; @@ -56,8 +56,8 @@ enum { * @enabled: PWM enabled status */ struct pwm_state { - unsigned int period; - unsigned int duty_cycle; + u64 period; + u64 duty_cycle; enum pwm_polarity polarity; bool enabled; }; @@ -107,13 +107,13 @@ static inline bool pwm_is_enabled(const struct pwm_device *pwm) return state.enabled; } -static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period) +static inline void pwm_set_period(struct pwm_device *pwm, u64 period) { if (pwm) pwm->state.period = period; } -static inline unsigned int pwm_get_period(const struct pwm_device *pwm) +static inline u64 pwm_get_period(const struct pwm_device *pwm) { struct pwm_state state; @@ -128,7 +128,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty) pwm->state.duty_cycle = duty; } -static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm) +static inline u64 pwm_get_duty_cycle(const struct pwm_device *pwm) { struct pwm_state state; diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index dd464943f717..8f385fbe5a0e 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -6,6 +6,8 @@ #ifndef _LINUX_QCOM_GENI_SE #define _LINUX_QCOM_GENI_SE +#include <linux/interconnect.h> + /* Transfer mode supported by GENI Serial Engines */ enum geni_se_xfer_mode { GENI_SE_INVALID, @@ -25,6 +27,17 @@ enum geni_se_protocol_type { struct geni_wrapper; struct clk; +enum geni_icc_path_index { + GENI_TO_CORE, + CPU_TO_GENI, + GENI_TO_DDR +}; + +struct geni_icc_path { + struct icc_path *path; + unsigned int avg_bw; +}; + /** * struct geni_se - GENI Serial Engine * @base: Base Address of the Serial Engine's register block @@ -33,6 +46,9 @@ struct clk; * @clk: Handle to the core serial engine clock * @num_clk_levels: Number of valid clock levels in clk_perf_tbl * @clk_perf_tbl: Table of clock frequency input to serial engine clock + * @icc_paths: Array of ICC paths for SE + * @opp_table: Pointer to the OPP table + * @has_opp_table: Specifies if the SE has an OPP table */ struct geni_se { void __iomem *base; @@ -41,6 +57,9 @@ struct geni_se { struct clk *clk; unsigned int num_clk_levels; unsigned long *clk_perf_tbl; + struct geni_icc_path icc_paths[3]; + struct opp_table *opp_table; + bool has_opp_table; }; /* Common SE registers */ @@ -229,6 +248,21 @@ struct geni_se { #define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT) #define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK) +/* + * Define bandwidth thresholds that cause the underlying Core 2X interconnect + * clock to run at the named frequency. These baseline values are recommended + * by the hardware team, and are not dynamically scaled with GENI bandwidth + * beyond basic on/off. 
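[Editor's example] Widening pwm_state to u64 makes periods longer than U32_MAX nanoseconds (about 4.29 s) representable. A sketch applying a 10-second period; foo_set_slow_blink() is hypothetical:

#include <linux/pwm.h>
#include <linux/time64.h>

static int foo_set_slow_blink(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);

	/* A 10 s period would overflow the old 32-bit nanosecond fields. */
	state.period = 10ULL * NSEC_PER_SEC;
	state.duty_cycle = 5ULL * NSEC_PER_SEC;	/* 50% */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}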
+ */ +#define CORE_2X_19_2_MHZ 960 +#define CORE_2X_50_MHZ 2500 +#define CORE_2X_100_MHZ 5000 +#define CORE_2X_150_MHZ 7500 +#define CORE_2X_200_MHZ 10000 +#define CORE_2X_236_MHZ 16383 + +#define GENI_DEFAULT_BW Bps_to_icc(1000) + #if IS_ENABLED(CONFIG_QCOM_GENI_SE) u32 geni_se_get_qup_hw_version(struct geni_se *se); @@ -416,5 +450,16 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len, void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len); + +int geni_icc_get(struct geni_se *se, const char *icc_ddr); + +int geni_icc_set_bw(struct geni_se *se); +void geni_icc_set_tag(struct geni_se *se, u32 tag); + +int geni_icc_enable(struct geni_se *se); + +int geni_icc_disable(struct geni_se *se); + +void geni_remove_earlycon_icc_vote(void); #endif #endif diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 3d6a24697761..2e1193a3fb5f 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -44,6 +44,13 @@ enum qcom_scm_sec_dev_id { QCOM_SCM_ICE_DEV_ID = 20, }; +enum qcom_scm_ice_cipher { + QCOM_SCM_ICE_CIPHER_AES_128_XTS = 0, + QCOM_SCM_ICE_CIPHER_AES_128_CBC = 1, + QCOM_SCM_ICE_CIPHER_AES_256_XTS = 3, + QCOM_SCM_ICE_CIPHER_AES_256_CBC = 4, +}; + #define QCOM_SCM_VMID_HLOS 0x3 #define QCOM_SCM_VMID_MSS_MSA 0xF #define QCOM_SCM_VMID_WLAN 0x18 @@ -88,6 +95,12 @@ extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size); +extern bool qcom_scm_ice_available(void); +extern int qcom_scm_ice_invalidate_key(u32 index); +extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, + u32 data_unit_size); + extern bool qcom_scm_hdcp_available(void); extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); @@ -138,6 +151,12 @@ static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size) { return -ENODEV; } +static inline bool qcom_scm_ice_available(void) { return false; } +static inline int qcom_scm_ice_invalidate_key(u32 index) { return -ENODEV; } +static inline int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size, + enum qcom_scm_ice_cipher cipher, + u32 data_unit_size) { return -ENODEV; } + static inline bool qcom_scm_hdcp_available(void) { return false; } static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) { return -ENODEV; } diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 2c4737e6694a..977807e1be53 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2016 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
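[Editor's example] A sketch of programming an inline-crypto key slot through the new SCM interface, assuming a hypothetical foo_ wrapper; the data-unit-size argument is assumed here to be expressed in 512-byte blocks (8 for a 4 KiB data unit), which this header does not spell out:

#include <linux/errno.h>
#include <linux/qcom_scm.h>

static int foo_program_ice_key(u32 slot, const u8 *key, u32 key_size)
{
	if (!qcom_scm_ice_available())
		return -ENODEV;

	/* Assumption: AES-256-XTS, 4 KiB data unit in 512-byte blocks. */
	return qcom_scm_ice_set_key(slot, key, key_size,
				    QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
}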
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _COMMON_HSI_H diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 95f5fd615852..cd1207ad4ada 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ETH_COMMON__ diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 98cfc195abe2..68eda1c21cde 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h @@ -1,6 +1,7 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef __FCOE_COMMON__ diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 2f0a771a9176..157019f716f1 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ISCSI_COMMON__ diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index c6cfd39cd910..14f9e4c0ddd9 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. 
 */ #ifndef __IWARP_COMMON__ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 733fad7dfbed..4d58dc8943f0 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_CHAIN_H @@ -37,6 +11,7 @@ #include <asm/byteorder.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/qed/common_hsi.h> @@ -52,9 +27,9 @@ enum qed_chain_mode { }; enum qed_chain_use_mode { - QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ - QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ - QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + QED_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + QED_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ }; enum qed_chain_cnt_type { @@ -66,191 +41,230 @@ enum qed_chain_cnt_type { }; struct qed_chain_next { - struct regpair next_phys; - void *next_virt; + struct regpair next_phys; + void *next_virt; }; struct qed_chain_pbl_u16 { - u16 prod_page_idx; - u16 cons_page_idx; + u16 prod_page_idx; + u16 cons_page_idx; }; struct qed_chain_pbl_u32 { - u32 prod_page_idx; - u32 cons_page_idx; -}; - -struct qed_chain_ext_pbl { - dma_addr_t p_pbl_phys; - void *p_pbl_virt; + u32 prod_page_idx; + u32 cons_page_idx; }; struct qed_chain_u16 { /* Cyclic index of next element to produce/consume */ - u16 prod_idx; - u16 cons_idx; + u16 prod_idx; + u16 cons_idx; }; struct qed_chain_u32 { /* Cyclic index of next element to produce/consume */ - u32 prod_idx; - u32 cons_idx; + u32 prod_idx; + u32 cons_idx; }; struct addr_tbl_entry { - void *virt_addr; - dma_addr_t dma_map; + void *virt_addr; + dma_addr_t dma_map; }; struct qed_chain { - /* fastpath portion of the chain - required for commands such + /* Fastpath portion of the chain - required for commands such * as produce / consume.
*/ + /* Point to next element to produce/consume */ - void *p_prod_elem; - void *p_cons_elem; + void *p_prod_elem; + void *p_cons_elem; /* Fastpath portions of the PBL [if exists] */ + struct { /* Table for keeping the virtual and physical addresses of the * chain pages, respectively to the physical addresses * in the pbl table. */ - struct addr_tbl_entry *pp_addr_tbl; + struct addr_tbl_entry *pp_addr_tbl; union { - struct qed_chain_pbl_u16 u16; - struct qed_chain_pbl_u32 u32; - } c; - } pbl; + struct qed_chain_pbl_u16 u16; + struct qed_chain_pbl_u32 u32; + } c; + } pbl; union { - struct qed_chain_u16 chain16; - struct qed_chain_u32 chain32; - } u; + struct qed_chain_u16 chain16; + struct qed_chain_u32 chain32; + } u; /* Capacity counts only usable elements */ - u32 capacity; - u32 page_cnt; + u32 capacity; + u32 page_cnt; - enum qed_chain_mode mode; + enum qed_chain_mode mode; /* Elements information for fast calculations */ - u16 elem_per_page; - u16 elem_per_page_mask; - u16 elem_size; - u16 next_page_mask; - u16 usable_per_page; - u8 elem_unusable; + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_size; + u16 next_page_mask; + u16 usable_per_page; + u8 elem_unusable; - u8 cnt_type; + enum qed_chain_cnt_type cnt_type; /* Slowpath of the chain - required for initialization and destruction, * but isn't involved in regular functionality. */ + u32 page_size; + /* Base address of a pre-allocated buffer for pbl */ struct { - dma_addr_t p_phys_table; - void *p_virt_table; - } pbl_sp; + __le64 *table_virt; + dma_addr_t table_phys; + size_t table_size; + } pbl_sp; /* Address of first page of the chain - the address is required - * for fastpath operation [consume/produce] but only for the the SINGLE + * for fastpath operation [consume/produce] but only for the SINGLE * flavour which isn't considered fastpath [== SPQ]. */ - void *p_virt_addr; - dma_addr_t p_phys_addr; + void *p_virt_addr; + dma_addr_t p_phys_addr; /* Total number of elements [for entire chain] */ - u32 size; + u32 size; + + enum qed_chain_use_mode intended_use; + + bool b_external_pbl; +}; - u8 intended_use; +struct qed_chain_init_params { + enum qed_chain_mode mode; + enum qed_chain_use_mode intended_use; + enum qed_chain_cnt_type cnt_type; - bool b_external_pbl; + u32 page_size; + u32 num_elems; + size_t elem_size; + + void *ext_pbl_virt; + dma_addr_t ext_pbl_phys; }; -#define QED_CHAIN_PBL_ENTRY_SIZE (8) -#define QED_CHAIN_PAGE_SIZE (0x1000) -#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) +#define QED_CHAIN_PAGE_SIZE SZ_4K + +#define ELEMS_PER_PAGE(elem_size, page_size) \ + ((page_size) / (elem_size)) -#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ - (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \ - (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \ - (elem_size))) : 0) +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? 
\ + (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) : \ + 0) -#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ - ((u32)(ELEMS_PER_PAGE(elem_size) - \ - UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) +#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode) \ + ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) - \ + UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode)))) -#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ - DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) +#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode) \ + DIV_ROUND_UP((elem_cnt), \ + USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode))) -#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) -#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) +#define is_chain_u16(p) \ + ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) \ + ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32) /* Accessors */ -static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain) + +static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain) { - return p_chain->u.chain16.prod_idx; + return chain->u.chain16.prod_idx; } -static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain) +static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain) { - return p_chain->u.chain16.cons_idx; + return chain->u.chain16.cons_idx; } -static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain) +static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain) { - return p_chain->u.chain32.cons_idx; + return chain->u.chain32.prod_idx; } -static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) +static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain) { + return chain->u.chain32.cons_idx; +} + +static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain) +{ + u32 prod = qed_chain_get_prod_idx(chain); + u32 cons = qed_chain_get_cons_idx(chain); + u16 elem_per_page = chain->elem_per_page; u16 used; - used = (u16) (((u32)0x10000 + - (u32)p_chain->u.chain16.prod_idx) - - (u32)p_chain->u.chain16.cons_idx); - if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - - p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + if (prod < cons) + prod += (u32)U16_MAX + 1; + + used = (u16)(prod - cons); + if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= (u16)(prod / elem_per_page - cons / elem_per_page); + + return used; +} - return (u16)(p_chain->capacity - used); +static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain) +{ + return (u16)(chain->capacity - qed_chain_get_elem_used(chain)); } -static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain) +static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain) { + u64 prod = qed_chain_get_prod_idx_u32(chain); + u64 cons = qed_chain_get_cons_idx_u32(chain); + u16 elem_per_page = chain->elem_per_page; u32 used; - used = (u32) (((u64)0x100000000ULL + - (u64)p_chain->u.chain32.prod_idx) - - (u64)p_chain->u.chain32.cons_idx); - if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) - used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - - p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + if (prod < cons) + prod += (u64)U32_MAX + 1; + + used = (u32)(prod - cons); + if (chain->mode == QED_CHAIN_MODE_NEXT_PTR) + used -= (u32)(prod / elem_per_page - cons / elem_per_page); + + return used; +} - return p_chain->capacity - used; +static inline u32 
qed_chain_get_elem_left_u32(const struct qed_chain *chain) +{ + return chain->capacity - qed_chain_get_elem_used_u32(chain); } -static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) +static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain) { - return p_chain->usable_per_page; + return chain->usable_per_page; } -static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) +static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain) { - return p_chain->elem_unusable; + return chain->elem_unusable; } -static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) +static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain) { - return p_chain->page_cnt; + return chain->page_cnt; } -static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) +static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) { - return p_chain->pbl_sp.p_phys_table; + return chain->pbl_sp.table_phys; } /** @@ -505,118 +519,6 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) } /** - * @brief qed_chain_init - Initalizes a basic chain struct - * - * @param p_chain - * @param p_virt_addr - * @param p_phys_addr physical address of allocated buffer's beginning - * @param page_cnt number of pages in the allocated buffer - * @param elem_size size of each element in the chain - * @param intended_use - * @param mode - */ -static inline void qed_chain_init_params(struct qed_chain *p_chain, - u32 page_cnt, - u8 elem_size, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type) -{ - /* chain fixed parameters */ - p_chain->p_virt_addr = NULL; - p_chain->p_phys_addr = 0; - p_chain->elem_size = elem_size; - p_chain->intended_use = (u8)intended_use; - p_chain->mode = mode; - p_chain->cnt_type = (u8)cnt_type; - - p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); - p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; - p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); - p_chain->next_page_mask = (p_chain->usable_per_page & - p_chain->elem_per_page_mask); - - p_chain->page_cnt = page_cnt; - p_chain->capacity = p_chain->usable_per_page * page_cnt; - p_chain->size = p_chain->elem_per_page * page_cnt; - - p_chain->pbl_sp.p_phys_table = 0; - p_chain->pbl_sp.p_virt_table = NULL; - p_chain->pbl.pp_addr_tbl = NULL; -} - -/** - * @brief qed_chain_init_mem - - * - * Initalizes a basic chain struct with its chain buffers - * - * @param p_chain - * @param p_virt_addr virtual address of allocated buffer's beginning - * @param p_phys_addr physical address of allocated buffer's beginning - * - */ -static inline void qed_chain_init_mem(struct qed_chain *p_chain, - void *p_virt_addr, dma_addr_t p_phys_addr) -{ - p_chain->p_virt_addr = p_virt_addr; - p_chain->p_phys_addr = p_phys_addr; -} - -/** - * @brief qed_chain_init_pbl_mem - - * - * Initalizes a basic chain struct with its pbl buffers - * - * @param p_chain - * @param p_virt_pbl pointer to a pre allocated side table which will hold - * virtual page addresses. - * @param p_phys_pbl pointer to a pre-allocated side table which will hold - * physical page addresses. - * @param pp_virt_addr_tbl - * pointer to a pre-allocated side table which will hold - * the virtual addresses of the chain pages. 
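[Editor's example] The used-element accessors above handle index wraparound by widening the indices and re-adding the index range. A standalone illustration of the same arithmetic with hypothetical foo naming: for prod_idx = 2 and cons_idx = 65530, prod has wrapped, so 0x10000 is added before subtracting, giving 8 elements in flight:

#include <linux/kernel.h>

static inline u16 foo_ring_used(u16 prod_idx, u16 cons_idx)
{
	u32 prod = prod_idx;
	u32 cons = cons_idx;

	/* The producer wrapped past the 16-bit boundary; widen it back. */
	if (prod < cons)
		prod += (u32)U16_MAX + 1;

	return (u16)(prod - cons);
}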
- * - */ -static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, - void *p_virt_pbl, - dma_addr_t p_phys_pbl, - struct addr_tbl_entry *pp_addr_tbl) -{ - p_chain->pbl_sp.p_phys_table = p_phys_pbl; - p_chain->pbl_sp.p_virt_table = p_virt_pbl; - p_chain->pbl.pp_addr_tbl = pp_addr_tbl; -} - -/** - * @brief qed_chain_init_next_ptr_elem - - * - * Initalizes a next pointer element - * - * @param p_chain - * @param p_virt_curr virtual address of a chain page of which the next - * pointer element is initialized - * @param p_virt_next virtual address of the next chain page - * @param p_phys_next physical address of the next chain page - * - */ -static inline void -qed_chain_init_next_ptr_elem(struct qed_chain *p_chain, - void *p_virt_curr, - void *p_virt_next, dma_addr_t p_phys_next) -{ - struct qed_chain_next *p_next; - u32 size; - - size = p_chain->elem_size * p_chain->usable_per_page; - p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size); - - DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); - - p_next->next_virt = p_virt_next; -} - -/** * @brief qed_chain_get_last_elem - * * Returns a pointer to the last element of the chain @@ -723,7 +625,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) for (i = 0; i < page_cnt; i++) memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0, - QED_CHAIN_PAGE_SIZE); + p_chain->page_size); } #endif diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index a1310482c4ed..812a4d751163 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_ETH_IF_H diff --git a/include/linux/qed/qed_fcoe_if.h b/include/linux/qed/qed_fcoe_if.h index 46082480a2c3..16752eca5cbd 100644 --- a/include/linux/qed/qed_fcoe_if.h +++ b/include/linux/qed/qed_fcoe_if.h @@ -1,4 +1,6 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2019-2020 Marvell International Ltd. 
*/ + #ifndef _QED_FCOE_IF_H #define _QED_FCOE_IF_H #include <linux/types.h> diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 8f29e0d8a7b3..cdd73afc4c46 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. 
*/ #ifndef _QED_IF_H @@ -524,7 +498,7 @@ struct qed_fcoe_pf_params { u8 bdq_pbl_num_entries[2]; }; -/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ +/* Most of the parameters below are described in the FW iSCSI / TCP HSI */ struct qed_iscsi_pf_params { u64 glbl_q_params_addr; u64 bdq_pbl_base_addr[3]; @@ -607,9 +581,20 @@ struct qed_sb_info { struct qed_dev *cdev; }; +enum qed_hw_err_type { + QED_HW_ERR_FAN_FAIL, + QED_HW_ERR_MFW_RESP_FAIL, + QED_HW_ERR_HW_ATTN, + QED_HW_ERR_DMAE_FAIL, + QED_HW_ERR_RAMROD_FAIL, + QED_HW_ERR_FW_ASSERT, + QED_HW_ERR_LAST, +}; + enum qed_dev_type { QED_DEV_TYPE_BB, QED_DEV_TYPE_AH, + QED_DEV_TYPE_E5, }; struct qed_dev_info { @@ -638,6 +623,7 @@ struct qed_dev_info { #define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; + bool b_arfs_capable; bool b_inter_pf_switch; bool tx_switching; bool rdma_supported; @@ -677,87 +663,72 @@ enum qed_protocol { QED_PROTOCOL_FCOE, }; -enum qed_link_mode_bits { - QED_LM_FIBRE_BIT = BIT(0), - QED_LM_Autoneg_BIT = BIT(1), - QED_LM_Asym_Pause_BIT = BIT(2), - QED_LM_Pause_BIT = BIT(3), - QED_LM_1000baseT_Full_BIT = BIT(4), - QED_LM_10000baseT_Full_BIT = BIT(5), - QED_LM_10000baseKR_Full_BIT = BIT(6), - QED_LM_20000baseKR2_Full_BIT = BIT(7), - QED_LM_25000baseKR_Full_BIT = BIT(8), - QED_LM_40000baseLR4_Full_BIT = BIT(9), - QED_LM_50000baseKR2_Full_BIT = BIT(10), - QED_LM_100000baseKR4_Full_BIT = BIT(11), - QED_LM_TP_BIT = BIT(12), - QED_LM_Backplane_BIT = BIT(13), - QED_LM_1000baseKX_Full_BIT = BIT(14), - QED_LM_10000baseKX4_Full_BIT = BIT(15), - QED_LM_10000baseR_FEC_BIT = BIT(16), - QED_LM_40000baseKR4_Full_BIT = BIT(17), - QED_LM_40000baseCR4_Full_BIT = BIT(18), - QED_LM_40000baseSR4_Full_BIT = BIT(19), - QED_LM_25000baseCR_Full_BIT = BIT(20), - QED_LM_25000baseSR_Full_BIT = BIT(21), - QED_LM_50000baseCR2_Full_BIT = BIT(22), - QED_LM_100000baseSR4_Full_BIT = BIT(23), - QED_LM_100000baseCR4_Full_BIT = BIT(24), - QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25), - QED_LM_50000baseSR2_Full_BIT = BIT(26), - QED_LM_1000baseX_Full_BIT = BIT(27), - QED_LM_10000baseCR_Full_BIT = BIT(28), - QED_LM_10000baseSR_Full_BIT = BIT(29), - QED_LM_10000baseLR_Full_BIT = BIT(30), - QED_LM_10000baseLRM_Full_BIT = BIT(31), - QED_LM_COUNT = 32 +enum qed_fec_mode { + QED_FEC_MODE_NONE = BIT(0), + QED_FEC_MODE_FIRECODE = BIT(1), + QED_FEC_MODE_RS = BIT(2), + QED_FEC_MODE_AUTO = BIT(3), + QED_FEC_MODE_UNSUPPORTED = BIT(4), }; struct qed_link_params { - bool link_up; - -#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) -#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) -#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) -#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) -#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) -#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) - u32 override_flags; - bool autoneg; - u32 adv_speeds; - u32 forced_speed; -#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) -#define QED_LINK_PAUSE_RX_ENABLE BIT(1) -#define QED_LINK_PAUSE_TX_ENABLE BIT(2) - u32 pause_config; -#define QED_LINK_LOOPBACK_NONE BIT(0) -#define QED_LINK_LOOPBACK_INT_PHY BIT(1) -#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) -#define QED_LINK_LOOPBACK_EXT BIT(3) -#define QED_LINK_LOOPBACK_MAC BIT(4) - u32 loopback_mode; - struct qed_link_eee_params eee; + bool link_up; + + u32 override_flags; +#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) +#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) +#define 
QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) +#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6) + + bool autoneg; + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds); + u32 forced_speed; + + u32 pause_config; +#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0) +#define QED_LINK_PAUSE_RX_ENABLE BIT(1) +#define QED_LINK_PAUSE_TX_ENABLE BIT(2) + + u32 loopback_mode; +#define QED_LINK_LOOPBACK_NONE BIT(0) +#define QED_LINK_LOOPBACK_INT_PHY BIT(1) +#define QED_LINK_LOOPBACK_EXT_PHY BIT(2) +#define QED_LINK_LOOPBACK_EXT BIT(3) +#define QED_LINK_LOOPBACK_MAC BIT(4) +#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5) +#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6) +#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7) +#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8) +#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9) + + struct qed_link_eee_params eee; + u32 fec; }; struct qed_link_output { - bool link_up; + bool link_up; - /* In QED_LM_* defs */ - u32 supported_caps; - u32 advertised_caps; - u32 lp_caps; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps); - u32 speed; /* In Mb/s */ - u8 duplex; /* In DUPLEX defs */ - u8 port; /* In PORT defs */ - bool autoneg; - u32 pause_config; + u32 speed; /* In Mb/s */ + u8 duplex; /* In DUPLEX defs */ + u8 port; /* In PORT defs */ + bool autoneg; + u32 pause_config; /* EEE - capability & param */ - bool eee_supported; - bool eee_active; - u8 sup_caps; - struct qed_link_eee_params eee; + bool eee_supported; + bool eee_active; + u8 sup_caps; + struct qed_link_eee_params eee; + + u32 sup_fec; + u32 active_fec; }; struct qed_probe_params { @@ -811,12 +782,14 @@ enum qed_nvm_flash_cmd { struct qed_common_cb_ops { void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); - void (*link_update)(void *dev, - struct qed_link_output *link); + void (*link_update)(void *dev, struct qed_link_output *link); void (*schedule_recovery_handler)(void *dev); - void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); + void (*schedule_hw_err_handler)(void *dev, + enum qed_hw_err_type err_type); + void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); void (*get_protocol_tlv_data)(void *dev, void *data); + void (*bw_update)(void *dev); }; struct qed_selftest_ops { @@ -976,13 +949,8 @@ struct qed_common_ops { u8 dp_level); int (*chain_alloc)(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - enum qed_chain_cnt_type cnt_type, - u32 num_elems, - size_t elem_size, - struct qed_chain *p_chain, - struct qed_chain_ext_pbl *ext_pbl); + struct qed_chain *chain, + struct qed_chain_init_params *params); void (*chain_free)(struct qed_dev *cdev, struct qed_chain *p_chain); @@ -1034,6 +1002,15 @@ struct qed_common_ops { */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); + +/** + * @brief attn_clr_enable - Prevent attentions from being reasserted + * + * @param cdev + * @param clr_enable + */ + void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable); + /** * @brief db_recovery_add - add doorbell information to the doorbell * recovery mechanism. 
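/*
 * Editorial sketch: chain_alloc() now takes a parameter structure instead
 * of five discrete arguments. The field names below assume that struct
 * qed_chain_init_params (not shown in this hunk) mirrors the old argument
 * list and that the enum values keep their existing names; "ops", "cdev",
 * "chain", "num_elems" and "my_elem" are hypothetical.
 */
struct qed_chain_init_params params = {
	.mode		= QED_CHAIN_MODE_PBL,
	.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
	.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
	.num_elems	= num_elems,
	.elem_size	= sizeof(struct my_elem),
};
int rc = ops->common->chain_alloc(cdev, &chain, &params);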
@@ -1408,16 +1385,15 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, u8 upd_flg) { - struct igu_prod_cons_update igu_ack = { 0 }; + u32 igu_ack; - igu_ack.sb_id_and_flags = - ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | - (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | - (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | - (IGU_SEG_ACCESS_REG << - IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); - DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags); + DIRECT_REG_WR(sb_info->igu_addr, igu_ack); /* Both segments (interrupts & acks) are written to same place address; * Need to guarantee all commands will be received (in-order) by HW. diff --git a/include/linux/qed/qed_iov_if.h b/include/linux/qed/qed_iov_if.h index ac2e6a3199a3..8e31a28e51b9 100644 --- a/include/linux/qed/qed_iov_if.h +++ b/include/linux/qed/qed_iov_if.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_IOV_IF_H diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index d0df1bec5357..04180d9af560 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_ISCSI_IF_H diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 1313c34d9a68..2f64ed79cee9 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_LL2_IF_H diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index 74efca15fde7..f464d85e88a4 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -1,34 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ + #ifndef _QED_RDMA_IF_H #define _QED_RDMA_IF_H #include <linux/types.h> @@ -53,6 +28,13 @@ enum qed_roce_qp_state { QED_ROCE_QP_STATE_SQE }; +enum qed_rdma_qp_type { + QED_RDMA_QP_TYPE_RC, + QED_RDMA_QP_TYPE_XRC_INI, + QED_RDMA_QP_TYPE_XRC_TGT, + QED_RDMA_QP_TYPE_INVAL = 0xffff, +}; + enum qed_rdma_tid_type { QED_RDMA_TID_REGISTERED_MR, QED_RDMA_TID_FMR, @@ -91,7 +73,6 @@ struct qed_rdma_device { u64 max_mr_size; u32 max_cqe; u32 max_mw; - u32 max_fmr; u32 max_mr_mw_fmr_pbl; u64 max_mr_mw_fmr_size; u32 max_pd; @@ -291,6 +272,12 @@ struct qed_rdma_create_srq_in_params { u16 num_pages; u16 pd_id; u16 page_size; + + /* XRC related only */ + bool reserved_key_en; + bool is_xrc; + u32 cq_cid; + u16 xrcd_id; }; struct qed_rdma_destroy_cq_in_params { @@ -319,7 +306,12 @@ struct qed_rdma_create_qp_in_params { u16 rq_num_pages; u64 rq_pbl_ptr; u16 srq_id; + u16 xrcd_id; u8 stats_queue; + enum qed_rdma_qp_type qp_type; + u8 flags; +#define QED_ROCE_EDPM_MODE_MASK 0x1 +#define QED_ROCE_EDPM_MODE_SHIFT 0 }; struct qed_rdma_create_qp_out_params { @@ -429,11 +421,13 @@ struct qed_rdma_create_srq_out_params { struct qed_rdma_destroy_srq_in_params { u16 srq_id; + bool is_xrc; }; struct qed_rdma_modify_srq_in_params { u32 wqe_limit; u16 srq_id; + bool is_xrc; }; struct qed_rdma_stats_out_params { @@ -611,6 +605,8 @@ struct qed_rdma_ops { int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt); int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd); void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd); + int (*rdma_alloc_xrcd)(void *rdma_cxt, u16 *xrcd); + void (*rdma_dealloc_xrcd)(void *rdma_cxt, u16 xrcd); int (*rdma_create_cq)(void *rdma_cxt, struct qed_rdma_create_cq_in_params *params, u16 *icid); diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 5a00c7a473bf..072da2f6da37 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -1,34 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qedr NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
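/*
 * Editorial sketch: XRC support threads an XRC domain id and an is_xrc
 * flag through the SRQ parameter blocks above, with the domain coming
 * from the new rdma_alloc_xrcd() op. Variable names ("ops", "rdma_cxt",
 * "cq_cid", "pd_id") are hypothetical.
 */
u16 xrcd_id;
int rc = ops->rdma_alloc_xrcd(rdma_cxt, &xrcd_id);

if (!rc) {
	struct qed_rdma_create_srq_in_params in = {
		.is_xrc		= true,
		.xrcd_id	= xrcd_id,
		.cq_cid		= cq_cid,
		.pd_id		= pd_id,
	};
	/* ... create the SRQ with &in; destroy it later with .is_xrc = true */
}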
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ + #ifndef QEDE_ROCE_H #define QEDE_ROCE_H diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index 480a57eb36cc..bab078b25834 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __RDMA_COMMON__ diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index 473fba76aa77..ccddd7a96b67 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __ROCE_COMMON__ diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 9a973ffbbff5..91896e8793bf 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __STORAGE_COMMON__ diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index 4a4845193539..2b2c87d10e0a 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h @@ -1,33 +1,7 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef __TCP_COMMON__ diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 63e62372443a..c2a9f7c90727 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -16,11 +16,20 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/xarray.h> +#include <linux/local_lock.h> /* Keep unconverted code working */ #define radix_tree_root xarray #define radix_tree_node xa_node +struct radix_tree_preload { + local_lock_t lock; + unsigned nr; + /* nodes->parent points to next preallocated node */ + struct radix_tree_node *nodes; +}; +DECLARE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); + /* * The bottom two bits of the slot determine how the remaining bits in the * slot are interpreted: @@ -245,7 +254,7 @@ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag); static inline void radix_tree_preload_end(void) { - preempt_enable(); + local_unlock(&radix_tree_preloads.lock); } void __rcu **idr_get_free(struct radix_tree_root *root, diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h index 37dd3f40cd31..1f029a71c3ef 100644 --- a/include/linux/raid/detect.h +++ b/include/linux/raid/detect.h @@ -1,3 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ void md_autodetect_dev(dev_t dev); + +#ifdef CONFIG_BLK_DEV_MD +void md_run_setup(void); +#else +static inline void md_run_setup(void) +{ +} +#endif diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h deleted file mode 100644 index 8dfec085a20e..000000000000 --- a/include/linux/raid/md_u.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - md_u.h : user <=> kernel API between Linux raidtools and RAID drivers - Copyright (C) 1998 Ingo Molnar - -*/ -#ifndef _MD_U_H -#define _MD_U_H - -#include <uapi/linux/raid/md_u.h> - -extern int mdp_major; -#endif diff --git a/include/linux/random.h b/include/linux/random.h index 45e1f8fa742b..f45b8be3e3c4 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -110,61 +110,12 @@ declare_get_random_var_wait(long) unsigned long randomize_page(unsigned long start, unsigned long range); -u32 prandom_u32(void); -void prandom_bytes(void *buf, size_t nbytes); -void prandom_seed(u32 seed); -void 
prandom_reseed_late(void); - -struct rnd_state { - __u32 s1, s2, s3, s4; -}; - -u32 prandom_u32_state(struct rnd_state *state); -void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes); -void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); - -#define prandom_init_once(pcpu_state) \ - DO_ONCE(prandom_seed_full_state, (pcpu_state)) - -/** - * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) - * @ep_ro: right open interval endpoint - * - * Returns a pseudo-random number that is in interval [0, ep_ro). Note - * that the result depends on PRNG being well distributed in [0, ~0U] - * u32 space. Here we use maximally equidistributed combined Tausworthe - * generator, that is, prandom_u32(). This is useful when requesting a - * random index of an array containing ep_ro elements, for example. - * - * Returns: pseudo-random number in interval [0, ep_ro) - */ -static inline u32 prandom_u32_max(u32 ep_ro) -{ - return (u32)(((u64) prandom_u32() * ep_ro) >> 32); -} - /* - * Handle minimum values for seeds + * This is designed to be standalone for just prandom + * users, but for now we include it from <linux/random.h> + * for legacy reasons. */ -static inline u32 __seed(u32 x, u32 m) -{ - return (x < m) ? x + m : x; -} - -/** - * prandom_seed_state - set seed for prandom_u32_state(). - * @state: pointer to state structure to receive the seed. - * @seed: arbitrary 64-bit value to use as a seed. - */ -static inline void prandom_seed_state(struct rnd_state *state, u64 seed) -{ - u32 i = (seed >> 32) ^ (seed << 10) ^ seed; - - state->s1 = __seed(i, 2U); - state->s2 = __seed(i, 8U); - state->s3 = __seed(i, 16U); - state->s4 = __seed(i, 128U); -} +#include <linux/prandom.h> #ifdef CONFIG_ARCH_RANDOM # include <asm/archrandom.h> @@ -207,10 +158,4 @@ static inline bool __init arch_get_random_long_early(unsigned long *v) } #endif -/* Pseudo random number generator from numerical recipes. 
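/*
 * Editorial note: the pseudo-random helpers removed above now live in
 * <linux/prandom.h>, which random.h keeps including for legacy users.
 * Callers are unchanged; a bounded index still reads as below, with
 * "array_len" hypothetical.
 */
#include <linux/prandom.h>

u32 idx = prandom_u32_max(array_len);	/* in [0, array_len) */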
*/ -static inline u32 next_pseudo_random32(u32 seed) -{ - return seed * 1664525 + 1013904223; -} - #endif /* _LINUX_RANDOM_H */ diff --git a/include/linux/ras.h b/include/linux/ras.h index 7c3debb47c87..1f4048bf2674 100644 --- a/include/linux/ras.h +++ b/include/linux/ras.h @@ -17,12 +17,7 @@ static inline int ras_add_daemon_trace(void) { return 0; } #endif #ifdef CONFIG_RAS_CEC -void __init cec_init(void); int __init parse_cec_param(char *str); -int cec_add_elem(u64 pfn); -#else -static inline void __init cec_init(void) { } -static inline int cec_add_elem(u64 pfn) { return -ENODEV; } #endif #ifdef CONFIG_RAS diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 8ddf79e9207a..b17e0cd0a30c 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h @@ -2,41 +2,10 @@ #ifndef _LINUX_RATELIMIT_H #define _LINUX_RATELIMIT_H -#include <linux/param.h> +#include <linux/ratelimit_types.h> #include <linux/sched.h> #include <linux/spinlock.h> -#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) -#define DEFAULT_RATELIMIT_BURST 10 - -/* issue num suppressed message on exit */ -#define RATELIMIT_MSG_ON_RELEASE BIT(0) - -struct ratelimit_state { - raw_spinlock_t lock; /* protect the state */ - - int interval; - int burst; - int printed; - int missed; - unsigned long begin; - unsigned long flags; -}; - -#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ - .interval = interval_init, \ - .burst = burst_init, \ - } - -#define RATELIMIT_STATE_INIT_DISABLED \ - RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) - -#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ - \ - struct ratelimit_state name = \ - RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ - static inline void ratelimit_state_init(struct ratelimit_state *rs, int interval, int burst) { @@ -73,9 +42,6 @@ ratelimit_set_flags(struct ratelimit_state *rs, unsigned long flags) extern struct ratelimit_state printk_ratelimit_state; -extern int ___ratelimit(struct ratelimit_state *rs, const char *func); -#define __ratelimit(state) ___ratelimit(state, __func__) - #ifdef CONFIG_PRINTK #define WARN_ON_RATELIMIT(condition, state) ({ \ diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h new file mode 100644 index 000000000000..b676aa419eef --- /dev/null +++ b/include/linux/ratelimit_types.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_RATELIMIT_TYPES_H +#define _LINUX_RATELIMIT_TYPES_H + +#include <linux/bits.h> +#include <linux/param.h> +#include <linux/spinlock_types.h> + +#define DEFAULT_RATELIMIT_INTERVAL (5 * HZ) +#define DEFAULT_RATELIMIT_BURST 10 + +/* issue num suppressed message on exit */ +#define RATELIMIT_MSG_ON_RELEASE BIT(0) + +struct ratelimit_state { + raw_spinlock_t lock; /* protect the state */ + + int interval; + int burst; + int printed; + int missed; + unsigned long begin; + unsigned long flags; +}; + +#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \ + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ + .interval = interval_init, \ + .burst = burst_init, \ + } + +#define RATELIMIT_STATE_INIT_DISABLED \ + RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST) + +#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ + \ + struct ratelimit_state name = \ + RATELIMIT_STATE_INIT(name, interval_init, burst_init) \ + +extern int ___ratelimit(struct ratelimit_state *rs, const char *func); +#define __ratelimit(state) ___ratelimit(state, 
__func__) + +#endif /* _LINUX_RATELIMIT_TYPES_H */ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index 1fd61a9af45c..d7db17996322 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -11,7 +11,7 @@ I know it's not the cleaner way, but in C (not in C++) to get performances and genericity... - See Documentation/rbtree.txt for documentation and samples. + See Documentation/core-api/rbtree.rst for documentation and samples. */ #ifndef _LINUX_RBTREE_H diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index 724b0d036b57..d1c53e9d8c75 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -21,7 +21,7 @@ * rb_insert_augmented() and rb_erase_augmented() are intended to be public. * The rest are implementation details you are not expected to depend on. * - * See Documentation/rbtree.txt for documentation and samples. + * See Documentation/core-api/rbtree.rst for documentation and samples. */ struct rb_augment_callbacks { diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 8214cdc715f2..7a6fc9956510 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -248,6 +248,8 @@ static inline void __list_splice_init_rcu(struct list_head *list, */ sync(); + ASSERT_EXCLUSIVE_ACCESS(*first); + ASSERT_EXCLUSIVE_ACCESS(*last); /* * Readers are finished with the source list, so perform splice. @@ -371,7 +373,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the list_head within the struct. - * @cond...: optional lockdep expression if called from non-RCU protection. + * @cond: optional lockdep expression if called from non-RCU protection. * * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as list_add_rcu() @@ -506,6 +508,27 @@ static inline void hlist_replace_rcu(struct hlist_node *old, WRITE_ONCE(old->pprev, LIST_POISON2); } +/** + * hlists_swap_heads_rcu - swap the lists the hlist heads point to + * @left: The hlist head on the left + * @right: The hlist head on the right + * + * The lists start out as [@left ][node1 ... ] and + * [@right ][node2 ... ] + * The lists end up as [@left ][node2 ... ] + * [@right ][node1 ... ] + */ +static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right) +{ + struct hlist_node *node1 = left->first; + struct hlist_node *node2 = right->first; + + rcu_assign_pointer(left->first, node2); + rcu_assign_pointer(right->first, node1); + WRITE_ONCE(node2->pprev, &left->first); + WRITE_ONCE(node1->pprev, &right->first); +} + /* * return the first or the next element in an RCU protected hlist */ @@ -646,7 +669,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n, * @pos: the type * to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_node within the struct. - * @cond...: optional lockdep expression if called from non-RCU protection. + * @cond: optional lockdep expression if called from non-RCU protection. 
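/*
 * Editorial sketch: the @cond argument documented above is the optional
 * lockdep expression used when traversing outside an RCU read-side
 * critical section; "pos", "my_list", "node", "my_lock" and process()
 * are hypothetical.
 */
hlist_for_each_entry_rcu(pos, &my_list, node,
			 lockdep_is_held(&my_lock))
	process(pos);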
* * This list-traversal primitive may safely run concurrently with * the _rcu list-mutation primitives such as hlist_add_head_rcu() diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index 9670b54b484a..ff3e94779e73 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -162,7 +162,7 @@ static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n) * The barrier() is needed to make sure compiler doesn't cache first element [1], * as this loop can be restarted [2] * [1] Documentation/core-api/atomic_ops.rst around line 114 - * [2] Documentation/RCU/rculist_nulls.txt around line 146 + * [2] Documentation/RCU/rculist_nulls.rst around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ for (({barrier();}), \ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 2678a37c3169..d15d46db61f7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -37,6 +37,7 @@ /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); +void rcu_barrier_tasks_rude(void); void synchronize_rcu(void); #ifdef CONFIG_PREEMPT_RCU @@ -129,25 +130,57 @@ static inline void rcu_init_nohz(void) { } * Note a quasi-voluntary context switch for RCU-tasks's benefit. * This is a macro rather than an inline function to avoid #include hell. */ -#ifdef CONFIG_TASKS_RCU -#define rcu_tasks_qs(t) \ - do { \ - if (READ_ONCE((t)->rcu_tasks_holdout)) \ - WRITE_ONCE((t)->rcu_tasks_holdout, false); \ +#ifdef CONFIG_TASKS_RCU_GENERIC + +# ifdef CONFIG_TASKS_RCU +# define rcu_tasks_classic_qs(t, preempt) \ + do { \ + if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \ + WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) -#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); +# else +# define rcu_tasks_classic_qs(t, preempt) do { } while (0) +# define call_rcu_tasks call_rcu +# define synchronize_rcu_tasks synchronize_rcu +# endif + +# ifdef CONFIG_TASKS_RCU_TRACE +# define rcu_tasks_trace_qs(t) \ + do { \ + if (!likely(READ_ONCE((t)->trc_reader_checked)) && \ + !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \ + smp_store_release(&(t)->trc_reader_checked, true); \ + smp_mb(); /* Readers partitioned by store. 
*/ \ + } \ + } while (0) +# else +# define rcu_tasks_trace_qs(t) do { } while (0) +# endif + +#define rcu_tasks_qs(t, preempt) \ +do { \ + rcu_tasks_classic_qs((t), (preempt)); \ + rcu_tasks_trace_qs((t)); \ +} while (0) + +# ifdef CONFIG_TASKS_RUDE_RCU +void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func); +void synchronize_rcu_tasks_rude(void); +# endif + +#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); -#else /* #ifdef CONFIG_TASKS_RCU */ -#define rcu_tasks_qs(t) do { } while (0) +#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ +#define rcu_tasks_qs(t, preempt) do { } while (0) #define rcu_note_voluntary_context_switch(t) do { } while (0) #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } -#endif /* #else #ifdef CONFIG_TASKS_RCU */ +#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU @@ -158,7 +191,7 @@ static inline void exit_tasks_rcu_finish(void) { } */ #define cond_resched_tasks_rcu_qs() \ do { \ - rcu_tasks_qs(current); \ + rcu_tasks_qs(current, false); \ cond_resched(); \ } while (0) @@ -795,17 +828,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) /* * Does the specified offset indicate that the corresponding rcu_head - * structure can be handled by kfree_rcu()? + * structure can be handled by kvfree_rcu()? */ -#define __is_kfree_rcu_offset(offset) ((offset) < 4096) +#define __is_kvfree_rcu_offset(offset) ((offset) < 4096) /* * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. */ -#define __kfree_rcu(head, offset) \ +#define __kvfree_rcu(head, offset) \ do { \ - BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ - kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ + BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \ + kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \ } while (0) /** @@ -824,7 +857,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will - * be generated in __kfree_rcu(). If this error is triggered, you can + * be generated in __kvfree_rcu(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * @@ -839,7 +872,46 @@ do { \ typeof (ptr) ___p = (ptr); \ \ if (___p) \ - __kfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ + __kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \ +} while (0) + +/** + * kvfree_rcu() - kvfree an object after a grace period. + * + * This macro consists of one or two arguments and it is + * based on whether an object is head-less or not. If it + * has a head then a semantic stays the same as it used + * to be before: + * + * kvfree_rcu(ptr, rhf); + * + * where @ptr is a pointer to kvfree(), @rhf is the name + * of the rcu_head structure within the type of @ptr. + * + * When it comes to head-less variant, only one argument + * is passed and that is just a pointer which has to be + * freed after a grace period. Therefore the semantic is + * + * kvfree_rcu(ptr); + * + * where @ptr is a pointer to kvfree(). 
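/*
 * Editorial sketch of both forms described above; "struct foo" with its
 * embedded "rhf" rcu_head, plus the pointers "p" and "q", are
 * hypothetical.
 */
struct foo {
	int data;
	struct rcu_head rhf;
};

kvfree_rcu(p, rhf);	/* double-argument: freed via the embedded rcu_head */
kvfree_rcu(q);		/* single-argument: head-less, may sleep */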
+ * + * Please note, head-less way of freeing is permitted to + * use from a context that has to follow might_sleep() + * annotation. Otherwise, please switch and embed the + * rcu_head structure within the type of @ptr. + */ +#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \ + kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) + +#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME +#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf) +#define kvfree_rcu_arg_1(ptr) \ +do { \ + typeof(ptr) ___p = (ptr); \ + \ + if (___p) \ + kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \ } while (0) /* diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h new file mode 100644 index 000000000000..d9015aac78c6 --- /dev/null +++ b/include/linux/rcupdate_trace.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Read-Copy Update mechanism for mutual exclusion, adapted for tracing. + * + * Copyright (C) 2020 Paul E. McKenney. + */ + +#ifndef __LINUX_RCUPDATE_TRACE_H +#define __LINUX_RCUPDATE_TRACE_H + +#include <linux/sched.h> +#include <linux/rcupdate.h> + +#ifdef CONFIG_DEBUG_LOCK_ALLOC + +extern struct lockdep_map rcu_trace_lock_map; + +static inline int rcu_read_lock_trace_held(void) +{ + return lock_is_held(&rcu_trace_lock_map); +} + +#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +static inline int rcu_read_lock_trace_held(void) +{ + return 1; +} + +#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +#ifdef CONFIG_TASKS_TRACE_RCU + +void rcu_read_unlock_trace_special(struct task_struct *t, int nesting); + +/** + * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section + * + * When synchronize_rcu_tasks_trace() is invoked by one task, then that + * task is guaranteed to block until all other tasks exit their read-side + * critical sections. Similarly, if call_rcu_trace() is invoked on one + * task while other tasks are within RCU read-side critical sections, + * invocation of the corresponding RCU callback is deferred until after + * the all the other tasks exit their critical sections. + * + * For more details, please see the documentation for rcu_read_lock(). + */ +static inline void rcu_read_lock_trace(void) +{ + struct task_struct *t = current; + + WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1); + if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && + t->trc_reader_special.b.need_mb) + smp_mb(); // Pairs with update-side barriers + rcu_lock_acquire(&rcu_trace_lock_map); +} + +/** + * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section + * + * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is + * allowed. Invoking a rcu_read_unlock_trace() when there is no matching + * rcu_read_lock_trace() is verboten, and will result in lockdep complaints. + * + * For more details, please see the documentation for rcu_read_unlock(). + */ +static inline void rcu_read_unlock_trace(void) +{ + int nesting; + struct task_struct *t = current; + + rcu_lock_release(&rcu_trace_lock_map); + nesting = READ_ONCE(t->trc_reader_nesting) - 1; + if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) { + WRITE_ONCE(t->trc_reader_nesting, nesting); + return; // We assume shallow reader nesting. 
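	/*
	 * Editorial note: readers with special state set (for instance,
	 * those that blocked or need a memory barrier) take the slow path
	 * below so the grace-period machinery can account for them.
	 */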
+ } + rcu_read_unlock_trace_special(t, nesting); +} + +void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func); +void synchronize_rcu_tasks_trace(void); +void rcu_barrier_tasks_trace(void); + +#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ + +#endif /* __LINUX_RCUPDATE_TRACE_H */ diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index c0578ba23c1a..699b938358bf 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -31,4 +31,23 @@ do { \ #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) +/** + * synchronize_rcu_mult - Wait concurrently for multiple grace periods + * @...: List of call_rcu() functions for different grace periods to wait on + * + * This macro waits concurrently for multiple types of RCU grace periods. + * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait + * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU + * domain requires you to write a wrapper function for that SRCU domain's + * call_srcu() function, with this wrapper supplying the pointer to the + * corresponding srcu_struct. + * + * The first argument tells Tiny RCU's _wait_rcu_gp() not to + * bother waiting for RCU. The reason for this is because anywhere + * synchronize_rcu_mult() can be called is automatically already a full + * grace period. + */ +#define synchronize_rcu_mult(...) \ + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 045c28b71f4f..5cc9637cac16 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -34,9 +34,25 @@ static inline void synchronize_rcu_expedited(void) synchronize_rcu(); } -static inline void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func) +/* + * Add one more declaration of kvfree() here. It is + * not so straight forward to just include <linux/mm.h> + * where it is defined due to getting many compile + * errors caused by that include. + */ +extern void kvfree(const void *addr); + +static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func) { - call_rcu(head, func); + if (head) { + call_rcu(head, func); + return; + } + + // kvfree_rcu(one_arg) call. 
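	/*
	 * Editorial note: with no rcu_head embedded in the object, Tiny RCU
	 * cannot queue a callback, so the head-less path waits for a grace
	 * period synchronously and then frees the pointer; this is why
	 * single-argument kvfree_rcu() requires a sleepable context.
	 */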
+ might_sleep(); + synchronize_rcu(); + kvfree((void *) func); } void rcu_qs(void); @@ -49,7 +65,7 @@ static inline void rcu_softirq_qs(void) #define rcu_note_context_switch(preempt) \ do { \ rcu_qs(); \ - rcu_tasks_qs(current); \ + rcu_tasks_qs(current, (preempt)); \ } while (0) static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) @@ -71,6 +87,8 @@ static inline void rcu_irq_enter(void) { } static inline void rcu_irq_exit_irqson(void) { } static inline void rcu_irq_enter_irqson(void) { } static inline void rcu_irq_exit(void) { } +static inline void rcu_irq_exit_preempt(void) { } +static inline void rcu_irq_exit_check_preempt(void) { } static inline void exit_rcu(void) { } static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t) { @@ -85,8 +103,10 @@ static inline void rcu_scheduler_starting(void) { } static inline void rcu_end_inkernel_boot(void) { } static inline bool rcu_inkernel_boot_has_ended(void) { return true; } static inline bool rcu_is_watching(void) { return true; } +static inline bool __rcu_is_watching(void) { return true; } static inline void rcu_momentary_dyntick_idle(void) { } static inline void kfree_rcu_scheduler_running(void) { } +static inline bool rcu_gp_might_be_stalled(void) { return false; } /* Avoid RCU read-side critical sections leaking across. */ static inline void rcu_all_qs(void) { barrier(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 45f3f66bb04d..d2f4064ebd1d 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -33,12 +33,13 @@ static inline void rcu_virt_note_context_switch(int cpu) } void synchronize_rcu_expedited(void); -void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func); +void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier(void); bool rcu_eqs_special_set(int cpu); void rcu_momentary_dyntick_idle(void); void kfree_rcu_scheduler_running(void); +bool rcu_gp_might_be_stalled(void); unsigned long get_state_synchronize_rcu(void); void cond_synchronize_rcu(unsigned long oldstate); @@ -46,9 +47,16 @@ void rcu_idle_enter(void); void rcu_idle_exit(void); void rcu_irq_enter(void); void rcu_irq_exit(void); +void rcu_irq_exit_preempt(void); void rcu_irq_enter_irqson(void); void rcu_irq_exit_irqson(void); +#ifdef CONFIG_PROVE_RCU +void rcu_irq_exit_check_preempt(void); +#else +static inline void rcu_irq_exit_check_preempt(void) { } +#endif + void exit_rcu(void); void rcu_scheduler_starting(void); @@ -56,6 +64,7 @@ extern int rcu_scheduler_active __read_mostly; void rcu_end_inkernel_boot(void); bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); +bool __rcu_is_watching(void); #ifndef CONFIG_PREEMPTION void rcu_all_qs(void); #endif diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 2ffe1ee6d482..61c56cca95c4 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -25,16 +25,38 @@ static inline void rcuwait_init(struct rcuwait *w) w->task = NULL; } -extern void rcuwait_wake_up(struct rcuwait *w); +/* + * Note: this provides no serialization and, just as with waitqueues, + * requires care to estimate as to whether or not the wait is active. + */ +static inline int rcuwait_active(struct rcuwait *w) +{ + return !!rcu_access_pointer(w->task); +} + +extern int rcuwait_wake_up(struct rcuwait *w); /* * The caller is responsible for locking around rcuwait_wait_event(), - * such that writes to @task are properly serialized. + * and [prepare_to/finish]_rcuwait() such that writes to @task are + * properly serialized. 
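/*
 * Editorial sketch: a minimal rcuwait pairing under the serialization
 * rule stated above; "w" and "done" are hypothetical, the waker runs
 * from another context, and the new int return of rcuwait_wake_up() is
 * assumed to indicate whether a wakeup occurred.
 */
/* Waiter: */
rcuwait_wait_event(&w, READ_ONCE(done), TASK_UNINTERRUPTIBLE);

/* Waker: */
WRITE_ONCE(done, true);
rcuwait_wake_up(&w);	/* note the new int return */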
*/ + +static inline void prepare_to_rcuwait(struct rcuwait *w) +{ + rcu_assign_pointer(w->task, current); +} + +static inline void finish_rcuwait(struct rcuwait *w) +{ + rcu_assign_pointer(w->task, NULL); + __set_current_state(TASK_RUNNING); +} + #define rcuwait_wait_event(w, condition, state) \ ({ \ int __ret = 0; \ - rcu_assign_pointer((w)->task, current); \ + prepare_to_rcuwait(w); \ for (;;) { \ /* \ * Implicit barrier (A) pairs with (B) in \ @@ -51,9 +73,7 @@ extern void rcuwait_wake_up(struct rcuwait *w); \ schedule(); \ } \ - \ - WRITE_ONCE((w)->task, NULL); \ - __set_current_state(TASK_RUNNING); \ + finish_rcuwait(w); \ __ret; \ }) diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 40b07168fd8e..1970ed59d49f 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -17,10 +17,13 @@ #include <linux/err.h> #include <linux/bug.h> #include <linux/lockdep.h> +#include <linux/iopoll.h> +#include <linux/fwnode.h> struct module; struct clk; struct device; +struct device_node; struct i2c_client; struct i3c_device; struct irq_domain; @@ -71,35 +74,12 @@ struct reg_sequence { unsigned int delay_us; }; -#define regmap_update_bits(map, reg, mask, val) \ - regmap_update_bits_base(map, reg, mask, val, NULL, false, false) -#define regmap_update_bits_async(map, reg, mask, val)\ - regmap_update_bits_base(map, reg, mask, val, NULL, true, false) -#define regmap_update_bits_check(map, reg, mask, val, change)\ - regmap_update_bits_base(map, reg, mask, val, change, false, false) -#define regmap_update_bits_check_async(map, reg, mask, val, change)\ - regmap_update_bits_base(map, reg, mask, val, change, true, false) - -#define regmap_write_bits(map, reg, mask, val) \ - regmap_update_bits_base(map, reg, mask, val, NULL, false, true) - -#define regmap_field_write(field, val) \ - regmap_field_update_bits_base(field, ~0, val, NULL, false, false) -#define regmap_field_force_write(field, val) \ - regmap_field_update_bits_base(field, ~0, val, NULL, false, true) -#define regmap_field_update_bits(field, mask, val)\ - regmap_field_update_bits_base(field, mask, val, NULL, false, false) -#define regmap_field_force_update_bits(field, mask, val) \ - regmap_field_update_bits_base(field, mask, val, NULL, false, true) - -#define regmap_fields_write(field, id, val) \ - regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, false) -#define regmap_fields_force_write(field, id, val) \ - regmap_fields_update_bits_base(field, id, ~0, val, NULL, false, true) -#define regmap_fields_update_bits(field, id, mask, val)\ - regmap_fields_update_bits_base(field, id, mask, val, NULL, false, false) -#define regmap_fields_force_update_bits(field, id, mask, val) \ - regmap_fields_update_bits_base(field, id, mask, val, NULL, false, true) +#define REG_SEQ(_reg, _def, _delay_us) { \ + .reg = _reg, \ + .def = _def, \ + .delay_us = _delay_us, \ + } +#define REG_SEQ0(_reg, _def) REG_SEQ(_reg, _def, 0) /** * regmap_read_poll_timeout - Poll until a condition is met or a timeout occurs @@ -122,26 +102,10 @@ struct reg_sequence { */ #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ ({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __sleep_us = (sleep_us); \ - ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \ - int __ret; \ - might_sleep_if(__sleep_us); \ - for (;;) { \ - __ret = regmap_read((map), (addr), &(val)); \ - if (__ret) \ - break; \ - if (cond) \ - break; \ - if ((__timeout_us) && \ - ktime_compare(ktime_get(), __timeout) > 0) { \ - __ret = regmap_read((map), 
(addr), &(val)); \ - break; \ - } \ - if (__sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - } \ - __ret ?: ((cond) ? 0 : -ETIMEDOUT); \ + int __ret, __tmp; \ + __tmp = read_poll_timeout(regmap_read, __ret, __ret || (cond), \ + sleep_us, timeout_us, false, (map), (addr), &(val)); \ + __ret ?: __tmp; \ }) /** @@ -209,25 +173,10 @@ struct reg_sequence { */ #define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ ({ \ - u64 __timeout_us = (timeout_us); \ - unsigned long __sleep_us = (sleep_us); \ - ktime_t timeout = ktime_add_us(ktime_get(), __timeout_us); \ - int pollret; \ - might_sleep_if(__sleep_us); \ - for (;;) { \ - pollret = regmap_field_read((field), &(val)); \ - if (pollret) \ - break; \ - if (cond) \ - break; \ - if (__timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ - pollret = regmap_field_read((field), &(val)); \ - break; \ - } \ - if (__sleep_us) \ - usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ - } \ - pollret ?: ((cond) ? 0 : -ETIMEDOUT); \ + int __ret, __tmp; \ + __tmp = read_poll_timeout(regmap_field_read, __ret, __ret || (cond), \ + sleep_us, timeout_us, false, (field), &(val)); \ + __ret ?: __tmp; \ }) #ifdef CONFIG_REGMAP @@ -326,7 +275,7 @@ typedef void (*regmap_unlock)(void *); * readable if it belongs to one of the ranges specified * by rd_noinc_table). * @disable_locking: This regmap is either protected by external means or - * is guaranteed not be be accessed from multiple threads. + * is guaranteed not to be accessed from multiple threads. * Don't use any locking mechanisms. * @lock: Optional lock callback (overrides regmap's default lock * function, based on spinlock or mutex). @@ -1076,6 +1025,42 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, int regmap_update_bits_base(struct regmap *map, unsigned int reg, unsigned int mask, unsigned int val, bool *change, bool async, bool force); + +static inline int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, false, false); +} + +static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, true, false); +} + +static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + return regmap_update_bits_base(map, reg, mask, val, + change, false, false); +} + +static inline int +regmap_update_bits_check_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + return regmap_update_bits_base(map, reg, mask, val, + change, true, false); +} + +static inline int regmap_write_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + return regmap_update_bits_base(map, reg, mask, val, NULL, false, true); +} + int regmap_get_val_bytes(struct regmap *map); int regmap_get_max_register(struct regmap *map); int regmap_get_reg_stride(struct regmap *map); @@ -1111,6 +1096,21 @@ bool regmap_reg_in_ranges(unsigned int reg, const struct regmap_range *ranges, unsigned int nranges); +static inline int regmap_set_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + return regmap_update_bits_base(map, reg, bits, bits, + NULL, false, false); +} + +static inline int regmap_clear_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + return 
regmap_update_bits_base(map, reg, bits, 0, NULL, false, false); +} + +int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits); + /** * struct reg_field - Description of an register field * @@ -1134,6 +1134,14 @@ struct reg_field { .msb = _msb, \ } +#define REG_FIELD_ID(_reg, _lsb, _msb, _size, _offset) { \ + .reg = _reg, \ + .lsb = _lsb, \ + .msb = _msb, \ + .id_size = _size, \ + .id_offset = _offset, \ + } + struct regmap_field *regmap_field_alloc(struct regmap *regmap, struct reg_field reg_field); void regmap_field_free(struct regmap_field *field); @@ -1151,6 +1159,65 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id, int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, unsigned int mask, unsigned int val, bool *change, bool async, bool force); + +static inline int regmap_field_write(struct regmap_field *field, + unsigned int val) +{ + return regmap_field_update_bits_base(field, ~0, val, + NULL, false, false); +} + +static inline int regmap_field_force_write(struct regmap_field *field, + unsigned int val) +{ + return regmap_field_update_bits_base(field, ~0, val, NULL, false, true); +} + +static inline int regmap_field_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + return regmap_field_update_bits_base(field, mask, val, + NULL, false, false); +} + +static inline int +regmap_field_force_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + return regmap_field_update_bits_base(field, mask, val, + NULL, false, true); +} + +static inline int regmap_fields_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, ~0, val, + NULL, false, false); +} + +static inline int regmap_fields_force_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, ~0, val, + NULL, false, true); +} + +static inline int +regmap_fields_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, mask, val, + NULL, false, false); +} + +static inline int +regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + return regmap_fields_update_bits_base(field, id, mask, val, + NULL, false, true); +} + /** * struct regmap_irq_type - IRQ type definitions. 
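As a hedged illustration of the new type-checked helpers (the register and bit names below are invented for this sketch, not part of the regmap API):

#include <linux/bits.h>
#include <linux/regmap.h>

#define FOO_REG_CTRL	0x04		/* hypothetical control register */
#define FOO_CTRL_EN	BIT(0)		/* hypothetical enable bit */

static int foo_enable(struct regmap *map)
{
	/* Same effect as regmap_update_bits(map, reg, bit, bit). */
	return regmap_set_bits(map, FOO_REG_CTRL, FOO_CTRL_EN);
}

static int foo_disable(struct regmap *map)
{
	return regmap_clear_bits(map, FOO_REG_CTRL, FOO_CTRL_EN);
}

static bool foo_is_enabled(struct regmap *map)
{
	/* regmap_test_bits(): 1 if all bits set, 0 if not, <0 on error. */
	return regmap_test_bits(map, FOO_REG_CTRL, FOO_CTRL_EN) == 1;
}

Being inline functions rather than macros, these give proper argument type checking and are visible in debug info.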
* @@ -1310,12 +1377,23 @@ struct regmap_irq_chip_data; int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); +int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data); int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, int irq_flags, int irq_base, const struct regmap_irq_chip *chip, struct regmap_irq_chip_data **data); +int devm_regmap_add_irq_chip_fwnode(struct device *dev, + struct fwnode_handle *fwnode, + struct regmap *map, int irq, + int irq_flags, int irq_base, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data); void devm_regmap_del_irq_chip(struct device *dev, int irq, struct regmap_irq_chip_data *data); @@ -1410,6 +1488,27 @@ static inline int regmap_update_bits_base(struct regmap *map, unsigned int reg, return -EINVAL; } +static inline int regmap_set_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_clear_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_test_bits(struct regmap *map, + unsigned int reg, unsigned int bits) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_field_update_bits_base(struct regmap_field *field, unsigned int mask, unsigned int val, bool *change, bool async, bool force) @@ -1427,6 +1526,103 @@ static inline int regmap_fields_update_bits_base(struct regmap_field *field, return -EINVAL; } +static inline int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_update_bits_check_async(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_write_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_write(struct regmap_field *field, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_force_write(struct regmap_field *field, + unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_field_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_field_force_update_bits(struct regmap_field *field, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int 
regmap_fields_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int regmap_fields_force_write(struct regmap_field *field, + unsigned int id, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_fields_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + +static inline int +regmap_fields_force_update_bits(struct regmap_field *field, unsigned int id, + unsigned int mask, unsigned int val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_get_val_bytes(struct regmap *map) { WARN_ONCE(1, "regmap API is disabled"); diff --git a/include/linux/regset.h b/include/linux/regset.h index bf0243779738..c3403f328257 100644 --- a/include/linux/regset.h +++ b/include/linux/regset.h @@ -17,6 +17,52 @@ struct task_struct; struct user_regset; +struct membuf { + void *p; + size_t left; +}; + +static inline int membuf_zero(struct membuf *s, size_t size) +{ + if (s->left) { + if (size > s->left) + size = s->left; + memset(s->p, 0, size); + s->p += size; + s->left -= size; + } + return s->left; +} + +static inline int membuf_write(struct membuf *s, const void *v, size_t size) +{ + if (s->left) { + if (size > s->left) + size = s->left; + memcpy(s->p, v, size); + s->p += size; + s->left -= size; + } + return s->left; +} + +/* current s->p must be aligned for v; v must be a scalar */ +#define membuf_store(s, v) \ +({ \ + struct membuf *__s = (s); \ + if (__s->left) { \ + typeof(v) __v = (v); \ + size_t __size = sizeof(__v); \ + if (unlikely(__size > __s->left)) { \ + __size = __s->left; \ + memcpy(__s->p, &__v, __size); \ + } else { \ + *(typeof(__v + 0) *)__s->p = __v; \ + } \ + __s->p += __size; \ + __s->left -= __size; \ + } \ + __s->left;}) /** * user_regset_active_fn - type of @active function in &struct user_regset @@ -36,26 +82,9 @@ struct user_regset; typedef int user_regset_active_fn(struct task_struct *target, const struct user_regset *regset); -/** - * user_regset_get_fn - type of @get function in &struct user_regset - * @target: thread being examined - * @regset: regset being examined - * @pos: offset into the regset data to access, in bytes - * @count: amount of data to copy, in bytes - * @kbuf: if not %NULL, a kernel-space pointer to copy into - * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into - * - * Fetch register values. Return %0 on success; -%EIO or -%ENODEV - * are usual failure returns. The @pos and @count values are in - * bytes, but must be properly aligned. If @kbuf is non-null, that - * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then - * ubuf gives a userland pointer to access directly, and an -%EFAULT - * return value is possible. 
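To make the replacement concrete, here is a hedged sketch of a ->regset_get() handler written against the new membuf interface; struct foo_regs and foo_task_regs() are placeholders, not kernel symbols:

struct foo_regs {
	unsigned long gpr[16];		/* invented register layout */
};

/* Hypothetical accessor for @target's saved register block. */
extern const struct foo_regs *foo_task_regs(struct task_struct *target);

static int foo_regs_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	/* membuf tracks the cursor and the space left for us. */
	membuf_write(&to, foo_task_regs(target), sizeof(struct foo_regs));
	/* Zero-fill whatever tail the caller sized beyond our data. */
	return membuf_zero(&to, to.left);
}

The handler no longer sees pos/count/kbuf/ubuf at all; the core performs the user/kernel copy and the bounds bookkeeping.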
- */ -typedef int user_regset_get_fn(struct task_struct *target, +typedef int user_regset_get2_fn(struct task_struct *target, const struct user_regset *regset, - unsigned int pos, unsigned int count, - void *kbuf, void __user *ubuf); + struct membuf to); /** * user_regset_set_fn - type of @set function in &struct user_regset @@ -104,28 +133,6 @@ typedef int user_regset_writeback_fn(struct task_struct *target, int immediate); /** - * user_regset_get_size_fn - type of @get_size function in &struct user_regset - * @target: thread being examined - * @regset: regset being examined - * - * This call is optional; usually the pointer is %NULL. - * - * When provided, this function must return the current size of regset - * data, as observed by the @get function in &struct user_regset. The - * value returned must be a multiple of @size. The returned size is - * required to be valid only until the next time (if any) @regset is - * modified for @target. - * - * This function is intended for dynamically sized regsets. A regset - * that is statically sized does not need to implement it. - * - * This function should not be called directly: instead, callers should - * call regset_size() to determine the current size of a regset. - */ -typedef unsigned int user_regset_get_size_fn(struct task_struct *target, - const struct user_regset *regset); - -/** * struct user_regset - accessible thread CPU state * @n: Number of slots (registers). * @size: Size in bytes of a slot (register). @@ -136,7 +143,6 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * @set: Function to store values. * @active: Function to report if regset is active, or %NULL. * @writeback: Function to write data back to user memory, or %NULL. - * @get_size: Function to return the regset's size, or %NULL. * * This data structure describes a machine resource we call a register set. * This is part of the state of an individual thread, not necessarily @@ -144,12 +150,7 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * similar slots, given by @n. Each slot is @size bytes, and aligned to * @align bytes (which is at least @size). For dynamically-sized * regsets, @n must contain the maximum possible number of slots for the - * regset, and @get_size must point to a function that returns the - * current regset size. - * - * Callers that need to know only the current size of the regset and do - * not care about its internal structure should call regset_size() - * instead of inspecting @n or calling @get_size. + * regset. * * For backward compatibility, the @get and @set methods must pad to, or * accept, @n * @size bytes, even if the current regset size is smaller. @@ -185,11 +186,10 @@ typedef unsigned int user_regset_get_size_fn(struct task_struct *target, * omitted when there is an @active function and it returns zero. */ struct user_regset { - user_regset_get_fn *get; + user_regset_get2_fn *regset_get; user_regset_set_fn *set; user_regset_active_fn *active; user_regset_writeback_fn *writeback; - user_regset_get_size_fn *get_size; unsigned int n; unsigned int size; unsigned int align; @@ -238,44 +238,6 @@ struct user_regset_view { */ const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); - -/* - * These are helpers for writing regset get/set functions in arch code. - * Because @start_pos and @end_pos are always compile-time constants, - * these are inlined into very little code though they look large. 
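For contrast, a hedged before/after sketch of the same fetch, first against the removed kbuf/ubuf helpers (see user_regset_copyout() below) and then against membuf; foo_task_state() is a hypothetical accessor:

/* Hypothetical accessor for per-thread state. */
extern void *foo_task_state(struct task_struct *target);

/* Old style: manual position/count bookkeeping, dual kbuf/ubuf paths. */
static int foo_get_old(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   foo_task_state(target), 0, -1);
}

/* New style: the caller sizes the membuf, we just write into it. */
static int foo_get_new(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	return membuf_write(&to, foo_task_state(target),
			    regset->n * regset->size);
}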
- * - * Use one or more calls sequentially for each chunk of regset data stored - * contiguously in memory. Call with constants for @start_pos and @end_pos, - * giving the range of byte positions in the regset that data corresponds - * to; @end_pos can be -1 if this chunk is at the end of the regset layout. - * Each call updates the arguments to point past its chunk. - */ - -static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, - void **kbuf, - void __user **ubuf, const void *data, - const int start_pos, const int end_pos) -{ - if (*count == 0) - return 0; - BUG_ON(*pos < start_pos); - if (end_pos < 0 || *pos < end_pos) { - unsigned int copy = (end_pos < 0 ? *count - : min(*count, end_pos - *pos)); - data += *pos - start_pos; - if (*kbuf) { - memcpy(*kbuf, data, copy); - *kbuf += copy; - } else if (__copy_to_user(*ubuf, data, copy)) - return -EFAULT; - else - *ubuf += copy; - *pos += copy; - *count -= copy; - } - return 0; -} - static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, const void **kbuf, const void __user **ubuf, void *data, @@ -301,35 +263,6 @@ static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, return 0; } -/* - * These two parallel the two above, but for portions of a regset layout - * that always read as all-zero or for which writes are ignored. - */ -static inline int user_regset_copyout_zero(unsigned int *pos, - unsigned int *count, - void **kbuf, void __user **ubuf, - const int start_pos, - const int end_pos) -{ - if (*count == 0) - return 0; - BUG_ON(*pos < start_pos); - if (end_pos < 0 || *pos < end_pos) { - unsigned int copy = (end_pos < 0 ? *count - : min(*count, end_pos - *pos)); - if (*kbuf) { - memset(*kbuf, 0, copy); - *kbuf += copy; - } else if (__clear_user(*ubuf, copy)) - return -EFAULT; - else - *ubuf += copy; - *pos += copy; - *count -= copy; - } - return 0; -} - static inline int user_regset_copyin_ignore(unsigned int *pos, unsigned int *count, const void **kbuf, @@ -353,31 +286,19 @@ static inline int user_regset_copyin_ignore(unsigned int *pos, return 0; } -/** - * copy_regset_to_user - fetch a thread's user_regset data into user memory - * @target: thread to be examined - * @view: &struct user_regset_view describing user thread machine state - * @setno: index in @view->regsets - * @offset: offset into the regset data, in bytes - * @size: amount of data to copy, in bytes - * @data: user-mode pointer to copy into - */ -static inline int copy_regset_to_user(struct task_struct *target, - const struct user_regset_view *view, - unsigned int setno, - unsigned int offset, unsigned int size, - void __user *data) -{ - const struct user_regset *regset = &view->regsets[setno]; - - if (!regset->get) - return -EOPNOTSUPP; +extern int regset_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, void *data); - if (!access_ok(data, size)) - return -EFAULT; +extern int regset_get_alloc(struct task_struct *target, + const struct user_regset *regset, + unsigned int size, + void **data); - return regset->get(target, regset, offset, size, NULL, data); -} +extern int copy_regset_to_user(struct task_struct *target, + const struct user_regset_view *view, + unsigned int setno, unsigned int offset, + unsigned int size, void __user *data); /** * copy_regset_from_user - store into thread's user_regset data from user memory @@ -405,21 +326,4 @@ static inline int copy_regset_from_user(struct task_struct *target, return regset->set(target, regset, offset, size, NULL, data); } -/** 
- * regset_size - determine the current size of a regset - * @target: thread to be examined - * @regset: regset to be examined - * - * Note that the returned size is valid only until the next time - * (if any) @regset is modified for @target. - */ -static inline unsigned int regset_size(struct task_struct *target, - const struct user_regset *regset) -{ - if (!regset->get_size) - return regset->n * regset->size; - else - return regset->get_size(target, regset); -} - #endif /* <linux/regset.h> */ diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 6a92fd3105a3..2024944fd2f7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -32,10 +32,12 @@ #define __LINUX_REGULATOR_CONSUMER_H_ #include <linux/err.h> +#include <linux/suspend.h> struct device; struct notifier_block; struct regmap; +struct regulator_dev; /* * Regulator operating modes. @@ -277,6 +279,14 @@ int regulator_unregister_notifier(struct regulator *regulator, void devm_regulator_unregister_notifier(struct regulator *regulator, struct notifier_block *nb); +/* regulator suspend */ +int regulator_suspend_enable(struct regulator_dev *rdev, + suspend_state_t state); +int regulator_suspend_disable(struct regulator_dev *rdev, + suspend_state_t state); +int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV, + int max_uV, suspend_state_t state); + /* driver data - core doesn't touch */ void *regulator_get_drvdata(struct regulator *regulator); void regulator_set_drvdata(struct regulator *regulator, void *data); diff --git a/include/linux/regulator/coupler.h b/include/linux/regulator/coupler.h index 0212d6255e4e..5f86824bd117 100644 --- a/include/linux/regulator/coupler.h +++ b/include/linux/regulator/coupler.h @@ -62,6 +62,8 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev); int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV, int max_uV, suspend_state_t state); +int regulator_do_balance_voltage(struct regulator_dev *rdev, + suspend_state_t state, bool skip_coupled); #else static inline int regulator_coupler_register(struct regulator_coupler *coupler) { @@ -92,6 +94,12 @@ static inline int regulator_set_voltage_rdev(struct regulator_dev *rdev, { return -EINVAL; } +static inline int regulator_do_balance_voltage(struct regulator_dev *rdev, + suspend_state_t state, + bool skip_coupled) +{ + return -EINVAL; +} #endif #endif diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 29d920516e0b..8539f34ae42b 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -13,6 +13,7 @@ #define __LINUX_REGULATOR_DRIVER_H_ #include <linux/device.h> +#include <linux/linear_range.h> #include <linux/notifier.h> #include <linux/regulator/consumer.h> #include <linux/ww_mutex.h> @@ -39,31 +40,13 @@ enum regulator_status { REGULATOR_STATUS_UNDEFINED, }; -/** - * struct regulator_linear_range - specify linear voltage ranges - * - * Specify a range of voltages for regulator_map_linear_range() and - * regulator_list_linear_range(). 
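The newly exported suspend helpers can be exercised from a consumer as in this hedged sketch ("vdd" stands for any regulator obtained via regulator_get()):

#include <linux/regulator/consumer.h>
#include <linux/suspend.h>

static int foo_config_sleep(struct regulator *vdd)
{
	/* Pin VDD to exactly 1.0 V while the system is in suspend-to-mem. */
	return regulator_set_suspend_voltage(vdd, 1000000, 1000000,
					     PM_SUSPEND_MEM);
}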
- * - * @min_uV: Lowest voltage in range - * @min_sel: Lowest selector for range - * @max_sel: Highest selector for range - * @uV_step: Step size - */ -struct regulator_linear_range { - unsigned int min_uV; - unsigned int min_sel; - unsigned int max_sel; - unsigned int uV_step; -}; - -/* Initialize struct regulator_linear_range */ +/* Initialize struct linear_range for regulators */ #define REGULATOR_LINEAR_RANGE(_min_uV, _min_sel, _max_sel, _step_uV) \ { \ - .min_uV = _min_uV, \ + .min = _min_uV, \ .min_sel = _min_sel, \ .max_sel = _max_sel, \ - .uV_step = _step_uV, \ + .step = _step_uV, \ } /** @@ -134,7 +117,7 @@ struct regulator_linear_range { * suspended. * @set_suspend_mode: Set the operating mode for the regulator when the * system is suspended. - * + * @resume: Resume operation of suspended regulator. * @set_pull_down: Configure the regulator to pull down when the regulator * is disabled. * @@ -322,6 +305,9 @@ enum regulator_type { * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator * + * @poll_enabled_time: The polling interval (in uS) to use while checking that + * the regulator was actually enabled. Max upto enable_time. + * * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode */ struct regulator_desc { @@ -348,7 +334,7 @@ struct regulator_desc { unsigned int ramp_delay; int min_dropout_uV; - const struct regulator_linear_range *linear_ranges; + const struct linear_range *linear_ranges; const unsigned int *linear_range_selectors; int n_linear_ranges; @@ -389,6 +375,8 @@ struct regulator_desc { unsigned int off_on_delay; + unsigned int poll_enabled_time; + unsigned int (*of_map_mode)(unsigned int mode); }; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index a84cc8879c3e..8a56f033b6cd 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -101,6 +101,7 @@ struct regulator_state { * @system_load: Load that isn't captured by any consumer requests. * * @max_spread: Max possible spread between coupled regulators + * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. * @valid_ops_mask: Operations which may be performed by consumers. * diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h new file mode 100644 index 000000000000..1bbd3014f906 --- /dev/null +++ b/include/linux/regulator/pca9450.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright 2020 NXP. 
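With the switch to the generic struct linear_range, a voltage table now looks like this hedged sketch (the voltages and selector split are invented):

#include <linux/kernel.h>
#include <linux/linear_range.h>
#include <linux/regulator/driver.h>

static const struct linear_range foo_buck_ranges[] = {
	/* selectors 0x00..0x3f: 600 mV + sel * 12.5 mV */
	REGULATOR_LINEAR_RANGE(600000, 0x00, 0x3f, 12500),
	/* selectors 0x40..0x7f: 1.4 V + (sel - 0x40) * 25 mV */
	REGULATOR_LINEAR_RANGE(1400000, 0x40, 0x7f, 25000),
};

static const struct regulator_desc foo_buck_desc = {
	.name		 = "foo-buck",
	.n_voltages	 = 0x80,
	.linear_ranges	 = foo_buck_ranges,
	.n_linear_ranges = ARRAY_SIZE(foo_buck_ranges),
	/* .ops, .vsel_reg etc. elided for brevity */
};

Only the field names inside the initializer change (min/step instead of min_uV/uV_step); drivers that stick to the macro need no source change beyond the array's type.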
*/ + +#ifndef __LINUX_REG_PCA9450_H__ +#define __LINUX_REG_PCA9450_H__ + +#include <linux/regmap.h> + +enum pca9450_chip_type { + PCA9450_TYPE_PCA9450A = 0, + PCA9450_TYPE_PCA9450BC, + PCA9450_TYPE_AMOUNT, +}; + +enum { + PCA9450_BUCK1 = 0, + PCA9450_BUCK2, + PCA9450_BUCK3, + PCA9450_BUCK4, + PCA9450_BUCK5, + PCA9450_BUCK6, + PCA9450_LDO1, + PCA9450_LDO2, + PCA9450_LDO3, + PCA9450_LDO4, + PCA9450_LDO5, + PCA9450_REGULATOR_CNT, +}; + +enum { + PCA9450_DVS_LEVEL_RUN = 0, + PCA9450_DVS_LEVEL_STANDBY, + PCA9450_DVS_LEVEL_MAX, +}; + +#define PCA9450_BUCK1_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK2_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK3_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK4_VOLTAGE_NUM 0x80 + +#define PCA9450_BUCK5_VOLTAGE_NUM 0x80 +#define PCA9450_BUCK6_VOLTAGE_NUM 0x80 + +#define PCA9450_LDO1_VOLTAGE_NUM 0x08 +#define PCA9450_LDO2_VOLTAGE_NUM 0x08 +#define PCA9450_LDO3_VOLTAGE_NUM 0x20 +#define PCA9450_LDO4_VOLTAGE_NUM 0x20 +#define PCA9450_LDO5_VOLTAGE_NUM 0x10 + +enum { + PCA9450_REG_DEV_ID = 0x00, + PCA9450_REG_INT1 = 0x01, + PCA9450_REG_INT1_MSK = 0x02, + PCA9450_REG_STATUS1 = 0x03, + PCA9450_REG_STATUS2 = 0x04, + PCA9450_REG_PWRON_STAT = 0x05, + PCA9450_REG_SWRST = 0x06, + PCA9450_REG_PWRCTRL = 0x07, + PCA9450_REG_RESET_CTRL = 0x08, + PCA9450_REG_CONFIG1 = 0x09, + PCA9450_REG_CONFIG2 = 0x0A, + PCA9450_REG_BUCK123_DVS = 0x0C, + PCA9450_REG_BUCK1OUT_LIMIT = 0x0D, + PCA9450_REG_BUCK2OUT_LIMIT = 0x0E, + PCA9450_REG_BUCK3OUT_LIMIT = 0x0F, + PCA9450_REG_BUCK1CTRL = 0x10, + PCA9450_REG_BUCK1OUT_DVS0 = 0x11, + PCA9450_REG_BUCK1OUT_DVS1 = 0x12, + PCA9450_REG_BUCK2CTRL = 0x13, + PCA9450_REG_BUCK2OUT_DVS0 = 0x14, + PCA9450_REG_BUCK2OUT_DVS1 = 0x15, + PCA9450_REG_BUCK3CTRL = 0x16, + PCA9450_REG_BUCK3OUT_DVS0 = 0x17, + PCA9450_REG_BUCK3OUT_DVS1 = 0x18, + PCA9450_REG_BUCK4CTRL = 0x19, + PCA9450_REG_BUCK4OUT = 0x1A, + PCA9450_REG_BUCK5CTRL = 0x1B, + PCA9450_REG_BUCK5OUT = 0x1C, + PCA9450_REG_BUCK6CTRL = 0x1D, + PCA9450_REG_BUCK6OUT = 0x1E, + PCA9450_REG_LDO_AD_CTRL = 0x20, + PCA9450_REG_LDO1CTRL = 0x21, + PCA9450_REG_LDO2CTRL = 0x22, + PCA9450_REG_LDO3CTRL = 0x23, + PCA9450_REG_LDO4CTRL = 0x24, + PCA9450_REG_LDO5CTRL_L = 0x25, + PCA9450_REG_LDO5CTRL_H = 0x26, + PCA9450_REG_LOADSW_CTRL = 0x2A, + PCA9450_REG_VRFLT1_STS = 0x2B, + PCA9450_REG_VRFLT2_STS = 0x2C, + PCA9450_REG_VRFLT1_MASK = 0x2D, + PCA9450_REG_VRFLT2_MASK = 0x2E, + PCA9450_MAX_REGISTER = 0x2F, +}; + +/* PCA9450 BUCK ENMODE bits */ +#define BUCK_ENMODE_OFF 0x00 +#define BUCK_ENMODE_ONREQ 0x01 +#define BUCK_ENMODE_ONREQ_STBYREQ 0x02 +#define BUCK_ENMODE_ON 0x03 + +/* PCA9450_REG_BUCK1_CTRL bits */ +#define BUCK1_RAMP_MASK 0xC0 +#define BUCK1_RAMP_25MV 0x0 +#define BUCK1_RAMP_12P5MV 0x1 +#define BUCK1_RAMP_6P25MV 0x2 +#define BUCK1_RAMP_3P125MV 0x3 +#define BUCK1_DVS_CTRL 0x10 +#define BUCK1_AD 0x08 +#define BUCK1_FPWM 0x04 +#define BUCK1_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK2_CTRL bits */ +#define BUCK2_RAMP_MASK 0xC0 +#define BUCK2_RAMP_25MV 0x0 +#define BUCK2_RAMP_12P5MV 0x1 +#define BUCK2_RAMP_6P25MV 0x2 +#define BUCK2_RAMP_3P125MV 0x3 +#define BUCK2_DVS_CTRL 0x10 +#define BUCK2_AD 0x08 +#define BUCK2_FPWM 0x04 +#define BUCK2_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK3_CTRL bits */ +#define BUCK3_RAMP_MASK 0xC0 +#define BUCK3_RAMP_25MV 0x0 +#define BUCK3_RAMP_12P5MV 0x1 +#define BUCK3_RAMP_6P25MV 0x2 +#define BUCK3_RAMP_3P125MV 0x3 +#define BUCK3_DVS_CTRL 0x10 +#define BUCK3_AD 0x08 +#define BUCK3_FPWM 0x04 +#define BUCK3_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK4_CTRL bits */ +#define BUCK4_AD 0x08 +#define BUCK4_FPWM 0x04 +#define 
BUCK4_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK5_CTRL bits */ +#define BUCK5_AD 0x08 +#define BUCK5_FPWM 0x04 +#define BUCK5_ENMODE_MASK 0x03 + +/* PCA9450_REG_BUCK6_CTRL bits */ +#define BUCK6_AD 0x08 +#define BUCK6_FPWM 0x04 +#define BUCK6_ENMODE_MASK 0x03 + +/* PCA9450_BUCK1OUT_DVS0 bits */ +#define BUCK1OUT_DVS0_MASK 0x7F +#define BUCK1OUT_DVS0_DEFAULT 0x14 + +/* PCA9450_BUCK1OUT_DVS1 bits */ +#define BUCK1OUT_DVS1_MASK 0x7F +#define BUCK1OUT_DVS1_DEFAULT 0x14 + +/* PCA9450_BUCK2OUT_DVS0 bits */ +#define BUCK2OUT_DVS0_MASK 0x7F +#define BUCK2OUT_DVS0_DEFAULT 0x14 + +/* PCA9450_BUCK2OUT_DVS1 bits */ +#define BUCK2OUT_DVS1_MASK 0x7F +#define BUCK2OUT_DVS1_DEFAULT 0x14 + +/* PCA9450_BUCK3OUT_DVS0 bits */ +#define BUCK3OUT_DVS0_MASK 0x7F +#define BUCK3OUT_DVS0_DEFAULT 0x14 + +/* PCA9450_BUCK3OUT_DVS1 bits */ +#define BUCK3OUT_DVS1_MASK 0x7F +#define BUCK3OUT_DVS1_DEFAULT 0x14 + +/* PCA9450_REG_BUCK4OUT bits */ +#define BUCK4OUT_MASK 0x7F +#define BUCK4OUT_DEFAULT 0x6C + +/* PCA9450_REG_BUCK5OUT bits */ +#define BUCK5OUT_MASK 0x7F +#define BUCK5OUT_DEFAULT 0x30 + +/* PCA9450_REG_BUCK6OUT bits */ +#define BUCK6OUT_MASK 0x7F +#define BUCK6OUT_DEFAULT 0x14 + +/* PCA9450_REG_LDO1_VOLT bits */ +#define LDO1_EN_MASK 0xC0 +#define LDO1OUT_MASK 0x07 + +/* PCA9450_REG_LDO2_VOLT bits */ +#define LDO2_EN_MASK 0xC0 +#define LDO2OUT_MASK 0x07 + +/* PCA9450_REG_LDO3_VOLT bits */ +#define LDO3_EN_MASK 0xC0 +#define LDO3OUT_MASK 0x0F + +/* PCA9450_REG_LDO4_VOLT bits */ +#define LDO4_EN_MASK 0xC0 +#define LDO4OUT_MASK 0x0F + +/* PCA9450_REG_LDO5_VOLT bits */ +#define LDO5L_EN_MASK 0xC0 +#define LDO5LOUT_MASK 0x0F + +#define LDO5H_EN_MASK 0xC0 +#define LDO5HOUT_MASK 0x0F + +/* PCA9450_REG_IRQ bits */ +#define IRQ_PWRON 0x80 +#define IRQ_WDOGB 0x40 +#define IRQ_RSVD 0x20 +#define IRQ_VR_FLT1 0x10 +#define IRQ_VR_FLT2 0x08 +#define IRQ_LOWVSYS 0x04 +#define IRQ_THERM_105 0x02 +#define IRQ_THERM_125 0x01 + +#endif /* __LINUX_REG_PCA9450_H__ */ diff --git a/include/linux/relay.h b/include/linux/relay.h index c759f96e39c1..e13a333e7c37 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -141,7 +141,7 @@ struct rchan_callbacks * cause relay_open() to create a single global buffer rather * than the default set of per-cpu buffers. * - * See Documentation/filesystems/relay.txt for more info. + * See Documentation/filesystems/relay.rst for more info. 
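Since the relay documentation reference was just updated, it is worth recalling the canonical callback it describes; this hedged sketch backs each per-cpu buffer with a debugfs file, the usual arrangement:

#include <linux/debugfs.h>
#include <linux/relay.h>

static struct dentry *foo_create_buf_file(const char *filename,
					  struct dentry *parent,
					  umode_t mode,
					  struct rchan_buf *buf,
					  int *is_global)
{
	/* Leave *is_global at 0 to keep the default per-cpu buffers. */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}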
*/ struct dentry *(*create_buf_file)(const char *filename, struct dentry *parent, diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 9c07d7958c53..2fa68bf5aa4f 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -38,6 +38,7 @@ #include <linux/types.h> #include <linux/mutex.h> #include <linux/virtio.h> +#include <linux/cdev.h> #include <linux/completion.h> #include <linux/idr.h> #include <linux/of.h> @@ -73,7 +74,7 @@ struct resource_table { u32 ver; u32 num; u32 reserved[2]; - u32 offset[0]; + u32 offset[]; } __packed; /** @@ -87,7 +88,7 @@ struct resource_table { */ struct fw_rsc_hdr { u32 type; - u8 data[0]; + u8 data[]; } __packed; /** @@ -306,7 +307,7 @@ struct fw_rsc_vdev { u8 status; u8 num_of_vrings; u8 reserved[2]; - struct fw_rsc_vdev_vring vring[0]; + struct fw_rsc_vdev_vring vring[]; } __packed; struct rproc; @@ -355,8 +356,11 @@ enum rsc_handling_status { /** * struct rproc_ops - platform-specific device handlers + * @prepare: prepare device for code loading + * @unprepare: unprepare device after stop * @start: power on the device and boot it * @stop: power off the device + * @attach: attach to a device that his already powered up * @kick: kick a virtqueue (virtqueue id given as a parameter) * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) @@ -373,8 +377,11 @@ enum rsc_handling_status { * panic at least the returned number of milliseconds */ struct rproc_ops { + int (*prepare)(struct rproc *rproc); + int (*unprepare)(struct rproc *rproc); int (*start)(struct rproc *rproc); int (*stop)(struct rproc *rproc); + int (*attach)(struct rproc *rproc); void (*kick)(struct rproc *rproc, int vqid); void * (*da_to_va)(struct rproc *rproc, u64 da, size_t len); int (*parse_fw)(struct rproc *rproc, const struct firmware *fw); @@ -396,6 +403,8 @@ struct rproc_ops { * @RPROC_RUNNING: device is up and running * @RPROC_CRASHED: device has crashed; need to start recovery * @RPROC_DELETED: device is deleted + * @RPROC_DETACHED: device has been booted by another entity and waiting + * for the core to attach to it * @RPROC_LAST: just keep this one at the end * * Please note that the values of these states are used as indices @@ -410,7 +419,8 @@ enum rproc_state { RPROC_RUNNING = 2, RPROC_CRASHED = 3, RPROC_DELETED = 4, - RPROC_LAST = 5, + RPROC_DETACHED = 5, + RPROC_LAST = 6, }; /** @@ -431,6 +441,20 @@ enum rproc_crash_type { }; /** + * enum rproc_dump_mechanism - Coredump options for core + * @RPROC_COREDUMP_DEFAULT: Copy dump to separate buffer and carry on with + recovery + * @RPROC_COREDUMP_INLINE: Read segments directly from device memory. 
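A hedged sketch of how a platform driver might populate the extended ops table (all foo_* callbacks are placeholders):

#include <linux/remoteproc.h>

static int foo_rproc_prepare(struct rproc *rproc)
{
	return 0;	/* e.g. enable clocks, deassert resets */
}

static int foo_rproc_unprepare(struct rproc *rproc)
{
	return 0;	/* undo prepare after stop */
}

static int foo_rproc_start(struct rproc *rproc) { return 0; }
static int foo_rproc_stop(struct rproc *rproc) { return 0; }

static int foo_rproc_attach(struct rproc *rproc)
{
	return 0;	/* core was already booted by another entity */
}

static void foo_rproc_kick(struct rproc *rproc, int vqid) { }

static const struct rproc_ops foo_rproc_ops = {
	.prepare	= foo_rproc_prepare,
	.unprepare	= foo_rproc_unprepare,
	.start		= foo_rproc_start,
	.stop		= foo_rproc_stop,
	.attach		= foo_rproc_attach,
	.kick		= foo_rproc_kick,
};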
Stall + recovery until all segments are read + * @RPROC_COREDUMP_DISABLED: Don't perform any dump + */ +enum rproc_dump_mechanism { + RPROC_COREDUMP_DEFAULT, + RPROC_COREDUMP_INLINE, + RPROC_COREDUMP_DISABLED, +}; + +/** * struct rproc_dump_segment - segment info from ELF header * @node: list node related to the rproc segment list * @da: device address of the segment @@ -447,7 +471,7 @@ struct rproc_dump_segment { void *priv; void (*dump)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest); + void *dest, size_t offset, size_t size); loff_t offset; }; @@ -462,6 +486,7 @@ struct rproc_dump_segment { * @dev: virtual device for refcounting and common remoteproc behavior * @power: refcount of users who need this rproc powered up * @state: state of the device + * @dump_conf: Currently selected coredump configuration * @lock: lock which protects concurrent manipulations of the rproc * @dbg_dir: debugfs directory of this rproc device * @traces: list of trace buffers @@ -482,19 +507,23 @@ struct rproc_dump_segment { * @table_sz: size of @cached_table * @has_iommu: flag to indicate if remote processor is behind an MMU * @auto_boot: flag to indicate if remote processor should be auto-started + * @autonomous: true if an external entity has booted the remote processor * @dump_segments: list of segments in the firmware * @nb_vdev: number of vdev currently handled by rproc + * @char_dev: character device of the rproc + * @cdev_put_on_release: flag to indicate if remoteproc should be shutdown on @char_dev release */ struct rproc { struct list_head node; struct iommu_domain *domain; const char *name; - char *firmware; + const char *firmware; void *priv; struct rproc_ops *ops; struct device dev; atomic_t power; unsigned int state; + enum rproc_dump_mechanism dump_conf; struct mutex lock; struct dentry *dbg_dir; struct list_head traces; @@ -515,9 +544,13 @@ struct rproc { size_t table_sz; bool has_iommu; bool auto_boot; + bool autonomous; struct list_head dump_segments; int nb_vdev; u8 elf_class; + u16 elf_machine; + struct cdev cdev; + bool cdev_put_on_release; }; /** @@ -598,6 +631,12 @@ void rproc_put(struct rproc *rproc); int rproc_add(struct rproc *rproc); int rproc_del(struct rproc *rproc); void rproc_free(struct rproc *rproc); +void rproc_resource_cleanup(struct rproc *rproc); + +struct rproc *devm_rproc_alloc(struct device *dev, const char *name, + const struct rproc_ops *ops, + const char *firmware, int len); +int devm_rproc_add(struct device *dev, struct rproc *rproc); void rproc_add_carveout(struct rproc *rproc, struct rproc_mem_entry *mem); @@ -620,8 +659,10 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, dma_addr_t da, size_t size, void (*dumpfn)(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest), + void *dest, size_t offset, + size_t size), void *priv); +int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine); static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) { diff --git a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h b/include/linux/remoteproc/qcom_q6v5_ipa_notify.h deleted file mode 100644 index 0820edc0ab7d..000000000000 --- a/include/linux/remoteproc/qcom_q6v5_ipa_notify.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -/* Copyright (C) 2019 Linaro Ltd. 
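The devres variants shrink a typical probe() to the hedged sketch below (struct foo_priv, the firmware name and the ELF identity are illustrative; foo_rproc_ops is the table sketched earlier):

#include <linux/elf.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>

struct foo_priv {
	void __iomem *base;	/* invented private state */
};

static int foo_rproc_probe(struct platform_device *pdev)
{
	struct rproc *rproc;

	/* Lifetime is devres-managed; no explicit rproc_free(). */
	rproc = devm_rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
				 &foo_rproc_ops, "foo-fw.elf",
				 sizeof(struct foo_priv));
	if (!rproc)
		return -ENOMEM;

	/* The coredump code no longer guesses the ELF class/machine. */
	rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_ARM);

	return devm_rproc_add(&pdev->dev, rproc);
}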
*/ - -#ifndef __QCOM_Q6V5_IPA_NOTIFY_H__ -#define __QCOM_Q6V5_IPA_NOTIFY_H__ - -#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) - -#include <linux/remoteproc.h> - -enum qcom_rproc_event { - MODEM_STARTING = 0, /* Modem is about to be started */ - MODEM_RUNNING = 1, /* Startup complete; modem is operational */ - MODEM_STOPPING = 2, /* Modem is about to shut down */ - MODEM_CRASHED = 3, /* Modem has crashed (implies stopping) */ - MODEM_OFFLINE = 4, /* Modem is now offline */ - MODEM_REMOVING = 5, /* Modem is about to be removed */ -}; - -typedef void (*qcom_ipa_notify_t)(void *data, enum qcom_rproc_event event); - -struct qcom_rproc_ipa_notify { - struct rproc_subdev subdev; - - qcom_ipa_notify_t notify; - void *data; -}; - -/** - * qcom_add_ipa_notify_subdev() - Register IPA notification subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * Register the @ipa_notify subdevice with the @rproc so modem events - * can be sent to IPA when they occur. - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_add_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_remove_ipa_notify_subdev() - Remove IPA SSR subdevice - * @rproc: rproc handle - * @ipa_notify: IPA notification subdevice handle - * - * This is defined in "qcom_q6v5_ipa_notify.c". - */ -void qcom_remove_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify); - -/** - * qcom_register_ipa_notify() - Register IPA notification function - * @rproc: Remote processor handle - * @notify: Non-null IPA notification callback function pointer - * @data: Data supplied to IPA notification callback function - * - * @Return: 0 if successful, or a negative error code otherwise - * - * This is defined in "qcom_q6v5_mss.c". - */ -int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, - void *data); -/** - * qcom_deregister_ipa_notify() - Deregister IPA notification function - * @rproc: Remote processor handle - * - * This is defined in "qcom_q6v5_mss.c". - */ -void qcom_deregister_ipa_notify(struct rproc *rproc); - -#else /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -struct qcom_rproc_ipa_notify { /* empty */ }; - -#define qcom_add_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ -#define qcom_remove_ipa_notify_subdev(rproc, ipa_notify) /* no-op */ - -#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - -#endif /* !__QCOM_Q6V5_IPA_NOTIFY_H__ */ diff --git a/include/linux/remoteproc/qcom_rproc.h b/include/linux/remoteproc/qcom_rproc.h index fa8e38681b4b..647051662174 100644 --- a/include/linux/remoteproc/qcom_rproc.h +++ b/include/linux/remoteproc/qcom_rproc.h @@ -5,17 +5,43 @@ struct notifier_block; #if IS_ENABLED(CONFIG_QCOM_RPROC_COMMON) -int qcom_register_ssr_notifier(struct notifier_block *nb); -void qcom_unregister_ssr_notifier(struct notifier_block *nb); +/** + * enum qcom_ssr_notify_type - Startup/Shutdown events related to a remoteproc + * processor. 
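Against the reworked interface, a hedged consumer sketch ("mpss" is an example remoteproc name; the ERR_PTR return convention on failure is assumed here):

#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/remoteproc/qcom_rproc.h>

static int foo_ssr_cb(struct notifier_block *nb, unsigned long event,
		      void *data)
{
	struct qcom_ssr_notify_data *ssr = data;

	if (event == QCOM_SSR_AFTER_SHUTDOWN)
		pr_info("%s went down%s\n", ssr->name,
			ssr->crashed ? " (crashed)" : "");
	return NOTIFY_OK;
}

static struct notifier_block foo_ssr_nb = {
	.notifier_call = foo_ssr_cb,
};

static void *foo_ssr_handle;

static int foo_watch_modem(void)
{
	/* Registration is now per-name and returns a handle ... */
	foo_ssr_handle = qcom_register_ssr_notifier("mpss", &foo_ssr_nb);
	return PTR_ERR_OR_ZERO(foo_ssr_handle);
}

static void foo_unwatch_modem(void)
{
	/* ... which must be handed back on unregistration. */
	qcom_unregister_ssr_notifier(foo_ssr_handle, &foo_ssr_nb);
}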
+ * + * @QCOM_SSR_BEFORE_POWERUP: Remoteproc about to start (prepare stage) + * @QCOM_SSR_AFTER_POWERUP: Remoteproc is running (start stage) + * @QCOM_SSR_BEFORE_SHUTDOWN: Remoteproc crashed or shutting down (stop stage) + * @QCOM_SSR_AFTER_SHUTDOWN: Remoteproc is down (unprepare stage) + */ +enum qcom_ssr_notify_type { + QCOM_SSR_BEFORE_POWERUP, + QCOM_SSR_AFTER_POWERUP, + QCOM_SSR_BEFORE_SHUTDOWN, + QCOM_SSR_AFTER_SHUTDOWN, +}; + +struct qcom_ssr_notify_data { + const char *name; + bool crashed; +}; + +void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb); +int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb); #else -static inline int qcom_register_ssr_notifier(struct notifier_block *nb) +static inline void *qcom_register_ssr_notifier(const char *name, + struct notifier_block *nb) { - return 0; + return NULL; } -static inline void qcom_unregister_ssr_notifier(struct notifier_block *nb) {} +static inline int qcom_unregister_ssr_notifier(void *notify, + struct notifier_block *nb) +{ + return 0; +} #endif diff --git a/include/linux/reset/reset-simple.h b/include/linux/reset/reset-simple.h new file mode 100644 index 000000000000..c3e44f45b0f7 --- /dev/null +++ b/include/linux/reset/reset-simple.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Simple Reset Controller ops + * + * Based on Allwinner SoCs Reset Controller driver + * + * Copyright 2013 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + */ + +#ifndef __RESET_SIMPLE_H__ +#define __RESET_SIMPLE_H__ + +#include <linux/io.h> +#include <linux/reset-controller.h> +#include <linux/spinlock.h> + +/** + * struct reset_simple_data - driver data for simple reset controllers + * @lock: spinlock to protect registers during read-modify-write cycles + * @membase: memory mapped I/O register range + * @rcdev: reset controller device base structure + * @active_low: if true, bits are cleared to assert the reset. Otherwise, bits + * are set to assert the reset. Note that this says nothing about + * the voltage level of the actual reset line. + * @status_active_low: if true, bits read back as cleared while the reset is + * asserted. Otherwise, bits read back as set while the + * reset is asserted. + * @reset_us: Minimum delay in microseconds needed that needs to be + * waited for between an assert and a deassert to reset the + * device. If multiple consumers with different delay + * requirements are connected to this controller, it must + * be the largest minimum delay. 0 means that such a delay is + * unknown and the reset operation is unsupported. + */ +struct reset_simple_data { + spinlock_t lock; + void __iomem *membase; + struct reset_controller_dev rcdev; + bool active_low; + bool status_active_low; + unsigned int reset_us; +}; + +extern const struct reset_control_ops reset_simple_ops; + +#endif /* __RESET_SIMPLE_H__ */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 70ebef866cc8..68dab3e08aad 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -33,7 +33,7 @@ * of two or more hash tables when the rhashtable is being resized. * The end of the chain is marked with a special nulls marks which has * the least significant bit set but otherwise stores the address of - * the hash bucket. This allows us to be be sure we've found the end + * the hash bucket. This allows us to be sure we've found the end * of the right list. * The value stored in the hash bucket has BIT(0) used as a lock bit. 
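Now that the header is public, a minimal platform driver can reuse reset_simple_ops as in this hedged sketch (the device specifics are invented):

#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset/reset-simple.h>

static int foo_reset_probe(struct platform_device *pdev)
{
	struct reset_simple_data *data;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->membase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(data->membase))
		return PTR_ERR(data->membase);

	spin_lock_init(&data->lock);
	data->rcdev.owner = THIS_MODULE;
	data->rcdev.nr_resets = 32;		/* one 32-bit register */
	data->rcdev.ops = &reset_simple_ops;
	data->rcdev.of_node = pdev->dev.of_node;
	data->active_low = true;		/* per hypothetical datasheet */

	return devm_reset_controller_register(&pdev->dev, &data->rcdev);
}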
* This bit must be atomically set before any changes are made to @@ -84,7 +84,7 @@ struct bucket_table { struct lockdep_map dep_map; - struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp; + struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; /* @@ -261,13 +261,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht, void *arg); void rhashtable_destroy(struct rhashtable *ht); -struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, - unsigned int hash); -struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, - struct bucket_table *tbl, - unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **__rht_bucket_nested( + const struct bucket_table *tbl, unsigned int hash); +struct rhash_lock_head __rcu **rht_bucket_nested_insert( + struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash); #define rht_dereference(p, ht) \ rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) @@ -284,21 +283,21 @@ struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, #define rht_entry(tpos, pos, member) \ ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) -static inline struct rhash_lock_head *const *rht_bucket( +static inline struct rhash_lock_head __rcu *const *rht_bucket( const struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_var( +static inline struct rhash_lock_head __rcu **rht_bucket_var( struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : &tbl->buckets[hash]; } -static inline struct rhash_lock_head **rht_bucket_insert( +static inline struct rhash_lock_head __rcu **rht_bucket_insert( struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) { return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : @@ -325,7 +324,7 @@ static inline struct rhash_lock_head **rht_bucket_insert( */ static inline void rht_lock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { local_bh_disable(); bit_spin_lock(0, (unsigned long *)bkt); @@ -333,7 +332,7 @@ static inline void rht_lock(struct bucket_table *tbl, } static inline void rht_lock_nested(struct bucket_table *tbl, - struct rhash_lock_head **bucket, + struct rhash_lock_head __rcu **bucket, unsigned int subclass) { local_bh_disable(); @@ -342,18 +341,18 @@ static inline void rht_lock_nested(struct bucket_table *tbl, } static inline void rht_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt) + struct rhash_lock_head __rcu **bkt) { lock_map_release(&tbl->dep_map); bit_spin_unlock(0, (unsigned long *)bkt); local_bh_enable(); } -static inline struct rhash_head __rcu *__rht_ptr( - struct rhash_lock_head *const *bkt) +static inline struct rhash_head *__rht_ptr( + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt) { - return (struct rhash_head __rcu *) - ((unsigned long)*bkt & ~BIT(0) ?: + return (struct rhash_head *) + ((unsigned long)p & ~BIT(0) ?: (unsigned long)RHT_NULLS_MARKER(bkt)); } @@ -365,47 +364,41 @@ static inline struct rhash_head __rcu *__rht_ptr( * access is guaranteed, such as when destroying the table. 
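A hedged sketch of how the annotated accessors compose in a bucket walk, loosely modelled on the insert/remove slow paths (for orientation only, not a supported external API):

static void foo_walk_bucket(struct bucket_table *tbl, unsigned int hash)
{
	struct rhash_lock_head __rcu **bkt = rht_bucket_var(tbl, hash);
	struct rhash_head *he;

	rht_lock(tbl, bkt);
	/* rht_ptr() strips the lock bit and yields a plain pointer. */
	for (he = rht_ptr(bkt, tbl, hash); !rht_is_a_nulls(he);
	     he = rht_dereference_bucket(he->next, tbl, hash))
		;	/* inspect the entry here */
	rht_unlock(tbl, bkt);
}

With the __rcu annotations in place, sparse can now flag any dereference of a bucket head that bypasses the RCU/bucket-lock accessors.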
*/ static inline struct rhash_head *rht_ptr_rcu( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - struct rhash_head __rcu *p = __rht_ptr(bkt); - - return rcu_dereference(p); + return __rht_ptr(rcu_dereference(*bkt), bkt); } static inline struct rhash_head *rht_ptr( - struct rhash_lock_head *const *bkt, + struct rhash_lock_head __rcu *const *bkt, struct bucket_table *tbl, unsigned int hash) { - return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash); + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt); } static inline struct rhash_head *rht_ptr_exclusive( - struct rhash_lock_head *const *bkt) + struct rhash_lock_head __rcu *const *bkt) { - return rcu_dereference_protected(__rht_ptr(bkt), 1); + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt); } -static inline void rht_assign_locked(struct rhash_lock_head **bkt, +static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; - rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0))); + rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0))); } static inline void rht_assign_unlock(struct bucket_table *tbl, - struct rhash_lock_head **bkt, + struct rhash_lock_head __rcu **bkt, struct rhash_head *obj) { - struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; - if (rht_is_a_nulls(obj)) obj = NULL; lock_map_release(&tbl->dep_map); - rcu_assign_pointer(*p, obj); + rcu_assign_pointer(*bkt, (void *)obj); preempt_enable(); __release(bitlock); local_bh_enable(); @@ -593,7 +586,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; - struct rhash_lock_head *const *bkt; + struct rhash_lock_head __rcu *const *bkt; struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -709,7 +702,7 @@ static inline void *__rhashtable_insert_fast( .ht = ht, .key = key, }; - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct bucket_table *tbl; struct rhash_head *head; @@ -995,7 +988,7 @@ static inline int __rhashtable_remove_fast_one( struct rhash_head *obj, const struct rhashtable_params params, bool rhlist) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; @@ -1147,7 +1140,7 @@ static inline int __rhashtable_replace_fast( struct rhash_head *obj_old, struct rhash_head *obj_new, const struct rhashtable_params params) { - struct rhash_lock_head **bkt; + struct rhash_lock_head __rcu **bkt; struct rhash_head __rcu **pprev; struct rhash_head *he; unsigned int hash; diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index c76b2f3b3ac4..136ea0997e6d 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -143,6 +143,7 @@ bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter); unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu); void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu); +void ring_buffer_reset_online_cpus(struct trace_buffer *buffer); void ring_buffer_reset(struct trace_buffer *buffer); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 988d176472df..3a6adfa70fb0 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -77,7 +77,7 @@ struct anon_vma { struct anon_vma_chain { struct vm_area_struct *vma; struct anon_vma 
*anon_vma; - struct list_head same_vma; /* locked by mmap_sem & page_table_lock */ + struct list_head same_vma; /* locked by mmap_lock & page_table_lock */ struct rb_node rb; /* locked by anon_vma->rwsem */ unsigned long rb_subtree_last; #ifdef CONFIG_DEBUG_VM_RB diff --git a/include/linux/rpmsg/qcom_glink.h b/include/linux/rpmsg/qcom_glink.h index 96e26d94719f..daded9fddf36 100644 --- a/include/linux/rpmsg/qcom_glink.h +++ b/include/linux/rpmsg/qcom_glink.h @@ -12,6 +12,7 @@ struct qcom_glink; struct qcom_glink *qcom_glink_smem_register(struct device *parent, struct device_node *node); void qcom_glink_smem_unregister(struct qcom_glink *glink); +void qcom_glink_ssr_notify(const char *ssr_name); #else @@ -23,7 +24,7 @@ qcom_glink_smem_register(struct device *parent, } static inline void qcom_glink_smem_unregister(struct qcom_glink *glink) {} - +static inline void qcom_glink_ssr_notify(const char *ssr_name) {} #endif #endif diff --git a/include/linux/rtc.h b/include/linux/rtc.h index bba3db3f7efa..22d1575e4991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -55,10 +55,6 @@ extern struct class *rtc_class; * * The (current) exceptions are mostly filesystem hooks: * - the proc() hook for procfs - * - non-ioctl() chardev hooks: open(), release() - * - * REVISIT those periodic irq calls *do* have ops_lock when they're - * issued through ioctl() ... */ struct rtc_class_ops { int (*ioctl)(struct device *, unsigned int, unsigned long); diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 65b8142a7fed..745f5e73f99a 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -99,18 +99,6 @@ #define rtsx_pci_readb(pcr, reg) \ ioread8((pcr)->remap_addr + reg) -#define rtsx_pci_read_config_byte(pcr, where, val) \ - pci_read_config_byte((pcr)->pci, where, val) - -#define rtsx_pci_write_config_byte(pcr, where, val) \ - pci_write_config_byte((pcr)->pci, where, val) - -#define rtsx_pci_read_config_dword(pcr, where, val) \ - pci_read_config_dword((pcr)->pci, where, val) - -#define rtsx_pci_write_config_dword(pcr, where, val) \ - pci_write_config_dword((pcr)->pci, where, val) - #define STATE_TRANS_NONE 0 #define STATE_TRANS_CMD 1 #define STATE_TRANS_BUF 2 @@ -305,6 +293,8 @@ #define SD30_CLK_STOP_CFG0 0x01 #define REG_PRE_RW_MODE 0xFD70 #define EN_INFINITE_MODE 0x01 +#define REG_CRC_DUMMY_0 0xFD71 +#define CFG_SD_POW_AUTO_PD (1<<0) #define SRCTL 0xFC13 @@ -599,6 +589,7 @@ #define ASPM_FORCE_CTL 0xFE57 #define FORCE_ASPM_CTL0 0x10 +#define FORCE_ASPM_CTL1 0x20 #define FORCE_ASPM_VAL_MASK 0x03 #define FORCE_ASPM_L1_EN 0x02 #define FORCE_ASPM_L0_EN 0x01 @@ -667,6 +658,11 @@ #define PM_WAKE_EN 0x01 #define PM_CTRL4 0xFF47 +#define REG_CFG_OOBS_OFF_TIMER 0xFEA6 +#define REG_CFG_OOBS_ON_TIMER 0xFEA7 +#define REG_CFG_VCM_ON_TIMER 0xFEA8 +#define REG_CFG_OOBS_POLLING 0xFEA9 + /* Memory mapping */ #define SRAM_BASE 0xE600 #define RBUF_BASE 0xF400 @@ -1041,10 +1037,6 @@ #define PHY_DIG1E_RX_EN_KEEP 0x0001 #define PHY_DUM_REG 0x1F -#define PCR_ASPM_SETTING_REG1 0x160 -#define PCR_ASPM_SETTING_REG2 0x168 -#define PCR_ASPM_SETTING_5260 0x178 - #define PCR_SETTING_REG1 0x724 #define PCR_SETTING_REG2 0x814 #define PCR_SETTING_REG3 0x747 @@ -1080,11 +1072,7 @@ struct pcr_ops { void (*stop_cmd)(struct rtsx_pcr *pcr); void (*set_aspm)(struct rtsx_pcr *pcr, bool enable); - int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency); - int (*set_l1off_sub)(struct rtsx_pcr *pcr, u8 val); void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active); - void (*full_on)(struct rtsx_pcr 
*pcr); - void (*power_saving)(struct rtsx_pcr *pcr); void (*enable_ocp)(struct rtsx_pcr *pcr); void (*disable_ocp)(struct rtsx_pcr *pcr); void (*init_ocp)(struct rtsx_pcr *pcr); @@ -1095,11 +1083,6 @@ struct pcr_ops { enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; -#define ASPM_L1_1_EN_MASK BIT(3) -#define ASPM_L1_2_EN_MASK BIT(2) -#define PM_L1_1_EN_MASK BIT(1) -#define PM_L1_2_EN_MASK BIT(0) - #define ASPM_L1_1_EN BIT(0) #define ASPM_L1_2_EN BIT(1) #define PM_L1_1_EN BIT(2) @@ -1108,13 +1091,6 @@ enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; #define L1_SNOOZE_TEST_EN BIT(5) #define LTR_L1SS_PWR_GATE_CHECK_CARD_EN BIT(6) -enum dev_aspm_mode { - DEV_ASPM_DYNAMIC, - DEV_ASPM_BACKDOOR, - DEV_ASPM_STATIC, - DEV_ASPM_DISABLE, -}; - /* * struct rtsx_cr_option - card reader option * @dev_flags: device flags @@ -1125,7 +1101,6 @@ enum dev_aspm_mode { * @ltr_active_latency: ltr mode active latency * @ltr_idle_latency: ltr mode idle latency * @ltr_l1off_latency: ltr mode l1off latency - * @dev_aspm_mode: device aspm mode * @l1_snooze_delay: l1 snooze delay * @ltr_l1off_sspwrgate: ltr l1off sspwrgate * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate @@ -1142,7 +1117,6 @@ struct rtsx_cr_option { u32 ltr_active_latency; u32 ltr_idle_latency; u32 ltr_l1off_latency; - enum dev_aspm_mode dev_aspm_mode; u32 l1_snooze_delay; u8 ltr_l1off_sspwrgate; u8 ltr_l1off_snooze_sspwrgate; @@ -1171,7 +1145,6 @@ struct rtsx_hw_param { struct rtsx_pcr { struct pci_dev *pci; unsigned int id; - int pcie_cap; struct rtsx_cr_option option; struct rtsx_hw_param hw_param; @@ -1217,6 +1190,7 @@ struct rtsx_pcr { #define EXTRA_CAPS_MMC_HSDDR (1 << 3) #define EXTRA_CAPS_MMC_HS200 (1 << 4) #define EXTRA_CAPS_MMC_8BIT (1 << 5) +#define EXTRA_CAPS_NO_MMC (1 << 7) u32 extra_caps; #define IC_VER_A 0 @@ -1255,6 +1229,7 @@ struct rtsx_pcr { u8 dma_error_count; u8 ocp_stat; u8 ocp_stat2; + u8 rtd3_en; }; #define PID_524A 0x524A @@ -1263,6 +1238,7 @@ struct rtsx_pcr { #define PID_525A 0x525A #define PID_5260 0x5260 #define PID_5261 0x5261 +#define PID_5228 0x5228 #define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) #define PCI_VID(pcr) ((pcr)->pci->vendor) @@ -1320,18 +1296,6 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr) return (u8 *)(pcr->host_cmds_ptr); } -static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr, - u8 mask, u8 append) -{ - int err; - u8 val; - - err = pci_read_config_byte(pcr->pci, addr, &val); - if (err < 0) - return err; - return pci_write_config_byte(pcr->pci, addr, (val & mask) | append); -} - static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val) { rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24); diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 7e5b2a4eb560..25e3fde85617 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -60,39 +60,39 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) } #define RWSEM_UNLOCKED_VALUE 0L -#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) +#define __RWSEM_COUNT_INIT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE) /* Common initializer macros and functions */ #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __RWSEM_DEP_MAP_INIT(lockname) \ - , .dep_map = { \ + .dep_map = { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_SLEEP, \ - } + }, #else # define __RWSEM_DEP_MAP_INIT(lockname) #endif #ifdef CONFIG_DEBUG_RWSEMS -# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname +# define __RWSEM_DEBUG_INIT(lockname) 
.magic = &lockname, #else -# define __DEBUG_RWSEM_INITIALIZER(lockname) +# define __RWSEM_DEBUG_INIT(lockname) #endif #ifdef CONFIG_RWSEM_SPIN_ON_OWNER -#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED +#define __RWSEM_OPT_INIT(lockname) .osq = OSQ_LOCK_UNLOCKED, #else #define __RWSEM_OPT_INIT(lockname) #endif #define __RWSEM_INITIALIZER(name) \ - { __RWSEM_INIT_COUNT(name), \ + { __RWSEM_COUNT_INIT(name), \ .owner = ATOMIC_LONG_INIT(0), \ - .wait_list = LIST_HEAD_INIT((name).wait_list), \ - .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ __RWSEM_OPT_INIT(name) \ - __DEBUG_RWSEM_INITIALIZER(name) \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\ + .wait_list = LIST_HEAD_INIT((name).wait_list), \ + __RWSEM_DEBUG_INIT(name) \ __RWSEM_DEP_MAP_INIT(name) } #define DECLARE_RWSEM(name) \ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 6eec50fb36c8..45cf7b69d852 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -151,6 +151,20 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, #define for_each_sg(sglist, sg, nr, __i) \ for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg)) +/* + * Loop over each sg element in the given sg_table object. + */ +#define for_each_sgtable_sg(sgt, sg, i) \ + for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i) + +/* + * Loop over each sg element in the given *DMA mapped* sg_table object. + * Please use sg_dma_address(sg) and sg_dma_len(sg) to extract DMA addresses + * of the each element. + */ +#define for_each_sgtable_dma_sg(sgt, sg, i) \ + for_each_sg((sgt)->sgl, sg, (sgt)->nents, i) + /** * sg_chain - Chain two sglists together * @prv: First scatterlist @@ -401,9 +415,10 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) * @sglist: sglist to iterate over * @piter: page iterator to hold current page, sg, sg_pgoffset * @nents: maximum number of sg entries to iterate over - * @pgoffset: starting page offset + * @pgoffset: starting page offset (in pages) * * Callers may use sg_page_iter_page() to get each page pointer. + * In each loop it operates on PAGE_SIZE unit. */ #define for_each_sg_page(sglist, piter, nents, pgoffset) \ for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ @@ -412,18 +427,47 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) /** * for_each_sg_dma_page - iterate over the pages of the given sg list * @sglist: sglist to iterate over - * @dma_iter: page iterator to hold current page + * @dma_iter: DMA page iterator to hold current page * @dma_nents: maximum number of sg entries to iterate over, this is the value * returned from dma_map_sg - * @pgoffset: starting page offset + * @pgoffset: starting page offset (in pages) * * Callers may use sg_page_iter_dma_address() to get each page's DMA address. + * In each loop it operates on PAGE_SIZE unit. */ #define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \ for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \ pgoffset); \ __sg_page_iter_dma_next(dma_iter);) +/** + * for_each_sgtable_page - iterate over all pages in the sg_table object + * @sgt: sg_table object to iterate over + * @piter: page iterator to hold current page + * @pgoffset: starting page offset (in pages) + * + * Iterates over the all memory pages in the buffer described by + * a scatterlist stored in the given sg_table object. + * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit. 
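The payoff of the sg_table iterators is that the orig_nents-vs-nents distinction can no longer be picked wrongly by hand; a hedged sketch:

#include <linux/device.h>
#include <linux/scatterlist.h>

static void foo_dump_sgt(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i;

	/* Every entry as built by the CPU (implicitly orig_nents). */
	for_each_sgtable_sg(sgt, sg, i)
		dev_info(dev, "cpu seg %d: len %u\n", i, sg->length);

	/* Only DMA-mapped entries (implicitly nents from dma_map_sg()). */
	for_each_sgtable_dma_sg(sgt, sg, i)
		dev_info(dev, "dma seg %d: %pad +%u\n", i,
			 &sg->dma_address, sg_dma_len(sg));
}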
+ */ +#define for_each_sgtable_page(sgt, piter, pgoffset) \ + for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset) + +/** + * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object + * @sgt: sg_table object to iterate over + * @dma_iter: DMA page iterator to hold current page + * @pgoffset: starting page offset (in pages) + * + * Iterates over all the DMA mapped pages in the buffer described by + * a scatterlist stored in the given sg_table object. + * See also for_each_sg_dma_page(). Each iteration operates on a PAGE_SIZE + * unit. + */ +#define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset) \ + for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset) + + /* * Mapping sg iterator * diff --git a/include/linux/sched.h b/include/linux/sched.h index 4418f5cb8324..afe01e232935 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -18,6 +18,7 @@ #include <linux/mutex.h> #include <linux/plist.h> #include <linux/hrtimer.h> +#include <linux/irqflags.h> #include <linux/seccomp.h> #include <linux/nodemask.h> #include <linux/rcupdate.h> @@ -31,6 +32,8 @@ #include <linux/task_io_accounting.h> #include <linux/posix-timers.h> #include <linux/rseq.h> +#include <linux/seqlock.h> +#include <linux/kcsan.h> /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; @@ -113,10 +116,6 @@ struct task_group; #define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ - (task->state & TASK_NOLOAD) == 0) - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP /* @@ -157,24 +156,24 @@ struct task_group; * * for (;;) { * set_current_state(TASK_UNINTERRUPTIBLE); - * if (!need_sleep) - * break; + * if (CONDITION) + * break; * * schedule(); * } * __set_current_state(TASK_RUNNING); * * If the caller does not need such serialisation (because, for instance, the - * condition test and condition change and wakeup are under the same lock) then + * CONDITION test and condition change and wakeup are under the same lock) then * use __set_current_state(). * * The above is typically ordered against the wakeup, which does: * - * need_sleep = false; + * CONDITION = 1; * wake_up_state(p, TASK_UNINTERRUPTIBLE); * - * where wake_up_state() executes a full memory barrier before accessing the - * task state. + * where wake_up_state()/try_to_wake_up() executes a full memory barrier before + * accessing p->state. * * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is, * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a @@ -377,7 +376,7 @@ struct util_est { * For cfs_rq, they are the aggregated values of all runnable and blocked * sched_entities. * - * The load/runnable/util_avg doesn't direcly factor frequency scaling and CPU + * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU * capacity scaling. The scaling is done through the rq_clock_pelt that is used * for computing those signals (see update_rq_clock_pelt()) * @@ -613,7 +612,7 @@ union rcu_special { u8 blocked; u8 need_qs; u8 exp_hint; /* Hint for performance. */ - u8 deferred_qs; + u8 need_mb; /* Readers need smp_mb(). */ } b; /* Bits. */ u32 s; /* Set of bits.
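(one word overlaying the bitfields above, so the whole set can be read or updated together)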
*/ }; @@ -653,8 +652,8 @@ struct task_struct { unsigned int ptrace; #ifdef CONFIG_SMP - struct llist_node wake_entry; int on_cpu; + struct __call_single_node wake_entry; #ifdef CONFIG_THREAD_INFO_IN_TASK /* Current CPU: */ unsigned int cpu; @@ -689,9 +688,15 @@ struct task_struct { struct sched_dl_entity dl; #ifdef CONFIG_UCLAMP_TASK - /* Clamp values requested for a scheduling entity */ + /* + * Clamp values requested for a scheduling entity. + * Must be updated with task_rq_lock() held. + */ struct uclamp_se uclamp_req[UCLAMP_CNT]; - /* Effective clamp values used for a scheduling entity */ + /* + * Effective clamp values used for a scheduling entity. + * Must be updated with task_rq_lock() held. + */ struct uclamp_se uclamp[UCLAMP_CNT]; #endif @@ -724,6 +729,14 @@ struct task_struct { struct list_head rcu_tasks_holdout_list; #endif /* #ifdef CONFIG_TASKS_RCU */ +#ifdef CONFIG_TASKS_TRACE_RCU + int trc_reader_nesting; + int trc_ipi_to_cpu; + union rcu_special trc_reader_special; + bool trc_reader_checked; + struct list_head trc_holdout_list; +#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ + struct sched_info sched_info; struct list_head tasks; @@ -877,6 +890,10 @@ struct task_struct { /* Empty if CONFIG_POSIX_CPUTIMERS=n */ struct posix_cputimers posix_cputimers; +#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK + struct posix_cputimers_work posix_cputimers_work; +#endif + /* Process credentials: */ /* Tracer's credentials at attach: */ @@ -975,18 +992,9 @@ struct task_struct { #endif #ifdef CONFIG_TRACE_IRQFLAGS - unsigned int irq_events; + struct irqtrace_events irqtrace; unsigned int hardirq_threaded; - unsigned long hardirq_enable_ip; - unsigned long hardirq_disable_ip; - unsigned int hardirq_enable_event; - unsigned int hardirq_disable_event; - int hardirqs_enabled; - int hardirq_context; - unsigned long softirq_disable_ip; - unsigned long softirq_enable_ip; - unsigned int softirq_disable_event; - unsigned int softirq_enable_event; + u64 hardirq_chain_key; int softirqs_enabled; int softirq_context; int irq_config; @@ -1046,7 +1054,7 @@ struct task_struct { /* Protected by ->alloc_lock: */ nodemask_t mems_allowed; /* Sequence number to catch updates: */ - seqcount_t mems_allowed_seq; + seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; int cpuset_slab_spread_rotor; #endif @@ -1188,6 +1196,13 @@ struct task_struct { unsigned int kasan_depth; #endif +#ifdef CONFIG_KCSAN + struct kcsan_ctx kcsan_ctx; +#ifdef CONFIG_TRACE_IRQFLAGS + struct irqtrace_events kcsan_save_irqtrace; +#endif +#endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack: */ int curr_ret_stack; @@ -1237,6 +1252,9 @@ struct task_struct { /* KCOV sequence number: */ int kcov_sequence; + + /* Collect coverage from softirq context: */ + unsigned int kcov_softirq; #endif #ifdef CONFIG_MEMCG @@ -1289,6 +1307,14 @@ struct task_struct { unsigned long prev_lowest_stack; #endif +#ifdef CONFIG_X86_MCE + u64 mce_addr; + __u64 mce_ripv : 1, + mce_whole_page : 1, + __mce_reserved : 62; + struct callback_head mce_kill_me; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct.
@@ -1481,11 +1507,11 @@ extern struct pid *cad_pid; #define PF_KSWAPD 0x00020000 /* I am kswapd */ #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ -#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ +#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to, + * I am cleaning dirty pages from some other bdi. */ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ -#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */ #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation requests will have __GFP_MOVABLE cleared */ @@ -1627,6 +1653,9 @@ extern int idle_cpu(int cpu); extern int available_idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); +extern void sched_set_fifo(struct task_struct *p); +extern void sched_set_fifo_low(struct task_struct *p); +extern void sched_set_normal(struct task_struct *p, int nice); extern int sched_setattr(struct task_struct *, const struct sched_attr *); extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); extern struct task_struct *idle_task(int cpu); @@ -1637,7 +1666,7 @@ extern struct task_struct *idle_task(int cpu); * * Return: 1 if @p is an idle task. 0 otherwise. */ -static inline bool is_idle_task(const struct task_struct *p) +static __always_inline bool is_idle_task(const struct task_struct *p) { return !!(p->flags & PF_IDLE); } @@ -1715,7 +1744,15 @@ extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); }) #ifdef CONFIG_SMP -void scheduler_ipi(void); +static __always_inline void scheduler_ipi(void) +{ + /* + * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting + * TIF_NEED_RESCHED remotely (for the first time) will also send + * this IPI. + */ + preempt_fold_need_resched(); +} extern unsigned long wait_task_inactive(struct task_struct *, long match_state); #else static inline void scheduler_ipi(void) { } @@ -1986,14 +2023,6 @@ static inline void rseq_execve(struct task_struct *t) #endif -void __exit_umh(struct task_struct *tsk); - -static inline void exit_umh(struct task_struct *tsk) -{ - if (unlikely(tsk->flags & PF_UMH)) - __exit_umh(tsk); -} - #ifdef CONFIG_DEBUG_RSEQ void rseq_syscall(struct pt_regs *regs); @@ -2015,6 +2044,7 @@ const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); int sched_trace_rq_cpu(struct rq *rq); +int sched_trace_rq_nr_running(struct rq *rq); const struct cpumask *sched_trace_rd_span(struct root_domain *rd); diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h index 95fb9e025247..00c45a0e6abe 100644 --- a/include/linux/sched/debug.h +++ b/include/linux/sched/debug.h @@ -30,7 +30,8 @@ extern void show_regs(struct pt_regs *); * task), SP is the stack pointer of the first frame that should be shown in the back * trace (or NULL if the entire call-chain of the task should be shown).
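 *
 * For example, with the new loglvl argument below, dumping the current
 * task's entire call-chain at KERN_INFO level becomes (an illustrative
 * call, not from the patch):
 *
 *	show_stack(NULL, NULL, KERN_INFO);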
*/ -extern void show_stack(struct task_struct *task, unsigned long *sp); +extern void show_stack(struct task_struct *task, unsigned long *sp, + const char *loglvl); extern void sched_show_task(struct task_struct *p); diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index 0fbcbacd1b29..cc9f393e2a70 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -14,6 +14,7 @@ enum hk_flags { HK_FLAG_DOMAIN = (1 << 5), HK_FLAG_WQ = (1 << 6), HK_FLAG_MANAGED_IRQ = (1 << 7), + HK_FLAG_KTHREAD = (1 << 8), }; #ifdef CONFIG_CPU_ISOLATION diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h index fa067de9f1a9..d2b4204ba4d3 100644 --- a/include/linux/sched/jobctl.h +++ b/include/linux/sched/jobctl.h @@ -19,6 +19,7 @@ struct task_struct; #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ +#define JOBCTL_TASK_WORK_BIT 24 /* set by TWA_SIGNAL */ #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) @@ -28,9 +29,10 @@ struct task_struct; #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) +#define JOBCTL_TASK_WORK (1UL << JOBCTL_TASK_WORK_BIT) #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) -#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) +#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK) extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask); extern void task_clear_jobctl_trapping(struct task_struct *task); diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 4859bea47a7b..83ec54b65e79 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -43,6 +43,6 @@ extern unsigned long calc_load_n(unsigned long load, unsigned long exp, #define LOAD_INT(x) ((x) >> FSHIFT) #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100) -extern void calc_global_load(unsigned long ticks); +extern void calc_global_load(void); #endif /* _LINUX_SCHED_LOADAVG_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index c49257a3b510..f889e332912f 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -23,7 +23,7 @@ extern struct mm_struct *mm_alloc(void); * will still exist later on and mmget_not_zero() has to be used before * accessing it. * - * This is a preferred way to to pin @mm for a longer/unbounded amount + * This is a preferred way to pin @mm for a longer/unbounded amount * of time. * * Use mmdrop() to release the reference acquired by mmgrab(). @@ -51,7 +51,7 @@ static inline void mmdrop(struct mm_struct *mm) /* * This has to be called after a get_task_mm()/mmget_not_zero() - * followed by taking the mmap_sem for writing before modifying the + * followed by taking the mmap_lock for writing before modifying the * vmas or anything the coredump pretends not to change from under it. * * It also has to be called when mmgrab() is used in the context of @@ -59,14 +59,14 @@ static inline void mmdrop(struct mm_struct *mm) * the context of the process to run down_write() on that pinned mm. 
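 *
 * A typical guarded update then looks like (an illustrative sketch; the
 * mmap_write_lock()/mmap_write_unlock() helpers are assumed as the
 * write-side lock and unlock of the mmap_lock):
 *
 *	mm = get_task_mm(task);
 *	if (mm) {
 *		mmap_write_lock(mm);
 *		if (mmget_still_valid(mm)) {
 *			... modify vmas ...
 *		}
 *		mmap_write_unlock(mm);
 *		mmput(mm);
 *	}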
* * NOTE: find_extend_vma() called from GUP context is the only place - * that can modify the "mm" (notably the vm_start/end) under mmap_sem + * that can modify the "mm" (notably the vm_start/end) under mmap_lock * for reading and outside the context of the process, so it is also - * the only case that holds the mmap_sem for reading that must call - * this function. Generally if the mmap_sem is hold for reading + * the only case that holds the mmap_lock for reading that must call + * this function. Generally if the mmap_lock is held for reading * there's no need of this check after get_task_mm()/mmget_not_zero(). * * This function can be obsoleted and the check can be removed, after - * the coredump code will hold the mmap_sem for writing before + * the coredump code will hold the mmap_lock for writing before * invoking the ->core_dump methods. */ static inline bool mmget_still_valid(struct mm_struct *mm) @@ -175,24 +175,20 @@ static inline bool in_vfork(struct task_struct *tsk) * Applies per-task gfp context to the given allocation flags. * PF_MEMALLOC_NOIO implies GFP_NOIO * PF_MEMALLOC_NOFS implies GFP_NOFS - * PF_MEMALLOC_NOCMA implies no allocation from CMA region. */ static inline gfp_t current_gfp_context(gfp_t flags) { - if (unlikely(current->flags & - (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) { + unsigned int pflags = READ_ONCE(current->flags); + + if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) { /* * NOIO implies both NOIO and NOFS and it is a weaker context * so always make sure it takes precedence */ - if (current->flags & PF_MEMALLOC_NOIO) + if (pflags & PF_MEMALLOC_NOIO) flags &= ~(__GFP_IO | __GFP_FS); - else if (current->flags & PF_MEMALLOC_NOFS) + else if (pflags & PF_MEMALLOC_NOFS) flags &= ~__GFP_FS; -#ifdef CONFIG_CMA - if (current->flags & PF_MEMALLOC_NOCMA) - flags &= ~__GFP_MOVABLE; -#endif } return flags; } @@ -232,7 +228,7 @@ static inline unsigned int memalloc_noio_save(void) * @flags: Flags to restore. * * Ends the implicit GFP_NOIO scope started by memalloc_noio_save function. - * Always make sure that that the given flags is the return value from the + * Always make sure that the given flags is the return value from the * pairing memalloc_noio_save call. */ static inline void memalloc_noio_restore(unsigned int flags) @@ -263,7 +259,7 @@ static inline unsigned int memalloc_nofs_save(void) * @flags: Flags to restore. * * Ends the implicit GFP_NOFS scope started by memalloc_nofs_save function. - * Always make sure that that the given flags is the return value from the + * Always make sure that the given flags is the return value from the * pairing memalloc_nofs_save call. */ static inline void memalloc_nofs_restore(unsigned int flags) diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 3e5b090c16d4..1bad18a1d8ba 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -654,17 +654,6 @@ static inline bool thread_group_leader(struct task_struct *p) return p->exit_signal >= 0; } -/* Do to the insanities of de_thread it is possible for a process - * to have the pid of the thread group leader without actually being - * the thread group leader. For iteration through the pids in proc - * all we care about is that we have a task with the appropriate - * pid, we don't actually care if we have the right task.
- */ -static inline bool has_group_leader_pid(struct task_struct *p) -{ - return task_pid(p) == task_tgid(p); -} - static inline bool same_thread_group(struct task_struct *p1, struct task_struct *p2) { @@ -685,6 +674,8 @@ static inline int thread_group_empty(struct task_struct *p) #define delay_group_leader(p) \ (thread_group_leader(p) && !thread_group_empty(p)) +extern bool thread_group_exited(struct pid *pid); + extern struct sighand_struct *__lock_task_sighand(struct task_struct *task, unsigned long *flags); diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index d4f6215ee03f..3c31ba88aca5 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -7,14 +7,20 @@ struct ctl_table; #ifdef CONFIG_DETECT_HUNG_TASK + +#ifdef CONFIG_SMP +extern unsigned int sysctl_hung_task_all_cpu_backtrace; +#else +#define sysctl_hung_task_all_cpu_backtrace 0 +#endif /* CONFIG_SMP */ + extern int sysctl_hung_task_check_count; extern unsigned int sysctl_hung_task_panic; extern unsigned long sysctl_hung_task_timeout_secs; extern unsigned long sysctl_hung_task_check_interval_secs; extern int sysctl_hung_task_warnings; -extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos); +int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); #else /* Avoid need for ifdefs elsewhere in the code */ enum { sysctl_hung_task_timeout_secs = 0 }; @@ -43,8 +49,7 @@ extern __read_mostly unsigned int sysctl_sched_migration_cost; extern __read_mostly unsigned int sysctl_sched_nr_migrate; int sched_proc_update_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *length, - loff_t *ppos); + void *buffer, size_t *length, loff_t *ppos); #endif /* @@ -56,9 +61,13 @@ int sched_proc_update_handler(struct ctl_table *table, int write, extern unsigned int sysctl_sched_rt_period; extern int sysctl_sched_rt_runtime; +extern unsigned int sysctl_sched_dl_period_max; +extern unsigned int sysctl_sched_dl_period_min; + #ifdef CONFIG_UCLAMP_TASK extern unsigned int sysctl_sched_uclamp_util_min; extern unsigned int sysctl_sched_uclamp_util_max; +extern unsigned int sysctl_sched_uclamp_util_min_rt_default; #endif #ifdef CONFIG_CFS_BANDWIDTH @@ -72,33 +81,21 @@ extern unsigned int sysctl_sched_autogroup_enabled; extern int sysctl_sched_rr_timeslice; extern int sched_rr_timeslice; -extern int sched_rr_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - -extern int sched_rt_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - -#ifdef CONFIG_UCLAMP_TASK -extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -#endif - -extern int sysctl_numa_balancing(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); - -extern int sysctl_schedstats(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int sched_rr_handler(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int sched_rt_handler(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int sysctl_sched_uclamp_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int 
sysctl_schedstats(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) extern unsigned int sysctl_sched_energy_aware; -extern int sched_energy_aware_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int sched_energy_aware_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); #endif #endif /* _LINUX_SCHED_SYSCTL_H */ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 38359071236a..a98965007eef 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -55,6 +55,7 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); +extern void sched_post_fork(struct task_struct *p); extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); @@ -65,22 +66,9 @@ extern void fork_init(void); extern void release_task(struct task_struct * p); -#ifdef CONFIG_HAVE_COPY_THREAD_TLS -extern int copy_thread_tls(unsigned long, unsigned long, unsigned long, - struct task_struct *, unsigned long); -#else extern int copy_thread(unsigned long, unsigned long, unsigned long, - struct task_struct *); + struct task_struct *, unsigned long); -/* Architectures that haven't opted into copy_thread_tls get the tls argument - * via pt_regs, so ignore the tls argument passed via C. */ -static inline int copy_thread_tls( - unsigned long clone_flags, unsigned long sp, unsigned long arg, - struct task_struct *p, unsigned long tls) -{ - return copy_thread(clone_flags, sp, arg, p); -} -#endif extern void flush_thread(void); #ifdef CONFIG_HAVE_EXIT_THREAD @@ -96,12 +84,11 @@ extern void exit_files(struct task_struct *); extern void exit_itimers(struct signal_struct *); extern long _do_fork(struct kernel_clone_args *kargs); -extern bool legacy_clone_args_valid(const struct kernel_clone_args *kargs); -extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); struct task_struct *fork_idle(int); struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); +int kernel_wait(pid_t pid, int *stat); extern void free_task(struct task_struct *tsk); @@ -126,6 +113,12 @@ static inline void put_task_struct(struct task_struct *t) __put_task_struct(t); } +static inline void put_task_struct_many(struct task_struct *t, int nr) +{ + if (refcount_sub_and_test(nr, &t->usage)) + __put_task_struct(t); +} + void put_task_struct_rcu_user(struct task_struct *task); #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 95253ad792b0..820511289857 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -11,21 +11,20 @@ */ #ifdef CONFIG_SMP -#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. 
*/ -#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */ -#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */ -#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ -#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ -#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_ASYM_CPUCAPACITY 0x0040 /* Domain members have different CPU capacities */ -#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share CPU capacity */ -#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ -#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share CPU pkg resources */ -#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ -#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ -#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ -#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ -#define SD_NUMA 0x4000 /* cross-node balancing */ +#define SD_BALANCE_NEWIDLE 0x0001 /* Balance when about to become idle */ +#define SD_BALANCE_EXEC 0x0002 /* Balance on exec */ +#define SD_BALANCE_FORK 0x0004 /* Balance on fork, clone */ +#define SD_BALANCE_WAKE 0x0008 /* Balance on wakeup */ +#define SD_WAKE_AFFINE 0x0010 /* Wake task to waking CPU */ +#define SD_ASYM_CPUCAPACITY 0x0020 /* Domain members have different CPU capacities */ +#define SD_SHARE_CPUCAPACITY 0x0040 /* Domain members share CPU capacity */ +#define SD_SHARE_POWERDOMAIN 0x0080 /* Domain members share power domain */ +#define SD_SHARE_PKG_RESOURCES 0x0100 /* Domain members share CPU pkg resources */ +#define SD_SERIALIZE 0x0200 /* Only a single load balancing instance */ +#define SD_ASYM_PACKING 0x0400 /* Place busy groups earlier in the domain */ +#define SD_PREFER_SIBLING 0x0800 /* Prefer to place tasks in a sibling domain */ +#define SD_OVERLAP 0x1000 /* sched_domains of this level overlap */ +#define SD_NUMA 0x2000 /* cross-node balancing */ #ifdef CONFIG_SCHED_SMT static inline int cpu_smt_flags(void) @@ -218,6 +217,16 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) #endif /* !CONFIG_SMP */ #ifndef arch_scale_cpu_capacity +/** + * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU. + * @cpu: the CPU in question. + * + * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e. 
+ * + * max_perf(cpu) + * ----------------------------- * SCHED_CAPACITY_SCALE + * max(max_perf(c) : c \in CPUs) + */ static __always_inline unsigned long arch_scale_cpu_capacity(int cpu) { @@ -233,6 +242,13 @@ unsigned long arch_scale_thermal_pressure(int cpu) } #endif +#ifndef arch_set_thermal_pressure +static __always_inline +void arch_set_thermal_pressure(const struct cpumask *cpus, + unsigned long th_pressure) +{ } +#endif + static inline int task_node(const struct task_struct *p) { return cpu_to_node(task_cpu(p)); diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 917d88edb7b9..a8ec3b6093fc 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -36,6 +36,9 @@ struct user_struct { defined(CONFIG_NET) || defined(CONFIG_IO_URING) atomic_long_t locked_vm; #endif +#ifdef CONFIG_WATCH_QUEUE + atomic_t nr_watches; /* The number of watches this user currently has */ +#endif /* Miscellaneous per-user rate limit */ struct ratelimit_state ratelimit; diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h index 0bb04a96a6d4..528718e4ed52 100644 --- a/include/linux/sched_clock.h +++ b/include/linux/sched_clock.h @@ -6,6 +6,34 @@ #define LINUX_SCHED_CLOCK #ifdef CONFIG_GENERIC_SCHED_CLOCK +/** + * struct clock_read_data - data required to read from sched_clock() + * + * @epoch_ns: sched_clock() value at last update + * @epoch_cyc: Clock cycle value at last update. + * @sched_clock_mask: Bitmask for two's complement subtraction of non 64bit + * clocks. + * @read_sched_clock: Current clock source (or dummy source when suspended). + * @mult: Multiplier for scaled math conversion. + * @shift: Shift value for scaled math conversion. + * + * Care must be taken when updating this structure; it is read by + * some very hot code paths. It occupies <=40 bytes and, when combined + * with the seqcount used to synchronize access, comfortably fits into + * a 64 byte cache line. + */ +struct clock_read_data { + u64 epoch_ns; + u64 epoch_cyc; + u64 sched_clock_mask; + u64 (*read_sched_clock)(void); + u32 mult; + u32 shift; +}; + +extern struct clock_read_data *sched_clock_read_begin(unsigned int *seq); +extern int sched_clock_read_retry(unsigned int seq); + extern void generic_sched_clock_init(void); extern void sched_clock_register(u64 (*read)(void), int bits, diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index 5c873a59b387..7e5dd7d1e221 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -4,7 +4,12 @@ * * Copyright (C) 2018 ARM Ltd.
*/ + +#ifndef _LINUX_SCMI_PROTOCOL_H +#define _LINUX_SCMI_PROTOCOL_H + #include <linux/device.h> +#include <linux/notifier.h> #include <linux/types.h> #define SCMI_MAX_STR_SIZE 16 @@ -114,6 +119,8 @@ struct scmi_perf_ops { unsigned long *rate, bool poll); int (*est_power_get)(const struct scmi_handle *handle, u32 domain, unsigned long *rate, unsigned long *power); + bool (*fast_switch_possible)(const struct scmi_handle *handle, + struct device *dev); }; /** @@ -169,18 +176,13 @@ enum scmi_sensor_class { * * @count_get: get the count of sensors provided by SCMI * @info_get: get the information of the specified sensor - * @trip_point_notify: control notifications on cross-over events for - * the trip-points * @trip_point_config: selects and configures a trip-point of interest * @reading_get: gets the current value of the sensor */ struct scmi_sensor_ops { int (*count_get)(const struct scmi_handle *handle); - const struct scmi_sensor_info *(*info_get) (const struct scmi_handle *handle, u32 sensor_id); - int (*trip_point_notify)(const struct scmi_handle *handle, - u32 sensor_id, bool enable); int (*trip_point_config)(const struct scmi_handle *handle, u32 sensor_id, u8 trip_id, u64 trip_value); int (*reading_get)(const struct scmi_handle *handle, u32 sensor_id, @@ -208,6 +210,49 @@ struct scmi_reset_ops { }; /** + * struct scmi_notify_ops - represents notifications' operations provided by + * SCMI core + * @register_event_notifier: Register a notifier_block for the requested event + * @unregister_event_notifier: Unregister a notifier_block for the requested + * event + * + * A user can register/unregister its own notifier_block against the wanted + * platform instance regarding the desired event identified by the + * tuple: (proto_id, evt_id, src_id) using the provided register/unregister + * interface where: + * + * @handle: The handle identifying the platform instance to use + * @proto_id: The protocol ID as in SCMI Specification + * @evt_id: The message ID of the desired event as in SCMI Specification + * @src_id: A pointer to the desired source ID if different sources are + * possible for the protocol (like domain_id, sensor_id...etc) + * + * @src_id can be provided as NULL if it simply does NOT make sense for + * the protocol at hand, OR if the user is explicitly interested in + * receiving notifications from ANY existent source associated to the + * specified proto_id / evt_id. + * + * Received notifications are finally delivered to the registered users, + * invoking the callback provided with the notifier_block *nb as follows: + * + * int user_cb(nb, evt_id, report) + * + * with: + * + * @nb: The notifier block provided by the user + * @evt_id: The message ID of the delivered event + * @report: A custom struct describing the specific event delivered + */ +struct scmi_notify_ops { + int (*register_event_notifier)(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, u32 *src_id, + struct notifier_block *nb); + int (*unregister_event_notifier)(const struct scmi_handle *handle, + u8 proto_id, u8 evt_id, u32 *src_id, + struct notifier_block *nb); +}; + +/** * struct scmi_handle - Handle returned to ARM SCMI clients for usage. 
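 *
 * For example, a client holding a handle could subscribe to performance
 * level notifications via the notify_ops above (an illustrative sketch;
 * my_nb is a hypothetical client-side notifier_block):
 *
 *	handle->notify_ops->register_event_notifier(handle,
 *			SCMI_PROTOCOL_PERF,
 *			SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
 *			NULL, &my_nb);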
* * @dev: pointer to the SCMI device @@ -217,6 +262,7 @@ struct scmi_reset_ops { * @clk_ops: pointer to set of clock protocol operations * @sensor_ops: pointer to set of sensor protocol operations * @reset_ops: pointer to set of reset protocol operations + * @notify_ops: pointer to set of notifications related operations * @perf_priv: pointer to private data structure specific to performance * protocol(for internal use only) * @clk_priv: pointer to private data structure specific to clock @@ -227,6 +273,8 @@ struct scmi_reset_ops { * protocol(for internal use only) * @reset_priv: pointer to private data structure specific to reset * protocol(for internal use only) + * @notify_priv: pointer to private data structure specific to notifications + * (for internal use only) */ struct scmi_handle { struct device *dev; @@ -236,12 +284,14 @@ struct scmi_handle { struct scmi_power_ops *power_ops; struct scmi_sensor_ops *sensor_ops; struct scmi_reset_ops *reset_ops; + struct scmi_notify_ops *notify_ops; /* for protocol internal use */ void *perf_priv; void *clk_priv; void *power_priv; void *sensor_priv; void *reset_priv; + void *notify_priv; }; enum scmi_std_protocol { @@ -319,3 +369,59 @@ static inline void scmi_driver_unregister(struct scmi_driver *driver) {} typedef int (*scmi_prot_init_fn_t)(struct scmi_handle *); int scmi_protocol_register(int protocol_id, scmi_prot_init_fn_t fn); void scmi_protocol_unregister(int protocol_id); + +/* SCMI Notification API - Custom Event Reports */ +enum scmi_notification_events { + SCMI_EVENT_POWER_STATE_CHANGED = 0x0, + SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0, + SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1, + SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0, + SCMI_EVENT_RESET_ISSUED = 0x0, + SCMI_EVENT_BASE_ERROR_EVENT = 0x0, +}; + +struct scmi_power_state_changed_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int power_state; +}; + +struct scmi_perf_limits_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int range_max; + unsigned int range_min; +}; + +struct scmi_perf_level_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int performance_level; +}; + +struct scmi_sensor_trip_point_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int sensor_id; + unsigned int trip_point_desc; +}; + +struct scmi_reset_issued_report { + ktime_t timestamp; + unsigned int agent_id; + unsigned int domain_id; + unsigned int reset_state; +}; + +struct scmi_base_error_report { + ktime_t timestamp; + unsigned int agent_id; + bool fatal; + unsigned int cmd_count; + unsigned long long reports[]; +}; + +#endif /* _LINUX_SCMI_PROTOCOL_H */ diff --git a/include/linux/scpi_protocol.h b/include/linux/scpi_protocol.h index ecb004711acf..afbf8037d8db 100644 --- a/include/linux/scpi_protocol.h +++ b/include/linux/scpi_protocol.h @@ -4,6 +4,10 @@ * * Copyright (C) 2014 ARM Ltd. */ + +#ifndef _LINUX_SCPI_PROTOCOL_H +#define _LINUX_SCPI_PROTOCOL_H + #include <linux/types.h> struct scpi_opp { @@ -71,3 +75,5 @@ struct scpi_ops *get_scpi_ops(void); #else static inline struct scpi_ops *get_scpi_ops(void) { return NULL; } #endif + +#endif /* _LINUX_SCPI_PROTOCOL_H */ diff --git a/include/linux/scs.h b/include/linux/scs.h new file mode 100644 index 000000000000..6dec390cf154 --- /dev/null +++ b/include/linux/scs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shadow Call Stack support. 
+ * + * Copyright (C) 2019 Google LLC + */ + +#ifndef _LINUX_SCS_H +#define _LINUX_SCS_H + +#include <linux/gfp.h> +#include <linux/poison.h> +#include <linux/sched.h> +#include <linux/sizes.h> + +#ifdef CONFIG_SHADOW_CALL_STACK + +/* + * In testing, 1 KiB shadow stack size (i.e. 128 stack frames on a 64-bit + * architecture) provided ~40% safety margin on stack usage while keeping + * memory allocation overhead reasonable. + */ +#define SCS_SIZE SZ_1K +#define GFP_SCS (GFP_KERNEL | __GFP_ZERO) + +/* An illegal pointer value to mark the end of the shadow stack. */ +#define SCS_END_MAGIC (0x5f6UL + POISON_POINTER_DELTA) + +/* Allocate a static per-CPU shadow stack */ +#define DEFINE_SCS(name) \ + DEFINE_PER_CPU(unsigned long [SCS_SIZE/sizeof(long)], name) \ + +#define task_scs(tsk) (task_thread_info(tsk)->scs_base) +#define task_scs_sp(tsk) (task_thread_info(tsk)->scs_sp) + +void scs_init(void); +int scs_prepare(struct task_struct *tsk, int node); +void scs_release(struct task_struct *tsk); + +static inline void scs_task_reset(struct task_struct *tsk) +{ + /* + * Reset the shadow stack to the base address in case the task + * is reused. + */ + task_scs_sp(tsk) = task_scs(tsk); +} + +static inline unsigned long *__scs_magic(void *s) +{ + return (unsigned long *)(s + SCS_SIZE) - 1; +} + +static inline bool task_scs_end_corrupted(struct task_struct *tsk) +{ + unsigned long *magic = __scs_magic(task_scs(tsk)); + unsigned long sz = task_scs_sp(tsk) - task_scs(tsk); + + return sz >= SCS_SIZE - 1 || READ_ONCE_NOCHECK(*magic) != SCS_END_MAGIC; +} + +#else /* CONFIG_SHADOW_CALL_STACK */ + +static inline void scs_init(void) {} +static inline void scs_task_reset(struct task_struct *tsk) {} +static inline int scs_prepare(struct task_struct *tsk, int node) { return 0; } +static inline void scs_release(struct task_struct *tsk) {} +static inline bool task_scs_end_corrupted(struct task_struct *tsk) { return false; } + +#endif /* CONFIG_SHADOW_CALL_STACK */ + +#endif /* _LINUX_SCS_H */ diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8ccd82105de8..76731230bbc5 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -221,7 +221,7 @@ struct sctp_datahdr { __be16 stream; __be16 ssn; __u32 ppid; - __u8 payload[0]; + __u8 payload[]; }; struct sctp_data_chunk { @@ -269,7 +269,7 @@ struct sctp_inithdr { __be16 num_outbound_streams; __be16 num_inbound_streams; __be32 initial_tsn; - __u8 params[0]; + __u8 params[]; }; struct sctp_init_chunk { @@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param { /* Section 3.3.2.1 Host Name Address (11) */ struct sctp_hostname_param { struct sctp_paramhdr param_hdr; - uint8_t hostname[0]; + uint8_t hostname[]; }; /* Section 3.3.2.1 Supported Address Types (12) */ struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; - __be16 types[0]; + __be16 types[]; }; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ @@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param { /* ADDIP Section 4.2.7 Supported Extensions Parameter */ struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.1 Random */ struct sctp_random_param { struct sctp_paramhdr param_hdr; - __u8 random_val[0]; + __u8 random_val[]; }; /* AUTH Section 3.2 Chunk List */ struct sctp_chunks_param { struct sctp_paramhdr param_hdr; - __u8 chunks[0]; + __u8 chunks[]; }; /* AUTH Section 3.3 HMAC Algorithm */ struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; - __be16 hmac_ids[0]; + __be16 hmac_ids[]; }; /* RFC 2960. 
Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): @@ -350,7 +350,7 @@ struct sctp_initack_chunk { /* Section 3.3.3.1 State Cookie (7) */ struct sctp_cookie_param { struct sctp_paramhdr p; - __u8 body[0]; + __u8 body[]; }; /* Section 3.3.3.1 Unrecognized Parameters (8) */ @@ -384,7 +384,7 @@ struct sctp_sackhdr { __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; - union sctp_sack_variable variable[0]; + union sctp_sack_variable variable[]; }; struct sctp_sack_chunk { @@ -436,7 +436,7 @@ struct sctp_shutdown_chunk { struct sctp_errhdr { __be16 cause; __be16 length; - __u8 variable[0]; + __u8 variable[]; }; struct sctp_operr_chunk { @@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip { struct sctp_fwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_fwdtsn_skip skip[0]; + struct sctp_fwdtsn_skip skip[]; }; struct sctp_fwdtsn_chunk { @@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip { struct sctp_ifwdtsn_hdr { __be32 new_cum_tsn; - struct sctp_ifwdtsn_skip skip[0]; + struct sctp_ifwdtsn_skip skip[]; }; struct sctp_ifwdtsn_chunk { @@ -658,7 +658,7 @@ struct sctp_addip_param { struct sctp_addiphdr { __be32 serial; - __u8 params[0]; + __u8 params[]; }; struct sctp_addip_chunk { @@ -718,7 +718,7 @@ struct sctp_addip_chunk { struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; - __u8 hmac[0]; + __u8 hmac[]; }; struct sctp_auth_chunk { @@ -733,7 +733,7 @@ struct sctp_infox { struct sctp_reconf_chunk { struct sctp_chunkhdr chunk_hdr; - __u8 params[0]; + __u8 params[]; }; struct sctp_strreset_outreq { @@ -741,13 +741,13 @@ struct sctp_strreset_outreq { __be32 request_seq; __be32 response_seq; __be32 send_reset_at_tsn; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_inreq { struct sctp_paramhdr param_hdr; __be32 request_seq; - __be16 list_of_streams[0]; + __be16 list_of_streams[]; }; struct sctp_strreset_tsnreq { diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 4192369b8418..02aef2844c38 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -10,9 +10,14 @@ SECCOMP_FILTER_FLAG_NEW_LISTENER | \ SECCOMP_FILTER_FLAG_TSYNC_ESRCH) +/* sizeof() the first published struct seccomp_notif_addfd */ +#define SECCOMP_NOTIFY_ADDFD_SIZE_VER0 24 +#define SECCOMP_NOTIFY_ADDFD_SIZE_LATEST SECCOMP_NOTIFY_ADDFD_SIZE_VER0 + #ifdef CONFIG_SECCOMP #include <linux/thread_info.h> +#include <linux/atomic.h> #include <asm/seccomp.h> struct seccomp_filter; @@ -29,6 +34,7 @@ struct seccomp_filter; */ struct seccomp { int mode; + atomic_t filter_count; struct seccomp_filter *filter; }; @@ -58,9 +64,11 @@ static inline int seccomp_mode(struct seccomp *s) struct seccomp { }; struct seccomp_filter { }; +struct seccomp_data; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER static inline int secure_computing(void) { return 0; } +static inline int __secure_computing(const struct seccomp_data *sd) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif @@ -82,10 +90,10 @@ static inline int seccomp_mode(struct seccomp *s) #endif /* CONFIG_SECCOMP */ #ifdef CONFIG_SECCOMP_FILTER -extern void put_seccomp_filter(struct task_struct *tsk); +extern void seccomp_filter_release(struct task_struct *tsk); extern void get_seccomp_filter(struct task_struct *tsk); #else /* CONFIG_SECCOMP_FILTER */ -static inline void put_seccomp_filter(struct task_struct *tsk) +static inline void seccomp_filter_release(struct task_struct *tsk) { return; } diff --git a/include/linux/security.h b/include/linux/security.h index a8d9310472df..0a0a03b36a3b 100644 --- 
a/include/linux/security.h +++ b/include/linux/security.h @@ -56,6 +56,8 @@ struct mm_struct; struct fs_context; struct fs_parameter; enum fs_value_type; +struct watch; +struct watch_notification; /* Default (no) options for the capable function */ #define CAP_OPT_NONE 0x0 @@ -140,7 +142,7 @@ extern int cap_capset(struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted); -extern int cap_bprm_set_creds(struct linux_binprm *bprm); +extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); extern int cap_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); extern int cap_inode_removexattr(struct dentry *dentry, const char *name); @@ -211,7 +213,7 @@ struct request_sock; #ifdef CONFIG_MMU extern int mmap_min_addr_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); #endif /* security_inode_init_security callback function to write xattrs */ @@ -276,7 +278,8 @@ int security_quota_on(struct dentry *dentry); int security_syslog(int type); int security_settime64(const struct timespec64 *ts, const struct timezone *tz); int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); -int security_bprm_set_creds(struct linux_binprm *bprm); +int security_bprm_creds_for_exec(struct linux_binprm *bprm); +int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file); int security_bprm_check(struct linux_binprm *bprm); void security_bprm_committing_creds(struct linux_binprm *bprm); void security_bprm_committed_creds(struct linux_binprm *bprm); @@ -389,6 +392,8 @@ int security_kernel_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id id); int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags); +int security_task_fix_setgid(struct cred *new, const struct cred *old, + int flags); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); @@ -569,9 +574,15 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages)); } -static inline int security_bprm_set_creds(struct linux_binprm *bprm) +static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm) +{ + return 0; +} + +static inline int security_bprm_creds_from_file(struct linux_binprm *bprm, + struct file *file) { - return cap_bprm_set_creds(bprm); + return cap_bprm_creds_from_file(bprm, file); } static inline int security_bprm_check(struct linux_binprm *bprm) @@ -1027,6 +1038,13 @@ static inline int security_task_fix_setuid(struct cred *new, return cap_task_fix_setuid(new, old, flags); } +static inline int security_task_fix_setgid(struct cred *new, + const struct cred *old, + int flags) +{ + return 0; +} + static inline int security_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; @@ -1275,6 +1293,28 @@ static inline int security_locked_down(enum lockdown_reason what) } #endif /* CONFIG_SECURITY */ +#if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) +int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n); +#else +static inline int security_post_notification(const struct cred *w_cred, + const struct cred *cred, + struct watch_notification *n) +{ + return 0; +} 
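+	/*
+	 * With CONFIG_SECURITY or CONFIG_WATCH_QUEUE disabled, the stub
+	 * above unconditionally permits posting the notification.
+	 */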
+#endif + +#if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) +int security_watch_key(struct key *key); +#else +static inline int security_watch_key(struct key *key) +{ + return 0; +} +#endif + #ifdef CONFIG_SECURITY_NETWORK int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); @@ -1743,8 +1783,8 @@ static inline int security_path_chroot(const struct path *path) int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags); void security_key_free(struct key *key); -int security_key_permission(key_ref_t key_ref, - const struct cred *cred, unsigned perm); +int security_key_permission(key_ref_t key_ref, const struct cred *cred, + enum key_need_perm need_perm); int security_key_getsecurity(struct key *key, char **_buffer); #else @@ -1762,7 +1802,7 @@ static inline void security_key_free(struct key *key) static inline int security_key_permission(key_ref_t key_ref, const struct cred *cred, - unsigned perm) + enum key_need_perm need_perm) { return 0; } diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 1672cf6f7614..813614d4b71f 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -145,6 +145,25 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int); int seq_open_private(struct file *, const struct seq_operations *, int); int seq_release_private(struct inode *, struct file *); +#define DEFINE_SEQ_ATTRIBUTE(__name) \ +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + int ret = seq_open(file, &__name ## _sops); \ + if (!ret && inode->i_private) { \ + struct seq_file *seq_f = file->private_data; \ + seq_f->private = inode->i_private; \ + } \ + return ret; \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = seq_release, \ +} + #define DEFINE_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 0491d963d47e..962d9768945f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -1,49 +1,65 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_SEQLOCK_H #define __LINUX_SEQLOCK_H + /* - * Reader/writer consistent mechanism without starving writers. This type of - * lock for data where the reader wants a consistent set of information - * and is willing to retry if the information changes. There are two types - * of readers: - * 1. Sequence readers which never block a writer but they may have to retry - * if a writer is in progress by detecting change in sequence number. - * Writers do not wait for a sequence reader. - * 2. Locking readers which will wait if a writer or another locking reader - * is in progress. A locking reader in progress will also block a writer - * from going forward. Unlike the regular rwlock, the read lock here is - * exclusive so that only one locking reader can get it. - * - * This is not as cache friendly as brlock. Also, this may not work well - * for data that contains pointers, because any writer could - * invalidate a pointer that a reader was following. - * - * Expected non-blocking reader usage: - * do { - * seq = read_seqbegin(&foo); - * ... - * } while (read_seqretry(&foo, seq)); + * seqcount_t / seqlock_t - a reader-writer consistency mechanism with + * lockless readers (read-only retry loops), and no writer starvation. 
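+ *
+ * The classic lockless read side then looks like (an illustrative
+ * sketch; foo is a seqlock_t protecting some data):
+ *
+ *	do {
+ *		seq = read_seqbegin(&foo);
+ *		... read the data ...
+ *	} while (read_seqretry(&foo, seq));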
* + * See Documentation/locking/seqlock.rst * - * On non-SMP the spin locks disappear but the writer still needs - * to increment the sequence variables because an interrupt routine could - * change the state of the data. - * - * Based on x86_64 vsyscall gettimeofday - * by Keith Owens and Andrea Arcangeli + * Copyrights: + * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli + * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH */ -#include <linux/spinlock.h> -#include <linux/preempt.h> -#include <linux/lockdep.h> #include <linux/compiler.h> +#include <linux/kcsan-checks.h> +#include <linux/lockdep.h> +#include <linux/mutex.h> +#include <linux/preempt.h> +#include <linux/spinlock.h> + #include <asm/processor.h> /* - * Version using sequence counter only. - * This can be used when code has its own mutex protecting the - * updating starting before the write_seqcountbeqin() and ending - * after the write_seqcount_end(). + * The seqlock seqcount_t interface does not prescribe a precise sequence of + * read begin/retry/end. For readers, typically there is a call to + * read_seqcount_begin() and read_seqcount_retry(), however, there are more + * esoteric cases which do not follow this pattern. + * + * As a consequence, we take the following best-effort approach for raw usage + * via seqcount_t under KCSAN: upon beginning a seq-reader critical section, + * pessimistically mark the next KCSAN_SEQLOCK_REGION_MAX memory accesses as + * atomics; if there is a matching read_seqcount_retry() call, no following + * memory operations are considered atomic. Usage of the seqlock_t interface + * is not affected. + */ +#define KCSAN_SEQLOCK_REGION_MAX 1000 + +/* + * Sequence counters (seqcount_t) + * + * This is the raw counting mechanism, without any writer protection. + * + * Write side critical sections must be serialized and non-preemptible. + * + * If readers can be invoked from hardirq or softirq contexts, + * interrupts or bottom halves must also be respectively disabled before + * entering the write section. + * + * This mechanism can't be used if the protected data contains pointers, + * as the writer can invalidate a pointer that a reader is following. + * + * If the write serialization mechanism is one of the common kernel + * locking primitives, use a sequence counter with associated lock + * (seqcount_LOCKTYPE_t) instead. + * + * If it's desired to automatically handle the sequence counter writer + * serialization and non-preemptibility requirements, use a sequential + * lock (seqlock_t) instead. 
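+ *
+ * A raw seqcount_t write side, with the serialization provided by the
+ * caller, then looks like (an illustrative sketch; foo_lock and foo_seq
+ * are hypothetical):
+ *
+ *	spin_lock(&foo_lock);
+ *	write_seqcount_begin(&foo_seq);
+ *	... update the data ...
+ *	write_seqcount_end(&foo_seq);
+ *	spin_unlock(&foo_lock);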
+ * + * See Documentation/locking/seqlock.rst */ typedef struct seqcount { unsigned sequence; @@ -63,13 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name, } #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SEQCOUNT_DEP_MAP_INIT(lockname) \ - .dep_map = { .name = #lockname } \ -# define seqcount_init(s) \ - do { \ - static struct lock_class_key __key; \ - __seqcount_init((s), #s, &__key); \ +# define SEQCOUNT_DEP_MAP_INIT(lockname) \ + .dep_map = { .name = #lockname } + +/** + * seqcount_init() - runtime initializer for seqcount_t + * @s: Pointer to the seqcount_t instance + */ +# define seqcount_init(s) \ + do { \ + static struct lock_class_key __key; \ + __seqcount_init((s), #s, &__key); \ } while (0) static inline void seqcount_lockdep_reader_access(const seqcount_t *s) @@ -89,13 +110,149 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) # define seqcount_lockdep_reader_access(x) #endif -#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)} +/** + * SEQCNT_ZERO() - static initializer for seqcount_t + * @name: Name of the seqcount_t instance + */ +#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) } + +/* + * Sequence counters with associated locks (seqcount_LOCKTYPE_t) + * + * A sequence counter which associates the lock used for writer + * serialization at initialization time. This enables lockdep to validate + * that the write side critical section is properly serialized. + * + * For associated locks which do not implicitly disable preemption, + * preemption protection is enforced in the write side function. + * + * Lockdep is never used in any of the raw write variants. + * + * See Documentation/locking/seqlock.rst + */ + +#ifdef CONFIG_LOCKDEP +#define __SEQ_LOCK(expr) expr +#else +#define __SEQ_LOCK(expr) +#endif + +/** + * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated + * @seqcount: The real sequence counter + * @lock: Pointer to the associated spinlock + * + * A plain sequence counter with external writer synchronization by a + * spinlock. The spinlock is associated to the sequence count in the + * static initializer or init function. This enables lockdep to validate + * that the write side critical section is properly serialized.
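+ *
+ * For example (an illustrative sketch; foo_lock and foo_seq are
+ * hypothetical):
+ *
+ *	static spinlock_t foo_lock;
+ *	static seqcount_spinlock_t foo_seq;
+ *
+ *	seqcount_spinlock_init(&foo_seq, &foo_lock);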
+ */ + +/** + * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t + * @s: Pointer to the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ +/* + * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers + * @locktype: actual typename + * @lockname: name + * @preemptible: preemptibility of above locktype + * @lockmember: argument for lockdep_assert_held() + */ +#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \ +typedef struct seqcount_##lockname { \ + seqcount_t seqcount; \ + __SEQ_LOCK(locktype *lock); \ +} seqcount_##lockname##_t; \ + \ +static __always_inline void \ +seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \ +{ \ + seqcount_init(&s->seqcount); \ + __SEQ_LOCK(s->lock = lock); \ +} \ + \ +static __always_inline seqcount_t * \ +__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \ +{ \ + return &s->seqcount; \ +} \ + \ +static __always_inline bool \ +__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \ +{ \ + return preemptible; \ +} \ + \ +static __always_inline void \ +__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \ +{ \ + __SEQ_LOCK(lockdep_assert_held(lockmember)); \ +} + +/* + * __seqprop() for seqcount_t + */ + +static inline seqcount_t *__seqcount_ptr(seqcount_t *s) +{ + return s; +} + +static inline bool __seqcount_preemptible(seqcount_t *s) +{ + return false; +} + +static inline void __seqcount_assert(seqcount_t *s) +{ + lockdep_assert_preemption_disabled(); +} + +SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock) +SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock) +SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock) +SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base) + +/** + * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t + * @name: Name of the seqcount_LOCKNAME_t instance + * @lock: Pointer to the associated LOCKTYPE + */ + +#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \ + .seqcount = SEQCNT_ZERO(seq_name.seqcount), \ + __SEQ_LOCK(.lock = (assoc_lock)) \ +} + +#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) +#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock) + + +#define __seqprop_case(s, lockname, prop) \ + seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s)) + +#define __seqprop(s, prop) _Generic(*(s), \ + seqcount_t: __seqcount_##prop((void *)(s)), \ + __seqprop_case((s), raw_spinlock, prop), \ + __seqprop_case((s), spinlock, prop), \ + __seqprop_case((s), rwlock, prop), \ + __seqprop_case((s), mutex, prop), \ + __seqprop_case((s), ww_mutex, prop)) + +#define __seqcount_ptr(s) __seqprop(s, ptr) +#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible) +#define __seqcount_assert_lock_held(s) __seqprop(s, assert) /** - * __read_seqcount_begin - begin a seq-read critical section (without barrier) - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() * barrier. 
Callers should ensure that smp_rmb() or equivalent ordering is @@ -104,8 +261,13 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s) * * Use carefully, only in critical code, and comment how the barrier is * provided. + * + * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned __read_seqcount_begin(const seqcount_t *s) +#define __read_seqcount_begin(s) \ + __read_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned __read_seqcount_t_begin(const seqcount_t *s) { unsigned ret; @@ -115,82 +277,96 @@ repeat: cpu_relax(); goto repeat; } + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); return ret; } /** - * raw_read_seqcount - Read the raw seqcount - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * - * raw_read_seqcount opens a read critical section of the given - * seqcount without any lockdep checking and without checking or - * masking the LSB. Calling code is responsible for handling that. + * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount(const seqcount_t *s) +#define raw_read_seqcount_begin(s) \ + raw_read_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s) { - unsigned ret = READ_ONCE(s->sequence); + unsigned ret = __read_seqcount_t_begin(s); smp_rmb(); return ret; } /** - * raw_read_seqcount_begin - start seq-read critical section w/o lockdep - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * read_seqcount_begin() - begin a seqcount_t read critical section + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * - * raw_read_seqcount_begin opens a read critical section of the given - * seqcount, but without any lockdep checking. Validity of the critical - * section is tested by checking read_seqcount_retry function. + * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) +#define read_seqcount_begin(s) \ + read_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned read_seqcount_t_begin(const seqcount_t *s) { - unsigned ret = __read_seqcount_begin(s); - smp_rmb(); - return ret; + seqcount_lockdep_reader_access(s); + return raw_read_seqcount_t_begin(s); } /** - * read_seqcount_begin - begin a seq-read critical section - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * raw_read_seqcount() - read the raw seqcount_t counter value + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * - * read_seqcount_begin opens a read critical section of the given seqcount. - * Validity of the critical section is tested by checking read_seqcount_retry - * function. + * raw_read_seqcount opens a read critical section of the given + * seqcount_t, without any lockdep checking, and without checking or + * masking the sequence counter LSB. Calling code is responsible for + * handling that. 
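+ *
+ * An editor's sketch of one common pattern, using the unmasked LSB as a
+ * latch index (foo_seq is hypothetical)::
+ *
+ *	unsigned seq = raw_read_seqcount(&foo_seq);
+ *	int idx = seq & 0x01;	/* the caller handles the LSB itself */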
+ * + * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned read_seqcount_begin(const seqcount_t *s) +#define raw_read_seqcount(s) \ + raw_read_seqcount_t(__seqcount_ptr(s)) + +static inline unsigned raw_read_seqcount_t(const seqcount_t *s) { - seqcount_lockdep_reader_access(s); - return raw_read_seqcount_begin(s); + unsigned ret = READ_ONCE(s->sequence); + smp_rmb(); + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); + return ret; } /** - * raw_seqcount_begin - begin a seq-read critical section - * @s: pointer to seqcount_t - * Returns: count to be passed to read_seqcount_retry + * raw_seqcount_begin() - begin a seqcount_t read critical section w/o + * lockdep and w/o counter stabilization + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * + * raw_seqcount_begin opens a read critical section of the given + * seqcount_t. Unlike read_seqcount_begin(), this function will not wait + * for the count to stabilize. If a writer is active when it begins, it + * will fail the read_seqcount_retry() at the end of the read critical + * section instead of stabilizing at the beginning of it. * - * raw_seqcount_begin opens a read critical section of the given seqcount. - * Validity of the critical section is tested by checking read_seqcount_retry - * function. + * Use this only in special kernel hot paths where the read section is + * small and has a high probability of success through other external + * means. It will save a single branching instruction. * - * Unlike read_seqcount_begin(), this function will not wait for the count - * to stabilize. If a writer is active when we begin, we will fail the - * read_seqcount_retry() instead of stabilizing at the beginning of the - * critical section. + * Return: count to be passed to read_seqcount_retry() */ -static inline unsigned raw_seqcount_begin(const seqcount_t *s) +#define raw_seqcount_begin(s) \ + raw_seqcount_t_begin(__seqcount_ptr(s)) + +static inline unsigned raw_seqcount_t_begin(const seqcount_t *s) { - unsigned ret = READ_ONCE(s->sequence); - smp_rmb(); - return ret & ~1; + /* + * If the counter is odd, let read_seqcount_retry() fail + * by decrementing the counter. + */ + return raw_read_seqcount_t(s) & ~1; } /** - * __read_seqcount_retry - end a seq-read critical section (without barrier) - * @s: pointer to seqcount_t - * @start: count, from read_seqcount_begin - * Returns: 1 if retry is required, else 0 + * __read_seqcount_retry() - end a seqcount_t read section w/o barrier + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * @start: count, from read_seqcount_begin() * * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() * barrier. Callers should ensure that smp_rmb() or equivalent ordering is @@ -199,84 +375,237 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s) * * Use carefully, only in critical code, and comment how the barrier is * provided. 
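+ *
+ * For reference, an editor's sketch of the ordinary loop built from the
+ * non-raw begin/retry pair (foo_seq is hypothetical)::
+ *
+ *	unsigned seq;
+ *
+ *	do {
+ *		seq = read_seqcount_begin(&foo_seq);
+ *		/* snapshot the data protected by foo_seq */
+ *	} while (read_seqcount_retry(&foo_seq, seq));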
+ * + * Return: true if a read section retry is required, else false */ -static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) +#define __read_seqcount_retry(s, start) \ + __read_seqcount_t_retry(__seqcount_ptr(s), start) + +static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start) { - return unlikely(s->sequence != start); + kcsan_atomic_next(0); + return unlikely(READ_ONCE(s->sequence) != start); } /** - * read_seqcount_retry - end a seq-read critical section - * @s: pointer to seqcount_t - * @start: count, from read_seqcount_begin - * Returns: 1 if retry is required, else 0 + * read_seqcount_retry() - end a seqcount_t read critical section + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * @start: count, from read_seqcount_begin() * - * read_seqcount_retry closes a read critical section of the given seqcount. - * If the critical section was invalid, it must be ignored (and typically - * retried). + * read_seqcount_retry closes the read critical section of given + * seqcount_t. If the critical section was invalid, it must be ignored + * (and typically retried). + * + * Return: true if a read section retry is required, else false */ -static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) +#define read_seqcount_retry(s, start) \ + read_seqcount_t_retry(__seqcount_ptr(s), start) + +static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start) { smp_rmb(); - return __read_seqcount_retry(s, start); + return __read_seqcount_t_retry(s, start); } +/** + * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + */ +#define raw_write_seqcount_begin(s) \ +do { \ + if (__seqcount_lock_preemptible(s)) \ + preempt_disable(); \ + \ + raw_write_seqcount_t_begin(__seqcount_ptr(s)); \ +} while (0) - -static inline void raw_write_seqcount_begin(seqcount_t *s) +static inline void raw_write_seqcount_t_begin(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); } -static inline void raw_write_seqcount_end(seqcount_t *s) +/** + * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + */ +#define raw_write_seqcount_end(s) \ +do { \ + raw_write_seqcount_t_end(__seqcount_ptr(s)); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void raw_write_seqcount_t_end(seqcount_t *s) { smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); } /** - * raw_write_seqcount_barrier - do a seq write barrier - * @s: pointer to seqcount_t + * write_seqcount_begin_nested() - start a seqcount_t write section with + * custom lockdep nesting level + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * @subclass: lockdep nesting level * - * This can be used to provide an ordering guarantee instead of the - * usual consistency guarantee. It is one wmb cheaper, because we can - * collapse the two back-to-back wmb()s. 
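+ *
+ * An editor's sketch of the nesting annotation, assuming two instances
+ * of one (hypothetical) structure are written from the same context::
+ *
+ *	write_seqcount_begin(&outer->seq);
+ *	write_seqcount_begin_nested(&inner->seq, SINGLE_DEPTH_NESTING);
+ *	/* update both instances */
+ *	write_seqcount_end(&inner->seq);
+ *	write_seqcount_end(&outer->seq);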
+ * See Documentation/locking/lockdep-design.rst + */ +#define write_seqcount_begin_nested(s, subclass) \ +do { \ + __seqcount_assert_lock_held(s); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \ +} while (0) + +static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass) +{ + raw_write_seqcount_t_begin(s); + seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); +} + +/** + * write_seqcount_begin() - start a seqcount_t write side critical section + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * - * seqcount_t seq; - * bool X = true, Y = false; + * write_seqcount_begin opens a write side critical section of the given + * seqcount_t. * - * void read(void) - * { - * bool x, y; + * Context: seqcount_t write side critical sections must be serialized and + * non-preemptible. If readers can be invoked from hardirq or softirq + * context, interrupts or bottom halves must be respectively disabled. + */ +#define write_seqcount_begin(s) \ +do { \ + __seqcount_assert_lock_held(s); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_disable(); \ + \ + write_seqcount_t_begin(__seqcount_ptr(s)); \ +} while (0) + +static inline void write_seqcount_t_begin(seqcount_t *s) +{ + write_seqcount_t_begin_nested(s, 0); +} + +/** + * write_seqcount_end() - end a seqcount_t write side critical section + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * + * The write section must've been opened with write_seqcount_begin(). + */ +#define write_seqcount_end(s) \ +do { \ + write_seqcount_t_end(__seqcount_ptr(s)); \ + \ + if (__seqcount_lock_preemptible(s)) \ + preempt_enable(); \ +} while (0) + +static inline void write_seqcount_t_end(seqcount_t *s) +{ + seqcount_release(&s->dep_map, _RET_IP_); + raw_write_seqcount_t_end(s); +} + +/** + * raw_write_seqcount_barrier() - do a seqcount_t write barrier + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * + * This can be used to provide an ordering guarantee instead of the usual + * consistency guarantee. It is one wmb cheaper, because it can collapse + * the two back-to-back wmb()s. + * + * Note that writes surrounding the barrier should be declared atomic (e.g. + * via WRITE_ONCE): a) to ensure the writes become visible to other threads + * atomically, avoiding compiler optimizations; b) to document which writes are + * meant to propagate to the reader critical section. 
This is necessary because + * neither the writes before nor the writes after the barrier are enclosed + * in a seq-writer critical section that would ensure readers are aware of + * ongoing writes:: + * + * seqcount_t seq; + * bool X = true, Y = false; * - * void read(void) - * { - * bool x, y; + * void read(void) + * { + * bool x, y; * - * do { - * int s = read_seqcount_begin(&seq); + * do { + * int s = read_seqcount_begin(&seq); * - * x = X; y = Y; + * x = X; y = Y; * - * } while (read_seqcount_retry(&seq, s)); + * } while (read_seqcount_retry(&seq, s)); * - * BUG_ON(!x && !y); + * + * BUG_ON(!x && !y); * } * * void write(void) * { - * Y = true; + * WRITE_ONCE(Y, true); * - * raw_write_seqcount_barrier(seq); + * raw_write_seqcount_barrier(&seq); * - * X = false; + * WRITE_ONCE(X, false); * } */ -static inline void raw_write_seqcount_barrier(seqcount_t *s) +#define raw_write_seqcount_barrier(s) \ + raw_write_seqcount_t_barrier(__seqcount_ptr(s)) + +static inline void raw_write_seqcount_t_barrier(seqcount_t *s) { + kcsan_nestable_atomic_begin(); s->sequence++; smp_wmb(); s->sequence++; + kcsan_nestable_atomic_end(); +} + +/** + * write_seqcount_invalidate() - invalidate in-progress seqcount_t read + * side operations + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * + * After write_seqcount_invalidate, no seqcount_t read side operations + * will complete successfully and see data older than this. + */ +#define write_seqcount_invalidate(s) \ + write_seqcount_t_invalidate(__seqcount_ptr(s)) + +static inline void write_seqcount_t_invalidate(seqcount_t *s) +{ + smp_wmb(); + kcsan_nestable_atomic_begin(); + s->sequence += 2; + kcsan_nestable_atomic_end(); } -static inline int raw_read_seqcount_latch(seqcount_t *s) +/** + * raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants + * + * Use seqcount_t latching to switch between two storage places protected + * by a sequence counter. Doing so allows having interruptible, preemptible, + * seqcount_t write side critical sections. + * + * Check raw_write_seqcount_latch() for more details and a full reader and + * writer usage example. + * + * Return: sequence counter raw value. Use the lowest bit as an index for + * picking which data copy to read. The full counter value must then be + * checked with read_seqcount_retry(). + */ +#define raw_read_seqcount_latch(s) \ + raw_read_seqcount_t_latch(__seqcount_ptr(s)) + +static inline int raw_read_seqcount_t_latch(seqcount_t *s) { /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ int seq = READ_ONCE(s->sequence); /* ^^^ */ @@ -284,8 +613,8 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) } /** - * raw_write_seqcount_latch - redirect readers to even/odd copy - * @s: pointer to seqcount_t + * raw_write_seqcount_latch() - redirect readers to even/odd copy + * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -301,66 +630,73 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) * Very simply put: we first modify one copy and then the other. This ensures * there is always one copy in a stable state, ready to give us an answer.
* - * The basic form is a data structure like: + * The basic form is a data structure like:: * - * struct latch_struct { - * seqcount_t seq; - * struct data_struct data[2]; - * }; + * struct latch_struct { + * seqcount_t seq; + * struct data_struct data[2]; + * }; * * Where a modification, which is assumed to be externally serialized, does the - * following: + * following:: * - * void latch_modify(struct latch_struct *latch, ...) - * { - * smp_wmb(); <- Ensure that the last data[1] update is visible - * latch->seq++; - * smp_wmb(); <- Ensure that the seqcount update is visible + * void latch_modify(struct latch_struct *latch, ...) + * { + * smp_wmb(); // Ensure that the last data[1] update is visible + * latch->seq++; + * smp_wmb(); // Ensure that the seqcount update is visible * - * modify(latch->data[0], ...); + * modify(latch->data[0], ...); * - * smp_wmb(); <- Ensure that the data[0] update is visible - * latch->seq++; - * smp_wmb(); <- Ensure that the seqcount update is visible + * smp_wmb(); // Ensure that the data[0] update is visible + * latch->seq++; + * smp_wmb(); // Ensure that the seqcount update is visible * - * modify(latch->data[1], ...); - * } + * modify(latch->data[1], ...); + * } * - * The query will have a form like: + * The query will have a form like:: * - * struct entry *latch_query(struct latch_struct *latch, ...) - * { - * struct entry *entry; - * unsigned seq, idx; + * struct entry *latch_query(struct latch_struct *latch, ...) + * { + * struct entry *entry; + * unsigned seq, idx; * - * do { - * seq = raw_read_seqcount_latch(&latch->seq); + * do { + * seq = raw_read_seqcount_latch(&latch->seq); * - * idx = seq & 0x01; - * entry = data_query(latch->data[idx], ...); + * idx = seq & 0x01; + * entry = data_query(latch->data[idx], ...); * - * smp_rmb(); - * } while (seq != latch->seq); + * // read_seqcount_retry() includes needed smp_rmb() + * } while (read_seqcount_retry(&latch->seq, seq)); * - * return entry; - * } + * return entry; + * } * * So during the modification, queries are first redirected to data[1]. Then we * modify data[0]. When that is complete, we redirect queries back to data[0] * and we can modify data[1]. * - * NOTE: The non-requirement for atomic modifications does _NOT_ include - * the publishing of new entries in the case where data is a dynamic - * data structure. + * NOTE: + * + * The non-requirement for atomic modifications does _NOT_ include + * the publishing of new entries in the case where data is a dynamic + * data structure. * - * An iteration might start in data[0] and get suspended long enough - * to miss an entire modification sequence, once it resumes it might - * observe the new entry. + * An iteration might start in data[0] and get suspended long enough + * to miss an entire modification sequence; once it resumes, it might + * observe the new entry. * - * NOTE: When data is a dynamic data structure; one should use regular RCU - * patterns to manage the lifetimes of the objects within. + * NOTE: + * + * When data is a dynamic data structure, one should use regular RCU + * patterns to manage the lifetimes of the objects within.
*/ -static inline void raw_write_seqcount_latch(seqcount_t *s) +#define raw_write_seqcount_latch(s) \ + raw_write_seqcount_t_latch(__seqcount_ptr(s)) + +static inline void raw_write_seqcount_t_latch(seqcount_t *s) { smp_wmb(); /* prior stores before incrementing "sequence" */ s->sequence++; @@ -368,114 +704,162 @@ static inline void raw_write_seqcount_latch(seqcount_t *s) } /* - * Sequence counter only version assumes that callers are using their - * own mutexing. - */ -static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) -{ - raw_write_seqcount_begin(s); - seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); -} - -static inline void write_seqcount_begin(seqcount_t *s) -{ - write_seqcount_begin_nested(s, 0); -} - -static inline void write_seqcount_end(seqcount_t *s) -{ - seqcount_release(&s->dep_map, _RET_IP_); - raw_write_seqcount_end(s); -} - -/** - * write_seqcount_invalidate - invalidate in-progress read-side seq operations - * @s: pointer to seqcount_t + * Sequential locks (seqlock_t) * - * After write_seqcount_invalidate, no read-side seq operations will complete - * successfully and see data older than this. + * Sequence counters with an embedded spinlock for writer serialization + * and non-preemptibility. + * + * For more info, see: + * - Comments on top of seqcount_t + * - Documentation/locking/seqlock.rst */ -static inline void write_seqcount_invalidate(seqcount_t *s) -{ - smp_wmb(); - s->sequence+=2; -} - typedef struct { struct seqcount seqcount; spinlock_t lock; } seqlock_t; -/* - * These macros triggered gcc-3.x compile-time problems. We think these are - * OK now. Be cautious. - */ -#define __SEQLOCK_UNLOCKED(lockname) \ - { \ - .seqcount = SEQCNT_ZERO(lockname), \ - .lock = __SPIN_LOCK_UNLOCKED(lockname) \ +#define __SEQLOCK_UNLOCKED(lockname) \ + { \ + .seqcount = SEQCNT_ZERO(lockname), \ + .lock = __SPIN_LOCK_UNLOCKED(lockname) \ } -#define seqlock_init(x) \ - do { \ - seqcount_init(&(x)->seqcount); \ - spin_lock_init(&(x)->lock); \ +/** + * seqlock_init() - dynamic initializer for seqlock_t + * @sl: Pointer to the seqlock_t instance + */ +#define seqlock_init(sl) \ + do { \ + seqcount_init(&(sl)->seqcount); \ + spin_lock_init(&(sl)->lock); \ } while (0) -#define DEFINE_SEQLOCK(x) \ - seqlock_t x = __SEQLOCK_UNLOCKED(x) +/** + * DEFINE_SEQLOCK() - Define a statically allocated seqlock_t + * @sl: Name of the seqlock_t instance + */ +#define DEFINE_SEQLOCK(sl) \ + seqlock_t sl = __SEQLOCK_UNLOCKED(sl) -/* - * Read side functions for starting and finalizing a read side section. +/** + * read_seqbegin() - start a seqlock_t read side critical section + * @sl: Pointer to seqlock_t + * + * Return: count, to be passed to read_seqretry() */ static inline unsigned read_seqbegin(const seqlock_t *sl) { - return read_seqcount_begin(&sl->seqcount); + unsigned ret = read_seqcount_begin(&sl->seqcount); + + kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ + kcsan_flat_atomic_begin(); + return ret; } +/** + * read_seqretry() - end a seqlock_t read side section + * @sl: Pointer to seqlock_t + * @start: count, from read_seqbegin() + * + * read_seqretry closes the read side critical section of given seqlock_t. + * If the critical section was invalid, it must be ignored (and typically + * retried). 
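+ *
+ * The usual pairing with read_seqbegin(), sketched by the editor with a
+ * hypothetical lock::
+ *
+ *	unsigned seq;
+ *
+ *	do {
+ *		seq = read_seqbegin(&foo_seqlock);
+ *		/* copy out the data protected by foo_seqlock */
+ *	} while (read_seqretry(&foo_seqlock, seq));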
+ * + * Return: true if a read section retry is required, else false + */ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { + /* + * Assume not nested: read_seqretry() may be called multiple times when + * completing read critical section. + */ + kcsan_flat_atomic_end(); + return read_seqcount_retry(&sl->seqcount, start); } -/* - * Lock out other writers and update the count. - * Acts like a normal spin_lock/unlock. - * Don't need preempt_disable() because that is in the spin_lock already. +/** + * write_seqlock() - start a seqlock_t write side critical section + * @sl: Pointer to seqlock_t + * + * write_seqlock opens a write side critical section for the given + * seqlock_t. It also implicitly acquires the spinlock_t embedded inside + * that sequential lock. All seqlock_t write side sections are thus + * automatically serialized and non-preemptible. + * + * Context: if the seqlock_t read section, or other write side critical + * sections, can be invoked from hardirq or softirq contexts, use the + * _irqsave or _bh variants of this function instead. */ static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } +/** + * write_sequnlock() - end a seqlock_t write side critical section + * @sl: Pointer to seqlock_t + * + * write_sequnlock closes the (serialized and non-preemptible) write side + * critical section of given seqlock_t. + */ static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock(&sl->lock); } +/** + * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section + * @sl: Pointer to seqlock_t + * + * _bh variant of write_seqlock(). Use only if the read side section, or + * other write side sections, can be invoked from softirq contexts. + */ static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } +/** + * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section + * @sl: Pointer to seqlock_t + * + * write_sequnlock_bh closes the serialized, non-preemptible, and + * softirqs-disabled, seqlock_t write side critical section opened with + * write_seqlock_bh(). + */ static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } +/** + * write_seqlock_irq() - start a non-interruptible seqlock_t write section + * @sl: Pointer to seqlock_t + * + * _irq variant of write_seqlock(). Use only if the read side section, or + * other write sections, can be invoked from hardirq contexts. + */ static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); } +/** + * write_sequnlock_irq() - end a non-interruptible seqlock_t write section + * @sl: Pointer to seqlock_t + * + * write_sequnlock_irq closes the serialized and non-interruptible + * seqlock_t write side section opened with write_seqlock_irq(). 
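+ *
+ * Editor's sketch of the full write side pairing (foo_seqlock is
+ * hypothetical)::
+ *
+ *	write_seqlock_irq(&foo_seqlock);
+ *	/* update the protected data; hardirqs stay disabled */
+ *	write_sequnlock_irq(&foo_seqlock);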
+ */ static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } @@ -484,79 +868,112 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - write_seqcount_begin(&sl->seqcount); + write_seqcount_t_begin(&sl->seqcount); return flags; } +/** + * write_seqlock_irqsave() - start a non-interruptible seqlock_t write + * section + * @lock: Pointer to seqlock_t + * @flags: Stack-allocated storage for saving caller's local interrupt + * state, to be passed to write_sequnlock_irqrestore(). + * + * _irqsave variant of write_seqlock(). Use it only if the read side + * section, or other write sections, can be invoked from hardirq context. + */ #define write_seqlock_irqsave(lock, flags) \ do { flags = __write_seqlock_irqsave(lock); } while (0) +/** + * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write + * section + * @sl: Pointer to seqlock_t + * @flags: Caller's saved interrupt state, from write_seqlock_irqsave() + * + * write_sequnlock_irqrestore closes the serialized and non-interruptible + * seqlock_t write section previously opened with write_seqlock_irqsave(). + */ static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_end(&sl->seqcount); + write_seqcount_t_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } -/* - * A locking reader exclusively locks out other writers and locking readers, - * but doesn't update the sequence number. Acts like a normal spin_lock/unlock. - * Don't need preempt_disable() because that is in the spin_lock already. +/** + * read_seqlock_excl() - begin a seqlock_t locking reader section + * @sl: Pointer to seqlock_t + * + * read_seqlock_excl opens a seqlock_t locking reader critical section. A + * locking reader exclusively locks out *both* other writers *and* other + * locking readers, but it does not update the embedded sequence number. + * + * Locking readers act like a normal spin_lock()/spin_unlock(). + * + * Context: if the seqlock_t write section, *or other read sections*, can + * be invoked from hardirq or softirq contexts, use the _irqsave or _bh + * variant of this function instead. + * + * The opened read section must be closed with read_sequnlock_excl(). */ static inline void read_seqlock_excl(seqlock_t *sl) { spin_lock(&sl->lock); } +/** + * read_sequnlock_excl() - end a seqlock_t locking reader critical section + * @sl: Pointer to seqlock_t + */ static inline void read_sequnlock_excl(seqlock_t *sl) { spin_unlock(&sl->lock); } /** - * read_seqbegin_or_lock - begin a sequence number check or locking block - * @lock: sequence lock - * @seq : sequence number to be checked + * read_seqlock_excl_bh() - start a seqlock_t locking reader section with + * softirqs disabled + * @sl: Pointer to seqlock_t * - * First try it once optimistically without taking the lock. If that fails, - * take the lock. The sequence number is also used as a marker for deciding - * whether to be a reader (even) or writer (odd). - * N.B. seq must be initialized to an even number to begin with. + * _bh variant of read_seqlock_excl(). Use this variant only if the + * seqlock_t write side section, *or other read sections*, can be invoked + * from softirq contexts. 
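+ *
+ * Editor's sketch of a locking reader (foo_seqlock is hypothetical)::
+ *
+ *	read_seqlock_excl_bh(&foo_seqlock);
+ *	/* read the data; writers and other locking readers are held off */
+ *	read_sequnlock_excl_bh(&foo_seqlock);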
 */ -static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) -{ - if (!(*seq & 1)) /* Even */ - *seq = read_seqbegin(lock); - else /* Odd */ - read_seqlock_excl(lock); -} - -static inline int need_seqretry(seqlock_t *lock, int seq) -{ - return !(seq & 1) && read_seqretry(lock, seq); -} - -static inline void done_seqretry(seqlock_t *lock, int seq) -{ - if (seq & 1) - read_sequnlock_excl(lock); -} - static inline void read_seqlock_excl_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); } +/** + * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking + * reader section + * @sl: Pointer to seqlock_t + */ static inline void read_sequnlock_excl_bh(seqlock_t *sl) { spin_unlock_bh(&sl->lock); } +/** + * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking + * reader section + * @sl: Pointer to seqlock_t + * + * _irq variant of read_seqlock_excl(). Use this only if the seqlock_t + * write side section, *or other read sections*, can be invoked from a + * hardirq context. + */ static inline void read_seqlock_excl_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); } +/** + * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t + * locking reader section + * @sl: Pointer to seqlock_t + */ static inline void read_sequnlock_excl_irq(seqlock_t *sl) { spin_unlock_irq(&sl->lock); @@ -570,15 +987,117 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) return flags; } +/** + * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t + * locking reader section + * @lock: Pointer to seqlock_t + * @flags: Stack-allocated storage for saving caller's local interrupt + * state, to be passed to read_sequnlock_excl_irqrestore(). + * + * _irqsave variant of read_seqlock_excl(). Use this only if the seqlock_t + * write side section, *or other read sections*, can be invoked from a + * hardirq context. + */ #define read_seqlock_excl_irqsave(lock, flags) \ do { flags = __read_seqlock_excl_irqsave(lock); } while (0) +/** + * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t + * locking reader section + * @sl: Pointer to seqlock_t + * @flags: Caller saved interrupt state, from read_seqlock_excl_irqsave() + */ static inline void read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) { spin_unlock_irqrestore(&sl->lock, flags); } +/** + * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader + * @lock: Pointer to seqlock_t + * @seq: Marker and return parameter. If the passed value is even, the + * reader will become a *lockless* seqlock_t reader as in read_seqbegin(). + * If the passed value is odd, the reader will become a *locking* reader + * as in read_seqlock_excl(). In the first call to this function, the + * caller *must* initialize and pass an even value to @seq; this way, a + * lockless read can be optimistically tried first. + * + * read_seqbegin_or_lock is an API designed to optimistically try a normal + * lockless seqlock_t read section first. If an odd counter is found, the + * lockless read trial has failed, and the next read iteration transforms + * itself into a full seqlock_t locking reader. + * + * This is typically used to avoid seqlock_t lockless reader starvation + * (too many retry loops) in the case of a sharp spike in write side + * activity. + * + * Context: if the seqlock_t write section, *or other read sections*, can + * be invoked from hardirq or softirq contexts, use the _irqsave or _bh + * variant of this function instead.
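+ *
+ * Editor's sketch of the usual retry-then-lock shape, modeled on in-tree
+ * callers (names are hypothetical)::
+ *
+ *	int seq = 0;	/* even: first pass is lockless */
+ *
+ * again:
+ *	read_seqbegin_or_lock(&foo_seqlock, &seq);
+ *	/* read the protected data */
+ *	if (need_seqretry(&foo_seqlock, seq)) {
+ *		seq = 1;	/* odd: next pass takes the lock */
+ *		goto again;
+ *	}
+ *	done_seqretry(&foo_seqlock, seq);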
+ * + * Check Documentation/locking/seqlock.rst for template example code. + * + * Return: the encountered sequence counter value, through the @seq + * parameter, which is overloaded as a return parameter. This returned + * value must be checked with need_seqretry(). If the read section needs to + * be retried, this returned value must also be passed as the @seq + * parameter of the next read_seqbegin_or_lock() iteration. + */ +static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) +{ + if (!(*seq & 1)) /* Even */ + *seq = read_seqbegin(lock); + else /* Odd */ + read_seqlock_excl(lock); +} + +/** + * need_seqretry() - validate seqlock_t "locking or lockless" read section + * @lock: Pointer to seqlock_t + * @seq: sequence count, from read_seqbegin_or_lock() + * + * Return: true if a read section retry is required, false otherwise + */ +static inline int need_seqretry(seqlock_t *lock, int seq) +{ + return !(seq & 1) && read_seqretry(lock, seq); +} + +/** + * done_seqretry() - end seqlock_t "locking or lockless" reader section + * @lock: Pointer to seqlock_t + * @seq: count, from read_seqbegin_or_lock() + * + * done_seqretry finishes the seqlock_t read side critical section started + * with read_seqbegin_or_lock() and validated by need_seqretry(). + */ +static inline void done_seqretry(seqlock_t *lock, int seq) +{ + if (seq & 1) + read_sequnlock_excl(lock); +} + +/** + * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or + * a non-interruptible locking reader + * @lock: Pointer to seqlock_t + * @seq: Marker and return parameter. Check read_seqbegin_or_lock(). + * + * This is the _irqsave variant of read_seqbegin_or_lock(). Use it only if + * the seqlock_t write section, *or other read sections*, can be invoked + * from hardirq context. + * + * Note: Interrupts will be disabled only for "locking reader" mode. + * + * Return: + * + * 1. The saved local interrupt state in case of a locking reader, to + * be passed to done_seqretry_irqrestore(). + * + * 2. The encountered sequence counter value, returned through @seq + * overloaded as a return parameter. Check read_seqbegin_or_lock(). + */ static inline unsigned long read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) { @@ -592,6 +1111,18 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) return flags; } +/** + * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a + * non-interruptible locking reader section + * @lock: Pointer to seqlock_t + * @seq: Count, from read_seqbegin_or_lock_irqsave() + * @flags: Caller's saved local interrupt state in case of a locking + * reader, also from read_seqbegin_or_lock_irqsave() + * + * This is the _irqrestore variant of done_seqretry(). The read section + * must've been opened with read_seqbegin_or_lock_irqsave(), and validated + * by need_seqretry().
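+ *
+ * Editor's sketch of the _irqsave flavour of the same template (names
+ * hypothetical)::
+ *
+ *	unsigned long flags;
+ *	int seq = 0;
+ *
+ * again:
+ *	flags = read_seqbegin_or_lock_irqsave(&foo_seqlock, &seq);
+ *	/* read the protected data */
+ *	if (need_seqretry(&foo_seqlock, seq)) {
+ *		seq = 1;
+ *		goto again;
+ *	}
+ *	done_seqretry_irqrestore(&foo_seqlock, seq, flags);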
+ */ static inline void done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) { diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 6545f8cfc8fa..2b70f736b091 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -155,6 +155,8 @@ extern int early_serial_setup(struct uart_port *port); extern int early_serial8250_setup(struct earlycon_device *device, const char *options); +extern void serial8250_update_uartclk(struct uart_port *port, + unsigned int uartclk); extern void serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old); extern void serial8250_do_set_ldisc(struct uart_port *port, diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 92f5eba86052..8a99279a579b 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -29,6 +29,7 @@ struct uart_port; struct serial_struct; struct device; +struct gpio_desc; /* * This structure describes all the operations that can be done on the @@ -247,10 +248,12 @@ struct uart_port { unsigned char hub6; /* this should be in the 8250 driver */ unsigned char suspended; + unsigned char console_reinit; const char *name; /* port name */ struct attribute_group *attr_group; /* port specific attributes */ const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ struct serial_rs485 rs485; + struct gpio_desc *rs485_term_gpio; /* enable RS485 bus termination */ struct serial_iso7816 iso7816; void *private_data; /* generic platform data pointer */ }; @@ -460,10 +463,104 @@ extern void uart_handle_cts_change(struct uart_port *uport, extern void uart_insert_char(struct uart_port *port, unsigned int status, unsigned int overrun, unsigned int ch, unsigned int flag); -extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch); -extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch); -extern void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags); -extern int uart_handle_break(struct uart_port *port); +#ifdef CONFIG_MAGIC_SYSRQ_SERIAL +#define SYSRQ_TIMEOUT (HZ * 5) + +bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch); + +static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (!port->sysrq) + return 0; + + if (ch && time_before(jiffies, port->sysrq)) { + if (sysrq_mask()) { + handle_sysrq(ch); + port->sysrq = 0; + return 1; + } + if (uart_try_toggle_sysrq(port, ch)) + return 1; + } + port->sysrq = 0; + + return 0; +} + +static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) +{ + if (!port->sysrq) + return 0; + + if (ch && time_before(jiffies, port->sysrq)) { + if (sysrq_mask()) { + port->sysrq_ch = ch; + port->sysrq = 0; + return 1; + } + if (uart_try_toggle_sysrq(port, ch)) + return 1; + } + port->sysrq = 0; + + return 0; +} + +static inline void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + int sysrq_ch; + + if (!port->has_sysrq) { + spin_unlock_irqrestore(&port->lock, irqflags); + return; + } + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, irqflags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} +#else /* CONFIG_MAGIC_SYSRQ_SERIAL */ +static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) +{ + return 0; +} +static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) +{ + return 0; +} +static inline void 
uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags) +{ + spin_unlock_irqrestore(&port->lock, irqflags); +} +#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ + +/* + * We do the SysRQ and SAK checking like this... + */ +static inline int uart_handle_break(struct uart_port *port) +{ + struct uart_state *state = port->state; + + if (port->handle_break) + port->handle_break(port); + +#ifdef CONFIG_MAGIC_SYSRQ_SERIAL + if (port->has_sysrq && uart_console(port)) { + if (!port->sysrq) { + port->sysrq = jiffies + SYSRQ_TIMEOUT; + return 1; + } + port->sysrq = 0; + } +#endif + if (port->flags & UPF_SAK) + do_SAK(state->port.tty); + return 0; +} /* * UART_ENABLE_MS - determine if port should enable modem status irqs @@ -472,5 +569,5 @@ extern int uart_handle_break(struct uart_port *port); (cflag) & CRTSCTS || \ !((cflag) & CLOCAL)) -void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf); +int uart_get_rs485_mode(struct uart_port *port); #endif /* LINUX_SERIAL_CORE_H */ diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 86281ac7c305..860e0f843c12 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page) #endif #ifndef set_mce_nospec -static inline int set_mce_nospec(unsigned long pfn) +static inline int set_mce_nospec(unsigned long pfn, bool unmap) { return 0; } diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 7a35a6901221..a5a5d1d4d7b1 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -36,6 +36,9 @@ struct shmem_sb_info { unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ kgid_t gid; /* Mount gid for root directory */ + bool full_inums; /* If i_ino should be uint or ino_t */ + ino_t next_ino; /* The next per-sb inode number to use */ + ino_t __percpu *ino_batch; /* The next per-cpu inode number to use */ struct mempolicy *mpol; /* default memory policy for mappings */ spinlock_t shrinklist_lock; /* Protects shrinklist */ struct list_head shrinklist; /* List of shinkable inodes */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 05bacd2ab135..7bbc0e9cf084 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -24,6 +24,14 @@ static inline void clear_siginfo(kernel_siginfo_t *info) #define SI_EXPANSION_SIZE (sizeof(struct siginfo) - sizeof(struct kernel_siginfo)) +static inline void copy_siginfo_to_external(siginfo_t *to, + const kernel_siginfo_t *from) +{ + memcpy(to, from, sizeof(*from)); + memset(((char *)to) + sizeof(struct kernel_siginfo), 0, + SI_EXPANSION_SIZE); +} + int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from); int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from); @@ -129,11 +137,11 @@ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ b3 = b->sig[3]; b2 = b->sig[2]; \ r->sig[3] = op(a3, b3); \ r->sig[2] = op(a2, b2); \ - /* fall through */ \ + fallthrough; \ case 2: \ a1 = a->sig[1]; b1 = b->sig[1]; \ r->sig[1] = op(a1, b1); \ - /* fall through */ \ + fallthrough; \ case 1: \ a0 = a->sig[0]; b0 = b->sig[0]; \ r->sig[0] = op(a0, b0); \ @@ -163,9 +171,9 @@ static inline void name(sigset_t *set) \ switch (_NSIG_WORDS) { \ case 4: set->sig[3] = op(set->sig[3]); \ set->sig[2] = op(set->sig[2]); \ - /* fall through */ \ + fallthrough; \ case 2: set->sig[1] = op(set->sig[1]); \ - /* fall through */ \ + fallthrough; \ case 1: 
set->sig[0] = op(set->sig[0]); \ break; \ default: \ @@ -186,7 +194,7 @@ static inline void sigemptyset(sigset_t *set) memset(set, 0, sizeof(sigset_t)); break; case 2: set->sig[1] = 0; - /* fall through */ + fallthrough; case 1: set->sig[0] = 0; break; } @@ -199,7 +207,7 @@ static inline void sigfillset(sigset_t *set) memset(set, -1, sizeof(sigset_t)); break; case 2: set->sig[1] = -1; - /* fall through */ + fallthrough; case 1: set->sig[0] = -1; break; } diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3000c526f552..04a18e01b362 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -71,7 +71,7 @@ * NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain * TCP or UDP packets over IPv6. These are specifically * unencapsulated packets of the form IPv6|TCP or - * IPv4|UDP where the Next Header field in the IPv6 + * IPv6|UDP where the Next Header field in the IPv6 * header is either TCP or UDP. IPv6 extension headers * are not supported with this feature. This feature * cannot be set in features for a device with @@ -238,6 +238,7 @@ SKB_DATA_ALIGN(sizeof(struct sk_buff)) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +struct ahash_request; struct net_device; struct scatterlist; struct pipe_inode_info; @@ -283,6 +284,7 @@ struct nf_bridge_info { */ struct tc_skb_ext { __u32 chain; + __u16 mru; }; #endif @@ -1054,7 +1056,16 @@ void kfree_skb(struct sk_buff *skb); void kfree_skb_list(struct sk_buff *segs); void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt); void skb_tx_error(struct sk_buff *skb); + +#ifdef CONFIG_TRACEPOINTS void consume_skb(struct sk_buff *skb); +#else +static inline void consume_skb(struct sk_buff *skb) +{ + return kfree_skb(skb); +} +#endif + void __consume_stateless_skb(struct sk_buff *skb); void __kfree_skb(struct sk_buff *skb); extern struct kmem_cache *skbuff_head_cache; @@ -1283,32 +1294,6 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector, const struct flow_dissector_key *key, unsigned int key_count); -#ifdef CONFIG_NET -int skb_flow_dissector_prog_query(const union bpf_attr *attr, - union bpf_attr __user *uattr); -int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, - struct bpf_prog *prog); - -int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr); -#else -static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr, - union bpf_attr __user *uattr) -{ - return -EOPNOTSUPP; -} - -static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr, - struct bpf_prog *prog) -{ - return -EOPNOTSUPP; -} - -static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) -{ - return -EOPNOTSUPP; -} -#endif - struct bpf_flow_dissector; bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, __be16 proto, int nhoff, int hlen, unsigned int flags); @@ -1354,7 +1339,7 @@ void skb_flow_dissect_meta(const struct sk_buff *skb, void *target_container); /* Gets a skb connection tracking info, ctinfo map should be a - * a map of mapsize to translate enum ip_conntrack_info states + * map of mapsize to translate enum ip_conntrack_info states * to user states. 
 */ void @@ -1368,6 +1353,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); +void skb_flow_dissect_hash(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container); + static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) @@ -2678,7 +2667,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) * * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS) * to reduce average number of cache lines per packet. - * get_rps_cpus() for example only access one 64 bytes aligned block : + * get_rps_cpu() for example only accesses one 64-byte aligned block: * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8) */ #ifndef NET_SKB_PAD @@ -3234,8 +3223,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error if @free_on_error is true. */ -static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, - bool free_on_error) +static inline int __must_check __skb_put_padto(struct sk_buff *skb, + unsigned int len, + bool free_on_error) { unsigned int size = skb->len; @@ -3258,7 +3248,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. */ -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) { return __skb_put_padto(skb, len, true); } @@ -3765,19 +3755,19 @@ static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, #define __it(x, op) (x -= sizeof(u##op)) #define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) case 32: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 24: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 16: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 8: diffs |= __it_diff(a, b, 64); break; case 28: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 20: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 12: diffs |= __it_diff(a, b, 64); - /* fall through */ + fallthrough; case 4: diffs |= __it_diff(a, b, 32); break; } @@ -3838,7 +3828,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) * must call this function to return the skb back to the stack with a * timestamp. * - * @skb: clone of the the original outgoing packet + * @skb: clone of the original outgoing packet * @hwtstamps: hardware time stamps * */ @@ -3945,6 +3935,14 @@ static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb) } } +static inline void __skb_reset_checksum_unnecessary(struct sk_buff *skb) +{ + if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 0; + } +} + /* Check if we need to perform checksum complete validation.
* * Returns true if checksum complete is needed, false otherwise @@ -4165,7 +4163,7 @@ struct skb_ext { char data[] __aligned(8); }; -struct skb_ext *__skb_ext_alloc(void); +struct skb_ext *__skb_ext_alloc(gfp_t flags); void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, struct skb_ext *ext); void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index ad31c9fb7158..1e9ed840b9fc 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -430,6 +430,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog, bpf_prog_put(prog); } +static inline int psock_replace_prog(struct bpf_prog **pprog, + struct bpf_prog *prog, + struct bpf_prog *old) +{ + if (cmpxchg(pprog, old, prog) != old) + return -ENOENT; + + if (old) + bpf_prog_put(old); + + return 0; +} + static inline void psock_progs_drop(struct sk_psock_progs *progs) { psock_set_prog(&progs->msg_parser, NULL); @@ -437,4 +450,12 @@ static inline void psock_progs_drop(struct sk_psock_progs *progs) psock_set_prog(&progs->skb_verdict, NULL); } +int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb); + +static inline bool sk_psock_strp_enabled(struct sk_psock *psock) +{ + if (!psock) + return false; + return psock->parser.enabled; +} #endif /* _LINUX_SKMSG_H */ diff --git a/include/linux/slab.h b/include/linux/slab.h index 6d454886bcaf..24df2393ec03 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -155,9 +155,6 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name, void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); -void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *); -void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); - /* * Please use this macro to create slab caches. Simply specify the * name of the structure and maybe some flags that are listed above. @@ -186,10 +183,12 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *); */ void * __must_check krealloc(const void *, size_t, gfp_t); void kfree(const void *); -void kzfree(const void *); +void kfree_sensitive(const void *); size_t __ksize(const void *); size_t ksize(const void *); +#define kzfree(x) kfree_sensitive(x) /* For backward compatibility */ + #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR void __check_heap_object(const void *ptr, unsigned long n, struct page *page, bool to_user); @@ -578,8 +577,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) return __kmalloc_node(size, flags, node); } -int memcg_update_all_caches(int num_memcgs); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. 
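The slab.h hunk above renames kzfree() to kfree_sensitive() and keeps kzfree() as a compatibility macro. A minimal sketch of the intended call pattern, as an editor's illustration (the buffer and its size are hypothetical):

	u8 *key = kmalloc(64, GFP_KERNEL);

	if (!key)
		return -ENOMEM;
	/* ... key holds secret material ... */
	kfree_sensitive(key);	/* zeroes the buffer before freeing it */
	return 0;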
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index abc7de77b988..9eb430c163c2 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -72,9 +72,6 @@ struct kmem_cache { int obj_offset; #endif /* CONFIG_DEBUG_SLAB */ -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; -#endif #ifdef CONFIG_KASAN struct kasan_cache kasan_info; #endif @@ -114,4 +111,10 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, return reciprocal_divide(offset, cache->reciprocal_buffer_size); } +static inline int objs_per_slab_page(const struct kmem_cache *cache, + const struct page *page) +{ + return cache->num; +} + #endif /* _LINUX_SLAB_DEF_H */ diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index d2153789bd9f..1be0ed5befa1 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -8,6 +8,7 @@ * (C) 2007 SGI, Christoph Lameter */ #include <linux/kobject.h> +#include <linux/reciprocal_div.h> enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -86,6 +87,7 @@ struct kmem_cache { unsigned long min_partial; unsigned int size; /* The size of an object including metadata */ unsigned int object_size;/* The size of an object without metadata */ + struct reciprocal_value reciprocal_size; unsigned int offset; /* Free pointer offset */ #ifdef CONFIG_SLUB_CPU_PARTIAL /* Number of per cpu partial objects to keep around */ @@ -106,17 +108,7 @@ struct kmem_cache { struct list_head list; /* List of slab caches */ #ifdef CONFIG_SYSFS struct kobject kobj; /* For sysfs */ - struct work_struct kobj_remove_work; #endif -#ifdef CONFIG_MEMCG - struct memcg_cache_params memcg_params; - /* For propagation, maximum size of a stored attr */ - unsigned int max_attr_size; -#ifdef CONFIG_SYSFS - struct kset *memcg_kset; -#endif -#endif - #ifdef CONFIG_SLAB_FREELIST_HARDENED unsigned long random; #endif @@ -182,4 +174,23 @@ static inline void *nearest_obj(struct kmem_cache *cache, struct page *page, return result; } +/* Determine object index from a given position */ +static inline unsigned int __obj_to_index(const struct kmem_cache *cache, + void *addr, void *obj) +{ + return reciprocal_divide(kasan_reset_tag(obj) - addr, + cache->reciprocal_size); +} + +static inline unsigned int obj_to_index(const struct kmem_cache *cache, + const struct page *page, void *obj) +{ + return __obj_to_index(cache, page_address(page), obj); +} + +static inline int objs_per_slab_page(const struct kmem_cache *cache, + const struct page *page) +{ + return page->objects; +} #endif /* _LINUX_SLUB_DEF_H */ diff --git a/include/linux/smp.h b/include/linux/smp.h index cbc9162689d0..80d557ef8a11 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -12,21 +12,36 @@ #include <linux/list.h> #include <linux/cpumask.h> #include <linux/init.h> -#include <linux/llist.h> +#include <linux/smp_types.h> typedef void (*smp_call_func_t)(void *info); typedef bool (*smp_cond_func_t)(int cpu, void *info); + +/* + * structure shares (partial) layout with struct irq_work + */ struct __call_single_data { - struct llist_node llist; + union { + struct __call_single_node node; + struct { + struct llist_node llist; + unsigned int flags; + }; + }; smp_call_func_t func; void *info; - unsigned int flags; }; /* Use __aligned() to avoid to use 2 cache lines for 1 csd */ typedef struct __call_single_data call_single_data_t __aligned(sizeof(struct __call_single_data)); +/* + * Enqueue a llist_node on the call_single_queue; be very careful, read + * flush_smp_call_function_queue() 
in detail. + */ +extern void __smp_call_single_queue(int cpu, struct llist_node *node); + /* total number of cpus in this system (may exceed NR_CPUS) */ extern unsigned int total_cpus; @@ -227,8 +242,8 @@ static inline int get_boot_cpu_id(void) */ extern void arch_disable_smp_support(void); -extern void arch_enable_nonboot_cpus_begin(void); -extern void arch_enable_nonboot_cpus_end(void); +extern void arch_thaw_secondary_cpus_begin(void); +extern void arch_thaw_secondary_cpus_end(void); void smp_setup_processor_id(void); diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h new file mode 100644 index 000000000000..364b3ae3e41d --- /dev/null +++ b/include/linux/smp_types.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_SMP_TYPES_H +#define __LINUX_SMP_TYPES_H + +#include <linux/llist.h> + +enum { + CSD_FLAG_LOCK = 0x01, + + IRQ_WORK_PENDING = 0x01, + IRQ_WORK_BUSY = 0x02, + IRQ_WORK_LAZY = 0x04, /* No IPI, wait for tick */ + IRQ_WORK_HARD_IRQ = 0x08, /* IRQ context on PREEMPT_RT */ + + IRQ_WORK_CLAIMED = (IRQ_WORK_PENDING | IRQ_WORK_BUSY), + + CSD_TYPE_ASYNC = 0x00, + CSD_TYPE_SYNC = 0x10, + CSD_TYPE_IRQ_WORK = 0x20, + CSD_TYPE_TTWU = 0x30, + + CSD_FLAG_TYPE_MASK = 0xF0, +}; + +/* + * struct __call_single_node is the primary type on + * smp.c:call_single_queue. + * + * flush_smp_call_function_queue() only reads the type from + * __call_single_node::u_flags as a regular load, the above + * (anonymous) enum defines all the bits of this word. + * + * Other bits are not modified until the type is known. + * + * CSD_TYPE_SYNC/ASYNC: + * struct { + * struct llist_node node; + * unsigned int flags; + * smp_call_func_t func; + * void *info; + * }; + * + * CSD_TYPE_IRQ_WORK: + * struct { + * struct llist_node node; + * atomic_t flags; + * void (*func)(struct irq_work *); + * }; + * + * CSD_TYPE_TTWU: + * struct { + * struct llist_node node; + * unsigned int flags; + * }; + * + */ + +struct __call_single_node { + struct llist_node llist; + union { + unsigned int u_flags; + atomic_t a_flags; + }; +}; + +#endif /* __LINUX_SMP_TYPES_H */ diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index a74c1d5acdf3..2249ecaf77e4 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -121,6 +121,15 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event); int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event); /** + * cmdq_pkt_set_event() - append set event command to the CMDQ packet + * @pkt: the CMDQ packet + * @event: the desired event to be set + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event); + +/** * cmdq_pkt_poll() - Append polling command to the CMDQ packet, ask GCE to * execute an instruction that waits for a specified * hardware register to check for the value w/o mask. @@ -152,6 +161,28 @@ int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys, */ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value, u32 mask); + +/** + * cmdq_pkt_assign() - Append logic assign command to the CMDQ packet, ask GCE + * to execute an instruction that sets a constant value into + * an internal register and uses it as the value, mask or + * address in a read/write instruction.
+ * @pkt: the CMDQ packet + * @reg_idx: the CMDQ internal register ID + * @value: the specified value + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); + +/** + * cmdq_pkt_finalize() - Append EOC and jump command to pkt. + * @pkt: the CMDQ packet + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_finalize(struct cmdq_pkt *pkt); + /** * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ * packet and call back at the end of done packet diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h new file mode 100644 index 000000000000..7bab5d9a3d31 --- /dev/null +++ b/include/linux/soc/mediatek/mtk-mmsys.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + */ + +#ifndef __MTK_MMSYS_H +#define __MTK_MMSYS_H + +enum mtk_ddp_comp_id; +struct device; + +void mtk_mmsys_ddp_connect(struct device *dev, + enum mtk_ddp_comp_id cur, + enum mtk_ddp_comp_id next); + +void mtk_mmsys_ddp_disconnect(struct device *dev, + enum mtk_ddp_comp_id cur, + enum mtk_ddp_comp_id next); + +#endif /* __MTK_MMSYS_H */ diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h index 26f73df0a524..5a472eca5ee4 100644 --- a/include/linux/soc/ti/k3-ringacc.h +++ b/include/linux/soc/ti/k3-ringacc.h @@ -2,7 +2,7 @@ /* * K3 Ring Accelerator (RA) subsystem interface * - * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com */ #ifndef __SOC_TI_K3_RINGACC_API_H_ @@ -107,6 +107,10 @@ struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, int id, u32 flags); +int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc, + int fwd_id, int compl_id, + struct k3_ring **fwd_ring, + struct k3_ring **compl_ring); /** * k3_ringacc_ring_reset - ring reset * @ring: pointer on Ring diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h index 9745df6ed9d3..c75ef99c99ca 100644 --- a/include/linux/soc/ti/knav_qmss.h +++ b/include/linux/soc/ti/knav_qmss.h @@ -1,7 +1,7 @@ /* * Keystone Navigator Queue Management Sub-System header * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com * Author: Sandeep Nair <sandeep_n@ti.com> * Cyril Chemparathy <cyril@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com> diff --git a/include/linux/soc/ti/ti-msgmgr.h b/include/linux/soc/ti/ti-msgmgr.h index eac8e0c6fe11..1f6e76d423cf 100644 --- a/include/linux/soc/ti/ti-msgmgr.h +++ b/include/linux/soc/ti/ti-msgmgr.h @@ -1,7 +1,7 @@ /* * Texas Instruments' Message Manager * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon * * This program is free software; you can redistribute it and/or modify diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h index 11fb5048f5f6..e3aa8b14612e 100644 --- a/include/linux/soc/ti/ti_sci_inta_msi.h +++ b/include/linux/soc/ti/ti_sci_inta_msi.h @@ -2,7 +2,7 @@ /* * Texas Instruments' K3 TI SCI INTA MSI helper * - * Copyright (C) 2018-2019 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2018-2019 Texas 
Instruments Incorporated - https://www.ti.com/ * Lokesh Vutla <lokeshvutla@ti.com> */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index 9531ec823298..cf27b080e148 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -2,7 +2,7 @@ /* * Texas Instruments System Control Interface Protocol * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Nishanth Menon */ @@ -220,14 +220,17 @@ struct ti_sci_rm_core_ops { u16 *range_start, u16 *range_num); }; +#define TI_SCI_RESASG_SUBTYPE_IR_OUTPUT 0 +#define TI_SCI_RESASG_SUBTYPE_IA_VINT 0xa +#define TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT 0xd /** * struct ti_sci_rm_irq_ops: IRQ management operations * @set_irq: Set an IRQ route between the requested source * and destination * @set_event_map: Set an Event based peripheral irq to Interrupt * Aggregator. - * @free_irq: Free an an IRQ route between the requested source - * destination. + * @free_irq: Free an IRQ route between the requested source + * and destination. * @free_event_map: Free an event based peripheral irq to Interrupt * Aggregator. */ @@ -556,6 +559,9 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res); struct ti_sci_resource * devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, struct device *dev, u32 dev_id, char *of_prop); +struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type); #else /* CONFIG_TI_SCI_PROTOCOL */ @@ -609,6 +615,13 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, { return ERR_PTR(-EINVAL); } + +static inline struct ti_sci_resource * +devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev, + u32 dev_id, u32 sub_type) +{ + return ERR_PTR(-EINVAL); +} #endif /* CONFIG_TI_SCI_PROTOCOL */ #endif /* __TISCI_PROTOCOL_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index 54338fac45cb..e9cb30d8cbfb 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -10,6 +10,7 @@ #include <linux/compiler.h> /* __user */ #include <uapi/linux/socket.h> +struct file; struct pid; struct cred; struct socket; @@ -50,7 +51,17 @@ struct msghdr { void *msg_name; /* ptr to socket address structure */ int msg_namelen; /* size of socket address structure */ struct iov_iter msg_iter; /* data */ - void *msg_control; /* ancillary data */ + + /* + * Ancillary data. msg_control_user is the user buffer used for the + * recv* side when msg_control_is_user is set, msg_control is the kernel + * buffer used for all other cases.
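A usage sketch for the TISCI additions above: devm_ti_sci_get_resource() looks up a resource range by numeric subtype (one of the new TI_SCI_RESASG_SUBTYPE_* values) rather than by a DT property name, as devm_ti_sci_get_of_resource() does. The device ID and driver context below are made up purely for illustration:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/ti/ti_sci_protocol.h>

static int demo_get_vint_range(struct device *dev,
                               const struct ti_sci_handle *handle)
{
        struct ti_sci_resource *res;

        /* 26 is a made-up TISCI device ID, purely for this sketch */
        res = devm_ti_sci_get_resource(handle, dev, 26,
                                       TI_SCI_RESASG_SUBTYPE_IA_VINT);
        if (IS_ERR(res))
                return PTR_ERR(res);

        dev_info(dev, "%u resource ranges available\n",
                 ti_sci_get_num_resources(res));
        return 0;
}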
+ */ + union { + void *msg_control; + void __user *msg_control_user; + }; + bool msg_control_is_user : 1; __kernel_size_t msg_controllen; /* ancillary data buffer length */ unsigned int msg_flags; /* flags on received message */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ @@ -94,7 +105,10 @@ struct cmsghdr { #define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) ) -#define CMSG_DATA(cmsg) ((void *)((char *)(cmsg) + sizeof(struct cmsghdr))) +#define CMSG_DATA(cmsg) \ + ((void *)(cmsg) + sizeof(struct cmsghdr)) +#define CMSG_USER_DATA(cmsg) \ + ((void __user *)(cmsg) + sizeof(struct cmsghdr)) #define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len)) #define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len)) diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h new file mode 100644 index 000000000000..ea193414298b --- /dev/null +++ b/include/linux/sockptr.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 Christoph Hellwig. + * + * Support for "universal" pointers that can point to either kernel or userspace + * memory. + */ +#ifndef _LINUX_SOCKPTR_H +#define _LINUX_SOCKPTR_H + +#include <linux/slab.h> +#include <linux/uaccess.h> + +typedef struct { + union { + void *kernel; + void __user *user; + }; + bool is_kernel : 1; +} sockptr_t; + +static inline bool sockptr_is_kernel(sockptr_t sockptr) +{ + return sockptr.is_kernel; +} + +static inline sockptr_t KERNEL_SOCKPTR(void *p) +{ + return (sockptr_t) { .kernel = p, .is_kernel = true }; +} + +static inline sockptr_t USER_SOCKPTR(void __user *p) +{ + return (sockptr_t) { .user = p }; +} + +static inline bool sockptr_is_null(sockptr_t sockptr) +{ + if (sockptr_is_kernel(sockptr)) + return !sockptr.kernel; + return !sockptr.user; +} + +static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, + size_t offset, size_t size) +{ + if (!sockptr_is_kernel(src)) + return copy_from_user(dst, src.user + offset, size); + memcpy(dst, src.kernel + offset, size); + return 0; +} + +static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) +{ + return copy_from_sockptr_offset(dst, src, 0, size); +} + +static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, + const void *src, size_t size) +{ + if (!sockptr_is_kernel(dst)) + return copy_to_user(dst.user + offset, src, size); + memcpy(dst.kernel + offset, src, size); + return 0; +} + +static inline void *memdup_sockptr(sockptr_t src, size_t len) +{ + void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + return p; +} + +static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) +{ + char *p = kmalloc_track_caller(len + 1, GFP_KERNEL); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_sockptr(p, src, len)) { + kfree(p); + return ERR_PTR(-EFAULT); + } + p[len] = '\0'; + return p; +} + +static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) +{ + if (sockptr_is_kernel(src)) { + size_t len = min(strnlen(src.kernel, count - 1) + 1, count); + + memcpy(dst, src.kernel, len); + return len; + } + return strncpy_from_user(dst, src.user, count); +} + +#endif /* _LINUX_SOCKPTR_H */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 00f5826092e3..76052f12c9f7 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -88,10 +88,10 @@ enum sdw_slave_status { * @SDW_CLK_POST_DEPREPARE: 
post clock stop de-prepare */ enum sdw_clk_stop_type { - SDW_CLK_PRE_PREPARE = 0, - SDW_CLK_POST_PREPARE, - SDW_CLK_PRE_DEPREPARE, - SDW_CLK_POST_DEPREPARE, + SDW_CLK_PRE_PREPARE = 0, + SDW_CLK_POST_PREPARE, + SDW_CLK_PRE_DEPREPARE, + SDW_CLK_POST_DEPREPARE, }; /** @@ -152,19 +152,19 @@ enum sdw_data_direction { * * @SDW_PORT_DATA_MODE_NORMAL: Normal data mode where audio data is received * and transmitted. + * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce + * a pseudo random data pattern that is transferred + * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of + * logic 0. The encoding will result in no signal transitions * @SDW_PORT_DATA_MODE_STATIC_1: Simple test mode which uses static value of * logic 1. The encoding will result in signal transitions at every bitslot * owned by this Port - * @SDW_PORT_DATA_MODE_STATIC_0: Simple test mode which uses static value of - * logic 0. The encoding will result in no signal transitions - * @SDW_PORT_DATA_MODE_PRBS: Test mode which uses a PRBS generator to produce - * a pseudo random data pattern that is transferred */ enum sdw_port_data_mode { SDW_PORT_DATA_MODE_NORMAL = 0, - SDW_PORT_DATA_MODE_STATIC_1 = 1, + SDW_PORT_DATA_MODE_PRBS = 1, SDW_PORT_DATA_MODE_STATIC_0 = 2, - SDW_PORT_DATA_MODE_PRBS = 3, + SDW_PORT_DATA_MODE_STATIC_1 = 3, }; /* @@ -291,8 +291,8 @@ struct sdw_dpn_audio_mode { * implementation-defined interrupts * @max_ch: Maximum channels supported * @min_ch: Minimum channels supported - * @num_ch: Number of discrete channels supported - * @ch: Discrete channels supported + * @num_channels: Number of discrete channels supported + * @channels: Discrete channels supported * @num_ch_combinations: Number of channel combinations supported * @ch_combinations: Channel combinations supported * @modes: SDW mode supported @@ -316,8 +316,8 @@ struct sdw_dpn_prop { u32 imp_def_interrupts; u32 max_ch; u32 min_ch; - u32 num_ch; - u32 *ch; + u32 num_channels; + u32 *channels; u32 num_ch_combinations; u32 *ch_combinations; u32 modes; @@ -426,8 +426,7 @@ int sdw_slave_read_prop(struct sdw_slave *slave); * struct sdw_slave_id - Slave ID * @mfg_id: MIPI Manufacturer ID * @part_id: Device Part ID - * @class_id: MIPI Class ID, unused now. 
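Returning to the new <linux/sockptr.h> introduced above: its purpose is to let one option-parsing path accept either a kernel or a user buffer. A hedged sketch of a hypothetical setsockopt-style handler (not code from the tree) built on copy_from_sockptr():

#include <linux/errno.h>
#include <linux/sockptr.h>

/* hypothetical handler: copy a single int option out of a sockptr_t */
static int demo_set_intopt(sockptr_t optval, unsigned int optlen, int *out)
{
        int val;

        if (optlen < sizeof(val))
                return -EINVAL;
        /* works for both KERNEL_SOCKPTR() and USER_SOCKPTR() sources */
        if (copy_from_sockptr(&val, optval, sizeof(val)))
                return -EFAULT;
        *out = val;
        return 0;
}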
- * Currently a placeholder in MIPI SoundWire Spec + * @class_id: MIPI Class ID (defined starting with SoundWire 1.2 spec) * @unique_id: Device unique ID * @sdw_version: SDW version implemented * @@ -632,6 +631,19 @@ struct sdw_slave { #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) +/** + * struct sdw_master_device - SoundWire 'Master Device' representation + * @dev: Linux device for this Master + * @bus: Bus handle shortcut + */ +struct sdw_master_device { + struct device dev; + struct sdw_bus *bus; +}; + +#define dev_to_sdw_master_device(d) \ + container_of(d, struct sdw_master_device, dev) + struct sdw_driver { const char *name; @@ -646,10 +658,14 @@ struct sdw_driver { struct device_driver driver; }; -#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \ - { .mfg_id = (_mfg_id), .part_id = (_part_id), \ +#define SDW_SLAVE_ENTRY_EXT(_mfg_id, _part_id, _version, _c_id, _drv_data) \ + { .mfg_id = (_mfg_id), .part_id = (_part_id), \ + .sdw_version = (_version), .class_id = (_c_id), \ .driver_data = (unsigned long)(_drv_data) } +#define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \ + SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data)) + int sdw_handle_slave_status(struct sdw_bus *bus, enum sdw_slave_status status[]); @@ -787,8 +803,10 @@ struct sdw_master_ops { /** * struct sdw_bus - SoundWire bus - * @dev: Master linux device + * @dev: Shortcut to &bus->md->dev to avoid changing the entire code. + * @md: Master device * @link_id: Link id number, can be 0 to N, unique for each Master + * @id: bus system-wide unique id * @slaves: list of Slaves on this bus * @assigned: Bitmap for Slave device numbers. * Bit set implies used number, bit clear implies unused number. @@ -812,7 +830,9 @@ struct sdw_master_ops { */ struct sdw_bus { struct device *dev; + struct sdw_master_device *md; unsigned int link_id; + int id; struct list_head slaves; DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); struct mutex bus_lock; @@ -832,8 +852,9 @@ struct sdw_bus { bool multi_link; }; -int sdw_add_bus_master(struct sdw_bus *bus); -void sdw_delete_bus_master(struct sdw_bus *bus); +int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, + struct fwnode_handle *fwnode); +void sdw_bus_master_delete(struct sdw_bus *bus); /** * sdw_port_config: Master or Slave Port configuration @@ -934,10 +955,12 @@ int sdw_stream_remove_master(struct sdw_bus *bus, struct sdw_stream_runtime *stream); int sdw_stream_remove_slave(struct sdw_slave *slave, struct sdw_stream_runtime *stream); +int sdw_startup_stream(void *sdw_substream); int sdw_prepare_stream(struct sdw_stream_runtime *stream); int sdw_enable_stream(struct sdw_stream_runtime *stream); int sdw_disable_stream(struct sdw_stream_runtime *stream); int sdw_deprepare_stream(struct sdw_stream_runtime *stream); +void sdw_shutdown_stream(void *sdw_substream); int sdw_bus_prep_clk_stop(struct sdw_bus *bus); int sdw_bus_clk_stop(struct sdw_bus *bus); int sdw_bus_exit_clk_stop(struct sdw_bus *bus); diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 979b41b5dcb4..120ffddc03d2 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -115,6 +115,7 @@ struct sdw_intel_slave_id { * links * @link_list: list to handle interrupts across all links * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers. 
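A sketch of how a driver could use the SDW_SLAVE_ENTRY_EXT() macro added above to match on SoundWire version and class ID as well as manufacturer/part; all the IDs here are placeholders, not a real codec:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/soundwire/sdw_type.h>

static const struct sdw_device_id demo_id_table[] = {
        /* mfg_id, part_id, sdw_version, class_id, driver data */
        SDW_SLAVE_ENTRY_EXT(0x0123, 0x4567, 0x1, 0x0, 0),
        /* the old-style entry still works; version and class default to 0 */
        SDW_SLAVE_ENTRY(0x0123, 0x89ab, 0),
        {},
};
MODULE_DEVICE_TABLE(sdw, demo_id_table);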
+ * @shim_mask: flags to track initialization of SHIM shared registers */ struct sdw_intel_ctx { int count; @@ -126,6 +127,7 @@ struct sdw_intel_ctx { struct sdw_intel_slave_id *ids; struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ + u32 shim_mask; }; /** diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index a686f7988156..5d3c271af7d1 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -12,7 +12,7 @@ #define SDW_REG_SHIFT(n) (ffs(n) - 1) /* - * SDW registers as defined by MIPI 1.1 Spec + * SDW registers as defined by MIPI 1.2 Spec */ #define SDW_REGADDR GENMASK(14, 0) #define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15) @@ -43,6 +43,8 @@ #define SDW_DP0_INT_TEST_FAIL BIT(0) #define SDW_DP0_INT_PORT_READY BIT(1) #define SDW_DP0_INT_BRA_FAILURE BIT(2) +#define SDW_DP0_SDCA_CASCADE BIT(3) +/* BIT(4) not allocated in SoundWire specification 1.2 */ #define SDW_DP0_INT_IMPDEF1 BIT(5) #define SDW_DP0_INT_IMPDEF2 BIT(6) #define SDW_DP0_INT_IMPDEF3 BIT(7) @@ -106,6 +108,20 @@ #define SDW_SCP_ADDRPAGE2 0x49 #define SDW_SCP_KEEPEREN 0x4A #define SDW_SCP_BANKDELAY 0x4B +#define SDW_SCP_COMMIT 0x4C + +#define SDW_SCP_BUS_CLOCK_BASE 0x4D +#define SDW_SCP_BASE_CLOCK_FREQ GENMASK(2, 0) +#define SDW_SCP_BASE_CLOCK_UNKNOWN 0x0 +#define SDW_SCP_BASE_CLOCK_19200000_HZ 0x1 +#define SDW_SCP_BASE_CLOCK_24000000_HZ 0x2 +#define SDW_SCP_BASE_CLOCK_24576000_HZ 0x3 +#define SDW_SCP_BASE_CLOCK_22579200_HZ 0x4 +#define SDW_SCP_BASE_CLOCK_32000000_HZ 0x5 +#define SDW_SCP_BASE_CLOCK_RESERVED 0x6 +#define SDW_SCP_BASE_CLOCK_IMP_DEF 0x7 + +/* 0x4E is not allocated in SoundWire specification 1.2 */ #define SDW_SCP_TESTMODE 0x4F #define SDW_SCP_DEVID_0 0x50 #define SDW_SCP_DEVID_1 0x51 @@ -114,12 +130,111 @@ #define SDW_SCP_DEVID_4 0x54 #define SDW_SCP_DEVID_5 0x55 +/* Both INT and STATUS register are same */ +#define SDW_SCP_SDCA_INT1 0x58 +#define SDW_SCP_SDCA_INT_SDCA_0 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_1 BIT(1) +#define SDW_SCP_SDCA_INT_SDCA_2 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_3 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_4 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_5 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_6 BIT(6) +#define SDW_SCP_SDCA_INT_SDCA_7 BIT(7) + +#define SDW_SCP_SDCA_INT2 0x59 +#define SDW_SCP_SDCA_INT_SDCA_8 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_9 BIT(1) +#define SDW_SCP_SDCA_INT_SDCA_10 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_11 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_12 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_13 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_14 BIT(6) +#define SDW_SCP_SDCA_INT_SDCA_15 BIT(7) + +#define SDW_SCP_SDCA_INT3 0x5A +#define SDW_SCP_SDCA_INT_SDCA_16 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_17 BIT(1) +#define SDW_SCP_SDCA_INT_SDCA_18 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_19 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_20 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_21 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_22 BIT(6) +#define SDW_SCP_SDCA_INT_SDCA_23 BIT(7) + +#define SDW_SCP_SDCA_INT4 0x5B +#define SDW_SCP_SDCA_INT_SDCA_24 BIT(0) +#define SDW_SCP_SDCA_INT_SDCA_25 BIT(1) +#define SDW_SCP_SDCA_INT_SDCA_26 BIT(2) +#define SDW_SCP_SDCA_INT_SDCA_27 BIT(3) +#define SDW_SCP_SDCA_INT_SDCA_28 BIT(4) +#define SDW_SCP_SDCA_INT_SDCA_29 BIT(5) +#define SDW_SCP_SDCA_INT_SDCA_30 BIT(6) +/* BIT(7) not allocated in SoundWire 1.2 specification */ + +#define SDW_SCP_SDCA_INTMASK1 0x5C +#define SDW_SCP_SDCA_INTMASK_SDCA_0 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_1 BIT(1) +#define 
SDW_SCP_SDCA_INTMASK_SDCA_2 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_3 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_4 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_5 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_6 BIT(6) +#define SDW_SCP_SDCA_INTMASK_SDCA_7 BIT(7) + +#define SDW_SCP_SDCA_INTMASK2 0x5D +#define SDW_SCP_SDCA_INTMASK_SDCA_8 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_9 BIT(1) +#define SDW_SCP_SDCA_INTMASK_SDCA_10 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_11 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_12 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_13 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_14 BIT(6) +#define SDW_SCP_SDCA_INTMASK_SDCA_15 BIT(7) + +#define SDW_SCP_SDCA_INTMASK3 0x5E +#define SDW_SCP_SDCA_INTMASK_SDCA_16 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_17 BIT(1) +#define SDW_SCP_SDCA_INTMASK_SDCA_18 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_19 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_20 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_21 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_22 BIT(6) +#define SDW_SCP_SDCA_INTMASK_SDCA_23 BIT(7) + +#define SDW_SCP_SDCA_INTMASK4 0x5F +#define SDW_SCP_SDCA_INTMASK_SDCA_24 BIT(0) +#define SDW_SCP_SDCA_INTMASK_SDCA_25 BIT(1) +#define SDW_SCP_SDCA_INTMASK_SDCA_26 BIT(2) +#define SDW_SCP_SDCA_INTMASK_SDCA_27 BIT(3) +#define SDW_SCP_SDCA_INTMASK_SDCA_28 BIT(4) +#define SDW_SCP_SDCA_INTMASK_SDCA_29 BIT(5) +#define SDW_SCP_SDCA_INTMASK_SDCA_30 BIT(6) +/* BIT(7) not allocated in SoundWire 1.2 specification */ + /* Banked Registers */ #define SDW_SCP_FRAMECTRL_B0 0x60 #define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET) #define SDW_SCP_NEXTFRAME_B0 0x61 #define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET) +#define SDW_SCP_BUSCLOCK_SCALE_B0 0x62 +#define SDW_SCP_BUSCLOCK_SCALE_B1 (0x62 + SDW_BANK1_OFFSET) +#define SDW_SCP_CLOCK_SCALE GENMASK(3, 0) + +/* PHY registers - CTRL and STAT are the same address */ +#define SDW_SCP_PHY_OUT_CTRL_0 0x80 +#define SDW_SCP_PHY_OUT_CTRL_1 0x81 +#define SDW_SCP_PHY_OUT_CTRL_2 0x82 +#define SDW_SCP_PHY_OUT_CTRL_3 0x83 +#define SDW_SCP_PHY_OUT_CTRL_4 0x84 +#define SDW_SCP_PHY_OUT_CTRL_5 0x85 +#define SDW_SCP_PHY_OUT_CTRL_6 0x86 +#define SDW_SCP_PHY_OUT_CTRL_7 0x87 + +#define SDW_SCP_CAP_LOAD_CTRL GENMASK(2, 0) +#define SDW_SCP_DRIVE_STRENGTH_CTRL GENMASK(5, 3) +#define SDW_SCP_SLEW_TIME_CTRL GENMASK(7, 6) + /* Both INT and STATUS register is same */ #define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n)) #define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n)) diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h index aaa7f4267c14..52eb66cd11bc 100644 --- a/include/linux/soundwire/sdw_type.h +++ b/include/linux/soundwire/sdw_type.h @@ -5,6 +5,13 @@ #define __SOUNDWIRE_TYPES_H extern struct bus_type sdw_bus_type; +extern struct device_type sdw_slave_type; +extern struct device_type sdw_master_type; + +static inline int is_sdw_slave(const struct device *dev) +{ + return dev->type == &sdw_slave_type; +} #define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver) @@ -14,7 +21,7 @@ extern struct bus_type sdw_bus_type; int __sdw_register_driver(struct sdw_driver *drv, struct module *owner); void sdw_unregister_driver(struct sdw_driver *drv); -int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); +int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env); /** * module_sdw_driver() - Helper macro for registering a Soundwire driver diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h new file mode 100644 index 000000000000..2d42641499a6 --- 
/dev/null +++ b/include/linux/spi/altera.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header File for Altera SPI Driver. + */ +#ifndef __LINUX_SPI_ALTERA_H +#define __LINUX_SPI_ALTERA_H + +#include <linux/regmap.h> +#include <linux/spi/spi.h> +#include <linux/types.h> + +/** + * struct altera_spi_platform_data - Platform data of the Altera SPI driver + * @mode_bits: Mode bits of SPI master. + * @num_chipselect: Number of chipselects. + * @bits_per_word_mask: bitmask of supported bits_per_word for transfers. + * @num_devices: Number of devices that shall be added when the driver + * is probed. + * @devices: The devices to add. + */ +struct altera_spi_platform_data { + u16 mode_bits; + u16 num_chipselect; + u32 bits_per_word_mask; + u16 num_devices; + struct spi_board_info *devices; +}; + +#endif /* __LINUX_SPI_ALTERA_H */ diff --git a/include/linux/spi/l4f00242t03.h b/include/linux/spi/l4f00242t03.h deleted file mode 100644 index 831a5de7a0e2..000000000000 --- a/include/linux/spi/l4f00242t03.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * l4f00242t03.h -- Platform glue for Epson L4F00242T03 LCD - * - * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> - * Based on Marek Vasut work in lms283gf05.h -*/ - -#ifndef _INCLUDE_LINUX_SPI_L4F00242T03_H_ -#define _INCLUDE_LINUX_SPI_L4F00242T03_H_ - -struct l4f00242t03_pdata { - unsigned int reset_gpio; - unsigned int data_enable_gpio; -}; - -#endif /* _INCLUDE_LINUX_SPI_L4F00242T03_H_ */ diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h deleted file mode 100644 index 738a45b435f2..000000000000 --- a/include/linux/spi/mcp23s08.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -struct mcp23s08_platform_data { - /* For mcp23s08, up to 4 slaves (numbered 0..3) can share one SPI - * chipselect, each providing 1 gpio_chip instance with 8 gpios. - * For mpc23s17, up to 8 slaves (numbered 0..7) can share one SPI - * chipselect, each providing 1 gpio_chip (port A + port B) with - * 16 gpios. - */ - u32 spi_present_mask; - - /* "base" is the number of the first GPIO or -1 for dynamic - * assignment. If there are gaps in chip addressing the GPIO - * numbers are sequential .. so for example if only slaves 0 - * and 3 are present, their GPIOs range from base to base+15 - * (or base+31 for s17 variant). - */ - unsigned base; -}; diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index af9ff2f0f1b2..159463cc659c 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -17,6 +17,7 @@ { \ .buswidth = __buswidth, \ .opcode = __opcode, \ + .nbytes = 1, \ } #define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth) \ @@ -69,11 +70,15 @@ enum spi_mem_data_dir { /** * struct spi_mem_op - describes a SPI memory operation + * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is + * sent MSB-first. * @cmd.buswidth: number of IO lines used to transmit the command * @cmd.opcode: operation opcode + * @cmd.dtr: whether the command opcode should be sent in DTR mode or not * @addr.nbytes: number of address bytes to send. Can be zero if the operation * does not need to send an address * @addr.buswidth: number of IO lines used to transmit the address cycles + * @addr.dtr: whether the address should be sent in DTR mode or not * @addr.val: address value. This value is always sent MSB first on the bus. 
* Note that only @addr.nbytes are taken into account in this * address value, so users should make sure the value fits in the @@ -81,7 +86,9 @@ enum spi_mem_data_dir { * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can * be zero if the operation does not require dummy bytes * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes + * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not * @data.buswidth: number of IO lanes used to send/receive the data + * @data.dtr: whether the data should be sent in DTR mode or not * @data.dir: direction of the transfer * @data.nbytes: number of data bytes to send/receive. Can be zero if the * operation does not involve transferring data @@ -90,23 +97,28 @@ enum spi_mem_data_dir { */ struct spi_mem_op { struct { + u8 nbytes; u8 buswidth; - u8 opcode; + u8 dtr : 1; + u16 opcode; } cmd; struct { u8 nbytes; u8 buswidth; + u8 dtr : 1; u64 val; } addr; struct { u8 nbytes; u8 buswidth; + u8 dtr : 1; } dummy; struct { u8 buswidth; + u8 dtr : 1; enum spi_mem_data_dir dir; unsigned int nbytes; union { diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 38286de779e3..99380c0825db 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -329,6 +329,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * every chipselect is connected to a slave. * @dma_alignment: SPI controller constraint on DMA buffers alignment. * @mode_bits: flags understood by this controller driver + * @buswidth_override_bits: flags to override for this controller driver * @bits_per_word_mask: A mask indicating which values of bits_per_word are * supported by the driver. Bit n indicates that a bits_per_word n+1 is * supported. If set, the SPI core will reject any transfer with an @@ -358,8 +359,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @cleanup: frees controller-specific state * @can_dma: determine whether this controller supports DMA * @queued: whether this controller is providing an internal message queue - * @kworker: thread struct for message pump - * @kworker_task: pointer to task for message pump kworker thread + * @kworker: pointer to thread struct for message pump * @pump_messages: work struct for scheduling work to the message pump * @queue_lock: spinlock to syncronise access to message queue * @queue: message queue @@ -368,6 +368,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * @cur_msg_prepared: spi_prepare_message was called for the currently * in-flight message * @cur_msg_mapped: message has been mapped for DMA + * @last_cs_enable: was enable true on the last call to set_cs. + * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy * @running: message pump is running @@ -394,6 +396,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * for example doing DMA mapping. Called from threaded * context. * @transfer_one: transfer a single spi_transfer. + * * - return 0 if the transfer is finished, * - return 1 if the transfer is still in progress. When * the driver is finished with this transfer it must @@ -446,6 +449,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * If the driver does not set this, the SPI core takes the snapshot as * close to the driver hand-over as possible. 
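To make the new spi_mem_op fields concrete, here is a sketch that builds an octal DTR read operation. The opcode, dummy-byte count and overall command layout are hypothetical, not taken from any real flash datasheet; only the struct fields and SPI_MEM_OP_* helpers come from the header above:

#include <linux/spi/spi-mem.h>

static void demo_build_dtr_read(struct spi_mem_op *op, u64 addr,
                                void *buf, unsigned int len)
{
        struct spi_mem_op tmpl =
                SPI_MEM_OP(SPI_MEM_OP_CMD(0xee, 8),
                           SPI_MEM_OP_ADDR(4, addr, 8),
                           SPI_MEM_OP_DUMMY(16, 8),
                           SPI_MEM_OP_DATA_IN(len, buf, 8));

        /* DTR: every phase toggles data on both clock edges */
        tmpl.cmd.nbytes = 2;            /* new: extended 2-byte opcode */
        tmpl.cmd.opcode = 0xee11;       /* hypothetical DTR opcode */
        tmpl.cmd.dtr = true;
        tmpl.addr.dtr = true;
        tmpl.dummy.dtr = true;
        tmpl.data.dtr = true;

        *op = tmpl;
}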
* @irq_flags: Interrupt enable state during PTP system timestamping + * @fallback: fallback to pio if dma transfer return failure with + * SPI_TRANS_FAIL_NO_START. * * Each SPI controller can communicate with one or more @spi_device * children. These make a small bus, sharing MOSI, MISO and SCK signals @@ -588,8 +593,7 @@ struct spi_controller { * Over time we expect SPI drivers to be phased over to this API. */ bool queued; - struct kthread_worker kworker; - struct task_struct *kworker_task; + struct kthread_worker *kworker; struct kthread_work pump_messages; spinlock_t queue_lock; struct list_head queue; @@ -601,6 +605,9 @@ struct spi_controller { bool auto_runtime_pm; bool cur_msg_prepared; bool cur_msg_mapped; + bool last_cs_enable; + bool last_cs_mode_high; + bool fallback; struct completion xfer_completion; size_t max_dma_len; @@ -840,12 +847,8 @@ extern void spi_res_release(struct spi_controller *ctlr, * processed the word, i.e. the "pre" timestamp should be taken before * transmitting the "pre" word, and the "post" timestamp after receiving * transmit confirmation from the controller for the "post" word. - * @timestamped_pre: Set by the SPI controller driver to denote it has acted - * upon the @ptp_sts request. Not set when the SPI core has taken care of - * the task. SPI device drivers are free to print a warning if this comes - * back unset and they need the better resolution. - * @timestamped_post: See above. The reason why both exist is that these - * booleans are also used to keep state in the core SPI logic. + * @timestamped: true if the transfer has been timestamped + * @error: Error status logged by spi controller driver. * * SPI transfers always write the same number of bytes as they read. * Protocol drivers should always provide @rx_buf and/or @tx_buf. @@ -939,6 +942,9 @@ struct spi_transfer { bool timestamped; struct list_head transfer_list; + +#define SPI_TRANS_FAIL_NO_START BIT(0) + u16 error; }; /** @@ -961,7 +967,7 @@ struct spi_transfer { * each represented by a struct spi_transfer. The sequence is "atomic" * in the sense that no other spi_message may use that SPI bus until that * sequence completes. On some systems, many such sequences can execute as - * as single programmed DMA transfer. On all systems, these messages are + * a single programmed DMA transfer. On all systems, these messages are * queued, and might complete after transactions to other devices. Messages * sent to a given spi_device are always executed in FIFO order. * @@ -1224,7 +1230,7 @@ extern int spi_bus_unlock(struct spi_controller *ctlr); * * For more specific semantics see spi_sync(). * - * Return: Return: zero on success, else a negative error code. + * Return: zero on success, else a negative error code. 
*/ static inline int spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers, diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index d3770b3f9d9a..f2f12d746dbd 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -56,6 +56,7 @@ #include <linux/kernel.h> #include <linux/stringify.h> #include <linux/bottom_half.h> +#include <linux/lockdep.h> #include <asm/barrier.h> #include <asm/mmiowb.h> diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 6102e6bff3ae..b981caafe8bf 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -15,7 +15,7 @@ # include <linux/spinlock_types_up.h> #endif -#include <linux/lockdep.h> +#include <linux/lockdep_types.h> typedef struct raw_spinlock { arch_spinlock_t raw_lock; diff --git a/include/linux/splice.h b/include/linux/splice.h index ebbbfea48aa0..5c47013f708e 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -82,6 +82,9 @@ extern long do_splice(struct file *in, loff_t __user *off_in, struct file *out, loff_t __user *off_out, size_t len, unsigned int flags); +extern long do_tee(struct file *in, struct file *out, size_t len, + unsigned int flags); + /* * for dynamic pipe sizing */ diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h index 3d5c3271a9a8..a59db2f08e76 100644 --- a/include/linux/stackleak.h +++ b/include/linux/stackleak.h @@ -25,7 +25,7 @@ static inline void stackleak_task_init(struct task_struct *t) #ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE int stack_erasing_sysctl(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); #endif #else /* !CONFIG_GCC_PLUGIN_STACKLEAK */ diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 83bd8cb475d7..b7af8cc13eda 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -64,7 +64,7 @@ void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie, struct stack_trace { unsigned int nr_entries, max_entries; unsigned long *entries; - int skip; /* input argument: How many entries to skip */ + unsigned int skip; /* input argument: How many entries to skip */ }; extern void save_stack_trace(struct stack_trace *trace); diff --git a/include/linux/stat.h b/include/linux/stat.h index 528c4baad091..56614af83d4a 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -47,6 +47,7 @@ struct kstat { struct timespec64 ctime; struct timespec64 btime; /* File creation time */ u64 blocks; + u64 mnt_id; }; #endif diff --git a/include/linux/string.h b/include/linux/string.h index 6dfbb2efa815..9b7a0632e87a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -272,6 +272,31 @@ void __read_overflow3(void) __compiletime_error("detected read beyond size of ob void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter"); #if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE) + +#ifdef CONFIG_KASAN +extern void *__underlying_memchr(const void *p, int c, __kernel_size_t size) __RENAME(memchr); +extern int __underlying_memcmp(const void *p, const void *q, __kernel_size_t size) __RENAME(memcmp); +extern void *__underlying_memcpy(void *p, const void *q, __kernel_size_t size) __RENAME(memcpy); +extern void *__underlying_memmove(void *p, const void *q, __kernel_size_t size) __RENAME(memmove); +extern void *__underlying_memset(void *p, int c, __kernel_size_t size) __RENAME(memset); 
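A short usage sketch for spi_sync_transfer() as defined above, reading one register of a hypothetical SPI device; the caller is assumed to supply DMA-safe buffers, as a real driver must:

#include <linux/kernel.h>
#include <linux/spi/spi.h>

static int demo_read_reg(struct spi_device *spi, u8 *reg, u8 *val)
{
        struct spi_transfer xfers[2] = {
                { .tx_buf = reg, .len = 1 },    /* register address out */
                { .rx_buf = val, .len = 1 },    /* value back in */
        };

        /* wraps the array in a spi_message and runs it synchronously */
        return spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
}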
+extern char *__underlying_strcat(char *p, const char *q) __RENAME(strcat); +extern char *__underlying_strcpy(char *p, const char *q) __RENAME(strcpy); +extern __kernel_size_t __underlying_strlen(const char *p) __RENAME(strlen); +extern char *__underlying_strncat(char *p, const char *q, __kernel_size_t count) __RENAME(strncat); +extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) __RENAME(strncpy); +#else +#define __underlying_memchr __builtin_memchr +#define __underlying_memcmp __builtin_memcmp +#define __underlying_memcpy __builtin_memcpy +#define __underlying_memmove __builtin_memmove +#define __underlying_memset __builtin_memset +#define __underlying_strcat __builtin_strcat +#define __underlying_strcpy __builtin_strcpy +#define __underlying_strlen __builtin_strlen +#define __underlying_strncat __builtin_strncat +#define __underlying_strncpy __builtin_strncpy +#endif + __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) { size_t p_size = __builtin_object_size(p, 0); @@ -279,14 +304,14 @@ __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_strncpy(p, q, size); + return __underlying_strncpy(p, q, size); } __FORTIFY_INLINE char *strcat(char *p, const char *q) { size_t p_size = __builtin_object_size(p, 0); if (p_size == (size_t)-1) - return __builtin_strcat(p, q); + return __underlying_strcat(p, q); if (strlcat(p, q, p_size) >= p_size) fortify_panic(__func__); return p; @@ -300,7 +325,7 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) /* Work around gcc excess stack consumption issue */ if (p_size == (size_t)-1 || (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) - return __builtin_strlen(p); + return __underlying_strlen(p); ret = strnlen(p, p_size); if (p_size <= ret) fortify_panic(__func__); @@ -333,7 +358,7 @@ __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) __write_overflow(); if (len >= p_size) fortify_panic(__func__); - __builtin_memcpy(p, q, len); + __underlying_memcpy(p, q, len); p[len] = '\0'; } return ret; @@ -346,12 +371,12 @@ __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strncat(p, q, count); + return __underlying_strncat(p, q, count); p_len = strlen(p); copy_len = strnlen(q, count); if (p_size < p_len + copy_len + 1) fortify_panic(__func__); - __builtin_memcpy(p + p_len, q, copy_len); + __underlying_memcpy(p + p_len, q, copy_len); p[p_len + copy_len] = '\0'; return p; } @@ -363,7 +388,7 @@ __FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size) __write_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memset(p, c, size); + return __underlying_memset(p, c, size); } __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) @@ -378,7 +403,7 @@ __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcpy(p, q, size); + return __underlying_memcpy(p, q, size); } __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) @@ -393,7 +418,7 @@ __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memmove(p, q, size); + 
return __underlying_memmove(p, q, size); } extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan); @@ -419,7 +444,7 @@ __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) } if (p_size < size || q_size < size) fortify_panic(__func__); - return __builtin_memcmp(p, q, size); + return __underlying_memcmp(p, q, size); } __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) @@ -429,7 +454,7 @@ __FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size) __read_overflow(); if (p_size < size) fortify_panic(__func__); - return __builtin_memchr(p, c, size); + return __underlying_memchr(p, c, size); } void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv); @@ -460,11 +485,22 @@ __FORTIFY_INLINE char *strcpy(char *p, const char *q) size_t p_size = __builtin_object_size(p, 0); size_t q_size = __builtin_object_size(q, 0); if (p_size == (size_t)-1 && q_size == (size_t)-1) - return __builtin_strcpy(p, q); + return __underlying_strcpy(p, q); memcpy(p, q, strlen(q) + 1); return p; } +/* Don't use these outside the FORITFY_SOURCE implementation */ +#undef __underlying_memchr +#undef __underlying_memcmp +#undef __underlying_memcpy +#undef __underlying_memmove +#undef __underlying_memset +#undef __underlying_strcat +#undef __underlying_strcpy +#undef __underlying_strlen +#undef __underlying_strncat +#undef __underlying_strncpy #endif /** diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index c28955132234..86f150c2a6b6 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -2,6 +2,7 @@ #ifndef _LINUX_STRING_HELPERS_H_ #define _LINUX_STRING_HELPERS_H_ +#include <linux/ctype.h> #include <linux/types.h> struct file; @@ -75,6 +76,20 @@ static inline int string_escape_str_any_np(const char *src, char *dst, return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, only); } +static inline void string_upper(char *dst, const char *src) +{ + do { + *dst++ = toupper(*src); + } while (*src++); +} + +static inline void string_lower(char *dst, const char *src) +{ + do { + *dst++ = tolower(*src); + } while (*src++); +} + char *kstrdup_quotable(const char *src, gfp_t gfp); char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp); char *kstrdup_quotable_file(struct file *file, gfp_t gfp); diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 4f6b28487f28..98da816b5fc2 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h @@ -76,7 +76,7 @@ struct rpc_auth { unsigned int au_verfsize; /* size of reply verifier */ unsigned int au_ralign; /* words before UL header */ - unsigned int au_flags; + unsigned long au_flags; const struct rpc_authops *au_ops; rpc_authflavor_t au_flavor; /* pseudoflavor (note may * differ from the flavor in @@ -89,7 +89,8 @@ struct rpc_auth { }; /* rpc_auth au_flags */ -#define RPCAUTH_AUTH_DATATOUCH 0x00000002 +#define RPCAUTH_AUTH_DATATOUCH (1) +#define RPCAUTH_AUTH_UPDATE_SLACK (2) struct rpc_auth_create_args { rpc_authflavor_t pseudoflavor; diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index bc07e51f20d1..bf4ac8a0268c 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h @@ -84,6 +84,7 @@ struct pf_desc { u32 service; char *name; char *auth_domain_name; + struct auth_domain *domain; bool datatouch; }; diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 320c672d84de..4af31bbc8802 100644 --- 
a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h @@ -124,4 +124,78 @@ rpcrdma_decode_buffer_size(u8 val) return ((unsigned int)val + 1) << 10; } +/** + * xdr_encode_rdma_segment - Encode contents of an RDMA segment + * @p: Pointer into a send buffer + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded RDMA segment + */ +static inline __be32 *xdr_encode_rdma_segment(__be32 *p, u32 handle, + u32 length, u64 offset) +{ + *p++ = cpu_to_be32(handle); + *p++ = cpu_to_be32(length); + return xdr_encode_hyper(p, offset); +} + +/** + * xdr_encode_read_segment - Encode contents of a Read segment + * @p: Pointer into a send buffer + * @position: The position to encode + * @handle: The RDMA handle to encode + * @length: The RDMA length to encode + * @offset: The RDMA offset to encode + * + * Return value: + * Pointer to the XDR position that follows the encoded Read segment + */ +static inline __be32 *xdr_encode_read_segment(__be32 *p, u32 position, + u32 handle, u32 length, + u64 offset) +{ + *p++ = cpu_to_be32(position); + return xdr_encode_rdma_segment(p, handle, length, offset); +} + +/** + * xdr_decode_rdma_segment - Decode contents of an RDMA segment + * @p: Pointer to the undecoded RDMA segment + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the RDMA segment + */ +static inline __be32 *xdr_decode_rdma_segment(__be32 *p, u32 *handle, + u32 *length, u64 *offset) +{ + *handle = be32_to_cpup(p++); + *length = be32_to_cpup(p++); + return xdr_decode_hyper(p, offset); +} + +/** + * xdr_decode_read_segment - Decode contents of a Read segment + * @p: Pointer to the undecoded Read segment + * @position: Upon return, the segment's position + * @handle: Upon return, the RDMA handle + * @length: Upon return, the RDMA length + * @offset: Upon return, the RDMA offset + * + * Return value: + * Pointer to the XDR item that follows the Read segment + */ +static inline __be32 *xdr_decode_read_segment(__be32 *p, u32 *position, + u32 *handle, u32 *length, + u64 *offset) +{ + *position = be32_to_cpup(p++); + return xdr_decode_rdma_segment(p, handle, length, offset); +} + #endif /* _LINUX_SUNRPC_RPC_RDMA_H */ diff --git a/include/linux/sunrpc/rpc_rdma_cid.h b/include/linux/sunrpc/rpc_rdma_cid.h new file mode 100644 index 000000000000..be24ab2baa6a --- /dev/null +++ b/include/linux/sunrpc/rpc_rdma_cid.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * * Copyright (c) 2020, Oracle and/or its affiliates. + */ + +#ifndef RPC_RDMA_CID_H +#define RPC_RDMA_CID_H + +/* + * The rpc_rdma_cid struct records completion ID information. A + * completion ID matches an incoming Send or Receive completion + * to a Completion Queue and to a previous ib_post_*(). The ID + * can then be displayed in an error message or recorded in a + * trace record. + * + * This struct is shared between the server and client RPC/RDMA + * transport implementations. 
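The rpcrdma encode/decode helpers above reduce to a fixed layout of four XDR words per RDMA segment: handle, length, then the 64-bit offset as an XDR hyper (most-significant word first). A self-contained userspace model that round-trips one segment, with htonl/ntohl standing in for cpu_to_be32/be32_to_cpup:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* htonl/ntohl */

static uint32_t *encode_rdma_segment(uint32_t *p, uint32_t handle,
                                     uint32_t length, uint64_t offset)
{
        *p++ = htonl(handle);
        *p++ = htonl(length);
        *p++ = htonl(offset >> 32);            /* hyper: high word first */
        *p++ = htonl(offset & 0xffffffffULL);
        return p;
}

static uint32_t *decode_rdma_segment(uint32_t *p, uint32_t *handle,
                                     uint32_t *length, uint64_t *offset)
{
        *handle = ntohl(*p++);
        *length = ntohl(*p++);
        *offset = (uint64_t)ntohl(*p++) << 32;
        *offset |= ntohl(*p++);
        return p;
}

int main(void)
{
        uint32_t buf[4], handle, length;
        uint64_t offset;

        encode_rdma_segment(buf, 0xcafe, 4096, 0x123456789abcULL);
        decode_rdma_segment(buf, &handle, &length, &offset);
        printf("handle=0x%x length=%u offset=0x%llx\n",
               handle, length, (unsigned long long)offset);
        return 0;
}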
+ */ +struct rpc_rdma_cid { + u32 ci_queue_id; + int ci_completion_id; +}; + +#endif /* RPC_RDMA_CID_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index fd390894a584..386628b36bc7 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -254,6 +254,7 @@ struct svc_rqst { struct page * *rq_page_end; /* one past the last page */ struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. */ + struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; __be32 rq_xid; /* transmission id */ u32 rq_prog; /* program number */ @@ -299,6 +300,7 @@ struct svc_rqst { struct net *rq_bc_net; /* pointer to backchannel's * net namespace */ + void ** rq_lease_breaker; /* The v4 client breaking a lease */ }; #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index cbcfbd0521e3..9dc3a3b88391 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -46,9 +46,9 @@ #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/rpc_rdma.h> +#include <linux/sunrpc/rpc_rdma_cid.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> -#define SVCRDMA_DEBUG /* Default and maximum inline threshold sizes */ enum { @@ -110,6 +110,8 @@ struct svcxprt_rdma { struct work_struct sc_work; struct llist_head sc_recv_ctxts; + + atomic_t sc_completion_ids; }; /* sc_flags */ #define RDMAXPRT_CONN_PENDING 3 @@ -130,6 +132,7 @@ struct svc_rdma_recv_ctxt { struct list_head rc_list; struct ib_recv_wr rc_recv_wr; struct ib_cqe rc_cqe; + struct rpc_rdma_cid rc_cid; struct ib_sge rc_recv_sge; void *rc_recv_buf; struct xdr_buf rc_arg; @@ -148,6 +151,8 @@ struct svc_rdma_recv_ctxt { struct svc_rdma_send_ctxt { struct list_head sc_list; + struct rpc_rdma_cid sc_cid; + struct ib_send_wr sc_send_wr; struct ib_cqe sc_cqe; struct xdr_buf sc_hdrbuf; @@ -160,9 +165,8 @@ struct svc_rdma_send_ctxt { }; /* svc_rdma_backchannel.c */ -extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, - __be32 *rdma_resp, - struct xdr_buf *rcvbuf); +extern void svc_rdma_handle_bc_reply(struct svc_rqst *rqstp, + struct svc_rdma_recv_ctxt *rctxt); /* svc_rdma_recvfrom.c */ extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); @@ -192,20 +196,21 @@ extern struct svc_rdma_send_ctxt * svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma); extern void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt); -extern int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr); +extern int svc_rdma_send(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *ctxt); extern int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, const struct svc_rdma_recv_ctxt *rctxt, struct xdr_buf *xdr); +extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, + struct svc_rdma_send_ctxt *sctxt, + struct svc_rdma_recv_ctxt *rctxt, + int status); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); /* svc_rdma_transport.c */ -extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); -extern void svc_sq_reap(struct svcxprt_rdma *); -extern void svc_rq_reap(struct svcxprt_rdma *); - extern struct svc_xprt_class svc_rdma_class; #ifdef CONFIG_SUNRPC_BACKCHANNEL extern struct svc_xprt_class svc_rdma_bc_class; diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 
9e1e046de176..aca35ab5cff2 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h @@ -117,6 +117,12 @@ static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u return 0; } +static inline bool svc_xprt_is_dead(const struct svc_xprt *xprt) +{ + return (test_bit(XPT_DEAD, &xprt->xpt_flags) != 0) || + (test_bit(XPT_CLOSE, &xprt->xpt_flags) != 0); +} + int svc_reg_xprt_class(struct svc_xprt_class *); void svc_unreg_xprt_class(struct svc_xprt_class *); void svc_xprt_init(struct net *, struct svc_xprt_class *, struct svc_xprt *, diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h index ca39a388dc22..f09c82b0a7ae 100644 --- a/include/linux/sunrpc/svcauth_gss.h +++ b/include/linux/sunrpc/svcauth_gss.h @@ -20,7 +20,8 @@ int gss_svc_init(void); void gss_svc_shutdown(void); int gss_svc_init_net(struct net *net); void gss_svc_shutdown_net(struct net *net); -int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); +struct auth_domain *svcauth_gss_register_pseudoflavor(u32 pseudoflavor, + char *name); u32 svcauth_gss_flavor(struct auth_domain *dom); #endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 771baadaee9d..b7ac7fe68306 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -28,7 +28,7 @@ struct svc_sock { /* private TCP part */ /* On-the-wire fragment header: */ - __be32 sk_reclen; + __be32 sk_marker; /* As we receive a record, this includes the length received so * far (including the fragment header): */ u32 sk_tcplen; @@ -41,12 +41,12 @@ struct svc_sock { static inline u32 svc_sock_reclen(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_FRAGMENT_SIZE_MASK; + return be32_to_cpu(svsk->sk_marker) & RPC_FRAGMENT_SIZE_MASK; } static inline u32 svc_sock_final_rec(struct svc_sock *svsk) { - return ntohl(svsk->sk_reclen) & RPC_LAST_STREAM_FRAGMENT; + return be32_to_cpu(svsk->sk_marker) & RPC_LAST_STREAM_FRAGMENT; } /* diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 22c207b2425f..5a6a81b7cd9f 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -475,6 +475,32 @@ xdr_stream_encode_uint32_array(struct xdr_stream *xdr, } /** + * xdr_item_is_absent - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is absent + * %false if the following XDR item is present + */ +static inline bool xdr_item_is_absent(const __be32 *p) +{ + return *p == xdr_zero; +} + +/** + * xdr_item_is_present - symbolically handle XDR discriminators + * @p: pointer to undecoded discriminator + * + * Return values: + * %true if the following XDR item is present + * %false if the following XDR item is absent + */ +static inline bool xdr_item_is_present(const __be32 *p) +{ + return *p != xdr_zero; +} + +/** * xdr_stream_decode_u32 - Decode a 32-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store integer diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index e64bd8222f55..a603d48d2b2c 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -101,6 +101,7 @@ struct rpc_rqst { * used in the softirq. 
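The sk_reclen to sk_marker rename above deserves a concrete illustration: an RPC-over-TCP record marker is one big-endian word whose top bit flags the final fragment and whose low 31 bits give the fragment length. A userspace sketch; the two mask values mirror the kernel's RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define RPC_LAST_STREAM_FRAGMENT 0x80000000U
#define RPC_FRAGMENT_SIZE_MASK   0x7fffffffU

int main(void)
{
        /* a marker as it would appear on the wire: last fragment, 1024 bytes */
        uint32_t marker = htonl(RPC_LAST_STREAM_FRAGMENT | 1024);
        uint32_t host = ntohl(marker);

        printf("len=%u last=%d\n", host & RPC_FRAGMENT_SIZE_MASK,
               !!(host & RPC_LAST_STREAM_FRAGMENT));
        return 0;
}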
*/ unsigned long rq_majortimeo; /* major timeout alarm */ + unsigned long rq_minortimeo; /* minor timeout alarm */ unsigned long rq_timeout; /* Current timeout value */ ktime_t rq_rtt; /* round-trip time */ unsigned int rq_retries; /* # of retries */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4fcc6fd0cbd6..cb9afad82a90 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -453,6 +453,8 @@ extern bool hibernation_available(void); asmlinkage int swsusp_save(void); extern struct pbe *restore_pblist; int pfn_is_nosave(unsigned long pfn); + +int hibernate_quiet_exec(int (*func)(void *data), void *data); #else /* CONFIG_HIBERNATION */ static inline void register_nosave_region(unsigned long b, unsigned long e) {} static inline void register_nosave_region_late(unsigned long b, unsigned long e) {} @@ -464,8 +466,18 @@ static inline void hibernation_set_ops(const struct platform_hibernation_ops *op static inline int hibernate(void) { return -ENOSYS; } static inline bool system_entering_hibernation(void) { return false; } static inline bool hibernation_available(void) { return false; } + +static inline int hibernate_quiet_exec(int (*func)(void *data), void *data) { + return -ENOTSUPP; +} #endif /* CONFIG_HIBERNATION */ +#ifdef CONFIG_HIBERNATION_SNAPSHOT_DEV +int is_hibernate_resume_dev(const struct inode *); +#else +static inline int is_hibernate_resume_dev(const struct inode *i) { return 0; } +#endif + /* Hibernation and suspend events */ #define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ #define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ diff --git a/include/linux/swait.h b/include/linux/swait.h index 73e06e9986d4..6a8c22b8c2a5 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -9,23 +9,10 @@ #include <asm/current.h> /* - * BROKEN wait-queues. - * - * These "simple" wait-queues are broken garbage, and should never be - * used. The comments below claim that they are "similar" to regular - * wait-queues, but the semantics are actually completely different, and - * every single user we have ever had has been buggy (or pointless). - * - * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what - * "wake_up()" does, and has led to problems. In other cases, it has - * been fine, because there's only ever one waiter (kvm), but in that - * case gthe whole "simple" wait-queue is just pointless to begin with, - * since there is no "queue". Use "wake_up_process()" with a direct - * pointer instead. - * - * While these are very similar to regular wait queues (wait.h) the most - * important difference is that the simple waitqueue allows for deterministic - * behaviour -- IOW it has strictly bounded IRQ and lock hold times. + * Simple waitqueues are semantically very different to regular wait queues + * (wait.h). The most important difference is that the simple waitqueue allows + * for deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold + * times. * * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher @@ -39,7 +26,7 @@ * sleeper state. * * - the !exclusive mode; because that leads to O(n) wakeups, everything is - * exclusive. + * exclusive. As such swake_up_one will only ever awake _one_ waiter. * * - custom wake callback functions; because you cannot give any guarantees * about random code. 
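A sketch of the new hibernate_quiet_exec() entry point declared above: as I read it, it runs a callback while processes are frozen and devices are quiesced, then resumes without writing a hibernation image. The callback and caller below are hypothetical:

#include <linux/errno.h>
#include <linux/suspend.h>

static int demo_quiet_cb(void *data)
{
        /* runs with userspace frozen and devices suspended */
        *(int *)data = 1;
        return 0;
}

static int demo_run_quiet(void)
{
        int fired = 0;
        int ret;

        /* returns -ENOTSUPP when CONFIG_HIBERNATION is disabled */
        ret = hibernate_quiet_exec(demo_quiet_cb, &fired);
        return ret ? ret : (fired ? 0 : -EIO);
}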
This also allows swait to be used in RT, such that diff --git a/include/linux/swap.h b/include/linux/swap.h index e1bbf7a16b27..661046994db4 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -183,12 +183,17 @@ enum { #define SWAP_CLUSTER_MAX 32UL #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX -#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ -#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ +/* Bit flag in swap_map */ #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ -#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ -#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ -#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */ +#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */ + +/* Special value in first swap_map */ +#define SWAP_MAP_MAX 0x3e /* Max count */ +#define SWAP_MAP_BAD 0x3f /* Note page is bad */ +#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */ + +/* Special value in each swap_map continuation */ +#define SWAP_CONT_MAX 0x7f /* Max count */ /* * We use this to track usage of a cluster. A cluster is a block of swap disk @@ -247,6 +252,7 @@ struct swap_info_struct { unsigned int inuse_pages; /* number of those currently in use */ unsigned int cluster_next; /* likely index for next allocation */ unsigned int cluster_nr; /* countdown to next cluster search */ + unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ @@ -307,6 +313,7 @@ struct vma_swap_readahead { }; /* linux/mm/workingset.c */ +void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg); void workingset_refault(struct page *page, void *shadow); void workingset_activation(struct page *page); @@ -321,22 +328,23 @@ void workingset_update_node(struct xa_node *node); /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; extern unsigned long nr_free_buffer_pages(void); -extern unsigned long nr_free_pagecache_pages(void); /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) /* linux/mm/swap.c */ +extern void lru_note_cost(struct lruvec *lruvec, bool file, + unsigned int nr_pages); +extern void lru_note_cost_page(struct page *); extern void lru_cache_add(struct page *); -extern void lru_cache_add_anon(struct page *page); -extern void lru_cache_add_file(struct page *page); extern void lru_add_page_tail(struct page *page, struct page *page_tail, struct lruvec *lruvec, struct list_head *head); extern void activate_page(struct page *); extern void mark_page_accessed(struct page *); extern void lru_add_drain(void); extern void lru_add_drain_cpu(int cpu); +extern void lru_add_drain_cpu_zone(struct zone *zone); extern void lru_add_drain_all(void); extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); @@ -344,7 +352,7 @@ extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); -extern void lru_cache_add_active_or_unevictable(struct page *page, +extern void lru_cache_add_inactive_or_unevictable(struct page *page, struct 
vm_area_struct *vma); /* linux/mm/vmscan.c */ @@ -363,7 +371,6 @@ extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem, extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); -extern unsigned long vm_total_pages; extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA @@ -407,10 +414,14 @@ extern struct address_space *swapper_spaces[]; extern unsigned long total_swapcache_pages(void); extern void show_swap_cache_info(void); extern int add_to_swap(struct page *page); -extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); -extern int __add_to_swap_cache(struct page *page, swp_entry_t entry); -extern void __delete_from_swap_cache(struct page *, swp_entry_t entry); +extern void *get_shadow_from_swap_cache(swp_entry_t entry); +extern int add_to_swap_cache(struct page *page, swp_entry_t entry, + gfp_t gfp, void **shadowp); +extern void __delete_from_swap_cache(struct page *page, + swp_entry_t entry, void *shadow); extern void delete_from_swap_cache(struct page *); +extern void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end); extern void free_page_and_swap_cache(struct page *); extern void free_pages_and_swap_cache(struct page **, int); extern struct page *lookup_swap_cache(swp_entry_t entry, @@ -563,14 +574,19 @@ static inline int add_to_swap(struct page *page) return 0; } +static inline void *get_shadow_from_swap_cache(swp_entry_t entry) +{ + return NULL; +} + static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, - gfp_t gfp_mask) + gfp_t gfp_mask, void **shadowp) { return -1; } static inline void __delete_from_swap_cache(struct page *page, - swp_entry_t entry) + swp_entry_t entry, void *shadow) { } @@ -578,6 +594,11 @@ static inline void delete_from_swap_cache(struct page *page) { } +static inline void clear_shadow_from_swap_cache(int type, unsigned long begin, + unsigned long end) +{ +} + static inline int page_swapcount(struct page *page) { return 0; @@ -645,11 +666,9 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) #endif #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node, - gfp_t gfp_mask); +extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask); #else -static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, - int node, gfp_t gfp_mask) +static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) { } #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 1815065d52f3..75ac7f8ae93c 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -47,7 +47,6 @@ struct stat64; struct statfs; struct statfs64; struct statx; -struct __sysctl_args; struct sysinfo; struct timespec; struct __kernel_old_timeval; @@ -263,7 +262,7 @@ static inline void addr_limit_user_check(void) return; #endif - if (CHECK_DATA_CORRUPTION(!segment_eq(get_fs(), USER_DS), + if (CHECK_DATA_CORRUPTION(uaccess_kernel(), "Invalid address limit on user-mode return")) force_sig(SIGKILL); @@ -428,6 +427,8 @@ asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); #endif asmlinkage long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); asmlinkage long sys_faccessat(int dfd, const char __user *filename, int mode); +asmlinkage long sys_faccessat2(int dfd, const char __user *filename, int mode, 
+ int flags); asmlinkage long sys_chdir(const char __user *filename); asmlinkage long sys_fchdir(unsigned int fd); asmlinkage long sys_chroot(const char __user *filename); @@ -442,6 +443,8 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, asmlinkage long sys_openat2(int dfd, const char __user *filename, struct open_how *how, size_t size); asmlinkage long sys_close(unsigned int fd); +asmlinkage long sys_close_range(unsigned int fd, unsigned int max_fd, + unsigned int flags); asmlinkage long sys_vhangup(void); /* fs/pipe.c */ @@ -1113,7 +1116,6 @@ asmlinkage long sys_send(int, void __user *, size_t, unsigned); asmlinkage long sys_bdflush(int func, long data); asmlinkage long sys_oldumount(char __user *name); asmlinkage long sys_uselib(const char __user *library); -asmlinkage long sys_sysctl(struct __sysctl_args __user *args); asmlinkage long sys_sysfs(int option, unsigned long arg1, unsigned long arg2); asmlinkage long sys_fork(void); @@ -1233,18 +1235,8 @@ asmlinkage long sys_ni_syscall(void); * Instead, use one of the functions which work equivalently, such as * the ksys_xyzyyz() functions prototyped below. */ - -int ksys_umount(char __user *name, int flags); -int ksys_dup(unsigned int fildes); -int ksys_chroot(const char __user *filename); ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count); -int ksys_chdir(const char __user *filename); -int ksys_fchmod(unsigned int fd, umode_t mode); int ksys_fchown(unsigned int fd, uid_t user, gid_t group); -int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, - unsigned int count); -int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg); -off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence); ssize_t ksys_read(unsigned int fd, char __user *buf, size_t count); void ksys_sync(void); int ksys_unshare(unsigned long unshare_flags); @@ -1278,68 +1270,6 @@ int compat_ksys_ipc(u32 call, int first, int second, * The following kernel syscall equivalents are just wrappers to fs-internal * functions. Therefore, provide stubs to be inlined at the callsites. 
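Editor's note: the hunks above remove single-use ksys_*() wrappers from the in-kernel syscall surface while adding sys_faccessat2() and sys_close_range() to the user-facing one. A minimal userspace sketch of the new close_range() call, assuming a libc that has no wrapper yet (hence raw syscall(2) with __NR_close_range from the kernel headers):

/* Hypothetical sketch: close every descriptor from 3 upward, leaving
 * stdio intact. close_range(fd, max_fd, flags) per the prototype above. */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	if (syscall(__NR_close_range, 3U, ~0U, 0U) < 0) {
		perror("close_range");
		return 1;
	}
	return 0;
}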
*/ -extern long do_unlinkat(int dfd, struct filename *name); - -static inline long ksys_unlink(const char __user *pathname) -{ - return do_unlinkat(AT_FDCWD, getname(pathname)); -} - -extern long do_rmdir(int dfd, const char __user *pathname); - -static inline long ksys_rmdir(const char __user *pathname) -{ - return do_rmdir(AT_FDCWD, pathname); -} - -extern long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); - -static inline long ksys_mkdir(const char __user *pathname, umode_t mode) -{ - return do_mkdirat(AT_FDCWD, pathname, mode); -} - -extern long do_symlinkat(const char __user *oldname, int newdfd, - const char __user *newname); - -static inline long ksys_symlink(const char __user *oldname, - const char __user *newname) -{ - return do_symlinkat(oldname, AT_FDCWD, newname); -} - -extern long do_mknodat(int dfd, const char __user *filename, umode_t mode, - unsigned int dev); - -static inline long ksys_mknod(const char __user *filename, umode_t mode, - unsigned int dev) -{ - return do_mknodat(AT_FDCWD, filename, mode, dev); -} - -extern int do_linkat(int olddfd, const char __user *oldname, int newdfd, - const char __user *newname, int flags); - -static inline long ksys_link(const char __user *oldname, - const char __user *newname) -{ - return do_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); -} - -extern int do_fchmodat(int dfd, const char __user *filename, umode_t mode); - -static inline int ksys_chmod(const char __user *filename, umode_t mode) -{ - return do_fchmodat(AT_FDCWD, filename, mode); -} - -extern long do_faccessat(int dfd, const char __user *filename, int mode); - -static inline long ksys_access(const char __user *filename, int mode) -{ - return do_faccessat(AT_FDCWD, filename, mode); -} - extern int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); @@ -1358,7 +1288,7 @@ static inline long ksys_lchown(const char __user *filename, uid_t user, extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small); -static inline long ksys_ftruncate(unsigned int fd, unsigned long length) +static inline long ksys_ftruncate(unsigned int fd, loff_t length) { return do_sys_ftruncate(fd, length, 1); } @@ -1375,17 +1305,6 @@ static inline int ksys_close(unsigned int fd) return __close_fd(current->files, fd); } -extern long do_sys_open(int dfd, const char __user *filename, int flags, - umode_t mode); - -static inline long ksys_open(const char __user *filename, int flags, - umode_t mode) -{ - if (force_o_largefile()) - flags |= O_LARGEFILE; - return do_sys_open(AT_FDCWD, filename, flags, mode); -} - extern long do_sys_truncate(const char __user *pathname, loff_t length); static inline long ksys_truncate(const char __user *pathname, loff_t length) @@ -1422,4 +1341,8 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, const struct old_timespec32 __user *timeout); +int __sys_getsockopt(int fd, int level, int optname, char __user *optval, + int __user *optlen); +int __sys_setsockopt(int fd, int level, int optname, char __user *optval, + int optlen); #endif diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 02fa84493f23..51298a4f4623 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -44,35 +44,26 @@ struct ctl_dir; extern const int sysctl_vals[]; -typedef int proc_handler (struct ctl_table *ctl, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); - -extern int proc_dostring(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int 
proc_dointvec(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_douintvec(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_dointvec_minmax(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_douintvec_minmax(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int proc_dointvec_jiffies(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_doulongvec_minmax(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, - void __user *, size_t *, loff_t *); -extern int proc_do_large_bitmap(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -extern int proc_do_static_key(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, + size_t *lenp, loff_t *ppos); + +int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); +int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *, + loff_t *); +int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *, + loff_t *); +int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *, + size_t *, loff_t *); +int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_do_static_key(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); /* * Register a set of sysctl names by calling register_sysctl_table @@ -83,15 +74,13 @@ extern int proc_do_static_key(struct ctl_table *table, int write, * sysctl names can be mirrored automatically under /proc/sys. The * procname supplied controls /proc naming. * - * The table's mode will be honoured both for sys_sysctl(2) and - * proc-fs access. + * The table's mode will be honoured for proc-fs access. * * Leaf nodes in the sysctl tree will be represented by a single file * under /proc; non-leaf nodes will be represented by directories. A * null procname disables /proc mirroring at this node. * - * sysctl(2) can automatically manage read and write requests through - * the sysctl table. The data and maxlen fields of the ctl_table + * The data and maxlen fields of the ctl_table * struct enable minimal validation of the values being written to be * performed, and the mode field allows minimal authentication. 
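Editor's note: the proc_handler conversion above drops the __user annotation because the sysctl core now copies between user space and a kernel buffer before invoking the handler. A minimal sketch of a table entry wired to one of these handlers, with hypothetical names throughout:

#include <linux/sysctl.h>

static int example_value;
static int example_min = 0;
static int example_max = 100;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,	/* kernel-pointer signature */
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ }	/* sentinel */
};

/* register_sysctl("kernel", example_table) would expose this as
 * /proc/sys/kernel/example_value, clamped to [0, 100]. */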
* @@ -206,8 +195,17 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init(void); +void do_sysctl_args(void); + +extern int pwrsw_enabled; +extern int unaligned_enabled; +extern int unaligned_dump_stack; +extern int no_unaligned_warning; extern struct ctl_table sysctl_mount_point[]; +extern struct ctl_table random_table[]; +extern struct ctl_table firmware_config_table[]; +extern struct ctl_table epoll_table[]; #else /* CONFIG_SYSCTL */ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table) @@ -236,9 +234,12 @@ static inline void setup_sysctl_set(struct ctl_table_set *p, { } +static inline void do_sysctl_args(void) +{ +} #endif /* CONFIG_SYSCTL */ -int sysctl_max_threads(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); +int sysctl_max_threads(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); #endif /* _LINUX_SYSCTL_H */ diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 80bb865b3a33..34e84122f635 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -7,7 +7,7 @@ * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * - * Please see Documentation/filesystems/sysfs.txt for more information. + * Please see Documentation/filesystems/sysfs.rst for more information. */ #ifndef _SYSFS_H_ @@ -123,6 +123,13 @@ struct attribute_group { .show = _name##_show, \ } +#define __ATTR_RW_MODE(_name, _mode) { \ + .attr = { .name = __stringify(_name), \ + .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ + .show = _name##_show, \ + .store = _name##_store, \ +} + #define __ATTR_WO(_name) { \ .attr = { .name = __stringify(_name), .mode = 0200 }, \ .store = _name##_store, \ diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h index 8e159e16850f..3a582ec7a2f1 100644 --- a/include/linux/sysrq.h +++ b/include/linux/sysrq.h @@ -30,10 +30,10 @@ #define SYSRQ_ENABLE_RTNICE 0x0100 struct sysrq_key_op { - void (*handler)(int); - char *help_msg; - char *action_msg; - int enable_mask; + void (* const handler)(int); + const char * const help_msg; + const char * const action_msg; + const int enable_mask; }; #ifdef CONFIG_MAGIC_SYSRQ @@ -45,9 +45,9 @@ struct sysrq_key_op { void handle_sysrq(int key); void __handle_sysrq(int key, bool check_mask); -int register_sysrq_key(int key, struct sysrq_key_op *op); -int unregister_sysrq_key(int key, struct sysrq_key_op *op); -struct sysrq_key_op *__sysrq_get_key_op(int key); +int register_sysrq_key(int key, const struct sysrq_key_op *op); +int unregister_sysrq_key(int key, const struct sysrq_key_op *op); +extern const struct sysrq_key_op *__sysrq_reboot_op; int sysrq_toggle_support(int enable_mask); int sysrq_mask(void); @@ -62,12 +62,12 @@ static inline void __handle_sysrq(int key, bool check_mask) { } -static inline int register_sysrq_key(int key, struct sysrq_key_op *op) +static inline int register_sysrq_key(int key, const struct sysrq_key_op *op) { return -EINVAL; } -static inline int unregister_sysrq_key(int key, struct sysrq_key_op *op) +static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op) { return -EINVAL; } diff --git a/include/linux/task_work.h b/include/linux/task_work.h index bd9a6a91c097..0fb93aafa478 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -13,7 +13,10 @@ init_task_work(struct callback_head *twork, task_work_func_t func) twork->func = func; 
} -int task_work_add(struct task_struct *task, struct callback_head *twork, bool); +#define TWA_RESUME 1 +#define TWA_SIGNAL 2 +int task_work_add(struct task_struct *task, struct callback_head *twork, int); + struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); void task_work_run(void); diff --git a/include/linux/tboot.h b/include/linux/tboot.h index 5424bc6feac8..5146d2574e85 100644 --- a/include/linux/tboot.h +++ b/include/linux/tboot.h @@ -44,7 +44,7 @@ struct tboot_acpi_generic_address { /* * combines Sx info from FADT and FACS tables per ACPI 2.0+ spec - * (http://www.acpi.info/) + * (https://uefi.org/specifications) */ struct tboot_acpi_sleep_info { struct tboot_acpi_generic_address pm1a_cnt_blk; @@ -121,13 +121,7 @@ struct tboot { #define TBOOT_UUID {0xff, 0x8d, 0x3c, 0x66, 0xb3, 0xe8, 0x82, 0x4b, 0xbf,\ 0xaa, 0x19, 0xea, 0x4d, 0x5, 0x7a, 0x8} -extern struct tboot *tboot; - -static inline int tboot_enabled(void) -{ - return tboot != NULL; -} - +bool tboot_enabled(void); extern void tboot_probe(void); extern void tboot_shutdown(u32 shutdown_type); extern struct acpi_table_header *tboot_get_dmar_table( diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 4f8159e90ce1..14b62d7df942 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -120,6 +120,9 @@ struct tcp_request_sock { u64 snt_synack; /* first SYNACK sent time */ bool tfo_listener; bool is_mptcp; +#if IS_ENABLED(CONFIG_MPTCP) + bool drop_req; +#endif u32 txhash; u32 rcv_isn; u32 snt_isn; @@ -217,6 +220,9 @@ struct tcp_sock { } rack; u16 advmss; /* Advertised MSS */ u8 compressed_ack; + u8 dup_ack_counter:2, + tlp_retrans:1, /* TLP is a retransmission */ + unused:5; u32 chrono_start; /* Start time in jiffies of a TCP chrono */ u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u8 chrono_type:2, /* current chronograph type */ @@ -239,7 +245,7 @@ struct tcp_sock { save_syn:1, /* Save headers of SYN packet */ is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */ syn_smc:1; /* SYN includes SMC */ - u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. 
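Editor's note: task_work_add() above now takes an explicit notification mode rather than a bool. A sketch of queueing work with TWA_SIGNAL (callback and helper names hypothetical):

#include <linux/sched.h>
#include <linux/task_work.h>

static void example_twork_fn(struct callback_head *cb)
{
	/* Runs in the context of the targeted task. */
}

static int example_queue(struct task_struct *task)
{
	static struct callback_head cb;

	init_task_work(&cb, example_twork_fn);
	/* TWA_SIGNAL interrupts the task the way a signal would, so the
	 * work runs promptly even inside a long-running syscall; returns
	 * 0 on success or -ESRCH if the task is already exiting. */
	return task_work_add(task, &cb, TWA_SIGNAL);
}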
*/ + u32 tlp_high_seq; /* snd_nxt at the time of TLP */ u32 tcp_tx_delay; /* delay (in usec) added to TX packets */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ @@ -478,7 +484,8 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp) tp->saved_syn = NULL; } -struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk); +struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, + const struct sk_buff *orig_skb); static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) { @@ -493,4 +500,14 @@ static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss) int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount, int shiftlen); +void tcp_sock_set_cork(struct sock *sk, bool on); +int tcp_sock_set_keepcnt(struct sock *sk, int val); +int tcp_sock_set_keepidle_locked(struct sock *sk, int val); +int tcp_sock_set_keepidle(struct sock *sk, int val); +int tcp_sock_set_keepintvl(struct sock *sk, int val); +void tcp_sock_set_nodelay(struct sock *sk); +void tcp_sock_set_quickack(struct sock *sk, int val); +int tcp_sock_set_syncnt(struct sock *sk, int val); +void tcp_sock_set_user_timeout(struct sock *sk, u32 val); + #endif /* _LINUX_TCP_H */ diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 1412e9cc79ce..d074302989dd 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -26,6 +26,7 @@ #define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ +#define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ struct device; struct tee_device; @@ -166,6 +167,22 @@ int tee_device_register(struct tee_device *teedev); void tee_device_unregister(struct tee_device *teedev); /** + * tee_session_calc_client_uuid() - Calculates client UUID for session + * @uuid: Resulting UUID + * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) + * @connection_data: Connection data for opening session + * + * Based on the connection method, calculates a UUIDv5-based client UUID. + * + * For group-based logins, verifies that the calling process has the specified + * credentials.
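Editor's note: the tcp_sock_set_*() helpers declared above give in-kernel users typed setters in place of kernel_setsockopt() calls. A short sketch, assuming a kernel-owned TCP socket and a hypothetical helper name:

#include <linux/tcp.h>
#include <net/sock.h>

static void example_tune_tcp(struct sock *sk)
{
	tcp_sock_set_nodelay(sk);		/* disable Nagle */
	tcp_sock_set_quickack(sk, 1);		/* favour immediate ACKs */
	tcp_sock_set_user_timeout(sk, 30000);	/* 30 s, value in milliseconds */
}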
+ * + * @return < 0 on failure + */ +int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, + const u8 connection_data[TEE_IOCTL_UUID_LEN]); + +/** * struct tee_shm - shared memory object * @ctx: context using the object * @paddr: physical address of the shared memory diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c91b1e344d56..42ef807e5d84 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -32,32 +32,10 @@ /* use value, which < 0K, to indicate an invalid/uninitialized temperature */ #define THERMAL_TEMP_INVALID -274000 -/* Default Thermal Governor */ -#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) -#define DEFAULT_THERMAL_GOVERNOR "step_wise" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE) -#define DEFAULT_THERMAL_GOVERNOR "fair_share" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) -#define DEFAULT_THERMAL_GOVERNOR "user_space" -#elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) -#define DEFAULT_THERMAL_GOVERNOR "power_allocator" -#endif - struct thermal_zone_device; struct thermal_cooling_device; struct thermal_instance; - -enum thermal_device_mode { - THERMAL_DEVICE_DISABLED = 0, - THERMAL_DEVICE_ENABLED, -}; - -enum thermal_trip_type { - THERMAL_TRIP_ACTIVE = 0, - THERMAL_TRIP_PASSIVE, - THERMAL_TRIP_HOT, - THERMAL_TRIP_CRITICAL, -}; +struct thermal_attr; enum thermal_trend { THERMAL_TREND_STABLE, /* temperature is stable */ @@ -86,9 +64,7 @@ struct thermal_zone_device_ops { struct thermal_cooling_device *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); - int (*get_mode) (struct thermal_zone_device *, - enum thermal_device_mode *); - int (*set_mode) (struct thermal_zone_device *, + int (*change_mode) (struct thermal_zone_device *, enum thermal_device_mode); int (*get_trip_type) (struct thermal_zone_device *, int, enum thermal_trip_type *); @@ -130,11 +106,6 @@ struct thermal_cooling_device { struct list_head node; }; -struct thermal_attr { - struct device_attribute attr; - char name[THERMAL_NAME_LENGTH]; -}; - /** * struct thermal_zone_device - structure for a thermal zone * @id: unique id number for each thermal zone @@ -143,6 +114,7 @@ struct thermal_attr { * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature * @trip_type_attrs: attributes for trip points for sysfs: trip type * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis + * @mode: current mode of this thermal zone * @devdata: private pointer for device private data * @trips: number of trip points the thermal zone supports * @trips_disabled; bitmap for disabled trips @@ -185,6 +157,7 @@ struct thermal_zone_device { struct thermal_attr *trip_temp_attrs; struct thermal_attr *trip_type_attrs; struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; void *devdata; int trips; unsigned long trips_disabled; /* bitmap for disabled trips */ @@ -318,11 +291,6 @@ struct thermal_zone_params { int offset; }; -struct thermal_genl_event { - u32 orig; - enum events event; -}; - /** * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones * @@ -347,21 +315,6 @@ struct thermal_zone_of_device_ops { int (*set_trip_temp)(void *, int, int); }; -/** - * struct thermal_trip - representation of a point in temperature domain - * @np: pointer to struct device_node that this trip point was created from - * @temperature: temperature value in miliCelsius - * @hysteresis: relative hysteresis in miliCelsius - * @type: trip point type - */ 
- -struct thermal_trip { - struct device_node *np; - int temperature; - int hysteresis; - enum thermal_trip_type type; -}; - /* Function declarations */ #ifdef CONFIG_THERMAL_OF int thermal_zone_of_get_sensor_id(struct device_node *tz_np, @@ -413,19 +366,7 @@ void devm_thermal_zone_of_sensor_unregister(struct device *dev, #endif -#if IS_ENABLED(CONFIG_THERMAL) -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ - return cdev->ops->get_requested_power && cdev->ops->state2power && - cdev->ops->power2state; -} - -int power_actor_get_max_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *max_power); -int power_actor_get_min_power(struct thermal_cooling_device *, - struct thermal_zone_device *tz, u32 *min_power); -int power_actor_set_power(struct thermal_cooling_device *, - struct thermal_instance *, u32); +#ifdef CONFIG_THERMAL struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, struct thermal_zone_params *, int, int); @@ -439,7 +380,6 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); -void thermal_zone_set_trips(struct thermal_zone_device *); struct thermal_cooling_device *thermal_cooling_device_register(const char *, void *, const struct thermal_cooling_device_ops *); @@ -457,24 +397,11 @@ int thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp); int thermal_zone_get_slope(struct thermal_zone_device *tz); int thermal_zone_get_offset(struct thermal_zone_device *tz); -int get_tz_trend(struct thermal_zone_device *, int); -struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, - struct thermal_cooling_device *, int); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); +int thermal_zone_device_enable(struct thermal_zone_device *tz); +int thermal_zone_device_disable(struct thermal_zone_device *tz); #else -static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) -{ return false; } -static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, u32 *max_power) -{ return 0; } -static inline int power_actor_get_min_power(struct thermal_cooling_device *cdev, - struct thermal_zone_device *tz, - u32 *min_power) -{ return -ENODEV; } -static inline int power_actor_set_power(struct thermal_cooling_device *cdev, - struct thermal_instance *tz, u32 power) -{ return 0; } static inline struct thermal_zone_device *thermal_zone_device_register( const char *type, int trips, int mask, void *devdata, struct thermal_zone_device_ops *ops, @@ -484,21 +411,6 @@ static inline struct thermal_zone_device *thermal_zone_device_register( static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } -static inline int thermal_zone_bind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, - unsigned int weight) -{ return -ENODEV; } -static inline int thermal_zone_unbind_cooling_device( - struct thermal_zone_device *tz, int trip, - struct thermal_cooling_device *cdev) -{ return -ENODEV; } -static inline void thermal_zone_device_update(struct thermal_zone_device *tz, - enum thermal_notify_event event) -{ } -static inline void thermal_zone_set_trips(struct 
thermal_zone_device *tz) -{ } static inline struct thermal_cooling_device * thermal_cooling_device_register(char *type, void *devdata, const struct thermal_cooling_device_ops *ops) @@ -530,17 +442,18 @@ static inline int thermal_zone_get_slope( static inline int thermal_zone_get_offset( struct thermal_zone_device *tz) { return -ENODEV; } -static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) -{ return -ENODEV; } -static inline struct thermal_instance * -get_thermal_instance(struct thermal_zone_device *tz, - struct thermal_cooling_device *cdev, int trip) -{ return ERR_PTR(-ENODEV); } + static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) { } static inline void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { } + +static inline int thermal_zone_device_enable(struct thermal_zone_device *tz) +{ return -ENODEV; } + +static inline int thermal_zone_device_disable(struct thermal_zone_device *tz) +{ return -ENODEV; } #endif /* CONFIG_THERMAL */ #endif /* __THERMAL_H__ */ diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index ece782ef5466..5db2b11ab085 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -80,7 +80,7 @@ struct tb { int index; enum tb_security_level security_level; size_t nboot_acl; - unsigned long privdata[0]; + unsigned long privdata[]; }; extern struct bus_type tb_bus_type; @@ -504,8 +504,6 @@ struct tb_ring { #define RING_FLAG_NO_SUSPEND BIT(0) /* Configure the ring to be in frame mode */ #define RING_FLAG_FRAME BIT(1) -/* Enable end-to-end flow control */ -#define RING_FLAG_E2E BIT(2) struct ring_frame; typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled); diff --git a/include/linux/tifm.h b/include/linux/tifm.h index 299cbb8c63bb..44073d06710f 100644 --- a/include/linux/tifm.h +++ b/include/linux/tifm.h @@ -124,7 +124,7 @@ struct tifm_adapter { int (*has_ms_pif)(struct tifm_adapter *fm, struct tifm_dev *sock); - struct tifm_dev *sockets[0]; + struct tifm_dev *sockets[]; }; struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets, diff --git a/include/linux/time.h b/include/linux/time.h index 4c325bf44ce0..b142cb5f5a53 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -3,7 +3,6 @@ #define _LINUX_TIME_H # include <linux/cache.h> -# include <linux/seqlock.h> # include <linux/math64.h> # include <linux/time64.h> diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h index 824d54e057eb..5b6031385db0 100644 --- a/include/linux/time_namespace.h +++ b/include/linux/time_namespace.h @@ -33,6 +33,7 @@ extern struct time_namespace init_time_ns; #ifdef CONFIG_TIME_NS extern int vdso_join_timens(struct task_struct *task, struct time_namespace *ns); +extern void timens_commit(struct task_struct *tsk, struct time_namespace *ns); static inline struct time_namespace *get_time_ns(struct time_namespace *ns) { @@ -96,6 +97,11 @@ static inline int vdso_join_timens(struct task_struct *task, return 0; } +static inline void timens_commit(struct task_struct *tsk, + struct time_namespace *ns) +{ +} + static inline struct time_namespace *get_time_ns(struct time_namespace *ns) { return NULL; diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index b27e2ffa96c1..d5471d6fa778 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -222,9 +222,9 @@ extern bool timekeeping_rtc_skipresume(void); extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta); -/* +/** * struct 
system_time_snapshot - simultaneous raw/real time capture with - * counter value + * counter value * @cycles: Clocksource counter value to produce the system times * @real: Realtime system time * @raw: Monotonic raw system time @@ -239,9 +239,9 @@ struct system_time_snapshot { u8 cs_was_changed_seq; }; -/* +/** * struct system_device_crosststamp - system/device cross-timestamp - * (syncronized capture) + * (synchronized capture) * @device: Device time * @sys_realtime: Realtime simultaneous with device time * @sys_monoraw: Monotonic raw simultaneous with device time @@ -252,12 +252,12 @@ struct system_device_crosststamp { ktime_t sys_monoraw; }; -/* +/** * struct system_counterval_t - system counter value with the pointer to the - * corresponding clocksource + * corresponding clocksource * @cycles: System counter value * @cs: Clocksource corresponding to system counter value. Used by - * timekeeping code to verify comparibility of two cycle values + * timekeeping code to verify comparibility of two cycle values */ struct system_counterval_t { u64 cycles; diff --git a/include/linux/timer.h b/include/linux/timer.h index 0dc19a8c39c9..07910ae5ddd9 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -201,8 +201,7 @@ struct ctl_table; extern unsigned int sysctl_timer_migration; int timer_migration_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); + void *buffer, size_t *lenp, loff_t *ppos); #endif unsigned long __round_jiffies(unsigned long j, int cpu); diff --git a/include/linux/torture.h b/include/linux/torture.h index 6241f59e2d6f..7f65bd1dd307 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h @@ -55,6 +55,11 @@ struct torture_random_state { #define DEFINE_TORTURE_RANDOM_PERCPU(name) \ DEFINE_PER_CPU(struct torture_random_state, name) unsigned long torture_random(struct torture_random_state *trsp); +static inline void torture_random_init(struct torture_random_state *trsp) +{ + trsp->trs_state = 0; + trsp->trs_count = 0; +} /* Task shuffler, which causes CPUs to occasionally go idle. 
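Editor's note: stepping back to the timekeeping structs above (now carrying proper /** kernel-doc markers), they back the cross-timestamping API. A sketch of capturing a simultaneous raw/real snapshot, assuming ktime_get_snapshot() from the same timekeeping API:

#include <linux/printk.h>
#include <linux/timekeeping.h>

static void example_snapshot(void)
{
	struct system_time_snapshot snap;

	/* Fills cycles, real and raw consistently from one counter read. */
	ktime_get_snapshot(&snap);

	pr_info("real=%lld raw=%lld cycles=%llu\n",
		ktime_to_ns(snap.real), ktime_to_ns(snap.raw),
		(unsigned long long)snap.cycles);
}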
*/ void torture_shuffle_task_register(struct task_struct *tp); @@ -89,7 +94,7 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); #ifdef CONFIG_PREEMPTION #define torture_preempt_schedule() preempt_schedule() #else -#define torture_preempt_schedule() +#define torture_preempt_schedule() do { } while (0) #endif #endif /* __LINUX_TORTURE_H */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 03e9b184411b..8f4ff39f51e7 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -96,6 +96,7 @@ struct tpm_space { u8 *context_buf; u32 session_tbl[3]; u8 *session_buf; + u32 buf_size; }; struct tpm_bios_log { diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index c253461b1c4e..739ba9a03ec1 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs { u16 digest_size; } __packed; +#define TCG_SPECID_SIG "Spec ID Event03" + struct tcg_efi_specid_event_head { u8 signature[16]; u32 platform_class; @@ -97,7 +99,7 @@ struct tcg_pcr_event { u32 event_type; u8 digest[20]; u32 event_size; - u8 event[0]; + u8 event[]; } __packed; struct tcg_event_field { @@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, int i; int j; u32 count, event_type; + const u8 zero_digest[sizeof(event_header->digest)] = {0}; marker = event; marker_start = marker; @@ -198,10 +201,26 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, count = READ_ONCE(event->count); event_type = READ_ONCE(event->event_type); + /* Verify that it's the log header */ + if (event_header->pcr_idx != 0 || + event_header->event_type != NO_ACTION || + memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) { + size = 0; + goto out; + } + efispecid = (struct tcg_efi_specid_event_head *)event_header->event; - /* Check if event is malformed. */ - if (count > efispecid->num_algs) { + /* + * Perform validation of the event in order to identify malformed + * events. This function may be asked to parse arbitrary byte sequences + * immediately following a valid event log. The caller expects this + * function to recognize that the byte sequence is not a valid event + * and to return an event size of 0. 
+ */ + if (memcmp(efispecid->signature, TCG_SPECID_SIG, + sizeof(TCG_SPECID_SIG)) || + !efispecid->num_algs || count != efispecid->num_algs) { size = 0; goto out; } diff --git a/include/linux/trace.h b/include/linux/trace.h index 7fd86d3c691f..36d255d66f88 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -29,6 +29,7 @@ struct trace_array; void trace_printk_init_buffers(void); int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...); +int trace_array_init_printk(struct trace_array *tr); void trace_array_put(struct trace_array *tr); struct trace_array *trace_array_get_by_name(const char *name); int trace_array_destroy(struct trace_array *tr); diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a1fecf311621..598fec9f9dbf 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -116,8 +116,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #define __TRACEPOINT_ENTRY(name) \ static tracepoint_ptr_t __tracepoint_ptr_##name __used \ - __attribute__((section("__tracepoints_ptrs"))) = \ - &__tracepoint_##name + __section(__tracepoints_ptrs) = &__tracepoint_##name #endif #endif /* _LINUX_TRACEPOINT_H */ @@ -280,9 +279,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) */ #define DEFINE_TRACE_FN(name, reg, unreg) \ static const char __tpstrtab_##name[] \ - __attribute__((section("__tracepoints_strings"))) = #name; \ - struct tracepoint __tracepoint_##name \ - __attribute__((section("__tracepoints"), used)) = \ + __section(__tracepoints_strings) = #name; \ + struct tracepoint __tracepoint_##name __used \ + __section(__tracepoints) = \ { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\ __TRACEPOINT_ENTRY(name); @@ -361,7 +360,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static const char *___tp_str __tracepoint_string = str; \ ___tp_str; \ }) -#define __tracepoint_string __attribute__((section("__tracepoint_str"))) +#define __tracepoint_string __used __section(__tracepoint_str) #else /* * tracepoint_string() is used to save the string address for userspace diff --git a/include/linux/types.h b/include/linux/types.h index d3021c879179..a147977602b5 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -167,6 +167,8 @@ typedef struct { int counter; } atomic_t; +#define ATOMIC_INIT(i) { (i) } + #ifdef CONFIG_64BIT typedef struct { s64 counter; diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h index 9de5c10293f5..c6abb79501b3 100644 --- a/include/linux/u64_stats_sync.h +++ b/include/linux/u64_stats_sync.h @@ -3,33 +3,36 @@ #define _LINUX_U64_STATS_SYNC_H /* - * To properly implement 64bits network statistics on 32bit and 64bit hosts, - * we provide a synchronization point, that is a noop on 64bit or UP kernels. + * Protect against 64-bit values tearing on 32-bit architectures. This is + * typically used for statistics read/update in different subsystems. * * Key points : - * 1) Use a seqcount on SMP 32bits, with low overhead. - * 2) Whole thing is a noop on 64bit arches or UP kernels. - * 3) Write side must ensure mutual exclusion or one seqcount update could + * + * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP. + * - The whole thing is a no-op on 64-bit architectures. + * + * Usage constraints: + * + * 1) Write side must ensure mutual exclusion, or one seqcount update could * be lost, thus blocking readers forever. 
- * If this synchronization point is not a mutex, but a spinlock or - * spinlock_bh() or disable_bh() : - * 3.1) Write side should not sleep. - * 3.2) Write side should not allow preemption. - * 3.3) If applicable, interrupts should be disabled. * - * 4) If reader fetches several counters, there is no guarantee the whole values - * are consistent (remember point 1) : this is a noop on 64bit arches anyway) + * 2) Write side must disable preemption, or a seqcount reader can preempt the + * writer and also spin forever. + * + * 3) Write side must use the _irqsave() variant if other writers, or a reader, + * can be invoked from an IRQ context. * - * 5) readers are allowed to sleep or be preempted/interrupted : They perform - * pure reads. But if they have to fetch many values, it's better to not allow - * preemptions/interruptions to avoid many retries. + * 4) If reader fetches several counters, there is no guarantee the whole values + * are consistent w.r.t. each other (remember point #2: seqcounts are not + * used for 64bit architectures). * - * 6) If counter might be written by an interrupt, readers should block interrupts. - * (On UP, there is no seqcount_t protection, a reader allowing interrupts could - * read partial values) + * 5) Readers are allowed to sleep or be preempted/interrupted: they perform + * pure reads. * - * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and - * u64_stats_fetch_retry_irq() helpers + * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats + * might be updated from a hardirq or softirq context (remember point #1: + * seqcounts are not used for UP kernels). 32-bit UP stat readers could read + * corrupted 64-bit values otherwise. * * Usage : * diff --git a/include/linux/uacce.h b/include/linux/uacce.h index 0e215e6d0534..454c2f6672d7 100644 --- a/include/linux/uacce.h +++ b/include/linux/uacce.h @@ -68,19 +68,21 @@ enum uacce_q_state { * @uacce: pointer to uacce * @priv: private pointer * @wait: wait queue head - * @list: index into uacce_mm - * @uacce_mm: the corresponding mm + * @list: index into uacce queues list * @qfrs: pointer of qfr regions * @state: queue state machine + * @pasid: pasid associated to the mm + * @handle: iommu_sva handle returned by iommu_sva_bind_device() */ struct uacce_queue { struct uacce_device *uacce; void *priv; wait_queue_head_t wait; struct list_head list; - struct uacce_mm *uacce_mm; struct uacce_qfile_region *qfrs[UACCE_MAX_REGION]; enum uacce_q_state state; + int pasid; + struct iommu_sva *handle; }; /** @@ -96,8 +98,8 @@ struct uacce_queue { * @cdev: cdev of the uacce * @dev: dev of the uacce * @priv: private pointer of the uacce - * @mm_list: list head of uacce_mm->list - * @mm_lock: lock for mm_list + * @queues: list of queues + * @queues_lock: lock for queues list * @inode: core vfs */ struct uacce_device { @@ -112,27 +114,9 @@ struct uacce_device { struct cdev *cdev; struct device dev; void *priv; - struct list_head mm_list; - struct mutex mm_lock; - struct inode *inode; -}; - -/** - * struct uacce_mm - keep track of queues bound to a process - * @list: index into uacce_device - * @queues: list of queues - * @mm: the mm struct - * @lock: protects the list of queues - * @pasid: pasid of the uacce_mm - * @handle: iommu_sva handle return from iommu_sva_bind_device - */ -struct uacce_mm { - struct list_head list; struct list_head queues; - struct mm_struct *mm; - struct mutex lock; - int pasid; - struct iommu_sva *handle; + struct mutex queues_lock; + struct inode *inode; }; 
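Editor's note: the rewritten u64_stats_sync rules above boil down to exclusive, non-preemptible writers and a retry loop on the reader side (both no-ops on 64-bit). A minimal sketch of that pattern, with hypothetical names; the syncp is assumed to be initialized once with u64_stats_init():

#include <linux/spinlock.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
	spinlock_t		lock;	/* writer-side mutual exclusion */
};

static void example_update(struct example_stats *s, unsigned int len)
{
	spin_lock(&s->lock);		/* excludes writers, disables preemption */
	u64_stats_update_begin(&s->syncp);
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
	spin_unlock(&s->lock);
}

static void example_read(struct example_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {				/* readers simply retry on a torn read */
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}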
#if IS_ENABLED(CONFIG_UACCE) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 67f016010aad..94b285411659 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -2,15 +2,31 @@ #ifndef __LINUX_UACCESS_H__ #define __LINUX_UACCESS_H__ +#include <linux/instrumented.h> #include <linux/sched.h> #include <linux/thread_info.h> -#include <linux/kasan-checks.h> - -#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS) #include <asm/uaccess.h> /* + * Force the uaccess routines to be wired up for actual userspace access, + * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone + * using force_uaccess_end below. + */ +static inline mm_segment_t force_uaccess_begin(void) +{ + mm_segment_t fs = get_fs(); + + set_fs(USER_DS); + return fs; +} + +static inline void force_uaccess_end(mm_segment_t oldfs) +{ + set_fs(oldfs); +} + +/* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and * __copy_{to,from}_user{,_inatomic}(). @@ -58,7 +74,7 @@ static __always_inline __must_check unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -67,7 +83,7 @@ static __always_inline __must_check unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { might_fault(); - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); check_object_size(to, n, false); return raw_copy_from_user(to, from, n); } @@ -88,7 +104,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) static __always_inline __must_check unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -97,7 +113,7 @@ static __always_inline __must_check unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); check_object_size(from, n, true); return raw_copy_to_user(to, from, n); } @@ -109,7 +125,7 @@ _copy_from_user(void *to, const void __user *from, unsigned long n) unsigned long res = n; might_fault(); if (likely(access_ok(from, n))) { - kasan_check_write(to, n); + instrument_copy_from_user(to, from, n); res = raw_copy_from_user(to, from, n); } if (unlikely(res)) @@ -127,7 +143,7 @@ _copy_to_user(void __user *to, const void *from, unsigned long n) { might_fault(); if (access_ok(to, n)) { - kasan_check_read(from, n); + instrument_copy_to_user(to, from, n); n = raw_copy_to_user(to, from, n); } return n; @@ -301,72 +317,33 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, return 0; } -/* - * probe_kernel_read(): safely attempt to read from a location - * @dst: pointer to the buffer that shall take the data - * @src: address to read from - * @size: size of the data chunk - * - * Safely read from address @src to the buffer at @dst. If a kernel fault - * happens, handle that and return -EFAULT. 
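Editor's note: force_uaccess_begin()/force_uaccess_end() above give the lingering set_fs(USER_DS)/set_fs(oldfs) pairs a name. A deliberately contrived sketch of bracketing a user access with them (helper name hypothetical):

#include <linux/uaccess.h>

static int example_read_user(const void __user *uptr, void *buf, size_t len)
{
	mm_segment_t oldfs = force_uaccess_begin();	/* force USER_DS */
	int ret = 0;

	if (copy_from_user(buf, uptr, len))
		ret = -EFAULT;

	force_uaccess_end(oldfs);			/* restore old limit */
	return ret;
}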
- */ -extern long probe_kernel_read(void *dst, const void *src, size_t size); -extern long probe_kernel_read_strict(void *dst, const void *src, size_t size); -extern long __probe_kernel_read(void *dst, const void *src, size_t size); +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); -/* - * probe_user_read(): safely attempt to read from a location in user space - * @dst: pointer to the buffer that shall take the data - * @src: address to read from - * @size: size of the data chunk - * - * Safely read from address @src to the buffer at @dst. If a kernel fault - * happens, handle that and return -EFAULT. - */ -extern long probe_user_read(void *dst, const void __user *src, size_t size); -extern long __probe_user_read(void *dst, const void __user *src, size_t size); +long copy_from_kernel_nofault(void *dst, const void *src, size_t size); +long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size); -/* - * probe_kernel_write(): safely attempt to write to a location - * @dst: address to write to - * @src: pointer to the data that shall be written - * @size: size of the data chunk - * - * Safely write to address @dst from the buffer at @src. If a kernel fault - * happens, handle that and return -EFAULT. - */ -extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); -extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); +long copy_from_user_nofault(void *dst, const void __user *src, size_t size); +long notrace copy_to_user_nofault(void __user *dst, const void *src, + size_t size); -/* - * probe_user_write(): safely attempt to write to a location in user space - * @dst: address to write to - * @src: pointer to the data that shall be written - * @size: size of the data chunk - * - * Safely write to address @dst from the buffer at @src. If a kernel fault - * happens, handle that and return -EFAULT. - */ -extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); -extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); +long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, + long count); -extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); -extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, - long count); -extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); -extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, - long count); -extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); +long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr, + long count); +long strnlen_user_nofault(const void __user *unsafe_addr, long count); /** - * probe_kernel_address(): safely attempt to read from a location - * @addr: address to read from - * @retval: read into this variable + * get_kernel_nofault(): safely attempt to read from a location + * @val: read into this variable + * @ptr: address to read from * * Returns 0 on success, or -EFAULT. 
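Editor's note: the probe_kernel_*()/probe_user_*() family above is renamed to the clearer *_nofault() names, and probe_kernel_address() becomes get_kernel_nofault(). A sketch of safely peeking at a possibly-invalid kernel pointer (helper name hypothetical):

#include <linux/uaccess.h>

static int example_peek(const int *maybe_bad_ptr, int *out)
{
	int val;

	/* Expands to copy_from_kernel_nofault(&val, ptr, sizeof(val));
	 * returns 0 on success or -EFAULT on a faulting address. */
	if (get_kernel_nofault(val, maybe_bad_ptr))
		return -EFAULT;

	*out = val;
	return 0;
}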
*/ -#define probe_kernel_address(addr, retval) \ - probe_kernel_read(&retval, addr, sizeof(retval)) +#define get_kernel_nofault(val, ptr) ({ \ + const typeof(val) *__gk_ptr = (ptr); \ + copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\ +}) #ifndef user_access_begin #define user_access_begin(ptr,len) access_ok(ptr, len) @@ -378,6 +355,14 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); static inline unsigned long user_access_save(void) { return 0UL; } static inline void user_access_restore(unsigned long flags) { } #endif +#ifndef user_write_access_begin +#define user_write_access_begin user_access_begin +#define user_write_access_end user_access_end +#endif +#ifndef user_read_access_begin +#define user_read_access_begin user_access_begin +#define user_read_access_end user_access_end +#endif #ifdef CONFIG_HARDENED_USERCOPY void usercopy_warn(const char *name, const char *detail, bool to_user, diff --git a/include/linux/uio.h b/include/linux/uio.h index 9576fd8158d7..3835a8a8e9ea 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -7,7 +7,6 @@ #include <linux/kernel.h> #include <linux/thread_info.h> -#include <crypto/hash.h> #include <uapi/linux/uio.h> struct page; diff --git a/include/linux/umh.h b/include/linux/umh.h index 0c08de356d0d..244aff638220 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h @@ -22,10 +22,8 @@ struct subprocess_info { const char *path; char **argv; char **envp; - struct file *file; int wait; int retval; - pid_t pid; int (*init)(struct subprocess_info *info, struct cred *new); void (*cleanup)(struct subprocess_info *info); void *data; @@ -40,19 +38,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp, int (*init)(struct subprocess_info *info, struct cred *new), void (*cleanup)(struct subprocess_info *), void *data); -struct subprocess_info *call_usermodehelper_setup_file(struct file *file, - int (*init)(struct subprocess_info *info, struct cred *new), - void (*cleanup)(struct subprocess_info *), void *data); -struct umh_info { - const char *cmdline; - struct file *pipe_to_umh; - struct file *pipe_from_umh; - struct list_head list; - void (*cleanup)(struct umh_info *info); - pid_t pid; -}; -int fork_usermode_blob(void *data, size_t len, struct umh_info *info); - extern int call_usermodehelper_exec(struct subprocess_info *info, int wait); diff --git a/include/linux/usb.h b/include/linux/usb.h index 9f3c721c70dc..20c555db4621 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -341,7 +341,7 @@ struct usb_interface_cache { * @interface: array of pointers to usb_interface structures, one for each * interface in the configuration. The number of interfaces is stored * in desc.bNumInterfaces. These pointers are valid only while the - * the configuration is active. + * configuration is active. * @intf_cache: array of pointers to usb_interface_cache structures, one * for each interface in the configuration. These structures exist * for the entire life of the device. @@ -422,7 +422,7 @@ struct usb_devmap { * Allocated per bus (tree of devices) we have: */ struct usb_bus { - struct device *controller; /* host/master side hardware */ + struct device *controller; /* host side hardware */ struct device *sysdev; /* as seen from firmware or bus */ int busnum; /* Bus number (in order of reg) */ const char *bus_name; /* stable id (PCI slot_name etc) */ @@ -620,9 +620,9 @@ struct usb3_lpm_parameters { * Management to be disabled for this usb_device. 
This count should only * be manipulated by those functions, with the bandwidth_mutex is held. * @hub_delay: cached value consisting of: - * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) - * + * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) * Will be used as wValue for SetIsochDelay requests. + * @use_generic_driver: ask driver core to reprobe using the generic driver. * * Notes: * Usbcore drivers should not set usbdev->state directly. Instead use @@ -1215,6 +1215,7 @@ struct usb_driver { * struct usb_device_driver - identifies USB device driver to usbcore * @name: The driver name should be unique among USB drivers, * and should normally be the same as the module name. + * @match: If set, used for better device/driver matching. * @probe: Called to see if the driver is willing to manage a particular * device. If it is, probe returns zero and uses dev_set_drvdata() * to associate driver-specific data with the device. If unwilling @@ -1227,13 +1228,16 @@ struct usb_driver { * @dev_groups: Attributes attached to the device that will be created once it * is bound to the driver. * @drvwrap: Driver-model core structure wrapper. + * @id_table: used with @match() to select better matching driver at + * probe() time. * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend * for devices bound to this driver. * @generic_subclass: if set to 1, the generic USB driver's probe, disconnect, * resume and suspend functions will be called in addition to the driver's * own, so this part of the setup does not need to be replicated. * - * USB drivers must provide all the fields listed above except drvwrap. + * USB drivers must provide all the fields listed above except drvwrap, + * match, and id_table. */ struct usb_device_driver { const char *name; diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 58b83066bea4..604c6c514a50 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -6,13 +6,13 @@ * Wireless USB 1.0 (spread around). Linux has several APIs in C that * need these: * - * - the master/host side Linux-USB kernel driver API; + * - the host side Linux-USB kernel driver API; * - the "usbfs" user space API; and - * - the Linux "gadget" slave/device/peripheral side driver API. + * - the Linux "gadget" device/peripheral side driver API. * * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems - * act either as a USB master/host or as a USB slave/device. That means - * the master and slave side APIs benefit from working well together. + * act either as a USB host or as a USB device. That means the host and + * device side APIs benefit from working well together. * * There's also "Wireless USB", using low power short range radios for * peripheral interconnection but otherwise building on the USB framework. 
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h index edd89b7c8f18..025b41687ce9 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -67,6 +67,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 #define CI_HDRC_IMX_HSIC_ACTIVE_EVENT 2 #define CI_HDRC_IMX_HSIC_SUSPEND_EVENT 3 +#define CI_HDRC_CONTROLLER_VBUS_EVENT 4 int (*notify_event) (struct ci_hdrc *ci, unsigned event); struct regulator *reg_vbus; struct usb_otg_caps ci_otg_caps; @@ -98,5 +99,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev, struct ci_hdrc_platform_data *platdata); /* Remove ci hdrc device */ void ci_hdrc_remove_device(struct platform_device *pdev); +/* Get current available role */ +enum usb_dr_mode ci_hdrc_query_available_role(struct platform_device *pdev); #endif diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 8675e145ea8b..2040696d75b6 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -249,6 +249,9 @@ int usb_function_activate(struct usb_function *); int usb_interface_id(struct usb_configuration *, struct usb_function *); +int config_ep_by_speed_and_alt(struct usb_gadget *g, struct usb_function *f, + struct usb_ep *_ep, u8 alt); + int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f, struct usb_ep *_ep); diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 9411c08a5c7e..52ce1f6b8f83 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -4,7 +4,8 @@ * * We call the USB code inside a Linux-based peripheral device a "gadget" * driver, except for the hardware-specific bus glue. One USB host can - * master many USB gadgets, but the gadgets are only slaved to one host. + * talk to many USB gadgets, but the gadgets are only able to communicate + * to one host. * * * (C) Copyright 2002-2004 by David Brownell @@ -42,6 +43,8 @@ struct usb_ep; * @num_mapped_sgs: number of SG entries mapped to DMA (internal) * @length: Length of that data * @stream_id: The stream id, when USB3.0 bulk streams are being used + * @is_last: Indicates if this is the last request of a stream_id before + * switching to a different stream (required for DWC3 controllers). * @no_interrupt: If true, hints that no completion irq is needed. * Helpful sometimes with deep request queues that are handled * directly by DMA controllers. @@ -104,6 +107,7 @@ struct usb_request { unsigned num_mapped_sgs; unsigned stream_id:16; + unsigned is_last:1; unsigned no_interrupt:1; unsigned zero:1; unsigned short_not_ok:1; @@ -325,7 +329,7 @@ struct usb_gadget_ops { }; /** - * struct usb_gadget - represents a usb slave device + * struct usb_gadget - represents a usb device * @work: (internal use) Workqueue to be used for sysfs_notify() * @udc: struct usb_udc pointer for this gadget * @ops: Function pointers used to access hardware-specific operations. @@ -373,6 +377,7 @@ struct usb_gadget_ops { * @connected: True if gadget is connected. * @lpm_capable: If the gadget max_speed is FULL or HIGH, this flag * indicates that it supports LPM as per the LPM ECN & errata. + * @irq: the interrupt number for device controller. * * Gadgets have a mostly-portable "gadget driver" implementing device * functions, handling all usb configurations and interfaces. 
Gadget @@ -427,6 +432,7 @@ struct usb_gadget { unsigned deactivated:1; unsigned connected:1; unsigned lpm_capable:1; + int irq; }; #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) @@ -597,7 +603,7 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget) /*-------------------------------------------------------------------------*/ /** - * struct usb_gadget_driver - driver for usb 'slave' devices + * struct usb_gadget_driver - driver for usb gadget devices * @function: String describing the gadget's function * @max_speed: Highest speed the driver handles. * @setup: Invoked for ep0 control requests that aren't handled by @@ -725,7 +731,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver); * it will first disconnect(). The driver is also requested * to unbind() and clean up any device state, before this procedure * finally returns. It's expected that the unbind() functions - * will in in exit sections, so may not be linked in some kernels. + * will be in exit sections, so may not be linked in some kernels. */ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver); @@ -773,6 +779,9 @@ struct usb_gadget_string_container { /* put descriptor for string with that id into buf (buflen >= 256) */ int usb_gadget_get_string(const struct usb_gadget_strings *table, int id, u8 *buf); +/* check if the given language identifier is valid */ +bool usb_validate_langid(u16 langid); + /*-------------------------------------------------------------------------*/ /* utility to simplify managing config descriptors */ diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index e12105ed3834..3dbb42c637c1 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -479,7 +479,8 @@ extern void usb_hcd_platform_shutdown(struct platform_device *dev); struct pci_dev; struct pci_device_id; extern int usb_hcd_pci_probe(struct pci_dev *dev, - const struct pci_device_id *id); + const struct pci_device_id *id, + const struct hc_driver *driver); extern void usb_hcd_pci_remove(struct pci_dev *dev); extern void usb_hcd_pci_shutdown(struct pci_dev *dev); diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index a665d7f21142..b6c233e79bd4 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -483,4 +483,5 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP) #define PD_N_HARD_RESET_COUNT 2 +#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */ #endif /* __LINUX_USB_PD_H */ diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index 35b8e15efaa0..68bdc4e2f5a9 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h @@ -249,7 +249,7 @@ * SVDM Discover SVIDs request -> response * * Request is properly formatted VDM Header with discover SVIDs command. - * Response is a set of SVIDs of all all supported SVIDs with all zero's to + * Response is a set of all supported SVIDs, with all zeroes to * mark the end of SVIDs. If more than 12 SVIDs are supported, the command SHOULD be * repeated.
*/ diff --git a/include/linux/usb/phy_companion.h b/include/linux/usb/phy_companion.h index 407f530061cd..263196f05015 100644 --- a/include/linux/usb/phy_companion.h +++ b/include/linux/usb/phy_companion.h @@ -2,7 +2,7 @@ /* * phy-companion.h -- phy companion to indicate the comparator part of PHY * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 22c1f579afe3..5e4c497f54d6 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -69,7 +69,7 @@ /* Hub needs extra delay after resetting its port. */ #define USB_QUIRK_HUB_SLOW_RESET BIT(14) -/* device has blacklisted endpoints */ -#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15) +/* device has endpoints that should be ignored */ +#define USB_QUIRK_ENDPOINT_IGNORE BIT(15) #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 14cac4a1ae8f..8e67eff9039f 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h @@ -17,7 +17,6 @@ #include <linux/kref.h> #include <linux/mutex.h> #include <linux/serial.h> -#include <linux/sysrq.h> #include <linux/kfifo.h> /* The maximum number of ports one device can grab at once */ @@ -213,7 +212,7 @@ struct usb_serial_endpoints { * Return 0 to continue on with the initialization sequence. Anything * else will abort it. * @attach: pointer to the driver's attach function. - * This will be called when the struct usb_serial structure is fully set + * This will be called when the struct usb_serial structure is fully * set up. Do any local initialization of the device, or any private * memory structure allocation at this point in time. * @disconnect: pointer to the driver's disconnect function. 
This will be @@ -316,19 +315,19 @@ struct usb_serial_driver { #define to_usb_serial_driver(d) \ container_of(d, struct usb_serial_driver, driver) -extern int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], +int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], const char *name, const struct usb_device_id *id_table); -extern void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); -extern void usb_serial_port_softint(struct usb_serial_port *port); +void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); +void usb_serial_port_softint(struct usb_serial_port *port); -extern int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); -extern int usb_serial_resume(struct usb_interface *intf); +int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); +int usb_serial_resume(struct usb_interface *intf); /* USB Serial console functions */ #ifdef CONFIG_USB_SERIAL_CONSOLE -extern void usb_serial_console_init(int minor); -extern void usb_serial_console_exit(void); -extern void usb_serial_console_disconnect(struct usb_serial *serial); +void usb_serial_console_init(int minor); +void usb_serial_console_exit(void); +void usb_serial_console_disconnect(struct usb_serial *serial); #else static inline void usb_serial_console_init(int minor) { } static inline void usb_serial_console_exit(void) { } @@ -336,45 +335,49 @@ static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} #endif /* Functions needed by other parts of the usbserial core */ -extern struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); -extern void usb_serial_put(struct usb_serial *serial); -extern int usb_serial_generic_open(struct tty_struct *tty, - struct usb_serial_port *port); -extern int usb_serial_generic_write_start(struct usb_serial_port *port, - gfp_t mem_flags); -extern int usb_serial_generic_write(struct tty_struct *tty, - struct usb_serial_port *port, const unsigned char *buf, int count); -extern void usb_serial_generic_close(struct usb_serial_port *port); -extern int usb_serial_generic_resume(struct usb_serial *serial); -extern int usb_serial_generic_write_room(struct tty_struct *tty); -extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); -extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty, - long timeout); -extern void usb_serial_generic_read_bulk_callback(struct urb *urb); -extern void usb_serial_generic_write_bulk_callback(struct urb *urb); -extern void usb_serial_generic_throttle(struct tty_struct *tty); -extern void usb_serial_generic_unthrottle(struct tty_struct *tty); -extern int usb_serial_generic_tiocmiwait(struct tty_struct *tty, - unsigned long arg); -extern int usb_serial_generic_get_icount(struct tty_struct *tty, - struct serial_icounter_struct *icount); -extern int usb_serial_generic_register(void); -extern void usb_serial_generic_deregister(void); -extern int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, - gfp_t mem_flags); -extern void usb_serial_generic_process_read_urb(struct urb *urb); -extern int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, - void *dest, size_t size); -extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port, - unsigned int ch); -extern int usb_serial_handle_break(struct usb_serial_port *port); -extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, - struct tty_struct *tty, - unsigned int 
status); +struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); +void usb_serial_put(struct usb_serial *serial); +int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port); +int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags); +int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count); +void usb_serial_generic_close(struct usb_serial_port *port); +int usb_serial_generic_resume(struct usb_serial *serial); +int usb_serial_generic_write_room(struct tty_struct *tty); +int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); +void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout); +void usb_serial_generic_read_bulk_callback(struct urb *urb); +void usb_serial_generic_write_bulk_callback(struct urb *urb); +void usb_serial_generic_throttle(struct tty_struct *tty); +void usb_serial_generic_unthrottle(struct tty_struct *tty); +int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg); +int usb_serial_generic_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); +int usb_serial_generic_register(void); +void usb_serial_generic_deregister(void); +int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, gfp_t mem_flags); +void usb_serial_generic_process_read_urb(struct urb *urb); +int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size); + +#if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch); +int usb_serial_handle_break(struct usb_serial_port *port); +#else +static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) +{ + return 0; +} +static inline int usb_serial_handle_break(struct usb_serial_port *port) +{ + return 0; +} +#endif + +void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, + struct tty_struct *tty, unsigned int status); -extern int usb_serial_bus_register(struct usb_serial_driver *device); -extern void usb_serial_bus_deregister(struct usb_serial_driver *device); +int usb_serial_bus_register(struct usb_serial_driver *device); +void usb_serial_bus_deregister(struct usb_serial_driver *device); extern struct bus_type usb_serial_bus_type; extern struct tty_driver *usb_serial_tty_driver; diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index e7979c01c351..89f58760cf48 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -79,6 +79,7 @@ enum tcpm_transmit_type { * @try_role: Optional; called to set a preferred role * @pd_transmit:Called to transmit PD message * @mux: Pointer to multiplexer data + * @set_bist_data: Turn on/off bist data mode for compliance testing */ struct tcpc_dev { struct fwnode_handle *fwnode; @@ -103,6 +104,7 @@ struct tcpc_dev { int (*try_role)(struct tcpc_dev *dev, int role); int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type, const struct pd_message *msg); + int (*set_bist_data)(struct tcpc_dev *dev, bool on); }; struct tcpm_port; diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index b00a2642a9cd..9cb1bec94b71 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -73,6 +73,20 @@ enum typec_orientation { }; /* + * struct enter_usb_data - Enter_USB Message details + * @eudo: Enter_USB Data Object + * @active_link_training: Active Cable Plug Link Training + * + * 
@active_link_training is a flag that should be set with uni-directional SBRX + * communication, and left 0 with passive cables and with bi-directional SBRX + * communication. + */ +struct enter_usb_data { + u32 eudo; + unsigned char active_link_training:1; +}; + +/* * struct usb_pd_identity - USB Power Delivery identity data * @id_header: ID Header VDO * @cert_stat: Cert Stat VDO @@ -254,6 +268,7 @@ int typec_set_mode(struct typec_port *port, int mode); void *typec_get_drvdata(struct typec_port *port); +int typec_find_orientation(const char *name); int typec_find_port_power_role(const char *name); int typec_find_power_role(const char *name); int typec_find_port_data_role(const char *name); diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h index d834e236c6df..a4b65eaa0f62 100644 --- a/include/linux/usb/typec_altmode.h +++ b/include/linux/usb/typec_altmode.h @@ -95,13 +95,7 @@ enum { * * Port drivers can use TYPEC_MODE_AUDIO and TYPEC_MODE_DEBUG as the mode * value for typec_set_mode() when accessory modes are supported. - */ -enum { - TYPEC_MODE_AUDIO = TYPEC_STATE_MODAL, /* Audio Accessory */ - TYPEC_MODE_DEBUG, /* Debug Accessory */ -}; - -/* + * * USB4 also requires that the pins on the connector are repurposed, just like * Alternate Modes. USB4 mode is however not entered with the Enter Mode Command * like the Alternate Modes are, but instead with a special Enter_USB Message. @@ -112,9 +106,11 @@ enum { * state values, just like the Accessory Modes. */ enum { - TYPEC_MODE_USB2 = TYPEC_MODE_DEBUG, /* USB 2.0 mode */ + TYPEC_MODE_USB2 = TYPEC_STATE_MODAL, /* USB 2.0 mode */ TYPEC_MODE_USB3, /* USB 3.2 mode */ - TYPEC_MODE_USB4 /* USB4 mode */ + TYPEC_MODE_USB4, /* USB4 mode */ + TYPEC_MODE_AUDIO, /* Audio Accessory */ + TYPEC_MODE_DEBUG, /* Debug Accessory */ }; #define TYPEC_MODAL_STATE(_state_) ((_state_) + TYPEC_STATE_MODAL) diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index b0bff3083278..2e4f7721fc4e 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -207,6 +207,7 @@ struct cdc_state { struct usb_interface *data; }; +extern void usbnet_cdc_update_filter(struct usbnet *dev); extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *); extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf); extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *); @@ -273,6 +274,7 @@ extern int usbnet_set_link_ksettings(struct net_device *net, extern u32 usbnet_get_link(struct net_device *net); extern u32 usbnet_get_msglevel(struct net_device *); extern void usbnet_set_msglevel(struct net_device *, u32); +extern void usbnet_set_rx_mode(struct net_device *net); extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); extern int usbnet_nway_reset(struct net_device *net); diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h new file mode 100644 index 000000000000..073a9e0ec07d --- /dev/null +++ b/include/linux/usermode_driver.h @@ -0,0 +1,18 @@ +#ifndef __LINUX_USERMODE_DRIVER_H__ +#define __LINUX_USERMODE_DRIVER_H__ + +#include <linux/umh.h> +#include <linux/path.h> + +struct umd_info { + const char *driver_name; + struct file *pipe_to_umh; + struct file *pipe_from_umh; + struct path wd; + struct pid *tgid; +}; +int umd_load_blob(struct umd_info *info, const void *data, size_t len); +int umd_unload_blob(struct umd_info *info); +int fork_usermode_driver(struct umd_info *info); + +#endif /* __LINUX_USERMODE_DRIVER_H__ */ diff 
--git a/include/linux/uuid.h b/include/linux/uuid.h index d41b0d3e9474..8cdc0d3567cd 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h @@ -98,8 +98,6 @@ int guid_parse(const char *uuid, guid_t *u); int uuid_parse(const char *uuid, uuid_t *u); /* backwards compatibility, don't use in new code */ -#define uuid_le_to_bin(guid, u) guid_parse(guid, u) - static inline int uuid_le_cmp(const guid_t u1, const guid_t u2) { return memcmp(&u1, &u2, sizeof(guid_t)); diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h index ff56c443180c..db8a7d118093 100644 --- a/include/linux/vbox_utils.h +++ b/include/linux/vbox_utils.h @@ -16,6 +16,7 @@ struct vbg_dev; __printf(1, 2) void vbg_info(const char *fmt, ...); __printf(1, 2) void vbg_warn(const char *fmt, ...); __printf(1, 2) void vbg_err(const char *fmt, ...); +__printf(1, 2) void vbg_err_ratelimited(const char *fmt, ...); /* Only use backdoor logging for non-dynamic debug builds */ #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 5453af87a33e..eae0bfd87d91 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -18,17 +18,38 @@ struct vdpa_callback { }; /** + * vDPA notification area + * @addr: base address of the notification area + * @size: size of the notification area + */ +struct vdpa_notification_area { + resource_size_t addr; + resource_size_t size; +}; + +/** + * vDPA vq_state definition + * @avail_index: available index + */ +struct vdpa_vq_state { + u16 avail_index; +}; + +/** * vDPA device - representation of a vDPA device * @dev: underlying device * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. * @index: device index + * @features_valid: were features initialized? for legacy guests */ struct vdpa_device { struct device dev; struct device *dma_dev; const struct vdpa_config_ops *config; unsigned int index; + bool features_valid; + int nvqs; }; /** @@ -67,12 +88,22 @@ struct vdpa_device { * @set_vq_state: Set the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * @state: virtqueue state (last_avail_idx) + * @state: pointer to set virtqueue state (last_avail_idx) * Returns integer: success (0) or error (< 0) * @get_vq_state: Get the state for a virtqueue * @vdev: vdpa device * @idx: virtqueue index - * Returns virtqueue state (last_avail_idx) + * @state: pointer to returned state (last_avail_idx) + * @get_vq_notification: Get the notification area for a virtqueue + * @vdev: vdpa device + * @idx: virtqueue index + * Returns the notification area + * @get_vq_irq: Get the irq number of a virtqueue (optional, + * but must be implemented if vq irq offloading is required) + * @vdev: vdpa device + * @idx: virtqueue index + * Returns int: irq number of a virtqueue, + * negative number if no irq assigned.
* @get_vq_align: Get the virtqueue align requirement * for the device * @vdev: vdpa device @@ -160,8 +191,14 @@ struct vdpa_config_ops { struct vdpa_callback *cb); void (*set_vq_ready)(struct vdpa_device *vdev, u16 idx, bool ready); bool (*get_vq_ready)(struct vdpa_device *vdev, u16 idx); - int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, u64 state); - u64 (*get_vq_state)(struct vdpa_device *vdev, u16 idx); + int (*set_vq_state)(struct vdpa_device *vdev, u16 idx, + const struct vdpa_vq_state *state); + int (*get_vq_state)(struct vdpa_device *vdev, u16 idx, + struct vdpa_vq_state *state); + struct vdpa_notification_area + (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); + /* vq irq is not expected to be changed once DRIVER_OK is set */ + int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx); /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); @@ -192,11 +229,12 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, + int nvqs, size_t size); -#define vdpa_alloc_device(dev_struct, member, parent, config) \ +#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs) \ container_of(__vdpa_alloc_device( \ - parent, config, \ + parent, config, nvqs, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ dev_struct, member))), \ @@ -250,4 +288,36 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) { return vdev->dma_dev; } + +static inline void vdpa_reset(struct vdpa_device *vdev) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = false; + ops->set_status(vdev, 0); +} + +static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) +{ + const struct vdpa_config_ops *ops = vdev->config; + + vdev->features_valid = true; + return ops->set_features(vdev, features); +} + + +static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, + void *buf, unsigned int len) +{ + const struct vdpa_config_ops *ops = vdev->config; + + /* + * Config accesses aren't supposed to trigger before features are set. + * If it does happen we assume a legacy guest.
+ */ + if (!vdev->features_valid) + vdpa_set_features(vdev, 0); + ops->get_config(vdev, offset, buf, len); +} + #endif /* _LINUX_VDPA_H */ diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index dc236577b92f..1eaaa93c37bf 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h @@ -2,6 +2,10 @@ #ifndef _LINUX_VERMAGIC_H #define _LINUX_VERMAGIC_H +#ifndef INCLUDE_VERMAGIC +#error "This header can be included from kernel/module.c or *.mod.c only" +#endif + #include <generated/utsrelease.h> #include <asm/vermagic.h> diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h index 0e130b5077a5..2f9dd072f11f 100644 --- a/include/linux/vexpress.h +++ b/include/linux/vexpress.h @@ -10,38 +10,8 @@ #include <linux/device.h> #include <linux/regmap.h> -#define VEXPRESS_SITE_MB 0 -#define VEXPRESS_SITE_DB1 1 -#define VEXPRESS_SITE_DB2 2 -#define VEXPRESS_SITE_MASTER 0xf - -/* Config infrastructure */ - -void vexpress_config_set_master(u32 site); -u32 vexpress_config_get_master(void); - -void vexpress_config_lock(void *arg); -void vexpress_config_unlock(void *arg); - -int vexpress_config_get_topo(struct device_node *node, u32 *site, - u32 *position, u32 *dcc); - -/* Config bridge API */ - -struct vexpress_config_bridge_ops { - struct regmap * (*regmap_init)(struct device *dev, void *context); - void (*regmap_exit)(struct regmap *regmap, void *context); -}; - -struct device *vexpress_config_bridge_register(struct device *parent, - struct vexpress_config_bridge_ops *ops, void *context); - /* Config regmap API */ struct regmap *devm_regmap_init_vexpress_config(struct device *dev); -/* Platform control */ - -void vexpress_flags_set(u32 data); - #endif diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 5d92ee15d098..38d3c6a8dc7e 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -76,7 +76,9 @@ struct vfio_iommu_driver_ops { struct iommu_group *group); void (*detach_group)(void *iommu_data, struct iommu_group *group); - int (*pin_pages)(void *iommu_data, unsigned long *user_pfn, + int (*pin_pages)(void *iommu_data, + struct iommu_group *group, + unsigned long *user_pfn, int npage, int prot, unsigned long *phys_pfn); int (*unpin_pages)(void *iommu_data, diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 553b34c8b5f7..977caf96c8d2 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h @@ -110,12 +110,6 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev, } #if defined(CONFIG_VGA_ARB) -extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); -#else -static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } -#endif - -#if defined(CONFIG_VGA_ARB) extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); #else #define vga_put(pdev, rsrc) diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 16c0ed6c50a7..219037f4c08d 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h @@ -57,6 +57,7 @@ #define __LINUX_VIDEODEV2_H #include <linux/time.h> /* need struct timeval */ +#include <linux/kernel.h> #include <uapi/linux/videodev2.h> #endif /* __LINUX_VIDEODEV2_H */ diff --git a/include/linux/virtio_caif.h b/include/linux/virtio_caif.h index 5d2d3124ca3d..ea722479510c 100644 --- a/include/linux/virtio_caif.h +++ b/include/linux/virtio_caif.h @@ -11,9 +11,9 @@ #include <linux/types.h> struct virtio_caif_transf_config { - u16 headroom; - u16 tailroom; - u32 mtu; + __virtio16 headroom; + __virtio16 tailroom; + __virtio32 mtu; u8 reserved[4]; }; diff --git 
a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bb4cc4910750..8fe857e27ef3 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -6,6 +6,7 @@ #include <linux/bug.h> #include <linux/virtio.h> #include <linux/virtio_byteorder.h> +#include <linux/compiler_types.h> #include <uapi/linux/virtio_config.h> struct irq_affinity; @@ -162,16 +163,16 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, } /** - * virtio_has_iommu_quirk - determine whether this device has the iommu quirk + * virtio_has_dma_quirk - determine whether this device has the DMA quirk * @vdev: the device */ -static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) +static inline bool virtio_has_dma_quirk(const struct virtio_device *vdev) { /* * Note the reverse polarity of the quirk feature (compared to most * other features), this is for compatibility with legacy systems. */ - return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + return !virtio_has_feature(vdev, VIRTIO_F_ACCESS_PLATFORM); } static inline @@ -287,70 +288,133 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) return __cpu_to_virtio64(virtio_is_little_endian(vdev), val); } +#define virtio_to_cpu(vdev, x) \ + _Generic((x), \ + __u8: (x), \ + __virtio16: virtio16_to_cpu((vdev), (x)), \ + __virtio32: virtio32_to_cpu((vdev), (x)), \ + __virtio64: virtio64_to_cpu((vdev), (x)) \ + ) + +#define cpu_to_virtio(vdev, x, m) \ + _Generic((m), \ + __u8: (x), \ + __virtio16: cpu_to_virtio16((vdev), (x)), \ + __virtio32: cpu_to_virtio32((vdev), (x)), \ + __virtio64: cpu_to_virtio64((vdev), (x)) \ + ) + +#define __virtio_native_type(structname, member) \ + typeof(virtio_to_cpu(NULL, ((structname*)0)->member)) + /* Config space accessors. */ #define virtio_cread(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ - (*ptr) = 1; \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cread_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - *(ptr) = virtio_cread8(vdev, \ - offsetof(structname, member)); \ - break; \ case 2: \ - *(ptr) = virtio_cread16(vdev, \ - offsetof(structname, member)); \ - break; \ case 4: \ - *(ptr) = virtio_cread32(vdev, \ - offsetof(structname, member)); \ - break; \ - case 8: \ - *(ptr) = virtio_cread64(vdev, \ - offsetof(structname, member)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_to_cpu(vdev, virtio_cread_v); \ } while(0) /* Config space accessors. */ #define virtio_cwrite(vdev, structname, member, ptr) \ do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + cpu_to_virtio(vdev, *(ptr), ((structname*)0)->member); \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_to_cpu((vdev), virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ + } while(0) + +/* + * Nothing virtio-specific about these, but let's worry about generalizing + * these later. 
+ */ +#define virtio_le_to_cpu(x) \ + _Generic((x), \ + __u8: (u8)(x), \ + __le16: (u16)le16_to_cpu(x), \ + __le32: (u32)le32_to_cpu(x), \ + __le64: (u64)le64_to_cpu(x) \ + ) + +#define virtio_cpu_to_le(x, m) \ + _Generic((m), \ + __u8: (x), \ + __le16: cpu_to_le16(x), \ + __le32: cpu_to_le32(x), \ + __le64: cpu_to_le64(x) \ + ) + +/* LE (e.g. modern) Config space accessors. */ +#define virtio_cread_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cread_v; \ + \ might_sleep(); \ - /* Must match the member's type, and be integer */ \ - if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ - BUG_ON((*ptr) == 1); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cread_v)), *(ptr)); \ \ - switch (sizeof(*ptr)) { \ + switch (sizeof(virtio_cread_v)) { \ case 1: \ - virtio_cwrite8(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ case 2: \ - virtio_cwrite16(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ case 4: \ - virtio_cwrite32(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ - break; \ - case 8: \ - virtio_cwrite64(vdev, \ - offsetof(structname, member), \ - *(ptr)); \ + vdev->config->get((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + sizeof(virtio_cread_v)); \ break; \ default: \ - BUG(); \ + __virtio_cread_many((vdev), \ + offsetof(structname, member), \ + &virtio_cread_v, \ + 1, \ + sizeof(virtio_cread_v)); \ + break; \ } \ + *(ptr) = virtio_le_to_cpu(virtio_cread_v); \ + } while(0) + +#define virtio_cwrite_le(vdev, structname, member, ptr) \ + do { \ + typeof(((structname*)0)->member) virtio_cwrite_v = \ + virtio_cpu_to_le(*(ptr), ((structname*)0)->member); \ + \ + might_sleep(); \ + /* Sanity check: must match the member's type */ \ + typecheck(typeof(virtio_le_to_cpu(virtio_cwrite_v)), *(ptr)); \ + \ + vdev->config->set((vdev), offsetof(structname, member), \ + &virtio_cwrite_v, \ + sizeof(virtio_cwrite_v)); \ } while(0) + /* Read @count fields, @bytes each. 
*/ static inline void __virtio_cread_many(struct virtio_device *vdev, unsigned int offset, @@ -399,53 +463,60 @@ static inline void virtio_cwrite8(struct virtio_device *vdev, static inline u16 virtio_cread16(struct virtio_device *vdev, unsigned int offset) { - u16 ret; + __virtio16 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio16_to_cpu(vdev, (__force __virtio16)ret); + return virtio16_to_cpu(vdev, ret); } static inline void virtio_cwrite16(struct virtio_device *vdev, unsigned int offset, u16 val) { + __virtio16 v; + might_sleep(); - val = (__force u16)cpu_to_virtio16(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio16(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u32 virtio_cread32(struct virtio_device *vdev, unsigned int offset) { - u32 ret; + __virtio32 ret; might_sleep(); vdev->config->get(vdev, offset, &ret, sizeof(ret)); - return virtio32_to_cpu(vdev, (__force __virtio32)ret); + return virtio32_to_cpu(vdev, ret); } static inline void virtio_cwrite32(struct virtio_device *vdev, unsigned int offset, u32 val) { + __virtio32 v; + might_sleep(); - val = (__force u32)cpu_to_virtio32(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio32(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } static inline u64 virtio_cread64(struct virtio_device *vdev, unsigned int offset) { - u64 ret; + __virtio64 ret; + __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret)); - return virtio64_to_cpu(vdev, (__force __virtio64)ret); + return virtio64_to_cpu(vdev, ret); } static inline void virtio_cwrite64(struct virtio_device *vdev, unsigned int offset, u64 val) { + __virtio64 v; + might_sleep(); - val = (__force u64)cpu_to_virtio64(vdev, val); - vdev->config->set(vdev, offset, &val, sizeof(val)); + v = cpu_to_virtio64(vdev, val); + vdev->config->set(vdev, offset, &v, sizeof(v)); } /* Conditional config space accessors. */ @@ -459,4 +530,14 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) +/* Conditional config space accessors. 
*/ +#define virtio_cread_le_feature(vdev, fbit, structname, member, ptr) \ + ({ \ + int _r = 0; \ + if (!virtio_has_feature(vdev, fbit)) \ + _r = -ENOENT; \ + else \ + virtio_cread_le((vdev), structname, member, ptr); \ + _r; \ + }) #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 3dc70adfe5f5..b485b13fa50b 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -46,16 +46,15 @@ static inline void virtio_wmb(bool weak_barriers) dma_wmb(); } -static inline void virtio_store_mb(bool weak_barriers, - __virtio16 *p, __virtio16 v) -{ - if (weak_barriers) { - virt_store_mb(*p, v); - } else { - WRITE_ONCE(*p, v); - mb(); - } -} +#define virtio_store_mb(weak_barriers, p, v) \ +do { \ + if (weak_barriers) { \ + virt_store_mb(*p, v); \ + } else { \ + WRITE_ONCE(*p, v); \ + mb(); \ + } \ +} while (0) \ struct virtio_device; struct virtqueue; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ffef0f279747..18e75974d4e3 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -30,11 +30,16 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGFAULT, PGMAJFAULT, PGLAZYFREED, PGREFILL, + PGREUSE, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_DIRECT_THROTTLE, + PGSCAN_ANON, + PGSCAN_FILE, + PGSTEAL_ANON, + PGSTEAL_FILE, #ifdef CONFIG_NUMA PGSCAN_ZONE_RECLAIM_FAILED, #endif @@ -52,6 +57,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #endif #ifdef CONFIG_MIGRATION PGMIGRATE_SUCCESS, PGMIGRATE_FAIL, + THP_MIGRATION_SUCCESS, + THP_MIGRATION_FAIL, + THP_MIGRATION_SPLIT, #endif #ifdef CONFIG_COMPACTION COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED, diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index a95d3cc74d79..0221f852a7e1 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -88,8 +88,7 @@ struct vmap_area { * Highlevel APIs for driver use */ extern void vm_unmap_ram(const void *mem, unsigned int count); -extern void *vm_map_ram(struct page **pages, unsigned int count, - int node, pgprot_t prot); +extern void *vm_map_ram(struct page **pages, unsigned int count, int node); extern void vm_unmap_aliases(void); #ifdef CONFIG_MMU @@ -107,26 +106,15 @@ extern void *vzalloc(unsigned long size); extern void *vmalloc_user(unsigned long size); extern void *vmalloc_node(unsigned long size, int node); extern void *vzalloc_node(unsigned long size, int node); -extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags); -extern void *vmalloc_exec(unsigned long size); extern void *vmalloc_32(unsigned long size); extern void *vmalloc_32_user(unsigned long size); -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot); +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller); -#ifndef CONFIG_MMU -extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags); -static inline void *__vmalloc_node_flags_caller(unsigned long size, int node, - gfp_t flags, void *caller) -{ - return __vmalloc_node_flags(size, node, flags); -} -#else -extern void *__vmalloc_node_flags_caller(unsigned long size, - int node, gfp_t flags, void *caller); -#endif +void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, + int node, const void *caller); extern void 
vfree(const void *addr); extern void vfree_atomic(const void *addr); @@ -141,8 +129,22 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); -void vmalloc_sync_mappings(void); -void vmalloc_sync_unmappings(void); + +/* + * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values + * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings() + * needs to be called. + */ +#ifndef ARCH_PAGE_TABLE_SYNC_MASK +#define ARCH_PAGE_TABLE_SYNC_MASK 0 +#endif + +/* + * There is no default implementation for arch_sync_kernel_mappings(). The + * compiler is relied upon to optimize calls out if ARCH_PAGE_TABLE_SYNC_MASK + * is 0. + */ +void arch_sync_kernel_mappings(unsigned long start, unsigned long end); /* * Lowlevel-APIs (not for driver use!) @@ -161,8 +163,6 @@ static inline size_t get_vm_area_size(const struct vm_struct *area) extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags); extern struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, const void *caller); -extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, - unsigned long start, unsigned long end); extern struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, unsigned long start, unsigned long end, @@ -170,11 +170,11 @@ extern struct vm_struct *__get_vm_area_caller(unsigned long size, extern struct vm_struct *remove_vm_area(const void *addr); extern struct vm_struct *find_vm_area(const void *addr); -extern int map_vm_area(struct vm_struct *area, pgprot_t prot, - struct page **pages); #ifdef CONFIG_MMU extern int map_kernel_range_noflush(unsigned long start, unsigned long size, pgprot_t prot, struct page **pages); +int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot, + struct page **pages); extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); extern void unmap_kernel_range(unsigned long addr, unsigned long size); static inline void set_vm_flush_reset_perms(void *addr) @@ -191,14 +191,12 @@ map_kernel_range_noflush(unsigned long start, unsigned long size, { return size >> PAGE_SHIFT; } +#define map_kernel_range map_kernel_range_noflush static inline void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) { } -static inline void -unmap_kernel_range(unsigned long addr, unsigned long size) -{ -} +#define unmap_kernel_range unmap_kernel_range_noflush static inline void set_vm_flush_reset_perms(void *addr) { } diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 292485f3d24d..7557c1070fd7 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -8,6 +8,7 @@ #include <linux/vm_event_item.h> #include <linux/atomic.h> #include <linux/static_key.h> +#include <linux/mmdebug.h> extern int sysctl_stat_interval; @@ -16,8 +17,8 @@ extern int sysctl_stat_interval; #define DISABLE_NUMA_STAT 0 extern int sysctl_vm_numa_stat; DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key); -extern int sysctl_vm_numa_stat_handler(struct ctl_table *table, - int write, void __user *buffer, size_t *length, loff_t *ppos); +int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write, + void *buffer, size_t *length, loff_t *ppos); #endif struct reclaim_stat { @@ -26,9 +27,11 @@ struct reclaim_stat { unsigned nr_congested; unsigned nr_writeback; unsigned nr_immediate; + unsigned nr_pageout; unsigned nr_activate[2]; unsigned nr_ref_keep;
unsigned nr_unmap_fail; + unsigned nr_lazyfree_fail; }; enum writeback_stat_item { @@ -190,7 +193,8 @@ static inline unsigned long global_zone_page_state(enum zone_stat_item item) return x; } -static inline unsigned long global_node_page_state(enum node_stat_item item) +static inline +unsigned long global_node_page_state_pages(enum node_stat_item item) { long x = atomic_long_read(&vm_node_stat[item]); #ifdef CONFIG_SMP @@ -200,6 +204,13 @@ static inline unsigned long global_node_page_state(enum node_stat_item item) return x; } +static inline unsigned long global_node_page_state(enum node_stat_item item) +{ + VM_WARN_ON_ONCE(vmstat_item_in_bytes(item)); + + return global_node_page_state_pages(item); +} + static inline unsigned long zone_page_state(struct zone *zone, enum zone_stat_item item) { @@ -240,9 +251,12 @@ extern unsigned long sum_zone_node_page_state(int node, extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item); extern unsigned long node_page_state(struct pglist_data *pgdat, enum node_stat_item item); +extern unsigned long node_page_state_pages(struct pglist_data *pgdat, + enum node_stat_item item); #else #define sum_zone_node_page_state(node, item) global_zone_page_state(item) #define node_page_state(node, item) global_node_page_state(item) +#define node_page_state_pages(node, item) global_node_page_state_pages(item) #endif /* CONFIG_NUMA */ #ifdef CONFIG_SMP @@ -274,8 +288,8 @@ void cpu_vm_stats_fold(int cpu); void refresh_zone_stat_thresholds(void); struct ctl_table; -int vmstat_refresh(struct ctl_table *, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); +int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp, + loff_t *ppos); void drain_zonestat(struct zone *zone, struct per_cpu_pageset *); @@ -298,6 +312,11 @@ static inline void __mod_zone_page_state(struct zone *zone, static inline void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, int delta) { + if (vmstat_item_in_bytes(item)) { + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); + delta >>= PAGE_SHIFT; + } + node_page_state_add(delta, pgdat, item); } diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index fefb5292403b..be0afe6f379b 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h @@ -159,7 +159,7 @@ static inline bool vmci_handle_is_invalid(struct vmci_handle h) */ #define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID #define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID -static const struct vmci_handle VMCI_ANON_SRC_HANDLE = { +static const struct vmci_handle __maybe_unused VMCI_ANON_SRC_HANDLE = { .context = VMCI_ANON_SRC_CONTEXT_ID, .resource = VMCI_ANON_SRC_RESOURCE_ID }; diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 9e2763d7c159..59bd50f99291 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -105,9 +105,9 @@ struct vringh_kiov { /* Helpers for userspace vrings. 
*/ int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, - struct vring_desc __user *desc, - struct vring_avail __user *avail, - struct vring_used __user *used); + vring_desc_t __user *desc, + vring_avail_t __user *avail, + vring_used_t __user *used); static inline void vringh_iov_init(struct vringh_iov *iov, struct iovec *iovec, unsigned num) diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index abf5bccf906a..349e39c3ab60 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h @@ -74,8 +74,6 @@ int con_set_default_unimap(struct vc_data *vc); void con_free_unimap(struct vc_data *vc); int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); -#define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ - ((vc)->vc_toggle_meta ? 0x80 : 0)]) #else static inline int con_set_trans_old(unsigned char __user *table) { @@ -124,7 +122,6 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) return 0; } -#define vc_translate(vc, c) (c) #endif /* vt.c */ diff --git a/include/linux/wait.h b/include/linux/wait.h index feeb6be5cad6..27fb99cfeb02 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -21,6 +21,7 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int #define WQ_FLAG_WOKEN 0x02 #define WQ_FLAG_BOOKMARK 0x04 #define WQ_FLAG_CUSTOM 0x08 +#define WQ_FLAG_DONE 0x10 /* * A single wait-queue entry structure: @@ -1149,4 +1150,6 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i (wait)->flags = 0; \ } while (0) +bool try_invoke_on_locked_down_task(struct task_struct *p, bool (*func)(struct task_struct *t, void *arg), void *arg); + #endif /* _LINUX_WAIT_H */ diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h new file mode 100644 index 000000000000..c994d1b2cdba --- /dev/null +++ b/include/linux/watch_queue.h @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* User-mappable watch queue + * + * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * See Documentation/watch_queue.rst + */ + +#ifndef _LINUX_WATCH_QUEUE_H +#define _LINUX_WATCH_QUEUE_H + +#include <uapi/linux/watch_queue.h> +#include <linux/kref.h> +#include <linux/rcupdate.h> + +#ifdef CONFIG_WATCH_QUEUE + +struct cred; + +struct watch_type_filter { + enum watch_notification_type type; + __u32 subtype_filter[1]; /* Bitmask of subtypes to filter on */ + __u32 info_filter; /* Filter on watch_notification::info */ + __u32 info_mask; /* Mask of relevant bits in info_filter */ +}; + +struct watch_filter { + union { + struct rcu_head rcu; + unsigned long type_filter[2]; /* Bitmask of accepted types */ + }; + u32 nr_filters; /* Number of filters */ + struct watch_type_filter filters[]; +}; + +struct watch_queue { + struct rcu_head rcu; + struct watch_filter __rcu *filter; + struct pipe_inode_info *pipe; /* The pipe we're using as a buffer */ + struct hlist_head watches; /* Contributory watches */ + struct page **notes; /* Preallocated notifications */ + unsigned long *notes_bitmap; /* Allocation bitmap for notes */ + struct kref usage; /* Object usage count */ + spinlock_t lock; + unsigned int nr_notes; /* Number of notes */ + unsigned int nr_pages; /* Number of pages in notes[] */ + bool defunct; /* T when queues closed */ +}; + +/* + * Representation of a watch on an object. 
+ */ +struct watch { + union { + struct rcu_head rcu; + u32 info_id; /* ID to be OR'd in to info field */ + }; + struct watch_queue __rcu *queue; /* Queue to post events to */ + struct hlist_node queue_node; /* Link in queue->watches */ + struct watch_list __rcu *watch_list; + struct hlist_node list_node; /* Link in watch_list->watchers */ + const struct cred *cred; /* Creds of the owner of the watch */ + void *private; /* Private data for the watched object */ + u64 id; /* Internal identifier */ + struct kref usage; /* Object usage count */ +}; + +/* + * List of watches on an object. + */ +struct watch_list { + struct rcu_head rcu; + struct hlist_head watchers; + void (*release_watch)(struct watch *); + spinlock_t lock; +}; + +extern void __post_watch_notification(struct watch_list *, + struct watch_notification *, + const struct cred *, + u64); +extern struct watch_queue *get_watch_queue(int); +extern void put_watch_queue(struct watch_queue *); +extern void init_watch(struct watch *, struct watch_queue *); +extern int add_watch_to_object(struct watch *, struct watch_list *); +extern int remove_watch_from_object(struct watch_list *, struct watch_queue *, u64, bool); +extern long watch_queue_set_size(struct pipe_inode_info *, unsigned int); +extern long watch_queue_set_filter(struct pipe_inode_info *, + struct watch_notification_filter __user *); +extern int watch_queue_init(struct pipe_inode_info *); +extern void watch_queue_clear(struct watch_queue *); + +static inline void init_watch_list(struct watch_list *wlist, + void (*release_watch)(struct watch *)) +{ + INIT_HLIST_HEAD(&wlist->watchers); + spin_lock_init(&wlist->lock); + wlist->release_watch = release_watch; +} + +static inline void post_watch_notification(struct watch_list *wlist, + struct watch_notification *n, + const struct cred *cred, + u64 id) +{ + if (unlikely(wlist)) + __post_watch_notification(wlist, n, cred, id); +} + +static inline void remove_watch_list(struct watch_list *wlist, u64 id) +{ + if (wlist) { + remove_watch_from_object(wlist, NULL, id, true); + kfree_rcu(wlist, rcu); + } +} + +/** + * watch_sizeof - Calculate the information part of the size of a watch record, + * given the structure size. + */ +#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT) + +#else +static inline int watch_queue_init(struct pipe_inode_info *pipe) +{ + return -ENOPKG; +} + +#endif + +#endif /* _LINUX_WATCH_QUEUE_H */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 417d9f37077a..9b19e6bb68b5 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -37,15 +37,15 @@ struct watchdog_governor; * * The watchdog_ops structure contains a list of low-level operations * that control a watchdog device. It also contains the module that owns - * these operations. The start and stop function are mandatory, all other + * these operations. The start function is mandatory, all other * functions are optional. 
*/ struct watchdog_ops { struct module *owner; /* mandatory operations */ int (*start)(struct watchdog_device *); - int (*stop)(struct watchdog_device *); /* optional operations */ + int (*stop)(struct watchdog_device *); int (*ping)(struct watchdog_device *); unsigned int (*status)(struct watchdog_device *); int (*set_timeout)(struct watchdog_device *, unsigned int); @@ -210,6 +210,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd, extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); +int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int); + /* devres register variant */ int devm_watchdog_register_device(struct device *dev, struct watchdog_device *); diff --git a/include/linux/wimax/debug.h b/include/linux/wimax/debug.h index 4dd2c1cea6a9..cdae052bcdcd 100644 --- a/include/linux/wimax/debug.h +++ b/include/linux/wimax/debug.h @@ -184,8 +184,8 @@ do { \ /* - * CPP sintatic sugar to generate A_B like symbol names when one of - * the arguments is a a preprocessor #define. + * CPP syntactic sugar to generate A_B like symbol names when one of + * the arguments is a preprocessor #define. */ #define __D_PASTE__(varname, modulename) varname##_##modulename #define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename)) diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h index e497e621dbb7..3f496967b538 100644 --- a/include/linux/wkup_m3_ipc.h +++ b/include/linux/wkup_m3_ipc.h @@ -1,7 +1,7 @@ /* * TI Wakeup M3 for AMx3 SoCs Power Management Routines * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Dave Gerlach <d-gerlach@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 8b505d22fc0e..26de0cae2a0a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -62,7 +62,7 @@ enum { WORK_CPU_UNBOUND = NR_CPUS, /* - * Reserve 7 bits off of pwq pointer w/ debugobjects turned off. + * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. * This makes pwqs aligned to 256 bytes and allows 15 workqueue * flush colors. */ diff --git a/include/linux/writeback.h b/include/linux/writeback.h index a19d845dd7eb..8e5c5bb16e2d 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -197,6 +197,7 @@ void wakeup_flusher_threads(enum wb_reason reason); void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi, enum wb_reason reason); void inode_wait_for_writeback(struct inode *inode); +void inode_io_list_del(struct inode *inode); /* writeback.h requires fs.h; it, too, is not included from here. 
*/ static inline void wait_on_inode(struct inode *inode) @@ -362,24 +363,18 @@ extern int vm_highmem_is_dirtyable; extern int block_dump; extern int laptop_mode; -extern int dirty_background_ratio_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int dirty_background_bytes_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int dirty_ratio_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); -extern int dirty_bytes_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, - loff_t *ppos); +int dirty_background_ratio_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_background_bytes_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_ratio_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_bytes_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); int dirtytime_interval_handler(struct ctl_table *table, int write, - void __user *buffer, size_t *lenp, loff_t *ppos); - -struct ctl_table; -int dirty_writeback_centisecs_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); + void *buffer, size_t *lenp, loff_t *ppos); +int dirty_writeback_centisecs_handler(struct ctl_table *table, int write, + void *buffer, size_t *lenp, loff_t *ppos); void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh); diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h index d7554252404c..850424e5d030 100644 --- a/include/linux/ww_mutex.h +++ b/include/linux/ww_mutex.h @@ -48,14 +48,6 @@ struct ww_acquire_ctx { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \ , .ww_class = class diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 14c893433139..b4d70e7568b2 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -576,7 +576,7 @@ void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); * * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. - * Return: The entry which used to be at this index. + * Return: The old entry at this index or xa_err() if an error happened. */ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) @@ -602,7 +602,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, * * Context: Process context. Takes and releases the xa_lock while * disabling interrupts. - * Return: The entry which used to be at this index. + * Return: The old entry at this index or xa_err() if an error happened. 
*/ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 47eaa34f8761..10b4dc2709f0 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/types.h> #include <linux/spinlock.h> +#include <linux/mm.h> #include <uapi/linux/xattr.h> struct inode; @@ -51,14 +52,18 @@ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int); int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int); +int __vfs_setxattr_locked(struct dentry *, const char *, const void *, size_t, int, struct inode **); int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); int __vfs_removexattr(struct dentry *, const char *); +int __vfs_removexattr_locked(struct dentry *, const char *, struct inode **); int vfs_removexattr(struct dentry *, const char *); ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, size_t size, gfp_t flags); +int xattr_supported_namespace(struct inode *inode, const char *prefix); + static inline const char *xattr_prefix(const struct xattr_handler *handler) { return handler->prefix ?: handler->name; @@ -94,7 +99,7 @@ static inline void simple_xattrs_free(struct simple_xattrs *xattrs) list_for_each_entry_safe(xattr, node, &xattrs->head, list) { kfree(xattr->name); - kfree(xattr); + kvfree(xattr); } } diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h index 52b073fea17f..df42511438d0 100644 --- a/include/linux/xxhash.h +++ b/include/linux/xxhash.h @@ -34,7 +34,7 @@ * ("BSD"). * * You can contact the author at: - * - xxHash homepage: http://cyan4973.github.io/xxHash/ + * - xxHash homepage: https://cyan4973.github.io/xxHash/ * - xxHash source repository: https://github.com/Cyan4973/xxHash */ diff --git a/include/linux/xz.h b/include/linux/xz.h index 64cffa6ddfce..9884c8440188 100644 --- a/include/linux/xz.h +++ b/include/linux/xz.h @@ -2,7 +2,7 @@ * XZ decompressor * * Authors: Lasse Collin <lasse.collin@tukaani.org> - * Igor Pavlov <http://7-zip.org/> + * Igor Pavlov <https://7-zip.org/> * * This file has been put into the public domain. * You can do whatever you want with this file. @@ -28,7 +28,7 @@ * enum xz_mode - Operation mode * * @XZ_SINGLE: Single-call mode. This uses less RAM than - * than multi-call modes, because the LZMA2 + * multi-call modes, because the LZMA2 * dictionary doesn't need to be allocated as * part of the decoder state. All required data * structures are allocated at initialization, diff --git a/include/linux/zlib.h b/include/linux/zlib.h index c757d848a758..78ede944c082 100644 --- a/include/linux/zlib.h +++ b/include/linux/zlib.h @@ -23,7 +23,7 @@ The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt + Comments) 1950 to 1952 in the files https://www.ietf.org/rfc/rfc1950.txt (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). 
*/ diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 2219cce81ca4..0fdbf653b173 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -20,7 +20,7 @@ * zsmalloc mapping modes * * NOTE: These only make a difference when a mapped object spans pages. - * They also have no effect when PGTABLE_MAPPING is selected. + * They also have no effect when ZSMALLOC_PGTABLE_MAPPING is selected. */ enum zs_mapmode { ZS_MM_RW, /* normal read-write mapping */
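[Editor's note] The vdpa.h hunks earlier in this diff add the vdpa_reset(), vdpa_set_features() and vdpa_get_config() helpers, with vdpa_get_config() falling back to vdpa_set_features(vdev, 0) for legacy guests that touch config space before feature negotiation. A minimal, hypothetical usage sketch follows; demo_setup() and its locals are illustrative, not part of the patch:

#include <linux/vdpa.h>

static int demo_setup(struct vdpa_device *vdev, u64 negotiated_features)
{
	u8 cfg_byte;
	int err;

	vdpa_reset(vdev);	/* clears features_valid, sets status to 0 */

	err = vdpa_set_features(vdev, negotiated_features);
	if (err)
		return err;

	/*
	 * features_valid is now true, so this read goes straight to
	 * ops->get_config() without triggering the legacy fallback.
	 */
	vdpa_get_config(vdev, 0, &cfg_byte, sizeof(cfg_byte));
	return 0;
}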
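[Editor's note] Likewise, the virtio_config.h hunks above introduce _Generic-based accessors, including virtio_cread_le()/virtio_cwrite_le() for always-little-endian ("modern") config space. A short sketch under stated assumptions: struct demo_config and its queue_size field are invented for illustration, while the macro and its typecheck against the __le32 member are exactly what the patch adds:

#include <linux/virtio_config.h>

struct demo_config {
	__le32 queue_size;	/* hypothetical device config field */
};

static u32 demo_read_queue_size(struct virtio_device *vdev)
{
	u32 qsize;

	/* Expands to vdev->config->get() plus an le32_to_cpu() conversion. */
	virtio_cread_le(vdev, struct demo_config, queue_size, &qsize);
	return qsize;
}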
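[Editor's note] Finally, the xarray.h hunks above tighten the documented return value of xa_store_bh()/xa_store_irq(): the "old entry" can also encode an allocation error. A minimal, hypothetical caller (my_xa and store_checked() are illustrative) showing the xa_err() check the updated comments call for:

#include <linux/xarray.h>

static DEFINE_XARRAY(my_xa);	/* illustrative xarray */

static int store_checked(unsigned long index, void *item)
{
	void *old = xa_store_bh(&my_xa, index, item, GFP_KERNEL);

	/* On failure the returned "old entry" is really an errno in disguise. */
	if (xa_err(old))
		return xa_err(old);

	/* Otherwise 'old' is the previous entry at @index (NULL if none). */
	return 0;
}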