Diffstat (limited to 'include/linux')
188 files changed, 3288 insertions, 2639 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index fbc2146050a4..143ce7e0bee1 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -577,7 +577,6 @@ extern u32 osc_sb_native_usb4_control; #define OSC_PCI_MSI_SUPPORT 0x00000010 #define OSC_PCI_EDR_SUPPORT 0x00000080 #define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 -#define OSC_PCI_SUPPORT_MASKS 0x0000019f /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 @@ -587,7 +586,6 @@ extern u32 osc_sb_native_usb4_control; #define OSC_PCI_EXPRESS_CAPABILITY_CONTROL 0x00000010 #define OSC_PCI_EXPRESS_LTR_CONTROL 0x00000020 #define OSC_PCI_EXPRESS_DPC_CONTROL 0x00000080 -#define OSC_PCI_CONTROL_MASKS 0x000000bf #define ACPI_GSB_ACCESS_ATTRIB_QUICK 0x00000002 #define ACPI_GSB_ACCESS_ATTRIB_SEND_RCV 0x00000004 @@ -1016,6 +1014,7 @@ int acpi_subsys_runtime_suspend(struct device *dev); int acpi_subsys_runtime_resume(struct device *dev); int acpi_dev_pm_attach(struct device *dev, bool power_on); bool acpi_storage_d3(struct device *dev); +bool acpi_dev_state_d0(struct device *dev); #else static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } @@ -1027,6 +1026,10 @@ static inline bool acpi_storage_d3(struct device *dev) { return false; } +static inline bool acpi_dev_state_d0(struct device *dev) +{ + return true; +} #endif #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index c68d87b87283..edfcf7a14dcd 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -122,24 +122,6 @@ struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); void amba_device_put(struct amba_device *); int amba_device_add(struct amba_device *, struct resource *); int amba_device_register(struct amba_device *, struct resource *); -struct amba_device *amba_apb_device_add(struct device *parent, const char *name, - resource_size_t base, size_t size, - int irq1, int irq2, void *pdata, - unsigned int periphid); -struct amba_device *amba_ahb_device_add(struct device *parent, const char *name, - resource_size_t base, size_t size, - int irq1, int irq2, void *pdata, - unsigned int periphid); -struct amba_device * -amba_apb_device_add_res(struct device *parent, const char *name, - resource_size_t base, size_t size, int irq1, - int irq2, void *pdata, unsigned int periphid, - struct resource *resbase); -struct amba_device * -amba_ahb_device_add_res(struct device *parent, const char *name, - resource_size_t base, size_t size, int irq1, - int irq2, void *pdata, unsigned int periphid, - struct resource *resbase); void amba_device_unregister(struct amba_device *); struct amba_device *amba_find_device(const char *, struct device *, unsigned int, unsigned int); int amba_request_regions(struct amba_device *, const char *); diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index 505c679b6a9b..85651e41ded8 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -262,6 +262,8 @@ struct ffa_dev_ops { int (*memory_reclaim)(u64 g_handle, u32 flags); int (*memory_share)(struct ffa_device *dev, struct ffa_mem_ops_args *args); + int (*memory_lend)(struct ffa_device *dev, + struct ffa_mem_ops_args *args); }; #endif /* _LINUX_ARM_FFA_H */ diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 33207004cfde..993c5628a726 100644 --- a/include/linux/backing-dev-defs.h +++ 
b/include/linux/backing-dev-defs.h @@ -103,6 +103,9 @@ struct wb_completion { * change as blkcg is disabled and enabled higher up in the hierarchy, a wb * is tested for blkcg after lookup and removed from index on mismatch so * that a new wb for the combination can be created. + * + * Each bdi_writeback that is not embedded into the backing_dev_info must hold + * a reference to the parent backing_dev_info. See cgwb_create() for details. */ struct bdi_writeback { struct backing_dev_info *bdi; /* our parent bdi */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 9c14f0a8dbe5..483979c1b9f4 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -141,7 +141,6 @@ static inline int wb_congested(struct bdi_writeback *wb, int cong_bits) } long congestion_wait(int sync, long timeout); -long wait_iff_congested(int sync, long timeout); static inline bool mapping_can_writeback(struct address_space *mapping) { diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8682663e7368..2949d9ac7484 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -798,6 +798,7 @@ void blk_mq_start_hw_queues(struct request_queue *q); void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_quiesce_queue(struct request_queue *q); +void blk_mq_wait_quiesce_done(struct request_queue *q); void blk_mq_unquiesce_queue(struct request_queue *q); void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index eed86eb0a1de..fc53e0ad56d9 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -2,6 +2,7 @@ #ifndef _LINUX_BH_H #define _LINUX_BH_H +#include <linux/instruction_pointer.h> #include <linux/preempt.h> #if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS) diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 3536ab432b30..11820a430d6c 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -157,26 +157,6 @@ struct cgroup_bpf { int cgroup_bpf_inherit(struct cgroup *cgrp); void cgroup_bpf_offline(struct cgroup *cgrp); -int __cgroup_bpf_attach(struct cgroup *cgrp, - struct bpf_prog *prog, struct bpf_prog *replace_prog, - struct bpf_cgroup_link *link, - enum bpf_attach_type type, u32 flags); -int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - struct bpf_cgroup_link *link, - enum bpf_attach_type type); -int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, - union bpf_attr __user *uattr); - -/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ -int cgroup_bpf_attach(struct cgroup *cgrp, - struct bpf_prog *prog, struct bpf_prog *replace_prog, - struct bpf_cgroup_link *link, enum bpf_attach_type type, - u32 flags); -int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, - enum bpf_attach_type type); -int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, - union bpf_attr __user *uattr); - int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb, enum cgroup_bpf_attach_type atype); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index df3410bff4b0..56098c866704 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -484,6 +484,12 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) aux->ctx_field_size = size; } 
+static inline bool bpf_pseudo_func(const struct bpf_insn *insn) +{ + return insn->code == (BPF_LD | BPF_IMM | BPF_DW) && + insn->src_reg == BPF_PSEUDO_FUNC; +} + struct bpf_prog_ops { int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index d128ad1570aa..3650e926e93f 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -42,6 +42,7 @@ struct tegra_cpu_car_ops { #endif }; +#ifdef CONFIG_ARCH_TEGRA extern struct tegra_cpu_car_ops *tegra_cpu_car_ops; static inline void tegra_wait_cpu_in_reset(u32 cpu) @@ -83,8 +84,29 @@ static inline void tegra_disable_cpu_clock(u32 cpu) tegra_cpu_car_ops->disable_clock(cpu); } +#else +static inline void tegra_wait_cpu_in_reset(u32 cpu) +{ +} -#ifdef CONFIG_PM_SLEEP +static inline void tegra_put_cpu_in_reset(u32 cpu) +{ +} + +static inline void tegra_cpu_out_of_reset(u32 cpu) +{ +} + +static inline void tegra_enable_cpu_clock(u32 cpu) +{ +} + +static inline void tegra_disable_cpu_clock(u32 cpu) +{ +} +#endif + +#if defined(CONFIG_ARCH_TEGRA) && defined(CONFIG_PM_SLEEP) static inline bool tegra_cpu_rail_off_ready(void) { if (WARN_ON(!tegra_cpu_car_ops->rail_off_ready)) diff --git a/include/linux/cma.h b/include/linux/cma.h index 53fd8c3cdbd0..bd801023504b 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -46,6 +46,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma **res_cma); extern struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align, bool no_warn); +extern bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count); extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long count); extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 7bbd8df02532..ccbbd31b3aae 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -150,3 +150,11 @@ #else #define __diag_GCC_8(s) #endif + +/* + * Prior to 9.1, -Wno-alloc-size-larger-than (and therefore the "alloc_size" + * attribute) do not work, and must be disabled. + */ +#if GCC_VERSION < 90100 +#undef __alloc_size__ +#endif diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index e6ec63403965..b9121afd8733 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -34,6 +34,15 @@ #define __aligned_largest __attribute__((__aligned__)) /* + * Note: do not use this directly. Instead, use __alloc_size() since it is conditionally + * available and includes other attributes. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alloc_005fsize-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#alloc-size + */ +#define __alloc_size__(x, ...) 
__attribute__((__alloc_size__(x, ## __VA_ARGS__))) + +/* * Note: users of __always_inline currently do not write "inline" themselves, * which seems to be required by gcc to apply the attribute according * to its docs (and also "warning: always_inline function might not be @@ -104,7 +113,6 @@ #define __deprecated /* - * Optional: only supported since gcc >= 5.1 * Optional: not supported by clang * Optional: not supported by icc * @@ -153,6 +161,7 @@ /* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#malloc */ #define __malloc __attribute__((__malloc__)) diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 05ceb2e92b0e..1d32f4c03c9e 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -250,6 +250,18 @@ struct ftrace_likely_data { # define __cficanonical #endif +/* + * Any place that could be marked with the "alloc_size" attribute is also + * a place to be marked with the "malloc" attribute. Do this as part of the + * __alloc_size macro to avoid redundant attributes and to avoid missing a + * __malloc marking. + */ +#ifdef __alloc_size__ +# define __alloc_size(x, ...) __alloc_size__(x, ## __VA_ARGS__) __malloc +#else +# define __alloc_size(x, ...) __malloc +#endif + #ifndef asm_volatile_goto #define asm_volatile_goto(x...) asm goto(x) #endif @@ -293,7 +305,13 @@ struct ftrace_likely_data { #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ do { \ - extern void prefix ## suffix(void) __compiletime_error(msg); \ + /* \ + * __noreturn is needed to give the compiler enough \ + * information to avoid certain possibly-uninitialized \ + * warnings (regardless of the build failing). \ + */ \ + __noreturn extern void prefix ## suffix(void) \ + __compiletime_error(msg); \ if (!(condition)) \ prefix ## suffix(); \ } while (0) diff --git a/include/linux/console.h b/include/linux/console.h index 20874db50bc8..a97f277cfdfa 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -149,6 +149,8 @@ struct console { short flags; short index; int cflag; + uint ispeed; + uint ospeed; void *data; struct console *next; }; diff --git a/include/linux/container_of.h b/include/linux/container_of.h new file mode 100644 index 000000000000..2f4944b791b8 --- /dev/null +++ b/include/linux/container_of.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_CONTAINER_OF_H +#define _LINUX_CONTAINER_OF_H + +#include <linux/build_bug.h> +#include <linux/err.h> + +#define typeof_member(T, m) typeof(((T*)0)->m) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + */ +#define container_of(ptr, type, member) ({ \ + void *__mptr = (void *)(ptr); \ + static_assert(__same_type(*(ptr), ((type *)0)->member) || \ + __same_type(*(ptr), void), \ + "pointer type mismatch in container_of()"); \ + ((type *)(__mptr - offsetof(type, member))); }) + +/** + * container_of_safe - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. + * + * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. 
+ */ +#define container_of_safe(ptr, type, member) ({ \ + void *__mptr = (void *)(ptr); \ + static_assert(__same_type(*(ptr), ((type *)0)->member) || \ + __same_type(*(ptr), void), \ + "pointer type mismatch in container_of_safe()"); \ + IS_ERR_OR_NULL(__mptr) ? ERR_CAST(__mptr) : \ + ((type *)(__mptr - offsetof(type, member))); }) + +#endif /* _LINUX_CONTAINER_OF_H */ diff --git a/include/linux/counter.h b/include/linux/counter.h index d16ce2819b48..b7d0a00a61cf 100644 --- a/include/linux/counter.h +++ b/include/linux/counter.h @@ -6,417 +6,280 @@ #ifndef _COUNTER_H_ #define _COUNTER_H_ -#include <linux/counter_enum.h> +#include <linux/cdev.h> #include <linux/device.h> +#include <linux/kernel.h> +#include <linux/kfifo.h> +#include <linux/mutex.h> +#include <linux/spinlock_types.h> #include <linux/types.h> - -enum counter_count_direction { - COUNTER_COUNT_DIRECTION_FORWARD = 0, - COUNTER_COUNT_DIRECTION_BACKWARD -}; -extern const char *const counter_count_direction_str[2]; - -enum counter_count_mode { - COUNTER_COUNT_MODE_NORMAL = 0, - COUNTER_COUNT_MODE_RANGE_LIMIT, - COUNTER_COUNT_MODE_NON_RECYCLE, - COUNTER_COUNT_MODE_MODULO_N -}; -extern const char *const counter_count_mode_str[4]; +#include <linux/wait.h> +#include <uapi/linux/counter.h> struct counter_device; +struct counter_count; +struct counter_synapse; struct counter_signal; +enum counter_comp_type { + COUNTER_COMP_U8, + COUNTER_COMP_U64, + COUNTER_COMP_BOOL, + COUNTER_COMP_SIGNAL_LEVEL, + COUNTER_COMP_FUNCTION, + COUNTER_COMP_SYNAPSE_ACTION, + COUNTER_COMP_ENUM, + COUNTER_COMP_COUNT_DIRECTION, + COUNTER_COMP_COUNT_MODE, +}; + /** - * struct counter_signal_ext - Counter Signal extensions - * @name: attribute name - * @read: read callback for this attribute; may be NULL - * @write: write callback for this attribute; may be NULL - * @priv: data private to the driver + * struct counter_comp - Counter component node + * @type: Counter component data type + * @name: device-specific component name + * @priv: component-relevant data + * @action_read Synapse action mode read callback. The read value of the + * respective Synapse action mode should be passed back via + * the action parameter. + * @device_u8_read Device u8 component read callback. The read value of the + * respective Device u8 component should be passed back via + * the val parameter. + * @count_u8_read Count u8 component read callback. The read value of the + * respective Count u8 component should be passed back via + * the val parameter. + * @signal_u8_read Signal u8 component read callback. The read value of the + * respective Signal u8 component should be passed back via + * the val parameter. + * @device_u32_read Device u32 component read callback. The read value of + * the respective Device u32 component should be passed + * back via the val parameter. + * @count_u32_read Count u32 component read callback. The read value of the + * respective Count u32 component should be passed back via + * the val parameter. + * @signal_u32_read Signal u32 component read callback. The read value of + * the respective Signal u32 component should be passed + * back via the val parameter. + * @device_u64_read Device u64 component read callback. The read value of + * the respective Device u64 component should be passed + * back via the val parameter. + * @count_u64_read Count u64 component read callback. The read value of the + * respective Count u64 component should be passed back via + * the val parameter. + * @signal_u64_read Signal u64 component read callback. 
The read value of + * the respective Signal u64 component should be passed + * back via the val parameter. + * @action_write Synapse action mode write callback. The write value of + * the respective Synapse action mode is passed via the + * action parameter. + * @device_u8_write Device u8 component write callback. The write value of + * the respective Device u8 component is passed via the val + * parameter. + * @count_u8_write Count u8 component write callback. The write value of + * the respective Count u8 component is passed via the val + * parameter. + * @signal_u8_write Signal u8 component write callback. The write value of + * the respective Signal u8 component is passed via the val + * parameter. + * @device_u32_write Device u32 component write callback. The write value of + * the respective Device u32 component is passed via the + * val parameter. + * @count_u32_write Count u32 component write callback. The write value of + * the respective Count u32 component is passed via the val + * parameter. + * @signal_u32_write Signal u32 component write callback. The write value of + * the respective Signal u32 component is passed via the + * val parameter. + * @device_u64_write Device u64 component write callback. The write value of + * the respective Device u64 component is passed via the + * val parameter. + * @count_u64_write Count u64 component write callback. The write value of + * the respective Count u64 component is passed via the val + * parameter. + * @signal_u64_write Signal u64 component write callback. The write value of + * the respective Signal u64 component is passed via the + * val parameter. */ -struct counter_signal_ext { +struct counter_comp { + enum counter_comp_type type; const char *name; - ssize_t (*read)(struct counter_device *counter, - struct counter_signal *signal, void *priv, char *buf); - ssize_t (*write)(struct counter_device *counter, - struct counter_signal *signal, void *priv, - const char *buf, size_t len); void *priv; + union { + int (*action_read)(struct counter_device *counter, + struct counter_count *count, + struct counter_synapse *synapse, + enum counter_synapse_action *action); + int (*device_u8_read)(struct counter_device *counter, u8 *val); + int (*count_u8_read)(struct counter_device *counter, + struct counter_count *count, u8 *val); + int (*signal_u8_read)(struct counter_device *counter, + struct counter_signal *signal, u8 *val); + int (*device_u32_read)(struct counter_device *counter, + u32 *val); + int (*count_u32_read)(struct counter_device *counter, + struct counter_count *count, u32 *val); + int (*signal_u32_read)(struct counter_device *counter, + struct counter_signal *signal, u32 *val); + int (*device_u64_read)(struct counter_device *counter, + u64 *val); + int (*count_u64_read)(struct counter_device *counter, + struct counter_count *count, u64 *val); + int (*signal_u64_read)(struct counter_device *counter, + struct counter_signal *signal, u64 *val); + }; + union { + int (*action_write)(struct counter_device *counter, + struct counter_count *count, + struct counter_synapse *synapse, + enum counter_synapse_action action); + int (*device_u8_write)(struct counter_device *counter, u8 val); + int (*count_u8_write)(struct counter_device *counter, + struct counter_count *count, u8 val); + int (*signal_u8_write)(struct counter_device *counter, + struct counter_signal *signal, u8 val); + int (*device_u32_write)(struct counter_device *counter, + u32 val); + int (*count_u32_write)(struct counter_device *counter, + struct counter_count *count, u32 
val); + int (*signal_u32_write)(struct counter_device *counter, + struct counter_signal *signal, u32 val); + int (*device_u64_write)(struct counter_device *counter, + u64 val); + int (*count_u64_write)(struct counter_device *counter, + struct counter_count *count, u64 val); + int (*signal_u64_write)(struct counter_device *counter, + struct counter_signal *signal, u64 val); + }; }; /** * struct counter_signal - Counter Signal node - * @id: unique ID used to identify signal - * @name: device-specific Signal name; ideally, this should match the name - * as it appears in the datasheet documentation - * @ext: optional array of Counter Signal extensions - * @num_ext: number of Counter Signal extensions specified in @ext - * @priv: optional private data supplied by driver + * @id: unique ID used to identify the Signal + * @name: device-specific Signal name + * @ext: optional array of Signal extensions + * @num_ext: number of Signal extensions specified in @ext */ struct counter_signal { int id; const char *name; - const struct counter_signal_ext *ext; + struct counter_comp *ext; size_t num_ext; - - void *priv; -}; - -/** - * struct counter_signal_enum_ext - Signal enum extension attribute - * @items: Array of strings - * @num_items: Number of items specified in @items - * @set: Set callback function; may be NULL - * @get: Get callback function; may be NULL - * - * The counter_signal_enum_ext structure can be used to implement enum style - * Signal extension attributes. Enum style attributes are those which have a set - * of strings that map to unsigned integer values. The Generic Counter Signal - * enum extension helper code takes care of mapping between value and string, as - * well as generating a "_available" file which contains a list of all available - * items. The get callback is used to query the currently active item; the index - * of the item within the respective items array is returned via the 'item' - * parameter. The set callback is called when the attribute is updated; the - * 'item' parameter contains the index of the newly activated item within the - * respective items array. - */ -struct counter_signal_enum_ext { - const char * const *items; - size_t num_items; - int (*get)(struct counter_device *counter, - struct counter_signal *signal, size_t *item); - int (*set)(struct counter_device *counter, - struct counter_signal *signal, size_t item); -}; - -/** - * COUNTER_SIGNAL_ENUM() - Initialize Signal enum extension - * @_name: Attribute name - * @_e: Pointer to a counter_signal_enum_ext structure - * - * This should usually be used together with COUNTER_SIGNAL_ENUM_AVAILABLE() - */ -#define COUNTER_SIGNAL_ENUM(_name, _e) \ -{ \ - .name = (_name), \ - .read = counter_signal_enum_read, \ - .write = counter_signal_enum_write, \ - .priv = (_e) \ -} - -/** - * COUNTER_SIGNAL_ENUM_AVAILABLE() - Initialize Signal enum available extension - * @_name: Attribute name ("_available" will be appended to the name) - * @_e: Pointer to a counter_signal_enum_ext structure - * - * Creates a read only attribute that lists all the available enum items in a - * newline separated list. 
This should usually be used together with - * COUNTER_SIGNAL_ENUM() - */ -#define COUNTER_SIGNAL_ENUM_AVAILABLE(_name, _e) \ -{ \ - .name = (_name "_available"), \ - .read = counter_signal_enum_available_read, \ - .priv = (_e) \ -} - -enum counter_synapse_action { - COUNTER_SYNAPSE_ACTION_NONE = 0, - COUNTER_SYNAPSE_ACTION_RISING_EDGE, - COUNTER_SYNAPSE_ACTION_FALLING_EDGE, - COUNTER_SYNAPSE_ACTION_BOTH_EDGES }; /** * struct counter_synapse - Counter Synapse node - * @action: index of current action mode * @actions_list: array of available action modes * @num_actions: number of action modes specified in @actions_list - * @signal: pointer to associated signal + * @signal: pointer to the associated Signal */ struct counter_synapse { - size_t action; const enum counter_synapse_action *actions_list; size_t num_actions; struct counter_signal *signal; }; -struct counter_count; - -/** - * struct counter_count_ext - Counter Count extension - * @name: attribute name - * @read: read callback for this attribute; may be NULL - * @write: write callback for this attribute; may be NULL - * @priv: data private to the driver - */ -struct counter_count_ext { - const char *name; - ssize_t (*read)(struct counter_device *counter, - struct counter_count *count, void *priv, char *buf); - ssize_t (*write)(struct counter_device *counter, - struct counter_count *count, void *priv, - const char *buf, size_t len); - void *priv; -}; - -enum counter_function { - COUNTER_FUNCTION_INCREASE = 0, - COUNTER_FUNCTION_DECREASE, - COUNTER_FUNCTION_PULSE_DIRECTION, - COUNTER_FUNCTION_QUADRATURE_X1_A, - COUNTER_FUNCTION_QUADRATURE_X1_B, - COUNTER_FUNCTION_QUADRATURE_X2_A, - COUNTER_FUNCTION_QUADRATURE_X2_B, - COUNTER_FUNCTION_QUADRATURE_X4 -}; - /** * struct counter_count - Counter Count node - * @id: unique ID used to identify Count - * @name: device-specific Count name; ideally, this should match - * the name as it appears in the datasheet documentation - * @function: index of current function mode - * @functions_list: array available function modes + * @id: unique ID used to identify the Count + * @name: device-specific Count name + * @functions_list: array of available function modes * @num_functions: number of function modes specified in @functions_list - * @synapses: array of synapses for initialization - * @num_synapses: number of synapses specified in @synapses - * @ext: optional array of Counter Count extensions - * @num_ext: number of Counter Count extensions specified in @ext - * @priv: optional private data supplied by driver + * @synapses: array of Synapses for initialization + * @num_synapses: number of Synapses specified in @synapses + * @ext: optional array of Count extensions + * @num_ext: number of Count extensions specified in @ext */ struct counter_count { int id; const char *name; - size_t function; const enum counter_function *functions_list; size_t num_functions; struct counter_synapse *synapses; size_t num_synapses; - const struct counter_count_ext *ext; + struct counter_comp *ext; size_t num_ext; - - void *priv; }; /** - * struct counter_count_enum_ext - Count enum extension attribute - * @items: Array of strings - * @num_items: Number of items specified in @items - * @set: Set callback function; may be NULL - * @get: Get callback function; may be NULL - * - * The counter_count_enum_ext structure can be used to implement enum style - * Count extension attributes. Enum style attributes are those which have a set - * of strings that map to unsigned integer values. 
The Generic Counter Count - * enum extension helper code takes care of mapping between value and string, as - * well as generating a "_available" file which contains a list of all available - * items. The get callback is used to query the currently active item; the index - * of the item within the respective items array is returned via the 'item' - * parameter. The set callback is called when the attribute is updated; the - * 'item' parameter contains the index of the newly activated item within the - * respective items array. + * struct counter_event_node - Counter Event node + * @l: list of current watching Counter events + * @event: event that triggers + * @channel: event channel + * @comp_list: list of components to watch when event triggers */ -struct counter_count_enum_ext { - const char * const *items; - size_t num_items; - int (*get)(struct counter_device *counter, struct counter_count *count, - size_t *item); - int (*set)(struct counter_device *counter, struct counter_count *count, - size_t item); -}; - -/** - * COUNTER_COUNT_ENUM() - Initialize Count enum extension - * @_name: Attribute name - * @_e: Pointer to a counter_count_enum_ext structure - * - * This should usually be used together with COUNTER_COUNT_ENUM_AVAILABLE() - */ -#define COUNTER_COUNT_ENUM(_name, _e) \ -{ \ - .name = (_name), \ - .read = counter_count_enum_read, \ - .write = counter_count_enum_write, \ - .priv = (_e) \ -} - -/** - * COUNTER_COUNT_ENUM_AVAILABLE() - Initialize Count enum available extension - * @_name: Attribute name ("_available" will be appended to the name) - * @_e: Pointer to a counter_count_enum_ext structure - * - * Creates a read only attribute that lists all the available enum items in a - * newline separated list. This should usually be used together with - * COUNTER_COUNT_ENUM() - */ -#define COUNTER_COUNT_ENUM_AVAILABLE(_name, _e) \ -{ \ - .name = (_name "_available"), \ - .read = counter_count_enum_available_read, \ - .priv = (_e) \ -} - -/** - * struct counter_device_attr_group - internal container for attribute group - * @attr_group: Counter sysfs attributes group - * @attr_list: list to keep track of created Counter sysfs attributes - * @num_attr: number of Counter sysfs attributes - */ -struct counter_device_attr_group { - struct attribute_group attr_group; - struct list_head attr_list; - size_t num_attr; -}; - -/** - * struct counter_device_state - internal state container for a Counter device - * @id: unique ID used to identify the Counter - * @dev: internal device structure - * @groups_list: attribute groups list (for Signals, Counts, and ext) - * @num_groups: number of attribute groups containers - * @groups: Counter sysfs attribute groups (to populate @dev.groups) - */ -struct counter_device_state { - int id; - struct device dev; - struct counter_device_attr_group *groups_list; - size_t num_groups; - const struct attribute_group **groups; -}; - -enum counter_signal_level { - COUNTER_SIGNAL_LEVEL_LOW, - COUNTER_SIGNAL_LEVEL_HIGH, +struct counter_event_node { + struct list_head l; + u8 event; + u8 channel; + struct list_head comp_list; }; /** * struct counter_ops - Callbacks from driver - * @signal_read: optional read callback for Signal attribute. The read - * level of the respective Signal should be passed back via - * the level parameter. - * @count_read: optional read callback for Count attribute. The read - * value of the respective Count should be passed back via - * the val parameter. - * @count_write: optional write callback for Count attribute. 
The write - * value for the respective Count is passed in via the val + * @signal_read: optional read callback for Signals. The read level of + * the respective Signal should be passed back via the + * level parameter. + * @count_read: read callback for Counts. The read value of the + * respective Count should be passed back via the value + * parameter. + * @count_write: optional write callback for Counts. The write value for + * the respective Count is passed in via the value * parameter. - * @function_get: function to get the current count function mode. Returns - * 0 on success and negative error code on error. The index - * of the respective Count's returned function mode should - * be passed back via the function parameter. - * @function_set: function to set the count function mode. function is the - * index of the requested function mode from the respective - * Count's functions_list array. - * @action_get: function to get the current action mode. Returns 0 on - * success and negative error code on error. The index of - * the respective Synapse's returned action mode should be + * @function_read: read callback the Count function modes. The read + * function mode of the respective Count should be passed + * back via the function parameter. + * @function_write: optional write callback for Count function modes. The + * function mode to write for the respective Count is + * passed in via the function parameter. + * @action_read: optional read callback the Synapse action modes. The + * read action mode of the respective Synapse should be * passed back via the action parameter. - * @action_set: function to set the action mode. action is the index of - * the requested action mode from the respective Synapse's - * actions_list array. + * @action_write: optional write callback for Synapse action modes. The + * action mode to write for the respective Synapse is + * passed in via the action parameter. + * @events_configure: optional write callback to configure events. The list of + * struct counter_event_node may be accessed via the + * events_list member of the counter parameter. + * @watch_validate: optional callback to validate a watch. The Counter + * component watch configuration is passed in via the watch + * parameter. A return value of 0 indicates a valid Counter + * component watch configuration. 
*/ struct counter_ops { int (*signal_read)(struct counter_device *counter, struct counter_signal *signal, enum counter_signal_level *level); int (*count_read)(struct counter_device *counter, - struct counter_count *count, unsigned long *val); + struct counter_count *count, u64 *value); int (*count_write)(struct counter_device *counter, - struct counter_count *count, unsigned long val); - int (*function_get)(struct counter_device *counter, - struct counter_count *count, size_t *function); - int (*function_set)(struct counter_device *counter, - struct counter_count *count, size_t function); - int (*action_get)(struct counter_device *counter, - struct counter_count *count, - struct counter_synapse *synapse, size_t *action); - int (*action_set)(struct counter_device *counter, - struct counter_count *count, - struct counter_synapse *synapse, size_t action); -}; - -/** - * struct counter_device_ext - Counter device extension - * @name: attribute name - * @read: read callback for this attribute; may be NULL - * @write: write callback for this attribute; may be NULL - * @priv: data private to the driver - */ -struct counter_device_ext { - const char *name; - ssize_t (*read)(struct counter_device *counter, void *priv, char *buf); - ssize_t (*write)(struct counter_device *counter, void *priv, - const char *buf, size_t len); - void *priv; -}; - -/** - * struct counter_device_enum_ext - Counter enum extension attribute - * @items: Array of strings - * @num_items: Number of items specified in @items - * @set: Set callback function; may be NULL - * @get: Get callback function; may be NULL - * - * The counter_device_enum_ext structure can be used to implement enum style - * Counter extension attributes. Enum style attributes are those which have a - * set of strings that map to unsigned integer values. The Generic Counter enum - * extension helper code takes care of mapping between value and string, as well - * as generating a "_available" file which contains a list of all available - * items. The get callback is used to query the currently active item; the index - * of the item within the respective items array is returned via the 'item' - * parameter. The set callback is called when the attribute is updated; the - * 'item' parameter contains the index of the newly activated item within the - * respective items array. 
- */ -struct counter_device_enum_ext { - const char * const *items; - size_t num_items; - int (*get)(struct counter_device *counter, size_t *item); - int (*set)(struct counter_device *counter, size_t item); + struct counter_count *count, u64 value); + int (*function_read)(struct counter_device *counter, + struct counter_count *count, + enum counter_function *function); + int (*function_write)(struct counter_device *counter, + struct counter_count *count, + enum counter_function function); + int (*action_read)(struct counter_device *counter, + struct counter_count *count, + struct counter_synapse *synapse, + enum counter_synapse_action *action); + int (*action_write)(struct counter_device *counter, + struct counter_count *count, + struct counter_synapse *synapse, + enum counter_synapse_action action); + int (*events_configure)(struct counter_device *counter); + int (*watch_validate)(struct counter_device *counter, + const struct counter_watch *watch); }; /** - * COUNTER_DEVICE_ENUM() - Initialize Counter enum extension - * @_name: Attribute name - * @_e: Pointer to a counter_device_enum_ext structure - * - * This should usually be used together with COUNTER_DEVICE_ENUM_AVAILABLE() - */ -#define COUNTER_DEVICE_ENUM(_name, _e) \ -{ \ - .name = (_name), \ - .read = counter_device_enum_read, \ - .write = counter_device_enum_write, \ - .priv = (_e) \ -} - -/** - * COUNTER_DEVICE_ENUM_AVAILABLE() - Initialize Counter enum available extension - * @_name: Attribute name ("_available" will be appended to the name) - * @_e: Pointer to a counter_device_enum_ext structure - * - * Creates a read only attribute that lists all the available enum items in a - * newline separated list. This should usually be used together with - * COUNTER_DEVICE_ENUM() - */ -#define COUNTER_DEVICE_ENUM_AVAILABLE(_name, _e) \ -{ \ - .name = (_name "_available"), \ - .read = counter_device_enum_available_read, \ - .priv = (_e) \ -} - -/** * struct counter_device - Counter data structure - * @name: name of the device as it appears in the datasheet + * @name: name of the device * @parent: optional parent device providing the counters - * @device_state: internal device state container * @ops: callbacks from driver * @signals: array of Signals * @num_signals: number of Signals specified in @signals @@ -425,11 +288,21 @@ struct counter_device_enum_ext { * @ext: optional array of Counter device extensions * @num_ext: number of Counter device extensions specified in @ext * @priv: optional private data supplied by driver + * @dev: internal device structure + * @chrdev: internal character device structure + * @events_list: list of current watching Counter events + * @events_list_lock: lock to protect Counter events list operations + * @next_events_list: list of next watching Counter events + * @n_events_list_lock: lock to protect Counter next events list operations + * @events: queue of detected Counter events + * @events_wait: wait queue to allow blocking reads of Counter events + * @events_in_lock: lock to protect Counter events queue in operations + * @events_out_lock: lock to protect Counter events queue out operations + * @ops_exist_lock: lock to prevent use during removal */ struct counter_device { const char *name; struct device *parent; - struct counter_device_state *device_state; const struct counter_ops *ops; @@ -438,17 +311,171 @@ struct counter_device { struct counter_count *counts; size_t num_counts; - const struct counter_device_ext *ext; + struct counter_comp *ext; size_t num_ext; void *priv; + + struct device dev; + 
struct cdev chrdev; + struct list_head events_list; + spinlock_t events_list_lock; + struct list_head next_events_list; + struct mutex n_events_list_lock; + DECLARE_KFIFO_PTR(events, struct counter_event); + wait_queue_head_t events_wait; + spinlock_t events_in_lock; + struct mutex events_out_lock; + struct mutex ops_exist_lock; }; int counter_register(struct counter_device *const counter); void counter_unregister(struct counter_device *const counter); int devm_counter_register(struct device *dev, struct counter_device *const counter); -void devm_counter_unregister(struct device *dev, - struct counter_device *const counter); +void counter_push_event(struct counter_device *const counter, const u8 event, + const u8 channel); + +#define COUNTER_COMP_DEVICE_U8(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_U8, \ + .name = (_name), \ + .device_u8_read = (_read), \ + .device_u8_write = (_write), \ +} +#define COUNTER_COMP_COUNT_U8(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_U8, \ + .name = (_name), \ + .count_u8_read = (_read), \ + .count_u8_write = (_write), \ +} +#define COUNTER_COMP_SIGNAL_U8(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_U8, \ + .name = (_name), \ + .signal_u8_read = (_read), \ + .signal_u8_write = (_write), \ +} + +#define COUNTER_COMP_DEVICE_U64(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_U64, \ + .name = (_name), \ + .device_u64_read = (_read), \ + .device_u64_write = (_write), \ +} +#define COUNTER_COMP_COUNT_U64(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_U64, \ + .name = (_name), \ + .count_u64_read = (_read), \ + .count_u64_write = (_write), \ +} +#define COUNTER_COMP_SIGNAL_U64(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_U64, \ + .name = (_name), \ + .signal_u64_read = (_read), \ + .signal_u64_write = (_write), \ +} + +#define COUNTER_COMP_DEVICE_BOOL(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_BOOL, \ + .name = (_name), \ + .device_u8_read = (_read), \ + .device_u8_write = (_write), \ +} +#define COUNTER_COMP_COUNT_BOOL(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_BOOL, \ + .name = (_name), \ + .count_u8_read = (_read), \ + .count_u8_write = (_write), \ +} +#define COUNTER_COMP_SIGNAL_BOOL(_name, _read, _write) \ +{ \ + .type = COUNTER_COMP_BOOL, \ + .name = (_name), \ + .signal_u8_read = (_read), \ + .signal_u8_write = (_write), \ +} + +struct counter_available { + union { + const u32 *enums; + const char *const *strs; + }; + size_t num_items; +}; + +#define DEFINE_COUNTER_AVAILABLE(_name, _enums) \ + struct counter_available _name = { \ + .enums = (_enums), \ + .num_items = ARRAY_SIZE(_enums), \ + } + +#define DEFINE_COUNTER_ENUM(_name, _strs) \ + struct counter_available _name = { \ + .strs = (_strs), \ + .num_items = ARRAY_SIZE(_strs), \ + } + +#define COUNTER_COMP_DEVICE_ENUM(_name, _get, _set, _available) \ +{ \ + .type = COUNTER_COMP_ENUM, \ + .name = (_name), \ + .device_u32_read = (_get), \ + .device_u32_write = (_set), \ + .priv = &(_available), \ +} +#define COUNTER_COMP_COUNT_ENUM(_name, _get, _set, _available) \ +{ \ + .type = COUNTER_COMP_ENUM, \ + .name = (_name), \ + .count_u32_read = (_get), \ + .count_u32_write = (_set), \ + .priv = &(_available), \ +} +#define COUNTER_COMP_SIGNAL_ENUM(_name, _get, _set, _available) \ +{ \ + .type = COUNTER_COMP_ENUM, \ + .name = (_name), \ + .signal_u32_read = (_get), \ + .signal_u32_write = (_set), \ + .priv = &(_available), \ +} + +#define COUNTER_COMP_CEILING(_read, _write) \ + COUNTER_COMP_COUNT_U64("ceiling", _read, _write) + +#define 
COUNTER_COMP_COUNT_MODE(_read, _write, _available) \ +{ \ + .type = COUNTER_COMP_COUNT_MODE, \ + .name = "count_mode", \ + .count_u32_read = (_read), \ + .count_u32_write = (_write), \ + .priv = &(_available), \ +} + +#define COUNTER_COMP_DIRECTION(_read) \ +{ \ + .type = COUNTER_COMP_COUNT_DIRECTION, \ + .name = "direction", \ + .count_u32_read = (_read), \ +} + +#define COUNTER_COMP_ENABLE(_read, _write) \ + COUNTER_COMP_COUNT_BOOL("enable", _read, _write) + +#define COUNTER_COMP_FLOOR(_read, _write) \ + COUNTER_COMP_COUNT_U64("floor", _read, _write) + +#define COUNTER_COMP_PRESET(_read, _write) \ + COUNTER_COMP_COUNT_U64("preset", _read, _write) + +#define COUNTER_COMP_PRESET_ENABLE(_read, _write) \ + COUNTER_COMP_COUNT_BOOL("preset_enable", _read, _write) #endif /* _COUNTER_H_ */ diff --git a/include/linux/counter_enum.h b/include/linux/counter_enum.h deleted file mode 100644 index 9f917298a88f..000000000000 --- a/include/linux/counter_enum.h +++ /dev/null @@ -1,45 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Counter interface enum functions - * Copyright (C) 2018 William Breathitt Gray - */ -#ifndef _COUNTER_ENUM_H_ -#define _COUNTER_ENUM_H_ - -#include <linux/types.h> - -struct counter_device; -struct counter_signal; -struct counter_count; - -ssize_t counter_signal_enum_read(struct counter_device *counter, - struct counter_signal *signal, void *priv, - char *buf); -ssize_t counter_signal_enum_write(struct counter_device *counter, - struct counter_signal *signal, void *priv, - const char *buf, size_t len); - -ssize_t counter_signal_enum_available_read(struct counter_device *counter, - struct counter_signal *signal, - void *priv, char *buf); - -ssize_t counter_count_enum_read(struct counter_device *counter, - struct counter_count *count, void *priv, - char *buf); -ssize_t counter_count_enum_write(struct counter_device *counter, - struct counter_count *count, void *priv, - const char *buf, size_t len); - -ssize_t counter_count_enum_available_read(struct counter_device *counter, - struct counter_count *count, - void *priv, char *buf); - -ssize_t counter_device_enum_read(struct counter_device *counter, void *priv, - char *buf); -ssize_t counter_device_enum_write(struct counter_device *counter, void *priv, - const char *buf, size_t len); - -ssize_t counter_device_enum_available_read(struct counter_device *counter, - void *priv, char *buf); - -#endif /* _COUNTER_ENUM_H_ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ff88bb3e44fc..1ab29e61b078 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -119,6 +119,13 @@ struct cpufreq_policy { bool strict_target; /* + * Set if inefficient frequencies were found in the frequency table. + * This indicates if the relation flag CPUFREQ_RELATION_E can be + * honored. + */ + bool efficiencies_available; + + /* * Preferred average time interval between consecutive invocations of * the driver to set the frequency for this policy. To be set by the * scaling driver (0, which is the default, means no preference). 
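As a rough usage sketch (not part of the patch itself, and using hypothetical example_* helpers), a scaling driver could mark a frequency as inefficient and a governor could then request an efficient target via the CPUFREQ_RELATION_E flag, cpufreq_table_set_inefficient() and CPUFREQ_RELATION_LE introduced by the cpufreq.h hunks that follow:

#include <linux/cpufreq.h>
#include <linux/printk.h>

static void example_mark_inefficient(struct cpufreq_policy *policy)
{
	/*
	 * Flag 1.2 GHz (table frequencies are in kHz) as inefficient;
	 * on success this also sets policy->efficiencies_available.
	 */
	if (cpufreq_table_set_inefficient(policy, 1200000))
		pr_debug("frequency not in table or table unsorted\n");
}

static int example_pick_index(struct cpufreq_policy *policy,
			      unsigned int target_khz)
{
	/*
	 * Return the index of the lowest frequency at or above the target,
	 * skipping entries marked CPUFREQ_INEFFICIENT_FREQ when possible
	 * and falling back to the full table otherwise.
	 */
	return cpufreq_frequency_table_target(policy, target_khz,
					      CPUFREQ_RELATION_LE);
}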
@@ -273,6 +280,12 @@ static inline void cpufreq_stats_record_transition(struct cpufreq_policy *policy #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ #define CPUFREQ_RELATION_C 2 /* closest frequency to target */ +/* relation flags */ +#define CPUFREQ_RELATION_E BIT(2) /* Get if possible an efficient frequency */ + +#define CPUFREQ_RELATION_LE (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E) +#define CPUFREQ_RELATION_HE (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E) +#define CPUFREQ_RELATION_CE (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E) struct freq_attr { struct attribute attr; @@ -385,7 +398,7 @@ struct cpufreq_driver { /* flags */ /* - * Set by drivers that need to update internale upper and lower boundaries along + * Set by drivers that need to update internal upper and lower boundaries along * with the target frequency and so the core and governors should also invoke * the diver if the target frequency does not change, but the policy min or max * may have changed. @@ -627,9 +640,11 @@ struct cpufreq_governor *cpufreq_fallback_governor(void); static inline void cpufreq_policy_apply_limits(struct cpufreq_policy *policy) { if (policy->max < policy->cur) - __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); + __cpufreq_driver_target(policy, policy->max, + CPUFREQ_RELATION_HE); else if (policy->min > policy->cur) - __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); + __cpufreq_driver_target(policy, policy->min, + CPUFREQ_RELATION_LE); } /* Governor attribute set */ @@ -660,10 +675,11 @@ struct governor_attr { *********************************************************************/ /* Special Values of .frequency field */ -#define CPUFREQ_ENTRY_INVALID ~0u -#define CPUFREQ_TABLE_END ~1u +#define CPUFREQ_ENTRY_INVALID ~0u +#define CPUFREQ_TABLE_END ~1u /* Special Values of .flags field */ -#define CPUFREQ_BOOST_FREQ (1 << 0) +#define CPUFREQ_BOOST_FREQ (1 << 0) +#define CPUFREQ_INEFFICIENT_FREQ (1 << 1) struct cpufreq_frequency_table { unsigned int flags; @@ -740,6 +756,22 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, continue; \ else +/** + * cpufreq_for_each_efficient_entry_idx - iterate with index over a cpufreq + * frequency_table excluding CPUFREQ_ENTRY_INVALID and + * CPUFREQ_INEFFICIENT_FREQ frequencies. + * @pos: the &struct cpufreq_frequency_table to use as a loop cursor. + * @table: the &struct cpufreq_frequency_table to iterate over. + * @idx: the table entry currently being processed. + * @efficiencies: set to true to only iterate over efficient frequencies. 
+ */ + +#define cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) \ + cpufreq_for_each_valid_entry_idx(pos, table, idx) \ + if (efficiencies && (pos->flags & CPUFREQ_INEFFICIENT_FREQ)) \ + continue; \ + else + int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); @@ -764,14 +796,15 @@ bool policy_has_boost_freq(struct cpufreq_policy *policy); /* Find lowest freq at or above target in a table in ascending order */ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsigned int freq; int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) { freq = pos->frequency; if (freq >= target_freq) @@ -785,14 +818,15 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, /* Find lowest freq at or above target in a table in descending order */ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsigned int freq; int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) { freq = pos->frequency; if (freq == target_freq) @@ -815,26 +849,30 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, /* Works only on sorted freq-tables */ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { target_freq = clamp_val(target_freq, policy->min, policy->max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) - return cpufreq_table_find_index_al(policy, target_freq); + return cpufreq_table_find_index_al(policy, target_freq, + efficiencies); else - return cpufreq_table_find_index_dl(policy, target_freq); + return cpufreq_table_find_index_dl(policy, target_freq, + efficiencies); } /* Find highest freq at or below target in a table in ascending order */ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsigned int freq; int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) { freq = pos->frequency; if (freq == target_freq) @@ -857,14 +895,15 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, /* Find highest freq at or below target in a table in descending order */ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsigned int freq; int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) { freq = pos->frequency; if (freq <= target_freq) @@ -878,26 +917,30 @@ static inline int cpufreq_table_find_index_dh(struct 
cpufreq_policy *policy, /* Works only on sorted freq-tables */ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { target_freq = clamp_val(target_freq, policy->min, policy->max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) - return cpufreq_table_find_index_ah(policy, target_freq); + return cpufreq_table_find_index_ah(policy, target_freq, + efficiencies); else - return cpufreq_table_find_index_dh(policy, target_freq); + return cpufreq_table_find_index_dh(policy, target_freq, + efficiencies); } /* Find closest freq to target in a table in ascending order */ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsigned int freq; int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) { freq = pos->frequency; if (freq == target_freq) @@ -924,14 +967,15 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, /* Find closest freq to target in a table in descending order */ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { struct cpufreq_frequency_table *table = policy->freq_table; struct cpufreq_frequency_table *pos; unsigned int freq; int idx, best = -1; - cpufreq_for_each_valid_entry_idx(pos, table, idx) { + cpufreq_for_each_efficient_entry_idx(pos, table, idx, efficiencies) { freq = pos->frequency; if (freq == target_freq) @@ -958,35 +1002,58 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, /* Works only on sorted freq-tables */ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, - unsigned int target_freq) + unsigned int target_freq, + bool efficiencies) { target_freq = clamp_val(target_freq, policy->min, policy->max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) - return cpufreq_table_find_index_ac(policy, target_freq); + return cpufreq_table_find_index_ac(policy, target_freq, + efficiencies); else - return cpufreq_table_find_index_dc(policy, target_freq); + return cpufreq_table_find_index_dc(policy, target_freq, + efficiencies); } static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { + bool efficiencies = policy->efficiencies_available && + (relation & CPUFREQ_RELATION_E); + int idx; + + /* cpufreq_table_index_unsorted() has no use for this flag anyway */ + relation &= ~CPUFREQ_RELATION_E; + if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) return cpufreq_table_index_unsorted(policy, target_freq, relation); - +retry: switch (relation) { case CPUFREQ_RELATION_L: - return cpufreq_table_find_index_l(policy, target_freq); + idx = cpufreq_table_find_index_l(policy, target_freq, + efficiencies); + break; case CPUFREQ_RELATION_H: - return cpufreq_table_find_index_h(policy, target_freq); + idx = cpufreq_table_find_index_h(policy, target_freq, + efficiencies); + break; case CPUFREQ_RELATION_C: - return cpufreq_table_find_index_c(policy, target_freq); + idx = cpufreq_table_find_index_c(policy, target_freq, + efficiencies); + break; default: WARN_ON_ONCE(1); return 0; } + + if (idx < 0 && efficiencies) 
{ + efficiencies = false; + goto retry; + } + + return idx; } static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy *policy) @@ -1003,6 +1070,37 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy return count; } +/** + * cpufreq_table_set_inefficient() - Mark a frequency as inefficient + * @policy: the &struct cpufreq_policy containing the inefficient frequency + * @frequency: the inefficient frequency + * + * The &struct cpufreq_policy must use a sorted frequency table + * + * Return: %0 on success or a negative errno code + */ + +static inline int +cpufreq_table_set_inefficient(struct cpufreq_policy *policy, + unsigned int frequency) +{ + struct cpufreq_frequency_table *pos; + + /* Not supported */ + if (policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED) + return -EINVAL; + + cpufreq_for_each_valid_entry(pos, policy->freq_table) { + if (pos->frequency == frequency) { + pos->flags |= CPUFREQ_INEFFICIENT_FREQ; + policy->efficiencies_available = true; + return 0; + } + } + + return -EINVAL; +} + static inline int parse_perf_domain(int cpu, const char *list_name, const char *cell_name) { @@ -1041,7 +1139,7 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_ if (cpu == pcpu) continue; - ret = parse_perf_domain(pcpu, list_name, cell_name); + ret = parse_perf_domain(cpu, list_name, cell_name); if (ret < 0) continue; @@ -1071,6 +1169,13 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) return false; } +static inline int +cpufreq_table_set_inefficient(struct cpufreq_policy *policy, + unsigned int frequency) +{ + return -EINVAL; +} + static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name, const char *cell_name, struct cpumask *cpumask) { diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 991911048857..773c83730906 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -99,6 +99,7 @@ enum cpuhp_state { CPUHP_LUSTRE_CFS_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, + CPUHP_AP_DTPM_CPU_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, @@ -246,7 +247,6 @@ enum cpuhp_state { CPUHP_AP_MM_DEMOTION_ONLINE, CPUHP_AP_X86_HPET_ONLINE, CPUHP_AP_X86_KVM_CLK_ONLINE, - CPUHP_AP_DTPM_CPU_ONLINE, CPUHP_AP_ACTIVE, CPUHP_ONLINE, }; diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index d2b9c41c8edf..d58e0476ee8e 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -34,6 +34,8 @@ */ extern struct static_key_false cpusets_pre_enable_key; extern struct static_key_false cpusets_enabled_key; +extern struct static_key_false cpusets_insane_config_key; + static inline bool cpusets_enabled(void) { return static_branch_unlikely(&cpusets_enabled_key); @@ -51,6 +53,19 @@ static inline void cpuset_dec(void) static_branch_dec_cpuslocked(&cpusets_pre_enable_key); } +/* + * This will get enabled whenever a cpuset configuration is considered + * unsupportable in general. E.g. movable only node which cannot satisfy + * any non movable allocations (see update_nodemask). Page allocator + * needs to make additional checks for those configurations and this + * check is meant to guard those checks without any overhead for sane + * configurations. 
+ */ +static inline bool cpusets_insane_config(void) +{ + return static_branch_unlikely(&cpusets_insane_config_key); +} + extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); @@ -167,6 +182,8 @@ static inline void set_mems_allowed(nodemask_t nodemask) static inline bool cpusets_enabled(void) { return false; } +static inline bool cpusets_insane_config(void) { return false; } + static inline int cpuset_init(void) { return 0; } static inline void cpuset_init_smp(void) {} diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 2618577a4d6d..620821549b23 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -8,8 +8,6 @@ #include <linux/pgtable.h> #include <uapi/linux/vmcore.h> -#include <linux/pgtable.h> /* for pgprot_t */ - /* For IS_ENABLED(CONFIG_CRASH_DUMP) */ #define ELFCORE_ADDR_MAX (-1ULL) #define ELFCORE_ADDR_ERR (-2ULL) @@ -91,12 +89,32 @@ static inline void vmcore_unusable(void) elfcorehdr_addr = ELFCORE_ADDR_ERR; } -#define HAVE_OLDMEM_PFN_IS_RAM 1 -extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); -extern void unregister_oldmem_pfn_is_ram(void); +/** + * struct vmcore_cb - driver callbacks for /proc/vmcore handling + * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when + * reading the vmcore. Will return "true" if it is RAM or if the + * callback cannot tell. If any callback returns "false", it's not + * RAM and the page must not be accessed; zeroes should be + * indicated in the vmcore instead. For example, a ballooned page + * contains no data and reading from such a page will cause high + * load in the hypervisor. + * @next: List head to manage registered callbacks internally; initialized by + * register_vmcore_cb(). + * + * vmcore callbacks allow drivers managing physical memory ranges to + * coordinate with vmcore handling code, for example, to prevent accessing + * physical memory ranges that should not be accessed when reading the vmcore, + * although included in the vmcore header as memory ranges to dump. + */ +struct vmcore_cb { + bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn); + struct list_head next; +}; +extern void register_vmcore_cb(struct vmcore_cb *cb); +extern void unregister_vmcore_cb(struct vmcore_cb *cb); #else /* !CONFIG_CRASH_DUMP */ -static inline bool is_kdump_kernel(void) { return 0; } +static inline bool is_kdump_kernel(void) { return false; } #endif /* CONFIG_CRASH_DUMP */ /* Device Dump information to be filled by drivers */ diff --git a/include/linux/damon.h b/include/linux/damon.h index d68b67b8d458..b4d4be3cc987 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -14,6 +14,8 @@ /* Minimal region size. Every damon_region is aligned by this. */ #define DAMON_MIN_REGION PAGE_SIZE +/* Max priority score for DAMON-based operation schemes */ +#define DAMOS_MAX_SCORE (99) /** * struct damon_addr_range - Represents an address region of [@start, @end). @@ -31,12 +33,22 @@ struct damon_addr_range { * @sampling_addr: Address of the sample for the next access check. * @nr_accesses: Access frequency of this region. * @list: List head for siblings. + * @age: Age of this region. + * + * @age is initially zero, increased for each aggregation interval, and reset + * to zero again if the access frequency is significantly changed. If two + * regions are merged into a new region, both @nr_accesses and @age of the new + * region are set as region size-weighted average of those of the two regions. 
*/ struct damon_region { struct damon_addr_range ar; unsigned long sampling_addr; unsigned int nr_accesses; struct list_head list; + + unsigned int age; +/* private: Internal value for age calculation. */ + unsigned int last_nr_accesses; }; /** @@ -59,16 +71,180 @@ struct damon_target { struct list_head list; }; +/** + * enum damos_action - Represents an action of a Data Access Monitoring-based + * Operation Scheme. + * + * @DAMOS_WILLNEED: Call ``madvise()`` for the region with MADV_WILLNEED. + * @DAMOS_COLD: Call ``madvise()`` for the region with MADV_COLD. + * @DAMOS_PAGEOUT: Call ``madvise()`` for the region with MADV_PAGEOUT. + * @DAMOS_HUGEPAGE: Call ``madvise()`` for the region with MADV_HUGEPAGE. + * @DAMOS_NOHUGEPAGE: Call ``madvise()`` for the region with MADV_NOHUGEPAGE. + * @DAMOS_STAT: Do nothing but count the stat. + */ +enum damos_action { + DAMOS_WILLNEED, + DAMOS_COLD, + DAMOS_PAGEOUT, + DAMOS_HUGEPAGE, + DAMOS_NOHUGEPAGE, + DAMOS_STAT, /* Do nothing but only record the stat */ +}; + +/** + * struct damos_quota - Controls the aggressiveness of the given scheme. + * @ms: Maximum milliseconds that the scheme can use. + * @sz: Maximum bytes of memory that the action can be applied. + * @reset_interval: Charge reset interval in milliseconds. + * + * @weight_sz: Weight of the region's size for prioritization. + * @weight_nr_accesses: Weight of the region's nr_accesses for prioritization. + * @weight_age: Weight of the region's age for prioritization. + * + * To avoid consuming too much CPU time or IO resources for applying the + * &struct damos->action to large memory, DAMON allows users to set time and/or + * size quotas. The quotas can be set by writing non-zero values to &ms and + * &sz, respectively. If the time quota is set, DAMON tries to use only up to + * &ms milliseconds within &reset_interval for applying the action. If the + * size quota is set, DAMON tries to apply the action only up to &sz bytes + * within &reset_interval. + * + * Internally, the time quota is transformed to a size quota using estimated + * throughput of the scheme's action. DAMON then compares it against &sz and + * uses smaller one as the effective quota. + * + * For selecting regions within the quota, DAMON prioritizes current scheme's + * target memory regions using the &struct damon_primitive->get_scheme_score. + * You could customize the prioritization logic by setting &weight_sz, + * &weight_nr_accesses, and &weight_age, because monitoring primitives are + * encouraged to respect those. + */ +struct damos_quota { + unsigned long ms; + unsigned long sz; + unsigned long reset_interval; + + unsigned int weight_sz; + unsigned int weight_nr_accesses; + unsigned int weight_age; + +/* private: */ + /* For throughput estimation */ + unsigned long total_charged_sz; + unsigned long total_charged_ns; + + unsigned long esz; /* Effective size quota in bytes */ + + /* For charging the quota */ + unsigned long charged_sz; + unsigned long charged_from; + struct damon_target *charge_target_from; + unsigned long charge_addr_from; + + /* For prioritization */ + unsigned long histogram[DAMOS_MAX_SCORE + 1]; + unsigned int min_score; +}; + +/** + * enum damos_wmark_metric - Represents the watermark metric. + * + * @DAMOS_WMARK_NONE: Ignore the watermarks of the given scheme. + * @DAMOS_WMARK_FREE_MEM_RATE: Free memory rate of the system in [0,1000]. 
+ */ +enum damos_wmark_metric { + DAMOS_WMARK_NONE, + DAMOS_WMARK_FREE_MEM_RATE, +}; + +/** + * struct damos_watermarks - Controls when a given scheme should be activated. + * @metric: Metric for the watermarks. + * @interval: Watermarks check time interval in microseconds. + * @high: High watermark. + * @mid: Middle watermark. + * @low: Low watermark. + * + * If &metric is &DAMOS_WMARK_NONE, the scheme is always active. Being active + * means DAMON does monitoring and applying the action of the scheme to + * appropriate memory regions. Else, DAMON checks &metric of the system at + * least every &interval microseconds and works as below. + * + * If &metric is higher than &high, the scheme is inactivated. If &metric is + * between &mid and &low, the scheme is activated. If &metric is lower than + * &low, the scheme is inactivated. + */ +struct damos_watermarks { + enum damos_wmark_metric metric; + unsigned long interval; + unsigned long high; + unsigned long mid; + unsigned long low; + +/* private: */ + bool activated; +}; + +/** + * struct damos - Represents a Data Access Monitoring-based Operation Scheme. + * @min_sz_region: Minimum size of target regions. + * @max_sz_region: Maximum size of target regions. + * @min_nr_accesses: Minimum ``->nr_accesses`` of target regions. + * @max_nr_accesses: Maximum ``->nr_accesses`` of target regions. + * @min_age_region: Minimum age of target regions. + * @max_age_region: Maximum age of target regions. + * @action: &damos_action to be applied to the target regions. + * @quota: Controls the aggressiveness of this scheme. + * @wmarks: Watermarks for automated (in)activation of this scheme. + * @stat_count: Total number of regions that this scheme is applied to. + * @stat_sz: Total size of regions that this scheme is applied to. + * @list: List head for siblings. + * + * For each aggregation interval, DAMON finds regions which fit in the + * condition (&min_sz_region, &max_sz_region, &min_nr_accesses, + * &max_nr_accesses, &min_age_region, &max_age_region) and applies &action to + * those. To avoid consuming too much CPU time or IO resources for the + * &action, &quota is used. + * + * To do the work only when needed, schemes can be activated for specific + * system situations using &wmarks. If all schemes that registered to the + * monitoring context are inactive, DAMON stops monitoring and just + * repeatedly checks the watermarks. + * + * After applying the &action to each region, &stat_count and &stat_sz are + * updated to reflect the number of regions and total size of regions that the + * &action is applied to. + */ +struct damos { + unsigned long min_sz_region; + unsigned long max_sz_region; + unsigned int min_nr_accesses; + unsigned int max_nr_accesses; + unsigned int min_age_region; + unsigned int max_age_region; + enum damos_action action; + struct damos_quota quota; + struct damos_watermarks wmarks; + unsigned long stat_count; + unsigned long stat_sz; + struct list_head list; +}; + struct damon_ctx; /** - * struct damon_primitive Monitoring primitives for given use cases. + * struct damon_primitive - Monitoring primitives for given use cases. * * @init: Initialize primitive-internal data structures. * @update: Update primitive-internal data structures. * @prepare_access_checks: Prepare next access check of target regions. * @check_accesses: Check the accesses to target regions.
* @reset_aggregated: Reset aggregated accesses monitoring results. + * @get_scheme_score: Get the score of a region for a scheme. + * @apply_scheme: Apply a DAMON-based operation scheme. * @target_valid: Determine if the target is valid. * @cleanup: Clean up the context. * @@ -94,6 +270,11 @@ struct damon_ctx; * of its update. The value will be used for regions adjustment threshold. * @reset_aggregated should reset the access monitoring results that aggregated * by @check_accesses. + * @get_scheme_score should return the priority score of a region for a scheme + * as an integer in [0, &DAMOS_MAX_SCORE]. + * @apply_scheme is called from @kdamond when a region for user provided + * DAMON-based operation scheme is found. It should apply the scheme's action + * to the region. This is not used for &DAMON_ARBITRARY_TARGET case. * @target_valid should check whether the target is still valid for the * monitoring. * @cleanup is called from @kdamond just before its termination. @@ -104,12 +285,17 @@ struct damon_primitive { void (*prepare_access_checks)(struct damon_ctx *context); unsigned int (*check_accesses)(struct damon_ctx *context); void (*reset_aggregated)(struct damon_ctx *context); + int (*get_scheme_score)(struct damon_ctx *context, + struct damon_target *t, struct damon_region *r, + struct damos *scheme); + int (*apply_scheme)(struct damon_ctx *context, struct damon_target *t, + struct damon_region *r, struct damos *scheme); bool (*target_valid)(void *target); void (*cleanup)(struct damon_ctx *context); }; -/* - * struct damon_callback Monitoring events notification callbacks. +/** + * struct damon_callback - Monitoring events notification callbacks. * * @before_start: Called before starting the monitoring. * @after_sampling: Called after each sampling. @@ -136,7 +322,7 @@ struct damon_callback { int (*before_start)(struct damon_ctx *context); int (*after_sampling)(struct damon_ctx *context); int (*after_aggregation)(struct damon_ctx *context); - int (*before_terminate)(struct damon_ctx *context); + void (*before_terminate)(struct damon_ctx *context); }; /** @@ -182,6 +368,7 @@ struct damon_callback { * @min_nr_regions: The minimum number of adaptive monitoring regions. * @max_nr_regions: The maximum number of adaptive monitoring regions. * @adaptive_targets: Head of monitoring targets (&damon_target) list. + * @schemes: Head of schemes (&damos) list. 
*/ struct damon_ctx { unsigned long sample_interval; @@ -194,7 +381,6 @@ struct damon_ctx { /* public: */ struct task_struct *kdamond; - bool kdamond_stop; struct mutex kdamond_lock; struct damon_primitive primitive; @@ -203,6 +389,7 @@ struct damon_ctx { unsigned long min_nr_regions; unsigned long max_nr_regions; struct list_head adaptive_targets; + struct list_head schemes; }; #define damon_next_region(r) \ @@ -211,6 +398,9 @@ struct damon_ctx { #define damon_prev_region(r) \ (container_of(r->list.prev, struct damon_region, list)) +#define damon_last_region(t) \ + (list_last_entry(&t->regions_list, struct damon_region, list)) + #define damon_for_each_region(r, t) \ list_for_each_entry(r, &t->regions_list, list) @@ -223,6 +413,12 @@ struct damon_ctx { #define damon_for_each_target_safe(t, next, ctx) \ list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list) +#define damon_for_each_scheme(s, ctx) \ + list_for_each_entry(s, &(ctx)->schemes, list) + +#define damon_for_each_scheme_safe(s, next, ctx) \ + list_for_each_entry_safe(s, next, &(ctx)->schemes, list) + #ifdef CONFIG_DAMON struct damon_region *damon_new_region(unsigned long start, unsigned long end); @@ -232,8 +428,18 @@ inline void damon_insert_region(struct damon_region *r, void damon_add_region(struct damon_region *r, struct damon_target *t); void damon_destroy_region(struct damon_region *r, struct damon_target *t); +struct damos *damon_new_scheme( + unsigned long min_sz_region, unsigned long max_sz_region, + unsigned int min_nr_accesses, unsigned int max_nr_accesses, + unsigned int min_age_region, unsigned int max_age_region, + enum damos_action action, struct damos_quota *quota, + struct damos_watermarks *wmarks); +void damon_add_scheme(struct damon_ctx *ctx, struct damos *s); +void damon_destroy_scheme(struct damos *s); + struct damon_target *damon_new_target(unsigned long id); void damon_add_target(struct damon_ctx *ctx, struct damon_target *t); +bool damon_targets_empty(struct damon_ctx *ctx); void damon_free_target(struct damon_target *t); void damon_destroy_target(struct damon_target *t); unsigned int damon_nr_regions(struct damon_target *t); @@ -245,6 +451,8 @@ int damon_set_targets(struct damon_ctx *ctx, int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, unsigned long aggr_int, unsigned long primitive_upd_int, unsigned long min_nr_reg, unsigned long max_nr_reg); +int damon_set_schemes(struct damon_ctx *ctx, + struct damos **schemes, ssize_t nr_schemes); int damon_nr_running_ctxs(void); int damon_start(struct damon_ctx **ctxs, int nr_ctxs); @@ -261,8 +469,26 @@ void damon_va_prepare_access_checks(struct damon_ctx *ctx); unsigned int damon_va_check_accesses(struct damon_ctx *ctx); bool damon_va_target_valid(void *t); void damon_va_cleanup(struct damon_ctx *ctx); +int damon_va_apply_scheme(struct damon_ctx *context, struct damon_target *t, + struct damon_region *r, struct damos *scheme); +int damon_va_scheme_score(struct damon_ctx *context, struct damon_target *t, + struct damon_region *r, struct damos *scheme); void damon_va_set_primitives(struct damon_ctx *ctx); #endif /* CONFIG_DAMON_VADDR */ +#ifdef CONFIG_DAMON_PADDR + +/* Monitoring primitives for the physical memory address space */ +void damon_pa_prepare_access_checks(struct damon_ctx *ctx); +unsigned int damon_pa_check_accesses(struct damon_ctx *ctx); +bool damon_pa_target_valid(void *t); +int damon_pa_apply_scheme(struct damon_ctx *context, struct damon_target *t, + struct damon_region *r, struct damos *scheme); +int 
damon_pa_scheme_score(struct damon_ctx *context, struct damon_target *t, + struct damon_region *r, struct damos *scheme); +void damon_pa_set_primitives(struct damon_ctx *ctx); + +#endif /* CONFIG_DAMON_PADDR */ + #endif /* _DAMON_H */ diff --git a/include/linux/dax.h b/include/linux/dax.h index 2619d94c308d..8623caa67388 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -38,8 +38,6 @@ struct dax_operations { int (*zero_page_range)(struct dax_device *, pgoff_t, size_t); }; -extern struct attribute_group dax_attribute_group; - #if IS_ENABLED(CONFIG_DAX) struct dax_device *alloc_dax(void *private, const char *host, const struct dax_operations *ops, unsigned long flags); diff --git a/include/linux/delay.h b/include/linux/delay.h index 1d0e2ce6b6d9..8eacf67eb212 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -19,7 +19,7 @@ * https://lists.openwall.net/linux-kernel/2011/01/09/56 */ -#include <linux/kernel.h> +#include <linux/math.h> extern unsigned long loops_per_jiffy; diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h index 062777a45a74..a039ab809753 100644 --- a/include/linux/device/bus.h +++ b/include/linux/device/bus.h @@ -143,6 +143,7 @@ int device_match_of_node(struct device *dev, const void *np); int device_match_fwnode(struct device *dev, const void *fwnode); int device_match_devt(struct device *dev, const void *pdevt); int device_match_acpi_dev(struct device *dev, const void *adev); +int device_match_acpi_handle(struct device *dev, const void *handle); int device_match_any(struct device *dev, const void *unused); /* iterator helpers for buses */ diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8b32b4bdd590..42a323a73c61 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -86,8 +86,8 @@ struct dma_buf_ops { * @pin: * * This is called by dma_buf_pin() and lets the exporter know that the - * DMA-buf can't be moved any more. The exporter should pin the buffer - * into system memory to make sure it is generally accessible by other + * DMA-buf can't be moved any more. Ideally, the exporter should + * pin the buffer so that it is generally accessible by all * devices. * * This is called with the &dmabuf.resv object locked and is mutual @@ -420,6 +420,13 @@ struct dma_buf { * - Dynamic importers should set fences for any access that they can't * disable immediately from their &dma_buf_attach_ops.move_notify * callback. + * + * IMPORTANT: + * + * All drivers must obey the struct dma_resv rules, specifically the + * rules for updating fences, see &dma_resv.fence_excl and + * &dma_resv.fence. If these dependency rules are broken access tracking + * can be lost resulting in use after free issues. */ struct dma_resv *resv; @@ -433,7 +440,7 @@ struct dma_buf { wait_queue_head_t *poll; __poll_t active; - } cb_excl, cb_shared; + } cb_in, cb_out; #ifdef CONFIG_DMABUF_SYSFS_STATS /** * @sysfs_entry: diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 6ffb4b2c6371..a706b7bf51d7 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -214,19 +214,15 @@ struct dma_fence_ops { * Custom wait implementation, defaults to dma_fence_default_wait() if * not set. * - * The dma_fence_default_wait implementation should work for any fence, as long - * as @enable_signaling works correctly. This hook allows drivers to - * have an optimized version for the case where a process context is - * already available, e.g. if @enable_signaling for the general case - * needs to set up a worker thread. 
+ * Deprecated and should not be used by new implementations. Only used + * by existing implementations which need special handling for their + * hardware reset procedure. * * Must return -ERESTARTSYS if the wait is intr = true and the wait was * interrupted, and remaining jiffies if fence has signaled, or 0 if wait * timed out. Can also return other error values on custom implementations, * which should be treated as if the fence is signaled. For example a hardware * lockup could be reported like that. - * - * This callback is optional. */ signed long (*wait)(struct dma_fence *fence, bool intr, signed long timeout); @@ -590,26 +586,4 @@ struct dma_fence *dma_fence_get_stub(void); struct dma_fence *dma_fence_allocate_private_stub(void); u64 dma_fence_context_alloc(unsigned num); -#define DMA_FENCE_TRACE(f, fmt, args...) \ - do { \ - struct dma_fence *__ff = (f); \ - if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ - pr_info("f %llu#%llu: " fmt, \ - __ff->context, __ff->seqno, ##args); \ - } while (0) - -#define DMA_FENCE_WARN(f, fmt, args...) \ - do { \ - struct dma_fence *__ff = (f); \ - pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ - ##args); \ - } while (0) - -#define DMA_FENCE_ERR(f, fmt, args...) \ - do { \ - struct dma_fence *__ff = (f); \ - pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ - ##args); \ - } while (0) - #endif /* __LINUX_DMA_FENCE_H */ diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 39fefb86780b..dbd235ab447f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -62,19 +62,211 @@ struct dma_resv_list { /** * struct dma_resv - a reservation object manages fences for a buffer - * @lock: update side lock - * @seq: sequence count for managing RCU read-side synchronization - * @fence_excl: the exclusive fence, if there is one currently - * @fence: list of current shared fences + * + * There are multiple uses for this, with sometimes slightly different rules in + * how the fence slots are used. + * + * One use is to synchronize cross-driver access to a struct dma_buf, either for + * dynamic buffer management or just to handle implicit synchronization between + * different users of the buffer in userspace. See &dma_buf.resv for a more + * in-depth discussion. + * + * The other major use is to manage access and locking within a driver in a + * buffer based memory manager. struct ttm_buffer_object is the canonical + * example here, since this is where reservation objects originated from. But + * use in drivers is spreading and some drivers also manage struct + * drm_gem_object with the same scheme. */ struct dma_resv { + /** + * @lock: + * + * Update side lock. Don't use directly, instead use the wrapper + * functions like dma_resv_lock() and dma_resv_unlock(). + * + * Drivers which use the reservation object to manage memory dynamically + * also use this lock to protect buffer object state like placement, + * allocation policies or throughout command submission. + */ struct ww_mutex lock; + + /** + * @seq: + * + * Sequence count for managing RCU read-side synchronization, allows + * read-only access to @fence_excl and @fence while ensuring we take a + * consistent snapshot. + */ seqcount_ww_mutex_t seq; + /** + * @fence_excl: + * + * The exclusive fence, if there is one currently. + * + * There are two ways to update this fence: + * + * - First by calling dma_resv_add_excl_fence(), which replaces all + * fences attached to the reservation object. 
To guarantee that no + * fences are lost, this new fence must signal only after all previous + * fences, both shared and exclusive, have signalled. In some cases it + * is convenient to achieve that by attaching a struct dma_fence_array + * with all the new and old fences. + * + * - Alternatively the fence can be set directly, which leaves the + * shared fences unchanged. To guarantee that no fences are lost, this + * new fence must signal only after the previous exclusive fence has + * signalled. Since the shared fences are staying intact, it is not + * necessary to maintain any ordering against those. If semantically + * only a new access is added without actually treating the previous + * one as a dependency the exclusive fences can be strung together + * using struct dma_fence_chain. + * + * Note that actual semantics of what an exclusive or shared fence mean + * is defined by the user, for reservation objects shared across drivers + * see &dma_buf.resv. + */ struct dma_fence __rcu *fence_excl; + + /** + * @fence: + * + * List of current shared fences. + * + * There are no ordering constraints of shared fences against the + * exclusive fence slot. If a waiter needs to wait for all access, it + * has to wait for both sets of fences to signal. + * + * A new fence is added by calling dma_resv_add_shared_fence(). Since + * this often needs to be done past the point of no return in command + * submission it cannot fail, and therefore sufficient slots need to be + * reserved by calling dma_resv_reserve_shared(). + * + * Note that actual semantics of what an exclusive or shared fence mean + * is defined by the user, for reservation objects shared across drivers + * see &dma_buf.resv. + */ struct dma_resv_list __rcu *fence; }; +/** + * struct dma_resv_iter - current position into the dma_resv fences + * + * Don't touch this directly in the driver, use the accessor function instead. 
+ */ +struct dma_resv_iter { + /** @obj: The dma_resv object we iterate over */ + struct dma_resv *obj; + + /** @all_fences: If all fences should be returned */ + bool all_fences; + + /** @fence: the currently handled fence */ + struct dma_fence *fence; + + /** @seq: sequence number to check for modifications */ + unsigned int seq; + + /** @index: index into the shared fences */ + unsigned int index; + + /** @fences: the shared fences; private, *MUST* not dereference */ + struct dma_resv_list *fences; + + /** @shared_count: number of shared fences */ + unsigned int shared_count; + + /** @is_restarted: true if this is the first returned fence */ + bool is_restarted; +}; + +struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor); +struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor); +struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor); +struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor); + +/** + * dma_resv_iter_begin - initialize a dma_resv_iter object + * @cursor: The dma_resv_iter object to initialize + * @obj: The dma_resv object which we want to iterate over + * @all_fences: If all fences should be returned or just the exclusive one + */ +static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor, + struct dma_resv *obj, + bool all_fences) +{ + cursor->obj = obj; + cursor->all_fences = all_fences; + cursor->fence = NULL; +} + +/** + * dma_resv_iter_end - cleanup a dma_resv_iter object + * @cursor: the dma_resv_iter object which should be cleaned up + * + * Make sure that the reference to the fence in the cursor is properly + * dropped. + */ +static inline void dma_resv_iter_end(struct dma_resv_iter *cursor) +{ + dma_fence_put(cursor->fence); +} + +/** + * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one + * @cursor: the cursor of the current position + * + * Returns true if the currently returned fence is the exclusive one. + */ +static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor) +{ + return cursor->index == 0; +} + +/** + * dma_resv_iter_is_restarted - test if this is the first fence after a restart + * @cursor: the cursor with the current position + * + * Return true if this is the first fence in an iteration after a restart. + */ +static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) +{ + return cursor->is_restarted; +} + +/** + * dma_resv_for_each_fence_unlocked - unlocked fence iterator + * @cursor: a struct dma_resv_iter pointer + * @fence: the current fence + * + * Iterate over the fences in a struct dma_resv object without holding the + * &dma_resv.lock and using RCU instead. The cursor needs to be initialized + * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside + * the iterator a reference to the dma_fence is held and the RCU lock dropped. + * When the dma_resv is modified the iteration starts over again. + */ +#define dma_resv_for_each_fence_unlocked(cursor, fence) \ + for (fence = dma_resv_iter_first_unlocked(cursor); \ + fence; fence = dma_resv_iter_next_unlocked(cursor)) + +/** + * dma_resv_for_each_fence - fence iterator + * @cursor: a struct dma_resv_iter pointer + * @obj: a dma_resv object pointer + * @all_fences: true if all fences should be returned + * @fence: the current fence + * + * Iterate over the fences in a struct dma_resv object while holding the + * &dma_resv.lock. @all_fences controls if the shared fences are returned as + * well. 
The cursor initialisation is part of the iterator and the fence stays + * valid as long as the lock is held and so no extra reference to the fence is + * taken. + */ +#define dma_resv_for_each_fence(cursor, obj, all_fences, fence) \ + for (dma_resv_iter_begin(cursor, obj, all_fences), \ + fence = dma_resv_iter_first(cursor); fence; \ + fence = dma_resv_iter_next(cursor)) + #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base) #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base) @@ -98,6 +290,13 @@ static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {} * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. + * + * When a die situation is indicated by returning -EDEADLK all locks held by + * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj. + * + * Unlocked by calling dma_resv_unlock(). + * + * See also dma_resv_lock_interruptible() for the interruptible variant. */ static inline int dma_resv_lock(struct dma_resv *obj, struct ww_acquire_ctx *ctx) @@ -119,6 +318,12 @@ static inline int dma_resv_lock(struct dma_resv *obj, * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation * object may be locked by itself by passing NULL as @ctx. + * + * When a die situation is indicated by returning -EDEADLK all locks held by + * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on + * @obj. + * + * Unlocked by calling dma_resv_unlock(). */ static inline int dma_resv_lock_interruptible(struct dma_resv *obj, struct ww_acquire_ctx *ctx) @@ -134,6 +339,8 @@ static inline int dma_resv_lock_interruptible(struct dma_resv *obj, * Acquires the reservation object after a die case. This function * will sleep until the lock becomes available. See dma_resv_lock() as * well. + * + * See also dma_resv_lock_slow_interruptible() for the interruptible variant. */ static inline void dma_resv_lock_slow(struct dma_resv *obj, struct ww_acquire_ctx *ctx) @@ -167,7 +374,7 @@ static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj, * if they overlap with a writer. * * Also note that since no context is provided, no deadlock protection is - * possible. + * possible, which is also not needed for a trylock. * * Returns true if the lock was acquired, false otherwise. */ @@ -193,6 +400,11 @@ static inline bool dma_resv_is_locked(struct dma_resv *obj) * * Returns the context used to lock a reservation object or NULL if no context * was used or the object is not locked at all. + * + * WARNING: This interface is pretty horrible, but TTM needs it because it + * doesn't pass the struct ww_acquire_ctx around in some very long callchains. + * Everyone else just uses it to check whether they're holding a reservation or + * not. 
*/ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj) { diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index e5c2c9e71bf1..9000f3ffce8b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -944,10 +944,8 @@ struct dma_device { void (*device_issue_pending)(struct dma_chan *chan); void (*device_release)(struct dma_device *dev); /* debugfs support */ -#ifdef CONFIG_DEBUG_FS void (*dbg_summary_show)(struct seq_file *s, struct dma_device *dev); struct dentry *dbg_dev_root; -#endif }; static inline int dmaengine_slave_config(struct dma_chan *chan, diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e04436a7ff27..45e903d84733 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -131,6 +131,14 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg) return 0; } +#ifdef CONFIG_DMAR_DEBUG +void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, + unsigned long long addr, u32 pasid); +#else +static inline void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, + unsigned long long addr, u32 pasid) {} +#endif + #ifdef CONFIG_INTEL_IOMMU extern int iommu_detected, no_iommu; extern int intel_iommu_init(void); diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h index d42010cf5468..7ee708ad7df2 100644 --- a/include/linux/dsa/ocelot.h +++ b/include/linux/dsa/ocelot.h @@ -12,6 +12,7 @@ struct ocelot_skb_cb { struct sk_buff *clone; unsigned int ptp_class; /* valid only for clones */ + u32 tstamp_lo; u8 ptp_cmd; u8 ts_id; }; diff --git a/include/linux/dtpm.h b/include/linux/dtpm.h index e80a332e3d8a..2890f6370eb9 100644 --- a/include/linux/dtpm.h +++ b/include/linux/dtpm.h @@ -23,34 +23,32 @@ struct dtpm { u64 power_max; u64 power_min; int weight; - void *private; }; struct dtpm_ops { u64 (*set_power_uw)(struct dtpm *, u64); u64 (*get_power_uw)(struct dtpm *); + int (*update_power_uw)(struct dtpm *); void (*release)(struct dtpm *); }; -struct dtpm_descr; - -typedef int (*dtpm_init_t)(struct dtpm_descr *); +typedef int (*dtpm_init_t)(void); struct dtpm_descr { - struct dtpm *parent; - const char *name; dtpm_init_t init; }; /* Init section thermal table */ -extern struct dtpm_descr *__dtpm_table[]; -extern struct dtpm_descr *__dtpm_table_end[]; +extern struct dtpm_descr __dtpm_table[]; +extern struct dtpm_descr __dtpm_table_end[]; -#define DTPM_TABLE_ENTRY(name) \ - static typeof(name) *__dtpm_table_entry_##name \ - __used __section("__dtpm_table") = &name +#define DTPM_TABLE_ENTRY(name, __init) \ + static struct dtpm_descr __dtpm_table_entry_##name \ + __used __section("__dtpm_table") = { \ + .init = __init, \ + } -#define DTPM_DECLARE(name) DTPM_TABLE_ENTRY(name) +#define DTPM_DECLARE(name, init) DTPM_TABLE_ENTRY(name, init) #define for_each_dtpm_table(__dtpm) \ for (__dtpm = __dtpm_table; \ @@ -62,11 +60,11 @@ static inline struct dtpm *to_dtpm(struct powercap_zone *zone) return container_of(zone, struct dtpm, zone); } -int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max); +int dtpm_update_power(struct dtpm *dtpm); int dtpm_release_zone(struct powercap_zone *pcz); -struct dtpm *dtpm_alloc(struct dtpm_ops *ops); +void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops); void dtpm_unregister(struct dtpm *dtpm); diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 39dcadd492b5..6377adc3b78d 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -17,19 +17,30 @@ * device). 
It can be a total power: static and dynamic. * @cost: The cost coefficient associated with this level, used during * energy calculation. Equal to: power * max_frequency / frequency + * @flags: see "em_perf_state flags" description below. */ struct em_perf_state { unsigned long frequency; unsigned long power; unsigned long cost; + unsigned long flags; }; +/* + * em_perf_state flags: + * + * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is + * in this em_perf_domain, another performance state with a higher frequency + * but a lower or equal power cost. Such inefficient states are ignored when + * using em_pd_get_efficient_*() functions. + */ +#define EM_PERF_STATE_INEFFICIENT BIT(0) + /** * struct em_perf_domain - Performance domain * @table: List of performance states, in ascending order * @nr_perf_states: Number of performance states - * @milliwatts: Flag indicating the power values are in milli-Watts - * or some other scale. + * @flags: See "em_perf_domain flags" * @cpus: Cpumask covering the CPUs of the domain. It's here * for performance reasons to avoid potential cache * misses during energy calculations in the scheduler @@ -44,10 +55,22 @@ struct em_perf_state { struct em_perf_domain { struct em_perf_state *table; int nr_perf_states; - int milliwatts; + unsigned long flags; unsigned long cpus[]; }; +/* + * em_perf_domain flags: + * + * EM_PERF_DOMAIN_MILLIWATTS: The power values are in milli-Watts or some + * other scale. + * + * EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating + * energy consumption. + */ +#define EM_PERF_DOMAIN_MILLIWATTS BIT(0) +#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1) + #define em_span_cpus(em) (to_cpumask((em)->cpus)) #ifdef CONFIG_ENERGY_MODEL @@ -102,6 +125,37 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states, void em_dev_unregister_perf_domain(struct device *dev); /** + * em_pd_get_efficient_state() - Get an efficient performance state from the EM + * @pd : Performance domain for which we want an efficient frequency + * @freq : Frequency to map with the EM + * + * It is called from the scheduler code quite frequently and as a consequence + * doesn't implement any check. + * + * Return: An efficient performance state, high enough to meet @freq + * requirement. + */ +static inline +struct em_perf_state *em_pd_get_efficient_state(struct em_perf_domain *pd, + unsigned long freq) +{ + struct em_perf_state *ps; + int i; + + for (i = 0; i < pd->nr_perf_states; i++) { + ps = &pd->table[i]; + if (ps->frequency >= freq) { + if (pd->flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES && + ps->flags & EM_PERF_STATE_INEFFICIENT) + continue; + break; + } + } + + return ps; +} + +/** * em_cpu_energy() - Estimates the energy consumed by the CPUs of a * performance domain * @pd : performance domain for which energy has to be estimated @@ -123,7 +177,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, { unsigned long freq, scale_cpu; struct em_perf_state *ps; - int i, cpu; + int cpu; if (!sum_util) return 0; @@ -148,11 +202,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, * Find the lowest performance state of the Energy Model above the * requested frequency. 
*/ - for (i = 0; i < pd->nr_perf_states; i++) { - ps = &pd->table[i]; - if (ps->frequency >= freq) - break; - } + ps = em_pd_get_efficient_state(pd, freq); /* * The capacity of a CPU in the domain at the performance state (ps) diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h index 1e7bf78cb382..aba348d58ff6 100644 --- a/include/linux/ethtool_netlink.h +++ b/include/linux/ethtool_netlink.h @@ -10,6 +10,9 @@ #define __ETHTOOL_LINK_MODE_MASK_NWORDS \ DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32) +#define ETHTOOL_PAUSE_STAT_CNT (__ETHTOOL_A_PAUSE_STAT_CNT - \ + ETHTOOL_A_PAUSE_STAT_TX_FRAMES) + enum ethtool_multicast_groups { ETHNL_MCGRP_MONITOR, }; diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index eec3b7c40811..616af2ea20f3 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -84,13 +84,20 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ */ #define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) +/* Events that can be reported with event->fd */ +#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS) + /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ #define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \ FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF) +/* Events that can only be reported with data type FSNOTIFY_EVENT_ERROR */ +#define FANOTIFY_ERROR_EVENTS (FAN_FS_ERROR) + /* Events that user can request to be notified on */ #define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \ - FANOTIFY_INODE_EVENTS) + FANOTIFY_INODE_EVENTS | \ + FANOTIFY_ERROR_EVENTS) /* Events that require a permission response from user */ #define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ diff --git a/include/linux/fb.h b/include/linux/fb.h index 5950f8f5dc74..6f3db99ab990 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -262,7 +262,7 @@ struct fb_ops { /* Draws a rectangle */ void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect); - /* Copy data from area to another */ + /* Copy data from area to another. Obsolete. 
*/ void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region); /* Draws a image to the display */ void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image); diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 25109192cebe..3b057dfc8284 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -20,23 +20,19 @@ struct firmware { struct module; struct device; -struct builtin_fw { - char *name; - void *data; - unsigned long size; -}; - -/* We have to play tricks here much like stringify() to get the - __COUNTER__ macro to be expanded as we want it */ -#define __fw_concat1(x, y) x##y -#define __fw_concat(x, y) __fw_concat1(x, y) - -#define DECLARE_BUILTIN_FIRMWARE(name, blob) \ - DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob)) - -#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ - static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ - __used __section(".builtin_fw") = { name, blob, size } +/* + * Built-in firmware functionality is only available if FW_LOADER=y, but not + * FW_LOADER=m + */ +#ifdef CONFIG_FW_LOADER +bool firmware_request_builtin(struct firmware *fw, const char *name); +#else +static inline bool firmware_request_builtin(struct firmware *fw, + const char *name) +{ + return false; +} +#endif #if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) int request_firmware(const struct firmware **fw, const char *name, diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h new file mode 100644 index 000000000000..9ad9eaaaa552 --- /dev/null +++ b/include/linux/firmware/cirrus/cs_dsp.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * cs_dsp.h -- Cirrus Logic DSP firmware support + * + * Based on sound/soc/codecs/wm_adsp.h + * + * Copyright 2012 Wolfson Microelectronics plc + * Copyright (C) 2015-2021 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. 
+ */ +#ifndef __CS_DSP_H +#define __CS_DSP_H + +#define CS_ADSP2_REGION_0 BIT(0) +#define CS_ADSP2_REGION_1 BIT(1) +#define CS_ADSP2_REGION_2 BIT(2) +#define CS_ADSP2_REGION_3 BIT(3) +#define CS_ADSP2_REGION_4 BIT(4) +#define CS_ADSP2_REGION_5 BIT(5) +#define CS_ADSP2_REGION_6 BIT(6) +#define CS_ADSP2_REGION_7 BIT(7) +#define CS_ADSP2_REGION_8 BIT(8) +#define CS_ADSP2_REGION_9 BIT(9) +#define CS_ADSP2_REGION_1_9 (CS_ADSP2_REGION_1 | \ + CS_ADSP2_REGION_2 | CS_ADSP2_REGION_3 | \ + CS_ADSP2_REGION_4 | CS_ADSP2_REGION_5 | \ + CS_ADSP2_REGION_6 | CS_ADSP2_REGION_7 | \ + CS_ADSP2_REGION_8 | CS_ADSP2_REGION_9) +#define CS_ADSP2_REGION_ALL (CS_ADSP2_REGION_0 | CS_ADSP2_REGION_1_9) + +#define CS_DSP_DATA_WORD_SIZE 3 + +#define CS_DSP_ACKED_CTL_TIMEOUT_MS 100 +#define CS_DSP_ACKED_CTL_N_QUICKPOLLS 10 +#define CS_DSP_ACKED_CTL_MIN_VALUE 0 +#define CS_DSP_ACKED_CTL_MAX_VALUE 0xFFFFFF + +/** + * struct cs_dsp_region - Describes a logical memory region in DSP address space + * @type: Memory region type + * @base: Address of region + */ +struct cs_dsp_region { + int type; + unsigned int base; +}; + +/** + * struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space + * @list: List node for internal use + * @alg: Algorithm id + * @type: Memory region type + * @base: Address of region + */ +struct cs_dsp_alg_region { + struct list_head list; + unsigned int alg; + int type; + unsigned int base; +}; + +/** + * struct cs_dsp_coeff_ctl - Describes a coefficient control + * @fw_name: Name of the firmware + * @subname: Name of the control parsed from the WMFW + * @subname_len: Length of subname + * @alg_region: Logical region associated with this control + * @dsp: DSP instance associated with this control + * @enabled: Flag indicating whether control is enabled + * @list: List node for internal use + * @cache: Cached value of the control + * @offset: Offset of control within alg_region + * @len: Length of the cached value + * @set: Flag indicating the value has been written by the user + * @flags: Bitfield of WMFW_CTL_FLAG_ control flags defined in wmfw.h + * @type: One of the WMFW_CTL_TYPE_ control types defined in wmfw.h + * @priv: For use by the client + */ +struct cs_dsp_coeff_ctl { + const char *fw_name; + /* Subname is needed to match with firmware */ + const char *subname; + unsigned int subname_len; + struct cs_dsp_alg_region alg_region; + struct cs_dsp *dsp; + unsigned int enabled:1; + struct list_head list; + void *cache; + unsigned int offset; + size_t len; + unsigned int set:1; + unsigned int flags; + unsigned int type; + + void *priv; +}; + +struct cs_dsp_ops; +struct cs_dsp_client_ops; + +/** + * struct cs_dsp - Configuration and state of a Cirrus Logic DSP + * @name: The name of the DSP instance + * @rev: Revision of the DSP + * @num: DSP instance number + * @type: Type of DSP + * @dev: Driver model representation of the device + * @regmap: Register map of the device + * @ops: Function pointers for internal callbacks + * @client_ops: Function pointers for client callbacks + * @base: Address of the DSP registers + * @base_sysinfo: Address of the sysinfo register (Halo only) + * @sysclk_reg: Address of the sysclk register (ADSP1 only) + * @sysclk_mask: Mask of frequency bits within sysclk register (ADSP1 only) + * @sysclk_shift: Shift of frequency bits within sysclk register (ADSP1 only) + * @alg_regions: List of currently loaded algorithm regions + * @fw_file_name: Filename of the current firmware + * @fw_name: Name of the current firmware + * @fw_id: ID of the current 
firmware, obtained from the wmfw + * @fw_id_version: Version of the firmware, obtained from the wmfw + * @fw_vendor_id: Vendor of the firmware, obtained from the wmfw + * @mem: DSP memory region descriptions + * @num_mems: Number of memory regions in this DSP + * @fw_ver: Version of the wmfw file format + * @booted: Flag indicating DSP has been configured + * @running: Flag indicating DSP is executing firmware + * @ctl_list: Controls defined within the loaded DSP firmware + * @lock_regions: Enable MPU traps on specified memory regions + * @pwr_lock: Lock used to serialize accesses + * @debugfs_root: Debugfs directory for this DSP instance + * @wmfw_file_name: Filename of the currently loaded firmware + * @bin_file_name: Filename of the currently loaded coefficients + */ +struct cs_dsp { + const char *name; + int rev; + int num; + int type; + struct device *dev; + struct regmap *regmap; + + const struct cs_dsp_ops *ops; + const struct cs_dsp_client_ops *client_ops; + + unsigned int base; + unsigned int base_sysinfo; + unsigned int sysclk_reg; + unsigned int sysclk_mask; + unsigned int sysclk_shift; + + struct list_head alg_regions; + + const char *fw_name; + unsigned int fw_id; + unsigned int fw_id_version; + unsigned int fw_vendor_id; + + const struct cs_dsp_region *mem; + int num_mems; + + int fw_ver; + + bool booted; + bool running; + + struct list_head ctl_list; + + struct mutex pwr_lock; + + unsigned int lock_regions; + +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs_root; + char *wmfw_file_name; + char *bin_file_name; +#endif +}; + +/** + * struct cs_dsp_client_ops - client callbacks + * @control_add: Called under the pwr_lock when a control is created + * @control_remove: Called under the pwr_lock when a control is destroyed + * @post_run: Called under the pwr_lock by cs_dsp_run() + * @post_stop: Called under the pwr_lock by cs_dsp_stop() + * @watchdog_expired: Called when a watchdog expiry is detected + * + * These callbacks give the cs_dsp client an opportunity to respond to events + * or to perform actions atomically. 
+ */ +struct cs_dsp_client_ops { + int (*control_add)(struct cs_dsp_coeff_ctl *ctl); + void (*control_remove)(struct cs_dsp_coeff_ctl *ctl); + int (*post_run)(struct cs_dsp *dsp); + void (*post_stop)(struct cs_dsp *dsp); + void (*watchdog_expired)(struct cs_dsp *dsp); +}; + +int cs_dsp_adsp1_init(struct cs_dsp *dsp); +int cs_dsp_adsp2_init(struct cs_dsp *dsp); +int cs_dsp_halo_init(struct cs_dsp *dsp); + +int cs_dsp_adsp1_power_up(struct cs_dsp *dsp, + const struct firmware *wmfw_firmware, char *wmfw_filename, + const struct firmware *coeff_firmware, char *coeff_filename, + const char *fw_name); +void cs_dsp_adsp1_power_down(struct cs_dsp *dsp); +int cs_dsp_power_up(struct cs_dsp *dsp, + const struct firmware *wmfw_firmware, char *wmfw_filename, + const struct firmware *coeff_firmware, char *coeff_filename, + const char *fw_name); +void cs_dsp_power_down(struct cs_dsp *dsp); +int cs_dsp_run(struct cs_dsp *dsp); +void cs_dsp_stop(struct cs_dsp *dsp); + +void cs_dsp_remove(struct cs_dsp *dsp); + +int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq); +void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp); +void cs_dsp_halo_bus_error(struct cs_dsp *dsp); +void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp); + +void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root); +void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp); + +int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id); +int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len); +int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len); +struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type, + unsigned int alg); + +int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, + unsigned int num_words, __be32 *data); +int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data); +int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data); +void cs_dsp_remove_padding(u32 *buf, int nwords); + +struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp, + int type, unsigned int id); + +const char *cs_dsp_mem_region_name(unsigned int type); + +#endif diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h new file mode 100644 index 000000000000..a19bf7c6fc8b --- /dev/null +++ b/include/linux/firmware/cirrus/wmfw.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * wmfw.h - Wolfson firmware format information + * + * Copyright 2012 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + */ + +#ifndef __WMFW_H +#define __WMFW_H + +#include <linux/types.h> + +#define WMFW_MAX_ALG_NAME 256 +#define WMFW_MAX_ALG_DESCR_NAME 256 + +#define WMFW_MAX_COEFF_NAME 256 +#define WMFW_MAX_COEFF_DESCR_NAME 256 + +#define WMFW_CTL_FLAG_SYS 0x8000 +#define WMFW_CTL_FLAG_VOLATILE 0x0004 +#define WMFW_CTL_FLAG_WRITEABLE 0x0002 +#define WMFW_CTL_FLAG_READABLE 0x0001 + +#define WMFW_CTL_TYPE_BYTES 0x0004 /* byte control */ + +/* Non-ALSA coefficient types start at 0x1000 */ +#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */ +#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */ +#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */ + +struct wmfw_header { + char magic[4]; + __le32 len; + __le16 rev; + u8 core; + u8 ver; +} __packed; + +struct wmfw_footer { + __le64 timestamp; + __le32 checksum; +} __packed; + +struct 
wmfw_adsp1_sizes { + __le32 dm; + __le32 pm; + __le32 zm; +} __packed; + +struct wmfw_adsp2_sizes { + __le32 xm; + __le32 ym; + __le32 pm; + __le32 zm; +} __packed; + +struct wmfw_region { + union { + __be32 type; + __le32 offset; + }; + __le32 len; + u8 data[]; +} __packed; + +struct wmfw_id_hdr { + __be32 core_id; + __be32 core_rev; + __be32 id; + __be32 ver; +} __packed; + +struct wmfw_v3_id_hdr { + __be32 core_id; + __be32 block_rev; + __be32 vendor_id; + __be32 id; + __be32 ver; +} __packed; + +struct wmfw_adsp1_id_hdr { + struct wmfw_id_hdr fw; + __be32 zm; + __be32 dm; + __be32 n_algs; +} __packed; + +struct wmfw_adsp2_id_hdr { + struct wmfw_id_hdr fw; + __be32 zm; + __be32 xm; + __be32 ym; + __be32 n_algs; +} __packed; + +struct wmfw_halo_id_hdr { + struct wmfw_v3_id_hdr fw; + __be32 xm_base; + __be32 xm_size; + __be32 ym_base; + __be32 ym_size; + __be32 n_algs; +} __packed; + +struct wmfw_alg_hdr { + __be32 id; + __be32 ver; +} __packed; + +struct wmfw_adsp1_alg_hdr { + struct wmfw_alg_hdr alg; + __be32 zm; + __be32 dm; +} __packed; + +struct wmfw_adsp2_alg_hdr { + struct wmfw_alg_hdr alg; + __be32 zm; + __be32 xm; + __be32 ym; +} __packed; + +struct wmfw_halo_alg_hdr { + struct wmfw_alg_hdr alg; + __be32 xm_base; + __be32 xm_size; + __be32 ym_base; + __be32 ym_size; +} __packed; + +struct wmfw_adsp_alg_data { + __le32 id; + u8 name[WMFW_MAX_ALG_NAME]; + u8 descr[WMFW_MAX_ALG_DESCR_NAME]; + __le32 ncoeff; + u8 data[]; +} __packed; + +struct wmfw_adsp_coeff_data { + struct { + __le16 offset; + __le16 type; + __le32 size; + } hdr; + u8 name[WMFW_MAX_COEFF_NAME]; + u8 descr[WMFW_MAX_COEFF_DESCR_NAME]; + __le16 ctl_type; + __le16 flags; + __le32 len; + u8 data[]; +} __packed; + +struct wmfw_coeff_hdr { + u8 magic[4]; + __le32 len; + union { + __be32 rev; + __le32 ver; + }; + union { + __be32 core; + __le32 core_ver; + }; + u8 data[]; +} __packed; + +struct wmfw_coeff_item { + __le16 offset; + __le16 type; + __le32 id; + __le32 ver; + __le32 sr; + __le32 len; + u8 data[]; +} __packed; + +#define WMFW_ADSP1 1 +#define WMFW_ADSP2 2 +#define WMFW_HALO 4 + +#define WMFW_ABSOLUTE 0xf0 +#define WMFW_ALGORITHM_DATA 0xf2 +#define WMFW_METADATA 0xfc +#define WMFW_NAME_TEXT 0xfe +#define WMFW_INFO_TEXT 0xff + +#define WMFW_ADSP1_PM 2 +#define WMFW_ADSP1_DM 3 +#define WMFW_ADSP1_ZM 4 + +#define WMFW_ADSP2_PM 2 +#define WMFW_ADSP2_ZM 4 +#define WMFW_ADSP2_XM 5 +#define WMFW_ADSP2_YM 6 + +#define WMFW_HALO_PM_PACKED 0x10 +#define WMFW_HALO_XM_PACKED 0x11 +#define WMFW_HALO_YM_PACKED 0x12 + +#endif diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 4c70a6e2141e..47fd4e52a423 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -72,6 +72,8 @@ enum pm_api_id { PM_SET_REQUIREMENT = 15, PM_RESET_ASSERT = 17, PM_RESET_GET_STATUS = 18, + PM_MMIO_WRITE = 19, + PM_MMIO_READ = 20, PM_PM_INIT_FINALIZE = 21, PM_FPGA_LOAD = 22, PM_FPGA_GET_STATUS = 23, @@ -397,6 +399,8 @@ int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select); int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, const enum zynqmp_pm_reset_action assert_flag); int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status); +unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode); +int zynqmp_pm_bootmode_write(u32 ps_mode); int zynqmp_pm_init_finalize(void); int zynqmp_pm_set_suspend_mode(u32 mode); int zynqmp_pm_request_node(const u32 node, const u32 capabilities, @@ -532,6 +536,16 @@ static inline int zynqmp_pm_reset_get_status(const enum 
zynqmp_pm_reset reset, return -ENODEV; } +static inline unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_bootmode_write(u32 ps_mode) +{ + return -ENODEV; +} + static inline int zynqmp_pm_init_finalize(void) { return -ENODEV; diff --git a/include/linux/fs.h b/include/linux/fs.h index f3cfca5edc9a..1cb616fc1105 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1440,6 +1440,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_UNTRUSTED_MOUNTER 0x00000040 #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ +#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ /* Possible states of 'frozen' field */ enum { @@ -3192,6 +3193,7 @@ static inline void remove_inode_hash(struct inode *inode) } extern void inode_sb_list_add(struct inode *inode); +extern void inode_add_lru(struct inode *inode); extern int sb_set_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int); @@ -3383,6 +3385,8 @@ extern int simple_open(struct inode *inode, struct file *file); extern int simple_link(struct dentry *, struct inode *, struct dentry *); extern int simple_unlink(struct inode *, struct dentry *); extern int simple_rmdir(struct inode *, struct dentry *); +extern int simple_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); extern int simple_rename(struct user_namespace *, struct inode *, struct dentry *, struct inode *, struct dentry *, unsigned int); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index a4dab5998613..3b2282c157f7 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -167,7 +167,7 @@ struct fscache_cookie { static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie) { - return test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); + return fscache_cookie_valid(cookie) && test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags); } /* diff --git a/include/linux/fsi-occ.h b/include/linux/fsi-occ.h index d4cdc2aa6e33..7ee3dbd7f4b3 100644 --- a/include/linux/fsi-occ.h +++ b/include/linux/fsi-occ.h @@ -19,6 +19,8 @@ struct device; #define OCC_RESP_CRIT_OCB 0xE3 #define OCC_RESP_CRIT_HW 0xE4 +#define OCC_MAX_RESP_WORDS 2048 + int fsi_occ_submit(struct device *dev, const void *request, size_t req_len, void *response, size_t *resp_len); diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 30ece3ae6df7..e026f6c48b49 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -620,6 +620,20 @@ int dpcon_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); +int fsl_mc_obj_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int obj_id, + char *obj_type, + u16 *token); + +int fsl_mc_obj_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int fsl_mc_obj_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + /** * struct dpcon_attr - Structure representing DPCON attributes * @id: DPCON object ID diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 12d3a7d308ab..787545e87eeb 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -26,20 +26,20 @@ * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only * the child is interested and not the parent. 
*/ -static inline void fsnotify_name(struct inode *dir, __u32 mask, - struct inode *child, - const struct qstr *name, u32 cookie) +static inline int fsnotify_name(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *name, + u32 cookie) { if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0) - return; + return 0; - fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); + return fsnotify(mask, data, data_type, dir, name, NULL, cookie); } static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, __u32 mask) { - fsnotify_name(dir, mask, d_inode(dentry), &dentry->d_name, 0); + fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0); } static inline void fsnotify_inode(struct inode *inode, __u32 mask) @@ -86,7 +86,7 @@ notify_child: */ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) { - fsnotify_parent(dentry, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE); + fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY); } static inline int fsnotify_file(struct file *file, __u32 mask) @@ -154,8 +154,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, new_dir_mask |= FS_ISDIR; } - fsnotify_name(old_dir, old_dir_mask, source, old_name, fs_cookie); - fsnotify_name(new_dir, new_dir_mask, source, new_name, fs_cookie); + fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE, + old_dir, old_name, fs_cookie); + fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE, + new_dir, new_name, fs_cookie); if (target) fsnotify_link_count(target); @@ -190,16 +192,22 @@ static inline void fsnotify_inoderemove(struct inode *inode) /* * fsnotify_create - 'name' was linked in + * + * Caller must make sure that dentry->d_name is stable. + * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate + * ->d_inode later */ -static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) +static inline void fsnotify_create(struct inode *dir, struct dentry *dentry) { - audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); + audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify_dirent(inode, dentry, FS_CREATE); + fsnotify_dirent(dir, dentry, FS_CREATE); } /* * fsnotify_link - new hardlink in 'inode' directory + * + * Caller must make sure that new_dentry->d_name is stable. * Note: We have to pass also the linked inode ptr as some filesystems leave * new_dentry->d_inode NULL and instantiate inode pointer later */ @@ -209,7 +217,8 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, fsnotify_link_count(inode); audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify_name(dir, FS_CREATE, inode, &new_dentry->d_name, 0); + fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE, + dir, &new_dentry->d_name, 0); } /* @@ -227,12 +236,16 @@ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) /* * fsnotify_mkdir - directory 'name' was created + * + * Caller must make sure that dentry->d_name is stable. + * Note: some filesystems (e.g. 
kernfs) leave @dentry negative and instantiate + * ->d_inode later */ -static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) +static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry) { - audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); + audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE); - fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR); + fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR); } /* @@ -326,4 +339,17 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) fsnotify_dentry(dentry, mask); } +static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode, + int error) +{ + struct fs_error_report report = { + .error = error, + .inode = inode, + .sb = sb, + }; + + return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR, + NULL, NULL, NULL, 0); +} + #endif /* _LINUX_FS_NOTIFY_H */ diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1ce66748a2d2..51ef2b079bfa 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -19,6 +19,7 @@ #include <linux/atomic.h> #include <linux/user_namespace.h> #include <linux/refcount.h> +#include <linux/mempool.h> /* * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily @@ -42,6 +43,12 @@ #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ +#define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */ + +/* + * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify + * which does not support FS_ERROR. + */ #define FS_IN_IGNORED 0x00008000 /* last inotify event here */ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ @@ -95,7 +102,8 @@ #define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ FS_EVENTS_POSS_ON_CHILD | \ FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \ - FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED) + FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ + FS_ERROR) /* Extra flags that may be reported with event or control handling of events */ #define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ @@ -136,6 +144,7 @@ struct mem_cgroup; * @dir: optional directory associated with event - * if @file_name is not NULL, this is the directory that * @file_name is relative to. + * Either @inode or @dir must be non-NULL. 
* @file_name: optional file name associated with event * @cookie: inotify rename cookie * @@ -155,7 +164,7 @@ struct fsnotify_ops { const struct qstr *file_name, u32 cookie); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); - void (*free_event)(struct fsnotify_event *event); + void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event); /* called on final put+free to free memory */ void (*free_mark)(struct fsnotify_mark *mark); }; @@ -238,6 +247,7 @@ struct fsnotify_group { int flags; /* flags from fanotify_init() */ int f_flags; /* event_f_flags from fanotify_init() */ struct ucounts *ucounts; + mempool_t error_events_pool; } fanotify_data; #endif /* CONFIG_FANOTIFY */ }; @@ -248,6 +258,14 @@ enum fsnotify_data_type { FSNOTIFY_EVENT_NONE, FSNOTIFY_EVENT_PATH, FSNOTIFY_EVENT_INODE, + FSNOTIFY_EVENT_DENTRY, + FSNOTIFY_EVENT_ERROR, +}; + +struct fs_error_report { + int error; + struct inode *inode; + struct super_block *sb; }; static inline struct inode *fsnotify_data_inode(const void *data, int data_type) @@ -255,8 +273,25 @@ static inline struct inode *fsnotify_data_inode(const void *data, int data_type) switch (data_type) { case FSNOTIFY_EVENT_INODE: return (struct inode *)data; + case FSNOTIFY_EVENT_DENTRY: + return d_inode(data); case FSNOTIFY_EVENT_PATH: return d_inode(((const struct path *)data)->dentry); + case FSNOTIFY_EVENT_ERROR: + return ((struct fs_error_report *)data)->inode; + default: + return NULL; + } +} + +static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_DENTRY: + /* Non const is needed for dget() */ + return (struct dentry *)data; + case FSNOTIFY_EVENT_PATH: + return ((const struct path *)data)->dentry; default: return NULL; } @@ -273,6 +308,35 @@ static inline const struct path *fsnotify_data_path(const void *data, } } +static inline struct super_block *fsnotify_data_sb(const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_INODE: + return ((struct inode *)data)->i_sb; + case FSNOTIFY_EVENT_DENTRY: + return ((struct dentry *)data)->d_sb; + case FSNOTIFY_EVENT_PATH: + return ((const struct path *)data)->dentry->d_sb; + case FSNOTIFY_EVENT_ERROR: + return ((struct fs_error_report *) data)->sb; + default: + return NULL; + } +} + +static inline struct fs_error_report *fsnotify_data_error_report( + const void *data, + int data_type) +{ + switch (data_type) { + case FSNOTIFY_EVENT_ERROR: + return (struct fs_error_report *) data; + default: + return NULL; + } +} + enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_INODE, FSNOTIFY_OBJ_TYPE_PARENT, @@ -482,16 +546,30 @@ extern int fsnotify_fasync(int fd, struct file *file, int on); extern void fsnotify_destroy_event(struct fsnotify_group *group, struct fsnotify_event *event); /* attach the event to the group notification queue */ -extern int fsnotify_add_event(struct fsnotify_group *group, - struct fsnotify_event *event, - int (*merge)(struct fsnotify_group *, - struct fsnotify_event *), - void (*insert)(struct fsnotify_group *, - struct fsnotify_event *)); +extern int fsnotify_insert_event(struct fsnotify_group *group, + struct fsnotify_event *event, + int (*merge)(struct fsnotify_group *, + struct fsnotify_event *), + void (*insert)(struct fsnotify_group *, + struct fsnotify_event *)); + +static inline int fsnotify_add_event(struct fsnotify_group *group, + struct fsnotify_event *event, + int (*merge)(struct fsnotify_group *, 
+ struct fsnotify_event *)) +{ + return fsnotify_insert_event(group, event, merge, NULL); +} + /* Queue overflow event to a notification group */ static inline void fsnotify_queue_overflow(struct fsnotify_group *group) { - fsnotify_add_event(group, group->overflow_event, NULL, NULL); + fsnotify_add_event(group, group->overflow_event, NULL); +} + +static inline bool fsnotify_is_overflow_event(u32 mask) +{ + return mask & FS_Q_OVERFLOW; } static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group) diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 9f4ad719bfe3..3a532ba66f6c 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -11,6 +11,7 @@ #include <linux/types.h> #include <linux/list.h> +#include <linux/bits.h> #include <linux/err.h> struct fwnode_operations; diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index bfd00320c7f3..107613f7d792 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -38,8 +38,9 @@ #include <asm/page.h> #include <linux/bug.h> -#include <linux/kernel.h> #include <linux/log2.h> +#include <linux/math.h> +#include <linux/types.h> struct genradix_root; diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 59eabbc3a36b..462634b4b48f 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -250,7 +250,7 @@ static inline sector_t bdev_nr_sectors(struct block_device *bdev) static inline loff_t bdev_nr_bytes(struct block_device *bdev) { - return bdev_nr_sectors(bdev) << SECTOR_SHIFT; + return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT; } static inline sector_t get_capacity(struct gendisk *disk) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 3745efd21cf6..b976c4177299 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -531,6 +531,10 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, struct list_head *page_list, struct page **page_array); +unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, + unsigned long nr_pages, + struct page **page_array); + /* Bulk allocate order-0 pages */ static inline unsigned long alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) @@ -618,9 +622,9 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); -void *alloc_pages_exact(size_t size, gfp_t gfp_mask); +void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1); void free_pages_exact(void *virt, size_t size); -void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); +__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(1); #define __get_free_page(gfp_mask) \ __get_free_pages((gfp_mask), 0) diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index a0f9901dcae6..a673a359e20b 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -168,11 +168,18 @@ struct gpio_irq_chip { /** * @parent_handler_data: + * @parent_handler_data_array: * * Data associated, and passed to, the handler for the parent - * interrupt. + * interrupt. Can either be a single pointer if @per_parent_data + * is false, or an array of @num_parents pointers otherwise. If + * @per_parent_data is true, @parent_handler_data_array cannot be + * NULL. 
*/ - void *parent_handler_data; + union { + void *parent_handler_data; + void **parent_handler_data_array; + }; /** * @num_parents: @@ -204,6 +211,14 @@ struct gpio_irq_chip { bool threaded; /** + * @per_parent_data: + * + * True if parent_handler_data_array describes a @num_parents + * sized array to be used as parent data. + */ + bool per_parent_data; + + /** * @init_hw: optional routine to initialize hardware before * an IRQ chip will be added. This is quite useful when * a particular driver wants to clear IRQ related registers diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 27cdd715c5f9..25aff0f2ed0b 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -180,9 +180,9 @@ static inline void invalidate_kernel_vmap_range(void *vaddr, int size) #ifndef clear_user_highpage static inline void clear_user_highpage(struct page *page, unsigned long vaddr) { - void *addr = kmap_atomic(page); + void *addr = kmap_local_page(page); clear_user_page(addr, vaddr, page); - kunmap_atomic(addr); + kunmap_local(addr); } #endif @@ -214,9 +214,9 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma, static inline void clear_highpage(struct page *page) { - void *kaddr = kmap_atomic(page); + void *kaddr = kmap_local_page(page); clear_page(kaddr); - kunmap_atomic(kaddr); + kunmap_local(kaddr); } #ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE @@ -239,7 +239,7 @@ static inline void zero_user_segments(struct page *page, unsigned start1, unsigned end1, unsigned start2, unsigned end2) { - void *kaddr = kmap_atomic(page); + void *kaddr = kmap_local_page(page); unsigned int i; BUG_ON(end1 > page_size(page) || end2 > page_size(page)); @@ -250,7 +250,7 @@ static inline void zero_user_segments(struct page *page, if (end2 > start2) memset(kaddr + start2, 0, end2 - start2); - kunmap_atomic(kaddr); + kunmap_local(kaddr); for (i = 0; i < compound_nr(page); i++) flush_dcache_page(page + i); } @@ -275,11 +275,11 @@ static inline void copy_user_highpage(struct page *to, struct page *from, { char *vfrom, *vto; - vfrom = kmap_atomic(from); - vto = kmap_atomic(to); + vfrom = kmap_local_page(from); + vto = kmap_local_page(to); copy_user_page(vto, vfrom, vaddr, to); - kunmap_atomic(vto); - kunmap_atomic(vfrom); + kunmap_local(vto); + kunmap_local(vfrom); } #endif @@ -290,11 +290,11 @@ static inline void copy_highpage(struct page *to, struct page *from) { char *vfrom, *vto; - vfrom = kmap_atomic(from); - vto = kmap_atomic(to); + vfrom = kmap_local_page(from); + vto = kmap_local_page(to); copy_page(vto, vfrom); - kunmap_atomic(vto); - kunmap_atomic(vfrom); + kunmap_local(vto); + kunmap_local(vfrom); } #endif diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 1faebe1cd0ed..00351ccb49a3 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -124,6 +124,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, void hugepage_put_subpool(struct hugepage_subpool *spool); void reset_vma_resv_huge_pages(struct vm_area_struct *vma); +void clear_vma_resv_huge_pages(struct vm_area_struct *vma); int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *, loff_t *); @@ -132,6 +133,10 @@ int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *, int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *); +int move_hugetlb_page_tables(struct vm_area_struct *vma, + struct vm_area_struct *new_vma, + unsigned 
long old_addr, unsigned long new_addr, + unsigned long len); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, @@ -143,9 +148,6 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page); -void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct page *ref_page); void hugetlb_report_meminfo(struct seq_file *); int hugetlb_report_node_meminfo(char *buf, int len, int nid); void hugetlb_show_meminfo(void); @@ -218,6 +220,10 @@ static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { } +static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma) +{ +} + static inline unsigned long hugetlb_total_pages(void) { return 0; @@ -265,6 +271,16 @@ static inline int copy_hugetlb_page_range(struct mm_struct *dst, return 0; } +static inline int move_hugetlb_page_tables(struct vm_area_struct *vma, + struct vm_area_struct *new_vma, + unsigned long old_addr, + unsigned long new_addr, + unsigned long len) +{ + BUG(); + return 0; +} + static inline void hugetlb_report_meminfo(struct seq_file *m) { } @@ -385,13 +401,6 @@ static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb, BUG(); } -static inline void __unmap_hugepage_range(struct mmu_gather *tlb, - struct vm_area_struct *vma, unsigned long start, - unsigned long end, struct page *ref_page) -{ - BUG(); -} - static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) @@ -468,8 +477,7 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, - struct ucounts **ucounts, int creat_flags, - int page_size_log); + int creat_flags, int page_size_log); static inline bool is_file_hugepages(struct file *file) { @@ -488,8 +496,7 @@ static inline struct hstate *hstate_inode(struct inode *i) #define is_file_hugepages(file) false static inline struct file * hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, - struct ucounts **ucounts, int creat_flags, - int page_size_log) + int creat_flags, int page_size_log) { return ERR_PTR(-ENOSYS); } @@ -596,6 +603,7 @@ struct hstate { int next_nid_to_alloc; int next_nid_to_free; unsigned int order; + unsigned int demote_order; unsigned long mask; unsigned long max_huge_pages; unsigned long nr_huge_pages; @@ -605,6 +613,7 @@ struct hstate { unsigned long nr_overcommit_huge_pages; struct list_head hugepage_activelist; struct list_head hugepage_freelists[MAX_NUMNODES]; + unsigned int max_huge_pages_node[MAX_NUMNODES]; unsigned int nr_huge_pages_node[MAX_NUMNODES]; unsigned int free_huge_pages_node[MAX_NUMNODES]; unsigned int surplus_huge_pages_node[MAX_NUMNODES]; @@ -637,8 +646,9 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, unsigned long address, struct page *page); /* arch callback */ -int __init __alloc_bootmem_huge_page(struct hstate *h); -int __init alloc_bootmem_huge_page(struct hstate *h); +int __init __alloc_bootmem_huge_page(struct hstate *h, int nid); +int __init alloc_bootmem_huge_page(struct hstate *h, int nid); +bool __init 
hugetlb_node_alloc_supported(void); void __init hugetlb_add_hstate(unsigned order); bool __init arch_hugetlb_valid_size(unsigned long size); diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index ddc8713ce57b..b823311eac79 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -803,6 +803,12 @@ struct vmbus_device { #define VMBUS_DEFAULT_MAX_PKT_SIZE 4096 +struct vmbus_gpadl { + u32 gpadl_handle; + u32 size; + void *buffer; +}; + struct vmbus_channel { struct list_head listentry; @@ -822,7 +828,7 @@ struct vmbus_channel { bool rescind_ref; /* got rescind msg, got channel reference */ struct completion rescind_event; - u32 ringbuffer_gpadlhandle; + struct vmbus_gpadl ringbuffer_gpadlhandle; /* Allocated memory for ring buffer */ struct page *ringbuffer_page; @@ -1100,19 +1106,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel, void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel, void (*chn_rescind_cb)(struct vmbus_channel *)); -/* - * Check if sub-channels have already been offerred. This API will be useful - * when the driver is unloaded after establishing sub-channels. In this case, - * when the driver is re-loaded, the driver would have to check if the - * subchannels have already been established before attempting to request - * the creation of sub-channels. - * This function returns TRUE to indicate that subchannels have already been - * created. - * This function should be invoked after setting the callback function for - * sub-channel creation. - */ -bool vmbus_are_subchannels_present(struct vmbus_channel *primary); - /* The format must be the same as struct vmdata_gpa_direct */ struct vmbus_channel_packet_page_buffer { u16 type; @@ -1192,10 +1185,10 @@ extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, extern int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer, u32 size, - u32 *gpadl_handle); + struct vmbus_gpadl *gpadl); extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, - u32 gpadl_handle); + struct vmbus_gpadl *gpadl); void vmbus_reset_channel_cb(struct vmbus_channel *channel); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 2ce3efbe9198..16119ac1aa97 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -11,6 +11,7 @@ #define _LINUX_I2C_H #include <linux/acpi.h> /* for acpi_handle */ +#include <linux/bits.h> #include <linux/mod_devicetable.h> #include <linux/device.h> /* for struct device */ #include <linux/sched.h> /* for completion */ @@ -223,6 +224,15 @@ enum i2c_alert_protocol { }; /** + * enum i2c_driver_flags - Flags for an I2C device driver + * + * @I2C_DRV_ACPI_WAIVE_D0_PROBE: Don't put the device in D0 state for probe + */ +enum i2c_driver_flags { + I2C_DRV_ACPI_WAIVE_D0_PROBE = BIT(0), +}; + +/** * struct i2c_driver - represent an I2C device driver * @class: What kind of i2c device we instantiate (for detect) * @probe: Callback for device binding - soon to be deprecated @@ -236,6 +246,7 @@ enum i2c_alert_protocol { * @detect: Callback for device detection * @address_list: The I2C addresses to probe (for detect) * @clients: List of detected clients we created (for i2c-core use only) + * @flags: A bitmask of flags defined in &enum i2c_driver_flags * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver. 
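A note on the new I2C driver flag: the i2c.h hunks just above and below add a flags word to struct i2c_driver together with the i2c_acpi_waive_d0_probe() helper, so an ACPI-enumerated client can ask not to be forced into D0 just for probing. The sketch below shows how a driver might opt in; it is illustrative only, and every cam_sensor_* name is hypothetical rather than part of this patch.

/* Hypothetical ACPI camera-sensor client that waives the D0-for-probe
 * requirement and powers the part up only when it is actually used.
 */
#include <linux/i2c.h>
#include <linux/module.h>

static int cam_sensor_probe(struct i2c_client *client)
{
	/* Assumption: the core consults the same flag during probe; the
	 * helper only reports whether the waiver applies to this device.
	 */
	if (i2c_acpi_waive_d0_probe(&client->dev))
		dev_dbg(&client->dev, "probing without forcing D0\n");

	/* defer the real power-up until the sensor starts streaming */
	return 0;
}

static struct i2c_driver cam_sensor_driver = {
	.driver = {
		.name = "cam-sensor",
	},
	.probe_new = cam_sensor_probe,
	.flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(cam_sensor_driver);

MODULE_LICENSE("GPL");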
@@ -294,6 +305,8 @@ struct i2c_driver { int (*detect)(struct i2c_client *client, struct i2c_board_info *info); const unsigned short *address_list; struct list_head clients; + + u32 flags; }; #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) @@ -1015,6 +1028,7 @@ u32 i2c_acpi_find_bus_speed(struct device *dev); struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct i2c_board_info *info); struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle); +bool i2c_acpi_waive_d0_probe(struct device *dev); #else static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c) @@ -1038,6 +1052,10 @@ static inline struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle ha { return NULL; } +static inline bool i2c_acpi_waive_d0_probe(struct device *dev) +{ + return false; +} #endif /* CONFIG_ACPI */ #endif /* _LINUX_I2C_H */ diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h index b6928ac5c63d..418b1307d3f2 100644 --- a/include/linux/iio/buffer.h +++ b/include/linux/iio/buffer.h @@ -11,8 +11,15 @@ struct iio_buffer; +enum iio_buffer_direction { + IIO_BUFFER_DIRECTION_IN, + IIO_BUFFER_DIRECTION_OUT, +}; + int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data); +int iio_pop_from_buffer(struct iio_buffer *buffer, void *data); + /** * iio_push_to_buffers_with_timestamp() - push data and timestamp to buffers * @indio_dev: iio_dev structure for device. @@ -38,6 +45,10 @@ static inline int iio_push_to_buffers_with_timestamp(struct iio_dev *indio_dev, return iio_push_to_buffers(indio_dev, data); } +int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev, + const void *data, size_t data_sz, + int64_t timestamp); + bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, const unsigned long *mask); diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index 245b32918ae1..e2ca8ea23e19 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -7,6 +7,7 @@ #ifdef CONFIG_IIO_BUFFER #include <uapi/linux/iio/buffer.h> +#include <linux/iio/buffer.h> struct iio_dev; struct iio_buffer; @@ -23,6 +24,10 @@ struct iio_buffer; * @read: try to get a specified number of bytes (must exist) * @data_available: indicates how much data is available for reading from * the buffer. + * @remove_from: remove scan from buffer. Drivers should call this to + * remove a scan from a buffer. + * @write: try to write a number of bytes + * @space_available: returns the number of bytes available in a buffer * @request_update: if a parameter change has been marked, update underlying * storage. * @set_bytes_per_datum:set number of bytes per datum @@ -49,6 +54,9 @@ struct iio_buffer_access_funcs { int (*store_to)(struct iio_buffer *buffer, const void *data); int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf); size_t (*data_available)(struct iio_buffer *buffer); + int (*remove_from)(struct iio_buffer *buffer, void *data); + int (*write)(struct iio_buffer *buffer, size_t n, const char __user *buf); + size_t (*space_available)(struct iio_buffer *buffer); int (*request_update)(struct iio_buffer *buffer); @@ -80,6 +88,9 @@ struct iio_buffer { /** @bytes_per_datum: Size of individual datum including timestamp. */ size_t bytes_per_datum; + /* @direction: Direction of the data stream (in/out). */ + enum iio_buffer_direction direction; + /** * @access: Buffer access functions associated with the * implementation.
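The iio buffer.h/buffer_impl.h changes just above introduce output buffers: a buffer may now be IIO_BUFFER_DIRECTION_OUT, userspace queues scans into it, and the driver drains them with iio_pop_from_buffer(), backed by the new remove_from/write/space_available callbacks. Below is a rough sketch of the draining side, assuming a hypothetical DAC whose scan is a single 16-bit sample; the my_dac_* names are invented for illustration.

/* Sketch only: pull one queued scan per FIFO-empty interrupt from an
 * output-direction buffer. Assumes one 16-bit channel per scan.
 */
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/interrupt.h>

static irqreturn_t my_dac_fifo_empty_irq(int irq, void *p)
{
	struct iio_dev *indio_dev = p;
	u16 sample;

	/* A non-zero return is treated here as "nothing queued yet". */
	if (iio_pop_from_buffer(indio_dev->buffer, &sample))
		return IRQ_HANDLED;

	/* ...write 'sample' to the DAC data register... */
	return IRQ_HANDLED;
}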
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 8bdbaf3f3796..22f67845cdd3 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -220,7 +220,6 @@ struct st_sensor_settings { /** * struct st_sensor_data - ST sensor device status - * @dev: Pointer to instance of struct device (I2C or SPI). * @trig: The trigger in use by the core driver. * @mount_matrix: The mounting matrix of the sensor. * @sensor_settings: Pointer to the specific sensor settings in use. @@ -240,7 +239,6 @@ struct st_sensor_settings { * @buffer_data: Data used by buffer part. */ struct st_sensor_data { - struct device *dev; struct iio_trigger *trig; struct iio_mount_matrix mount_matrix; struct st_sensor_settings *sensor_settings; @@ -273,7 +271,6 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p); int st_sensors_allocate_trigger(struct iio_dev *indio_dev, const struct iio_trigger_ops *trigger_ops); -void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); int st_sensors_validate_device(struct iio_trigger *trig, struct iio_dev *indio_dev); #else @@ -282,10 +279,6 @@ static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, { return 0; } -static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) -{ - return; -} #define st_sensors_validate_device NULL #endif @@ -298,8 +291,6 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable); int st_sensors_power_enable(struct iio_dev *indio_dev); -void st_sensors_power_disable(struct iio_dev *indio_dev); - int st_sensors_debugfs_reg_access(struct iio_dev *indio_dev, unsigned reg, unsigned writeval, unsigned *readval); @@ -330,21 +321,17 @@ void st_sensors_dev_name_probe(struct device *dev, char *name, int len); /* Accelerometer */ const struct st_sensor_settings *st_accel_get_settings(const char *name); int st_accel_common_probe(struct iio_dev *indio_dev); -void st_accel_common_remove(struct iio_dev *indio_dev); /* Gyroscope */ const struct st_sensor_settings *st_gyro_get_settings(const char *name); int st_gyro_common_probe(struct iio_dev *indio_dev); -void st_gyro_common_remove(struct iio_dev *indio_dev); /* Magnetometer */ const struct st_sensor_settings *st_magn_get_settings(const char *name); int st_magn_common_probe(struct iio_dev *indio_dev); -void st_magn_common_remove(struct iio_dev *indio_dev); /* Pressure */ const struct st_sensor_settings *st_press_get_settings(const char *name); int st_press_common_probe(struct iio_dev *indio_dev); -void st_press_common_remove(struct iio_dev *indio_dev); #endif /* ST_SENSORS_H */ diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h index 36de60a5da7a..7a157ed218f6 100644 --- a/include/linux/iio/driver.h +++ b/include/linux/iio/driver.h @@ -8,6 +8,7 @@ #ifndef _IIO_INKERN_H_ #define _IIO_INKERN_H_ +struct device; struct iio_dev; struct iio_map; @@ -26,4 +27,17 @@ int iio_map_array_register(struct iio_dev *indio_dev, */ int iio_map_array_unregister(struct iio_dev *indio_dev); +/** + * devm_iio_map_array_register - device-managed version of iio_map_array_register + * @dev: Device object to which to bind the unwinding of this registration + * @indio_dev: Pointer to the iio_dev structure + * @maps: Pointer to an IIO map object which is to be registered to this IIO device + * + * This function will call iio_map_array_register() to register an IIO map object + * and will also hook a callback to the iio_map_array_unregister() function to + * handle de-registration of the IIO 
map object when the device's refcount goes to + zero. + */ +int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps); + #endif diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index c9504e9da571..2be12b7b5dc5 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -23,6 +23,8 @@ * @groupcounter: index of next attribute group * @legacy_scan_el_group: attribute group for legacy scan elements attribute group * @legacy_buffer_group: attribute group for legacy buffer attributes group + * @bounce_buffer: for devices that call iio_push_to_buffers_with_ts_unaligned() + * @bounce_buffer_size: size of the currently allocated bounce buffer * @scan_index_timestamp: cache of the index to the timestamp * @clock_id: timestamping clock posix identifier * @chrdev: associated character device @@ -50,6 +52,8 @@ struct iio_dev_opaque { int groupcounter; struct attribute_group legacy_scan_el_group; struct attribute_group legacy_buffer_group; + void *bounce_buffer; + size_t bounce_buffer_size; unsigned int scan_index_timestamp; clockid_t clock_id; diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index cf49997d5903..7c02f5292eea 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -49,6 +49,7 @@ struct adis_timeout { * @status_error_mask: Bitmask of errors supported by the device * @timeouts: Chip specific delays * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable + * @unmasked_drdy: True for devices that cannot mask/unmask the data ready pin * @has_paging: True if ADIS device has paged registers * @burst_reg_cmd: Register command that triggers burst * @burst_len: Burst size in the SPI RX buffer.
If @burst_max_len is defined, @@ -78,6 +79,7 @@ struct adis_data { unsigned int status_error_mask; int (*enable_irq)(struct adis *adis, bool enable); + bool unmasked_drdy; bool has_paging; diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h index 7f154d1f8739..7490b05fc5b2 100644 --- a/include/linux/iio/triggered_buffer.h +++ b/include/linux/iio/triggered_buffer.h @@ -2,6 +2,7 @@ #ifndef _LINUX_IIO_TRIGGERED_BUFFER_H_ #define _LINUX_IIO_TRIGGERED_BUFFER_H_ +#include <linux/iio/buffer.h> #include <linux/interrupt.h> struct attribute; @@ -11,21 +12,27 @@ struct iio_buffer_setup_ops; int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), + enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *setup_ops, const struct attribute **buffer_attrs); void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); #define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \ - iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), (setup_ops), NULL) + iio_triggered_buffer_setup_ext((indio_dev), (h), (thread), \ + IIO_BUFFER_DIRECTION_IN, (setup_ops), \ + NULL) int devm_iio_triggered_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, irqreturn_t (*h)(int irq, void *p), irqreturn_t (*thread)(int irq, void *p), + enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *ops, const struct attribute **buffer_attrs); #define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \ - devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), (setup_ops), NULL) + devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \ + IIO_BUFFER_DIRECTION_IN, \ + (setup_ops), NULL) #endif diff --git a/include/linux/input/cy8ctmg110_pdata.h b/include/linux/input/cy8ctmg110_pdata.h deleted file mode 100644 index ee1d44545f30..000000000000 --- a/include/linux/input/cy8ctmg110_pdata.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_CY8CTMG110_PDATA_H -#define _LINUX_CY8CTMG110_PDATA_H - -struct cy8ctmg110_pdata -{ - int reset_pin; /* Reset pin is wired to this GPIO (optional) */ -}; - -#endif diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h new file mode 100644 index 000000000000..cda1f706eaeb --- /dev/null +++ b/include/linux/instruction_pointer.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_INSTRUCTION_POINTER_H +#define _LINUX_INSTRUCTION_POINTER_H + +#define _RET_IP_ (unsigned long)__builtin_return_address(0) +#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) + +#endif /* _LINUX_INSTRUCTION_POINTER_H */ diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 05a65eb155f7..69230fd695ea 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -517,9 +517,6 @@ struct context_entry { u64 hi; }; -/* si_domain contains mulitple devices */ -#define DOMAIN_FLAG_STATIC_IDENTITY BIT(0) - /* * When VT-d works in the scalable mode, it allows DMA translation to * happen through either first level or second level page table. 
This @@ -708,9 +705,15 @@ static inline bool dma_pte_superpage(struct dma_pte *pte) return (pte->val & DMA_PTE_LARGE_PAGE); } -static inline int first_pte_in_page(struct dma_pte *pte) +static inline bool first_pte_in_page(struct dma_pte *pte) +{ + return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE); +} + +static inline int nr_pte_to_next_page(struct dma_pte *pte) { - return !((unsigned long)pte & ~VTD_PAGE_MASK); + return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) : + (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte; } extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index e9743cfd8585..66a774d2710e 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -132,13 +132,7 @@ io_mapping_init_wc(struct io_mapping *iomap, iomap->base = base; iomap->size = size; -#if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */ - iomap->prot = pgprot_noncached_wc(PAGE_KERNEL); -#elif defined(pgprot_writecombine) iomap->prot = pgprot_writecombine(PAGE_KERNEL); -#else - iomap->prot = pgprot_noncached(PAGE_KERNEL); -#endif return iomap; } diff --git a/include/linux/io.h b/include/linux/io.h index 9595151d800d..5fc800390fe4 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -132,6 +132,8 @@ static inline int arch_phys_wc_index(int handle) #endif #endif +int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long size); + enum { /* See memremap() kernel-doc for usage description... */ MEMREMAP_WB = 1 << 0, @@ -166,4 +168,7 @@ static inline void arch_io_free_memtype_wc(resource_size_t base, } #endif +int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start, + resource_size_t size); + #endif /* _LINUX_IO_H */ diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 63f4ea4dac9b..6d1b08d0ae93 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -330,12 +330,19 @@ struct iomap_dio_ops { */ #define IOMAP_DIO_OVERWRITE_ONLY (1 << 1) +/* + * When a page fault occurs, return a partial synchronous result and allow + * the caller to retry the rest of the operation after dealing with the page + * fault. 
+ */ +#define IOMAP_DIO_PARTIAL (1 << 2) + ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, - unsigned int dio_flags); + unsigned int dio_flags, size_t done_before); struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, - unsigned int dio_flags); + unsigned int dio_flags, size_t done_before); ssize_t iomap_dio_complete(struct iomap_dio *dio); #ifdef CONFIG_SWAP diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 9ee238ad29ce..553da4899f55 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -64,6 +64,10 @@ struct irq_fwspec { u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS]; }; +/* Conversion function from of_phandle_args fields to fwspec */ +void of_phandle_args_to_fwspec(struct device_node *np, const u32 *args, + unsigned int count, struct irq_fwspec *fwspec); + /* * Should several domains have the same device node, but serve * different purposes (for example one domain is for PCI/MSI, and the diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index a1d6fc82d7f0..4176c7eca7b5 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -24,25 +24,16 @@ struct cred; struct module; -static inline int is_kernel_inittext(unsigned long addr) -{ - if (addr >= (unsigned long)_sinittext - && addr <= (unsigned long)_einittext) - return 1; - return 0; -} - static inline int is_kernel_text(unsigned long addr) { - if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || - arch_is_kernel_text(addr)) + if (__is_kernel_text(addr)) return 1; return in_gate_area_no_mm(addr); } static inline int is_kernel(unsigned long addr) { - if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) + if (__is_kernel(addr)) return 1; return in_gate_area_no_mm(addr); } diff --git a/include/linux/kasan.h b/include/linux/kasan.h index de5f5913374d..d8783b682669 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -375,12 +375,14 @@ static inline void kasan_unpoison_task_stack(struct task_struct *task) {} void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_record_aux_stack(void *ptr); +void kasan_record_aux_stack_noalloc(void *ptr); #else /* CONFIG_KASAN_GENERIC */ static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_record_aux_stack(void *ptr) {} +static inline void kasan_record_aux_stack_noalloc(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ @@ -439,6 +441,8 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, unsigned long free_region_end); +void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); + #else /* CONFIG_KASAN_VMALLOC */ static inline int kasan_populate_vmalloc(unsigned long start, @@ -456,6 +460,10 @@ static inline void kasan_release_vmalloc(unsigned long start, unsigned long free_region_start, unsigned long free_region_end) {} +static inline void kasan_populate_early_vm_area_shadow(void *start, + unsigned long size) +{ } + #endif /* CONFIG_KASAN_VMALLOC */ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h index 9fd0ad80fef6..5f5965246877 100644 --- a/include/linux/kcsan-checks.h +++ b/include/linux/kcsan-checks.h @@ -100,9 
+100,12 @@ void kcsan_set_access_mask(unsigned long mask); /* Scoped access information. */ struct kcsan_scoped_access { struct list_head list; + /* Access information. */ const volatile void *ptr; size_t size; int type; + /* Location where scoped access was set up. */ + unsigned long ip; }; /* * Automatically call kcsan_end_scoped_access() when kcsan_scoped_access goes diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e8696e4a45aa..968b4c4fe65b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -9,6 +9,7 @@ #include <linux/stddef.h> #include <linux/types.h> #include <linux/compiler.h> +#include <linux/container_of.h> #include <linux/bitops.h> #include <linux/kstrtox.h> #include <linux/log2.h> @@ -19,6 +20,7 @@ #include <linux/printk.h> #include <linux/build_bug.h> #include <linux/static_call_types.h> +#include <linux/instruction_pointer.h> #include <asm/byteorder.h> #include <uapi/linux/kernel.h> @@ -52,11 +54,6 @@ } \ ) -#define typeof_member(T, m) typeof(((T*)0)->m) - -#define _RET_IP_ (unsigned long)__builtin_return_address(0) -#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) - /** * upper_32_bits - return bits 32-63 of a number * @n: the number we're accessing @@ -228,8 +225,6 @@ extern bool parse_option_str(const char *str, const char *option); extern char *next_arg(char *args, char **param, char **val); extern int core_kernel_text(unsigned long addr); -extern int init_kernel_text(unsigned long addr); -extern int core_kernel_data(unsigned long addr); extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int func_ptr_is_kernel_text(void *ptr); @@ -247,6 +242,7 @@ extern bool early_boot_irqs_disabled; extern enum system_states { SYSTEM_BOOTING, SYSTEM_SCHEDULING, + SYSTEM_FREEING_INITMEM, SYSTEM_RUNNING, SYSTEM_HALT, SYSTEM_POWER_OFF, @@ -482,36 +478,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } #define __CONCAT(a, b) a ## b #define CONCATENATE(a, b) __CONCAT(a, b) -/** - * container_of - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - */ -#define container_of(ptr, type, member) ({ \ - void *__mptr = (void *)(ptr); \ - BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ - !__same_type(*(ptr), void), \ - "pointer type mismatch in container_of()"); \ - ((type *)(__mptr - offsetof(type, member))); }) - -/** - * container_of_safe - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. - */ -#define container_of_safe(ptr, type, member) ({ \ - void *__mptr = (void *)(ptr); \ - BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \ - !__same_type(*(ptr), void), \ - "pointer type mismatch in container_of()"); \ - IS_ERR_OR_NULL(__mptr) ? 
ERR_CAST(__mptr) : \ - ((type *)(__mptr - offsetof(type, member))); }) - /* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ #ifdef CONFIG_FTRACE_MCOUNT_RECORD # define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 1093abf7c28c..3ccce6f24548 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -269,10 +269,6 @@ struct kernfs_ops { struct poll_table_struct *pt); int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); - -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lock_class_key lockdep_key; -#endif }; /* @@ -568,30 +564,6 @@ kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode, priv, NULL); } -static inline struct kernfs_node * -kernfs_create_file_ns(struct kernfs_node *parent, const char *name, - umode_t mode, kuid_t uid, kgid_t gid, - loff_t size, const struct kernfs_ops *ops, - void *priv, const void *ns) -{ - struct lock_class_key *key = NULL; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC - key = (struct lock_class_key *)&ops->lockdep_key; -#endif - return __kernfs_create_file(parent, name, mode, uid, gid, - size, ops, priv, ns, key); -} - -static inline struct kernfs_node * -kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode, - loff_t size, const struct kernfs_ops *ops, void *priv) -{ - return kernfs_create_file_ns(parent, name, mode, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - size, ops, priv, NULL); -} - static inline int kernfs_remove_by_name(struct kernfs_node *parent, const char *name) { diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 3fe6dd8a18c1..4b5e3679a72c 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -14,6 +14,9 @@ #ifdef CONFIG_KFENCE +#include <linux/atomic.h> +#include <linux/static_key.h> + /* * We allocate an even number of pages, as it simplifies calculations to map * address to metadata indices; effectively, the very first page serves as an @@ -22,13 +25,8 @@ #define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE) extern char *__kfence_pool; -#ifdef CONFIG_KFENCE_STATIC_KEYS -#include <linux/static_key.h> DECLARE_STATIC_KEY_FALSE(kfence_allocation_key); -#else -#include <linux/atomic.h> extern atomic_t kfence_allocation_gate; -#endif /** * is_kfence_address() - check if an address belongs to KFENCE pool @@ -116,13 +114,16 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags); */ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { -#ifdef CONFIG_KFENCE_STATIC_KEYS - if (static_branch_unlikely(&kfence_allocation_key)) +#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0 + if (!static_branch_unlikely(&kfence_allocation_key)) + return NULL; #else - if (unlikely(!atomic_read(&kfence_allocation_gate))) + if (!static_branch_likely(&kfence_allocation_key)) + return NULL; #endif - return __kfence_alloc(s, size, flags); - return NULL; + if (likely(atomic_read(&kfence_allocation_gate))) + return NULL; + return __kfence_alloc(s, size, flags); } /** diff --git a/include/linux/kobject.h b/include/linux/kobject.h index ea30529fba08..efd56f990a46 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -101,7 +101,6 @@ int kobject_init_and_add(struct kobject *kobj, extern void kobject_del(struct kobject *kobj); -extern struct kobject * __must_check kobject_create(void); extern struct kobject * __must_check kobject_create_and_add(const char *name, struct kobject *parent); diff --git a/include/linux/kvm_host.h 
b/include/linux/kvm_host.h index 0f18df7fe874..60a35d9fe259 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -39,8 +39,8 @@ #include <asm/kvm_host.h> #include <linux/kvm_dirty_ring.h> -#ifndef KVM_MAX_VCPU_ID -#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS +#ifndef KVM_MAX_VCPU_IDS +#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS #endif /* @@ -160,8 +160,7 @@ static inline bool is_error_page(struct page *page) #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except, - unsigned long *vcpu_bitmap, cpumask_var_t tmp); + unsigned long *vcpu_bitmap); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, struct kvm_vcpu *except); @@ -1082,10 +1081,17 @@ static inline struct kvm *kvm_arch_alloc_vm(void) { return kzalloc(sizeof(struct kvm), GFP_KERNEL); } +#endif + +static inline void __kvm_arch_free_vm(struct kvm *kvm) +{ + kvfree(kvm); +} +#ifndef __KVM_HAVE_ARCH_VM_FREE static inline void kvm_arch_free_vm(struct kvm *kvm) { - kfree(kvm); + __kvm_arch_free_vm(kvm); } #endif @@ -1765,6 +1771,8 @@ void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); +bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, + struct kvm_kernel_irq_routing_entry *); #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ #ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS diff --git a/include/linux/libata.h b/include/linux/libata.h index 236ec689056a..2a8404b26083 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -394,7 +394,7 @@ enum { /* This should match the actual table size of * ata_eh_cmd_timeout_table in libata-eh.c. */ - ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 6, + ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7, /* Horkage types. 
May be set by libata or controller on drives (some horkage may be drive/controller pair dependent */ @@ -427,6 +427,7 @@ enum { ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ + ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -1403,7 +1404,7 @@ extern int ata_link_nr_enabled(struct ata_link *link); */ extern const struct ata_port_operations ata_base_port_ops; extern const struct ata_port_operations sata_port_ops; -extern struct device_attribute *ata_common_sdev_attrs[]; +extern const struct attribute_group *ata_common_sdev_groups[]; /* * All sht initializers (BASE, PIO, BMDMA, NCQ) must be instantiated @@ -1433,14 +1434,14 @@ extern struct device_attribute *ata_common_sdev_attrs[]; #define ATA_BASE_SHT(drv_name) \ ATA_SUBBASE_SHT(drv_name), \ - .sdev_attrs = ata_common_sdev_attrs + .sdev_groups = ata_common_sdev_groups #ifdef CONFIG_SATA_HOST -extern struct device_attribute *ata_ncq_sdev_attrs[]; +extern const struct attribute_group *ata_ncq_sdev_groups[]; #define ATA_NCQ_SHT(drv_name) \ ATA_SUBBASE_SHT(drv_name), \ - .sdev_attrs = ata_ncq_sdev_attrs, \ + .sdev_groups = ata_ncq_sdev_groups, \ .change_queue_depth = ata_scsi_change_queue_depth #endif diff --git a/include/linux/list.h b/include/linux/list.h index f2af4b4aa4e9..6636fc07f918 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -2,11 +2,13 @@ #ifndef _LINUX_LIST_H #define _LINUX_LIST_H +#include <linux/container_of.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/poison.h> #include <linux/const.h> -#include <linux/kernel.h> + +#include <asm/barrier.h> /* * Circular doubly linked list implementation. 
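The list.h hunk just above is part of the same header untangling visible earlier in this diff: container_of() moved out of kernel.h into <linux/container_of.h>, _RET_IP_/_THIS_IP_ into <linux/instruction_pointer.h>, and list.h (like llist.h below) now pulls in only what it uses plus <asm/barrier.h>. Code that relied on list.h to drag in kernel.h may need to include the specific headers itself; here is a minimal, made-up example (struct my_item is not from this patch).

/* container_of(), used under the hood by list_first_entry(), now comes
 * from <linux/container_of.h> rather than <linux/kernel.h>.
 */
#include <linux/container_of.h>
#include <linux/list.h>

struct my_item {
	int value;
	struct list_head node;
};

static inline struct my_item *first_item(struct list_head *head)
{
	return list_first_entry(head, struct my_item, node);
}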
diff --git a/include/linux/llist.h b/include/linux/llist.h index 24f207b0190b..85bda2d02d65 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -49,7 +49,9 @@ */ #include <linux/atomic.h> -#include <linux/kernel.h> +#include <linux/container_of.h> +#include <linux/stddef.h> +#include <linux/types.h> struct llist_head { struct llist_node *first; diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index a98309c0121c..398f70093cd3 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -96,18 +96,19 @@ struct nlm_reboot { */ #define NLMSVC_XDRSIZE sizeof(struct nlm_args) -int nlmsvc_decode_testargs(struct svc_rqst *, __be32 *); -int nlmsvc_encode_testres(struct svc_rqst *, __be32 *); -int nlmsvc_decode_lockargs(struct svc_rqst *, __be32 *); -int nlmsvc_decode_cancargs(struct svc_rqst *, __be32 *); -int nlmsvc_decode_unlockargs(struct svc_rqst *, __be32 *); -int nlmsvc_encode_res(struct svc_rqst *, __be32 *); -int nlmsvc_decode_res(struct svc_rqst *, __be32 *); -int nlmsvc_encode_void(struct svc_rqst *, __be32 *); -int nlmsvc_decode_void(struct svc_rqst *, __be32 *); -int nlmsvc_decode_shareargs(struct svc_rqst *, __be32 *); -int nlmsvc_encode_shareres(struct svc_rqst *, __be32 *); -int nlmsvc_decode_notify(struct svc_rqst *, __be32 *); -int nlmsvc_decode_reboot(struct svc_rqst *, __be32 *); +bool nlmsvc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr); + +bool nlmsvc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlmsvc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr); #endif /* LOCKD_XDR_H */ diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index 5ae766f26e04..9a6b55da8fd6 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h @@ -22,21 +22,20 @@ #define nlm4_fbig cpu_to_be32(NLM_FBIG) #define nlm4_failed cpu_to_be32(NLM_FAILED) - - -int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *); -int nlm4svc_encode_testres(struct svc_rqst *, __be32 *); -int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *); -int nlm4svc_decode_cancargs(struct svc_rqst *, __be32 *); -int nlm4svc_decode_unlockargs(struct svc_rqst *, __be32 *); -int nlm4svc_encode_res(struct svc_rqst *, __be32 *); -int nlm4svc_decode_res(struct svc_rqst *, __be32 *); -int nlm4svc_encode_void(struct svc_rqst *, __be32 *); -int nlm4svc_decode_void(struct svc_rqst *, __be32 *); -int nlm4svc_decode_shareargs(struct svc_rqst *, __be32 *); -int nlm4svc_encode_shareres(struct svc_rqst *, __be32 *); -int nlm4svc_decode_notify(struct svc_rqst *, __be32 *); -int nlm4svc_decode_reboot(struct svc_rqst *, __be32 *); +bool nlm4svc_decode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_decode_testargs(struct svc_rqst *rqstp, struct xdr_stream 
*xdr); +bool nlm4svc_decode_lockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_decode_cancargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_decode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_decode_reboot(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_decode_shareargs(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_decode_notify(struct svc_rqst *rqstp, struct xdr_stream *xdr); + +bool nlm4svc_encode_testres(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_encode_res(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_encode_void(struct svc_rqst *rqstp, struct xdr_stream *xdr); +bool nlm4svc_encode_shareres(struct svc_rqst *rqstp, struct xdr_stream *xdr); extern const struct rpc_version nlm_version4; diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index a9ac70ae01ab..442a611fa0fb 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -329,12 +329,14 @@ LSM_HOOK(int, 0, tun_dev_create, void) LSM_HOOK(int, 0, tun_dev_attach_queue, void *security) LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security) LSM_HOOK(int, 0, tun_dev_open, void *security) -LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_endpoint *ep, +LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc, struct sk_buff *skb) LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname, struct sockaddr *address, int addrlen) -LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_endpoint *ep, +LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc, struct sock *sk, struct sock *newsk) +LSM_HOOK(void, LSM_RET_VOID, sctp_assoc_established, struct sctp_association *asoc, + struct sk_buff *skb) #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 0bada4df23fc..d6823214d5c1 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1027,9 +1027,9 @@ * Security hooks for SCTP * * @sctp_assoc_request: - * Passes the @ep and @chunk->skb of the association INIT packet to + * Passes the @asoc and @chunk->skb of the association INIT packet to * the security module. - * @ep pointer to sctp endpoint structure. + * @asoc pointer to sctp association structure. * @skb pointer to skbuff of association packet. * Return 0 on success, error on failure. * @sctp_bind_connect: @@ -1047,9 +1047,14 @@ * Called whenever a new socket is created by accept(2) (i.e. a TCP * style socket) or when a socket is 'peeled off' e.g userspace * calls sctp_peeloff(3). - * @ep pointer to current sctp endpoint structure. + * @asoc pointer to current sctp association structure. * @sk pointer to current sock structure. - * @sk pointer to new sock structure. + * @newsk pointer to new sock structure. + * @sctp_assoc_established: + * Passes the @asoc and @chunk->skb of the association COOKIE_ACK packet + * to the security module. + * @asoc pointer to sctp association structure. + * @skb pointer to skbuff of association packet. 
* * Security hooks for Infiniband * diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 68427e8fadeb..15d03f6532d0 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -18,7 +18,6 @@ struct mdev_device { void *driver_data; struct list_head next; struct mdev_type *type; - struct device *iommu_device; bool active; }; @@ -27,25 +26,6 @@ static inline struct mdev_device *to_mdev_device(struct device *dev) return container_of(dev, struct mdev_device, dev); } -/* - * Called by the parent device driver to set the device which represents - * this mdev in iommu protection scope. By default, the iommu device is - * NULL, that indicates using vendor defined isolation. - * - * @dev: the mediated device that iommu will isolate. - * @iommu_device: a pci device which represents the iommu for @dev. - */ -static inline void mdev_set_iommu_device(struct mdev_device *mdev, - struct device *iommu_device) -{ - mdev->iommu_device = iommu_device; -} - -static inline struct device *mdev_get_iommu_device(struct mdev_device *mdev) -{ - return mdev->iommu_device; -} - unsigned int mdev_get_type_group_id(struct mdev_device *mdev); unsigned int mtype_get_type_group_id(struct mdev_type *mtype); struct device *mtype_get_parent_dev(struct mdev_type *mtype); diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 34de69b3b8ba..8adcf1fa8096 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -28,17 +28,26 @@ extern unsigned long long max_possible_pfn; /** * enum memblock_flags - definition of memory region attributes * @MEMBLOCK_NONE: no special request - * @MEMBLOCK_HOTPLUG: hotpluggable region + * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory + * map during early boot as hot(un)pluggable system RAM (e.g., memory range + * that might get hotunplugged later). With "movable_node" set on the kernel + * commandline, try keeping this memory region hotunpluggable. Does not apply + * to memblocks added ("hotplugged") after early boot. * @MEMBLOCK_MIRROR: mirrored region * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as * reserved in the memory map; refer to memblock_mark_nomap() description * for further details + * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added + * via a driver, and never indicated in the firmware-provided memory map as + * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the + * kernel resource tree. 
*/ enum memblock_flags { MEMBLOCK_NONE = 0x0, /* No special request */ MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */ MEMBLOCK_MIRROR = 0x2, /* mirrored region */ MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */ + MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */ }; /** @@ -100,10 +109,11 @@ static inline void memblock_discard(void) {} #endif void memblock_allow_resize(void); -int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); +int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid, + enum memblock_flags flags); int memblock_add(phys_addr_t base, phys_addr_t size); int memblock_remove(phys_addr_t base, phys_addr_t size); -int memblock_free(phys_addr_t base, phys_addr_t size); +int memblock_phys_free(phys_addr_t base, phys_addr_t size); int memblock_reserve(phys_addr_t base, phys_addr_t size); #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP int memblock_physmem_add(phys_addr_t base, phys_addr_t size); @@ -118,7 +128,7 @@ int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); void memblock_free_all(void); -void memblock_free_ptr(void *ptr, size_t size); +void memblock_free(void *ptr, size_t size); void reset_node_managed_pages(pg_data_t *pgdat); void reset_all_zones_managed_pages(void); @@ -133,7 +143,7 @@ void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid); -void __memblock_free_late(phys_addr_t base, phys_addr_t size); +void memblock_free_late(phys_addr_t base, phys_addr_t size); #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, @@ -208,7 +218,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range(i, p_start, p_end) \ __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_HOTPLUG, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \ + p_start, p_end, NULL) /** * for_each_mem_range_rev - reverse iterate through memblock areas from @@ -219,7 +230,8 @@ static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, */ #define for_each_mem_range_rev(i, p_start, p_end) \ __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ - MEMBLOCK_HOTPLUG, p_start, p_end, NULL) + MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED,\ + p_start, p_end, NULL) /** * for_each_reserved_mem_range - iterate over all reserved memblock areas @@ -249,6 +261,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m) return m->flags & MEMBLOCK_NOMAP; } +static inline bool memblock_is_driver_managed(struct memblock_region *m) +{ + return m->flags & MEMBLOCK_DRIVER_MANAGED; +} + int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, unsigned long *end_pfn); void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, @@ -372,7 +389,7 @@ static inline int memblock_get_region_node(const struct memblock_region *r) /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 -#define MEMBLOCK_ALLOC_KASAN 1 +#define MEMBLOCK_ALLOC_NOLEAKTRACE 1 /* We are using top down, so it is safe to use 0 here */ #define MEMBLOCK_LOW_LIMIT 0 @@ -441,23 +458,6 @@ static inline void *memblock_alloc_node(phys_addr_t size, MEMBLOCK_ALLOC_ACCESSIBLE, nid); } -static inline void memblock_free_early(phys_addr_t base, - phys_addr_t size) -{ 
- memblock_free(base, size); -} - -static inline void memblock_free_early_nid(phys_addr_t base, - phys_addr_t size, int nid) -{ - memblock_free(base, size); -} - -static inline void memblock_free_late(phys_addr_t base, phys_addr_t size) -{ - __memblock_free_late(base, size); -} - /* * Set the allocation direction to bottom-up or top-down. */ diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e34bf0cbdf55..0c5c403f4be6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -180,12 +180,6 @@ struct mem_cgroup_thresholds { struct mem_cgroup_threshold_ary *spare; }; -enum memcg_kmem_state { - KMEM_NONE, - KMEM_ALLOCATED, - KMEM_ONLINE, -}; - #if defined(CONFIG_SMP) struct memcg_padding { char x[0]; @@ -318,7 +312,6 @@ struct mem_cgroup { #ifdef CONFIG_MEMCG_KMEM int kmemcg_id; - enum memcg_kmem_state kmem_state; struct obj_cgroup __rcu *objcg; struct list_head objcg_list; /* list of inherited objcgs */ #endif @@ -1667,7 +1660,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure) return true; do { - if (time_before(jiffies, memcg->socket_pressure)) + if (time_before(jiffies, READ_ONCE(memcg->socket_pressure))) return true; } while ((memcg = parent_mem_cgroup(memcg))); return false; diff --git a/include/linux/memory.h b/include/linux/memory.h index 182c606adb06..88eb587b5143 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -96,7 +96,6 @@ struct memory_notify { unsigned long start_pfn; unsigned long nr_pages; int status_change_nid_normal; - int status_change_nid_high; int status_change_nid; }; @@ -110,7 +109,7 @@ struct mem_section; #define SLAB_CALLBACK_PRI 1 #define IPC_CALLBACK_PRI 10 -#ifndef CONFIG_MEMORY_HOTPLUG_SPARSE +#ifndef CONFIG_MEMORY_HOTPLUG static inline void memory_dev_init(void) { return; @@ -126,7 +125,14 @@ static inline int memory_notify(unsigned long val, void *v) { return 0; } -#else +static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri) +{ + return 0; +} +/* These aren't inline functions due to a GCC bug. 
*/ +#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; }) +#define unregister_hotmemory_notifier(nb) ({ (void)(nb); }) +#else /* CONFIG_MEMORY_HOTPLUG */ extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); int create_memory_block_devices(unsigned long start, unsigned long size, @@ -140,7 +146,6 @@ typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); extern int walk_memory_blocks(unsigned long start, unsigned long size, void *arg, walk_memory_blocks_func_t func); extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func); -#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) extern int memory_group_register_static(int nid, unsigned long max_pages); extern int memory_group_register_dynamic(int nid, unsigned long unit_pages); @@ -149,9 +154,6 @@ struct memory_group *memory_group_find_by_id(int mgid); typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *); int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, struct memory_group *excluded, void *arg); -#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ - -#ifdef CONFIG_MEMORY_HOTPLUG #define hotplug_memory_notifier(fn, pri) ({ \ static __meminitdata struct notifier_block fn##_mem_nb =\ { .notifier_call = fn, .priority = pri };\ @@ -159,15 +161,7 @@ int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, }) #define register_hotmemory_notifier(nb) register_memory_notifier(nb) #define unregister_hotmemory_notifier(nb) unregister_memory_notifier(nb) -#else -static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri) -{ - return 0; -} -/* These aren't inline functions due to a GCC bug. */ -#define register_hotmemory_notifier(nb) ({ (void)(nb); 0; }) -#define unregister_hotmemory_notifier(nb) ({ (void)(nb); }) -#endif +#endif /* CONFIG_MEMORY_HOTPLUG */ /* * Kernel text modification mutex, used for code patching. 
Users of this lock diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index e5a867c950b2..be48e003a518 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -98,9 +98,6 @@ static inline void zone_seqlock_init(struct zone *zone) { seqlock_init(&zone->span_seqlock); } -extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); -extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); -extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); extern void adjust_present_page_count(struct page *page, struct memory_group *group, long nr_pages); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 4091692bed8c..3c7595e81150 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -8,7 +8,6 @@ #include <linux/sched.h> #include <linux/mmzone.h> -#include <linux/dax.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/spinlock.h> @@ -184,8 +183,6 @@ extern bool vma_migratable(struct vm_area_struct *vma); extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); -extern bool numa_demotion_enabled; - static inline bool mpol_is_preferred_many(struct mempolicy *pol) { return (pol->mode == MPOL_PREFERRED_MANY); @@ -301,8 +298,6 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp) return NULL; } -#define numa_demotion_enabled false - static inline bool mpol_is_preferred_many(struct mempolicy *pol) { return false; diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index fa7a43f02f27..8db52324f416 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -36,6 +36,7 @@ enum da9063_variant_codes { PMIC_DA9063_BB = 0x5, PMIC_DA9063_CA = 0x6, PMIC_DA9063_DA = 0x7, + PMIC_DA9063_EA = 0x8, }; /* Interrupts */ diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h deleted file mode 100644 index e5b8dbf828b6..000000000000 --- a/include/linux/mfd/hi6421-spmi-pmic.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Header file for device driver Hi6421 PMIC - * - * Copyright (c) 2013 Linaro Ltd. - * Copyright (C) 2011 Hisilicon. 
- * Copyright (c) 2020-2021 Huawei Technologies Co., Ltd - * - * Guodong Xu <guodong.xu@linaro.org> - */ - -#ifndef __HISI_PMIC_H -#define __HISI_PMIC_H - -#include <linux/irqdomain.h> -#include <linux/regmap.h> - -struct hi6421_spmi_pmic { - struct resource *res; - struct device *dev; - void __iomem *regs; - struct regmap *regmap; -}; - -#endif /* __HISI_PMIC_H */ diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h index 833e578e051e..b1482b3cf353 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -133,35 +133,35 @@ enum max77686_pmic_reg { /* Reserved: 0x7A-0x7D */ MAX77686_REG_BBAT_CHG = 0x7E, - MAX77686_REG_32KHZ = 0x7F, + MAX77686_REG_32KHZ = 0x7F, MAX77686_REG_PMIC_END = 0x80, }; enum max77686_rtc_reg { - MAX77686_RTC_INT = 0x00, - MAX77686_RTC_INTM = 0x01, + MAX77686_RTC_INT = 0x00, + MAX77686_RTC_INTM = 0x01, MAX77686_RTC_CONTROLM = 0x02, MAX77686_RTC_CONTROL = 0x03, MAX77686_RTC_UPDATE0 = 0x04, /* Reserved: 0x5 */ MAX77686_WTSR_SMPL_CNTL = 0x06, - MAX77686_RTC_SEC = 0x07, - MAX77686_RTC_MIN = 0x08, - MAX77686_RTC_HOUR = 0x09, + MAX77686_RTC_SEC = 0x07, + MAX77686_RTC_MIN = 0x08, + MAX77686_RTC_HOUR = 0x09, MAX77686_RTC_WEEKDAY = 0x0A, - MAX77686_RTC_MONTH = 0x0B, - MAX77686_RTC_YEAR = 0x0C, - MAX77686_RTC_DATE = 0x0D, - MAX77686_ALARM1_SEC = 0x0E, - MAX77686_ALARM1_MIN = 0x0F, + MAX77686_RTC_MONTH = 0x0B, + MAX77686_RTC_YEAR = 0x0C, + MAX77686_RTC_DATE = 0x0D, + MAX77686_ALARM1_SEC = 0x0E, + MAX77686_ALARM1_MIN = 0x0F, MAX77686_ALARM1_HOUR = 0x10, MAX77686_ALARM1_WEEKDAY = 0x11, MAX77686_ALARM1_MONTH = 0x12, MAX77686_ALARM1_YEAR = 0x13, MAX77686_ALARM1_DATE = 0x14, - MAX77686_ALARM2_SEC = 0x15, - MAX77686_ALARM2_MIN = 0x16, + MAX77686_ALARM2_SEC = 0x15, + MAX77686_ALARM2_MIN = 0x16, MAX77686_ALARM2_HOUR = 0x17, MAX77686_ALARM2_WEEKDAY = 0x18, MAX77686_ALARM2_MONTH = 0x19, diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h index 90b20550c1c8..06d3f11dc3c9 100644 --- a/include/linux/mfd/stm32-lptimer.h +++ b/include/linux/mfd/stm32-lptimer.h @@ -45,6 +45,11 @@ #define STM32_LPTIM_PRESC GENMASK(11, 9) #define STM32_LPTIM_CKPOL GENMASK(2, 1) +/* STM32_LPTIM_CKPOL */ +#define STM32_LPTIM_CKPOL_RISING_EDGE 0 +#define STM32_LPTIM_CKPOL_FALLING_EDGE 1 +#define STM32_LPTIM_CKPOL_BOTH_EDGES 2 + /* STM32_LPTIM_ARR */ #define STM32_LPTIM_MAX_ARR 0xFFFF diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h index f8db83aedb2b..5f5c43fd69dd 100644 --- a/include/linux/mfd/stm32-timers.h +++ b/include/linux/mfd/stm32-timers.h @@ -82,6 +82,10 @@ #define MAX_TIM_ICPSC 0x3 #define TIM_CR2_MMS_SHIFT 4 #define TIM_CR2_MMS2_SHIFT 20 +#define TIM_SMCR_SMS_SLAVE_MODE_DISABLED 0 /* counts on internal clock when CEN=1 */ +#define TIM_SMCR_SMS_ENCODER_MODE_1 1 /* counts TI1FP1 edges, depending on TI2FP2 level */ +#define TIM_SMCR_SMS_ENCODER_MODE_2 2 /* counts TI2FP2 edges, depending on TI1FP1 level */ +#define TIM_SMCR_SMS_ENCODER_MODE_3 3 /* counts on both TI1FP1 and TI2FP2 edges */ #define TIM_SMCR_TS_SHIFT 4 #define TIM_BDTR_BKF_MASK 0xF #define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4) diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index ffc091b77633..ba13e043d910 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h @@ -1,22 +1,16 @@ -#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H -#define __LINUX_TI_AM335X_TSCADC_MFD_H - +/* SPDX-License-Identifier: GPL-2.0-only */ /* * TI Touch Screen / ADC MFD 
driver * * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ +#ifndef __LINUX_TI_AM335X_TSCADC_MFD_H +#define __LINUX_TI_AM335X_TSCADC_MFD_H + +#include <linux/bitfield.h> #include <linux/mfd/core.h> +#include <linux/units.h> #define REG_RAWIRQSTATUS 0x024 #define REG_IRQSTATUS 0x028 @@ -46,13 +40,6 @@ /* IRQ wakeup enable */ #define IRQWKUP_ENB BIT(0) -/* Step Enable */ -#define STEPENB_MASK (0x1FFFF << 0) -#define STEPENB(val) ((val) << 0) -#define ENB(val) (1 << (val)) -#define STPENB_STEPENB STEPENB(0x1FFFF) -#define STPENB_STEPENB_TC STEPENB(0x1FFF) - /* IRQ enable */ #define IRQENB_HW_PEN BIT(0) #define IRQENB_EOS BIT(1) @@ -65,12 +52,10 @@ #define IRQENB_PENUP BIT(9) /* Step Configuration */ -#define STEPCONFIG_MODE_MASK (3 << 0) -#define STEPCONFIG_MODE(val) ((val) << 0) +#define STEPCONFIG_MODE(val) FIELD_PREP(GENMASK(1, 0), (val)) #define STEPCONFIG_MODE_SWCNT STEPCONFIG_MODE(1) #define STEPCONFIG_MODE_HWSYNC STEPCONFIG_MODE(2) -#define STEPCONFIG_AVG_MASK (7 << 2) -#define STEPCONFIG_AVG(val) ((val) << 2) +#define STEPCONFIG_AVG(val) FIELD_PREP(GENMASK(4, 2), (val)) #define STEPCONFIG_AVG_16 STEPCONFIG_AVG(4) #define STEPCONFIG_XPP BIT(5) #define STEPCONFIG_XNN BIT(6) @@ -78,70 +63,68 @@ #define STEPCONFIG_YNN BIT(8) #define STEPCONFIG_XNP BIT(9) #define STEPCONFIG_YPN BIT(10) -#define STEPCONFIG_RFP(val) ((val) << 12) -#define STEPCONFIG_RFP_VREFP (0x3 << 12) -#define STEPCONFIG_INM_MASK (0xF << 15) -#define STEPCONFIG_INM(val) ((val) << 15) +#define STEPCONFIG_RFP(val) FIELD_PREP(GENMASK(13, 12), (val)) +#define STEPCONFIG_RFP_VREFP STEPCONFIG_RFP(3) +#define STEPCONFIG_INM(val) FIELD_PREP(GENMASK(18, 15), (val)) #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) -#define STEPCONFIG_INP_MASK (0xF << 19) -#define STEPCONFIG_INP(val) ((val) << 19) +#define STEPCONFIG_INP(val) FIELD_PREP(GENMASK(22, 19), (val)) #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) #define STEPCONFIG_FIFO1 BIT(26) -#define STEPCONFIG_RFM(val) ((val) << 23) -#define STEPCONFIG_RFM_VREFN (0x3 << 23) +#define STEPCONFIG_RFM(val) FIELD_PREP(GENMASK(24, 23), (val)) +#define STEPCONFIG_RFM_VREFN STEPCONFIG_RFM(3) /* Delay register */ -#define STEPDELAY_OPEN_MASK (0x3FFFF << 0) -#define STEPDELAY_OPEN(val) ((val) << 0) +#define STEPDELAY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val)) #define STEPCONFIG_OPENDLY STEPDELAY_OPEN(0x098) -#define STEPDELAY_SAMPLE_MASK (0xFF << 24) -#define STEPDELAY_SAMPLE(val) ((val) << 24) +#define STEPCONFIG_MAX_OPENDLY GENMASK(17, 0) +#define STEPDELAY_SAMPLE(val) FIELD_PREP(GENMASK(31, 24), (val)) #define STEPCONFIG_SAMPLEDLY STEPDELAY_SAMPLE(0) +#define STEPCONFIG_MAX_SAMPLE GENMASK(7, 0) /* Charge Config */ -#define STEPCHARGE_RFP_MASK (7 << 12) -#define STEPCHARGE_RFP(val) ((val) << 12) +#define STEPCHARGE_RFP(val) FIELD_PREP(GENMASK(14, 12), (val)) #define STEPCHARGE_RFP_XPUL STEPCHARGE_RFP(1) -#define STEPCHARGE_INM_MASK (0xF << 15) -#define STEPCHARGE_INM(val) ((val) << 15) +#define STEPCHARGE_INM(val) FIELD_PREP(GENMASK(18, 15), (val)) #define 
STEPCHARGE_INM_AN1 STEPCHARGE_INM(1) -#define STEPCHARGE_INP_MASK (0xF << 19) -#define STEPCHARGE_INP(val) ((val) << 19) -#define STEPCHARGE_RFM_MASK (3 << 23) -#define STEPCHARGE_RFM(val) ((val) << 23) +#define STEPCHARGE_INP(val) FIELD_PREP(GENMASK(22, 19), (val)) +#define STEPCHARGE_RFM(val) FIELD_PREP(GENMASK(24, 23), (val)) #define STEPCHARGE_RFM_XNUR STEPCHARGE_RFM(1) /* Charge delay */ -#define CHARGEDLY_OPEN_MASK (0x3FFFF << 0) -#define CHARGEDLY_OPEN(val) ((val) << 0) +#define CHARGEDLY_OPEN(val) FIELD_PREP(GENMASK(17, 0), (val)) #define CHARGEDLY_OPENDLY CHARGEDLY_OPEN(0x400) /* Control register */ -#define CNTRLREG_TSCSSENB BIT(0) +#define CNTRLREG_SSENB BIT(0) #define CNTRLREG_STEPID BIT(1) -#define CNTRLREG_STEPCONFIGWRT BIT(2) +#define CNTRLREG_TSC_STEPCONFIGWRT BIT(2) #define CNTRLREG_POWERDOWN BIT(4) -#define CNTRLREG_AFE_CTRL_MASK (3 << 5) -#define CNTRLREG_AFE_CTRL(val) ((val) << 5) -#define CNTRLREG_4WIRE CNTRLREG_AFE_CTRL(1) -#define CNTRLREG_5WIRE CNTRLREG_AFE_CTRL(2) -#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3) -#define CNTRLREG_TSCENB BIT(7) +#define CNTRLREG_TSC_AFE_CTRL(val) FIELD_PREP(GENMASK(6, 5), (val)) +#define CNTRLREG_TSC_4WIRE CNTRLREG_TSC_AFE_CTRL(1) +#define CNTRLREG_TSC_5WIRE CNTRLREG_TSC_AFE_CTRL(2) +#define CNTRLREG_TSC_8WIRE CNTRLREG_TSC_AFE_CTRL(3) +#define CNTRLREG_TSC_ENB BIT(7) + +/*Control registers bitfields for MAGADC IP */ +#define CNTRLREG_MAGADCENB BIT(0) +#define CNTRLREG_MAG_PREAMP_PWRDOWN BIT(5) +#define CNTRLREG_MAG_PREAMP_BYPASS BIT(6) /* FIFO READ Register */ -#define FIFOREAD_DATA_MASK (0xfff << 0) -#define FIFOREAD_CHNLID_MASK (0xf << 16) +#define FIFOREAD_DATA_MASK GENMASK(11, 0) +#define FIFOREAD_CHNLID_MASK GENMASK(19, 16) /* DMA ENABLE/CLEAR Register */ #define DMA_FIFO0 BIT(0) #define DMA_FIFO1 BIT(1) /* Sequencer Status */ -#define SEQ_STATUS BIT(5) +#define SEQ_STATUS BIT(5) #define CHARGE_STEP 0x11 -#define ADC_CLK 3000000 +#define TSC_ADC_CLK (3 * HZ_PER_MHZ) +#define MAG_ADC_CLK (13 * HZ_PER_MHZ) #define TOTAL_STEPS 16 #define TOTAL_CHANNELS 8 #define FIFO1_THRESHOLD 19 @@ -158,21 +141,27 @@ * * max processing time: 266431 * 308ns = 83ms(approx) */ -#define IDLE_TIMEOUT 83 /* milliseconds */ +#define IDLE_TIMEOUT_MS 83 /* milliseconds */ #define TSCADC_CELLS 2 +struct ti_tscadc_data { + char *adc_feature_name; + char *adc_feature_compatible; + char *secondary_feature_name; + char *secondary_feature_compatible; + unsigned int target_clk_rate; +}; + struct ti_tscadc_dev { struct device *dev; struct regmap *regmap; void __iomem *tscadc_base; phys_addr_t tscadc_phys_base; + const struct ti_tscadc_data *data; int irq; - int used_cells; /* 1-2 */ - int tsc_wires; - int tsc_cell; /* -1 if not used */ - int adc_cell; /* -1 if not used */ struct mfd_cell cells[TSCADC_CELLS]; + u32 ctrl; u32 reg_se_cache; bool adc_waiting; bool adc_in_use; @@ -194,6 +183,12 @@ static inline struct ti_tscadc_dev *ti_tscadc_dev_get(struct platform_device *p) return *tscadc_dev; } +static inline bool ti_adc_with_touchscreen(struct ti_tscadc_dev *tscadc) +{ + return of_device_is_compatible(tscadc->dev->of_node, + "ti,am3359-tscadc"); +} + void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val); void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val); void am335x_tsc_se_clr(struct ti_tscadc_dev *tsadc, u32 val); diff --git a/include/linux/mfd/tps65912.h b/include/linux/mfd/tps65912.h index 7943e413deae..8a61386cb8c1 100644 --- a/include/linux/mfd/tps65912.h +++ b/include/linux/mfd/tps65912.h @@ -322,6 +322,6 @@ struct tps65912 { 
extern const struct regmap_config tps65912_regmap_config; int tps65912_device_init(struct tps65912 *tps); -int tps65912_device_exit(struct tps65912 *tps); +void tps65912_device_exit(struct tps65912 *tps); #endif /* __LINUX_MFD_TPS65912_H */ diff --git a/include/linux/mfd/tps80031.h b/include/linux/mfd/tps80031.h deleted file mode 100644 index 2c75c9c9318f..000000000000 --- a/include/linux/mfd/tps80031.h +++ /dev/null @@ -1,637 +0,0 @@ -/* - * tps80031.h -- TI TPS80031 and TI TPS80032 PMIC driver. - * - * Copyright (c) 2012, NVIDIA Corporation. - * - * Author: Laxman Dewangan <ldewangan@nvidia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, - * whether express or implied; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - * 02111-1307, USA - */ - -#ifndef __LINUX_MFD_TPS80031_H -#define __LINUX_MFD_TPS80031_H - -#include <linux/device.h> -#include <linux/regmap.h> - -/* Pull-ups/Pull-downs */ -#define TPS80031_CFG_INPUT_PUPD1 0xF0 -#define TPS80031_CFG_INPUT_PUPD2 0xF1 -#define TPS80031_CFG_INPUT_PUPD3 0xF2 -#define TPS80031_CFG_INPUT_PUPD4 0xF3 -#define TPS80031_CFG_LDO_PD1 0xF4 -#define TPS80031_CFG_LDO_PD2 0xF5 -#define TPS80031_CFG_SMPS_PD 0xF6 - -/* Real Time Clock */ -#define TPS80031_SECONDS_REG 0x00 -#define TPS80031_MINUTES_REG 0x01 -#define TPS80031_HOURS_REG 0x02 -#define TPS80031_DAYS_REG 0x03 -#define TPS80031_MONTHS_REG 0x04 -#define TPS80031_YEARS_REG 0x05 -#define TPS80031_WEEKS_REG 0x06 -#define TPS80031_ALARM_SECONDS_REG 0x08 -#define TPS80031_ALARM_MINUTES_REG 0x09 -#define TPS80031_ALARM_HOURS_REG 0x0A -#define TPS80031_ALARM_DAYS_REG 0x0B -#define TPS80031_ALARM_MONTHS_REG 0x0C -#define TPS80031_ALARM_YEARS_REG 0x0D -#define TPS80031_RTC_CTRL_REG 0x10 -#define TPS80031_RTC_STATUS_REG 0x11 -#define TPS80031_RTC_INTERRUPTS_REG 0x12 -#define TPS80031_RTC_COMP_LSB_REG 0x13 -#define TPS80031_RTC_COMP_MSB_REG 0x14 -#define TPS80031_RTC_RESET_STATUS_REG 0x16 - -/*PMC Master Module */ -#define TPS80031_PHOENIX_START_CONDITION 0x1F -#define TPS80031_PHOENIX_MSK_TRANSITION 0x20 -#define TPS80031_STS_HW_CONDITIONS 0x21 -#define TPS80031_PHOENIX_LAST_TURNOFF_STS 0x22 -#define TPS80031_VSYSMIN_LO_THRESHOLD 0x23 -#define TPS80031_VSYSMIN_HI_THRESHOLD 0x24 -#define TPS80031_PHOENIX_DEV_ON 0x25 -#define TPS80031_STS_PWR_GRP_STATE 0x27 -#define TPS80031_PH_CFG_VSYSLOW 0x28 -#define TPS80031_PH_STS_BOOT 0x29 -#define TPS80031_PHOENIX_SENS_TRANSITION 0x2A -#define TPS80031_PHOENIX_SEQ_CFG 0x2B -#define TPS80031_PRIMARY_WATCHDOG_CFG 0X2C -#define TPS80031_KEY_PRESS_DUR_CFG 0X2D -#define TPS80031_SMPS_LDO_SHORT_STS 0x2E - -/* PMC Slave Module - Broadcast */ -#define TPS80031_BROADCAST_ADDR_ALL 0x31 -#define TPS80031_BROADCAST_ADDR_REF 0x32 -#define TPS80031_BROADCAST_ADDR_PROV 0x33 -#define TPS80031_BROADCAST_ADDR_CLK_RST 0x34 - -/* PMC Slave Module SMPS Regulators */ -#define TPS80031_SMPS4_CFG_TRANS 0x41 -#define TPS80031_SMPS4_CFG_STATE 0x42 -#define TPS80031_SMPS4_CFG_VOLTAGE 0x44 -#define TPS80031_VIO_CFG_TRANS 0x47 -#define TPS80031_VIO_CFG_STATE 0x48 -#define 
TPS80031_VIO_CFG_FORCE 0x49 -#define TPS80031_VIO_CFG_VOLTAGE 0x4A -#define TPS80031_VIO_CFG_STEP 0x48 -#define TPS80031_SMPS1_CFG_TRANS 0x53 -#define TPS80031_SMPS1_CFG_STATE 0x54 -#define TPS80031_SMPS1_CFG_FORCE 0x55 -#define TPS80031_SMPS1_CFG_VOLTAGE 0x56 -#define TPS80031_SMPS1_CFG_STEP 0x57 -#define TPS80031_SMPS2_CFG_TRANS 0x59 -#define TPS80031_SMPS2_CFG_STATE 0x5A -#define TPS80031_SMPS2_CFG_FORCE 0x5B -#define TPS80031_SMPS2_CFG_VOLTAGE 0x5C -#define TPS80031_SMPS2_CFG_STEP 0x5D -#define TPS80031_SMPS3_CFG_TRANS 0x65 -#define TPS80031_SMPS3_CFG_STATE 0x66 -#define TPS80031_SMPS3_CFG_VOLTAGE 0x68 - -/* PMC Slave Module LDO Regulators */ -#define TPS80031_VANA_CFG_TRANS 0x81 -#define TPS80031_VANA_CFG_STATE 0x82 -#define TPS80031_VANA_CFG_VOLTAGE 0x83 -#define TPS80031_LDO2_CFG_TRANS 0x85 -#define TPS80031_LDO2_CFG_STATE 0x86 -#define TPS80031_LDO2_CFG_VOLTAGE 0x87 -#define TPS80031_LDO4_CFG_TRANS 0x89 -#define TPS80031_LDO4_CFG_STATE 0x8A -#define TPS80031_LDO4_CFG_VOLTAGE 0x8B -#define TPS80031_LDO3_CFG_TRANS 0x8D -#define TPS80031_LDO3_CFG_STATE 0x8E -#define TPS80031_LDO3_CFG_VOLTAGE 0x8F -#define TPS80031_LDO6_CFG_TRANS 0x91 -#define TPS80031_LDO6_CFG_STATE 0x92 -#define TPS80031_LDO6_CFG_VOLTAGE 0x93 -#define TPS80031_LDOLN_CFG_TRANS 0x95 -#define TPS80031_LDOLN_CFG_STATE 0x96 -#define TPS80031_LDOLN_CFG_VOLTAGE 0x97 -#define TPS80031_LDO5_CFG_TRANS 0x99 -#define TPS80031_LDO5_CFG_STATE 0x9A -#define TPS80031_LDO5_CFG_VOLTAGE 0x9B -#define TPS80031_LDO1_CFG_TRANS 0x9D -#define TPS80031_LDO1_CFG_STATE 0x9E -#define TPS80031_LDO1_CFG_VOLTAGE 0x9F -#define TPS80031_LDOUSB_CFG_TRANS 0xA1 -#define TPS80031_LDOUSB_CFG_STATE 0xA2 -#define TPS80031_LDOUSB_CFG_VOLTAGE 0xA3 -#define TPS80031_LDO7_CFG_TRANS 0xA5 -#define TPS80031_LDO7_CFG_STATE 0xA6 -#define TPS80031_LDO7_CFG_VOLTAGE 0xA7 - -/* PMC Slave Module External Control */ -#define TPS80031_REGEN1_CFG_TRANS 0xAE -#define TPS80031_REGEN1_CFG_STATE 0xAF -#define TPS80031_REGEN2_CFG_TRANS 0xB1 -#define TPS80031_REGEN2_CFG_STATE 0xB2 -#define TPS80031_SYSEN_CFG_TRANS 0xB4 -#define TPS80031_SYSEN_CFG_STATE 0xB5 - -/* PMC Slave Module Internal Control */ -#define TPS80031_NRESPWRON_CFG_TRANS 0xB7 -#define TPS80031_NRESPWRON_CFG_STATE 0xB8 -#define TPS80031_CLK32KAO_CFG_TRANS 0xBA -#define TPS80031_CLK32KAO_CFG_STATE 0xBB -#define TPS80031_CLK32KG_CFG_TRANS 0xBD -#define TPS80031_CLK32KG_CFG_STATE 0xBE -#define TPS80031_CLK32KAUDIO_CFG_TRANS 0xC0 -#define TPS80031_CLK32KAUDIO_CFG_STATE 0xC1 -#define TPS80031_VRTC_CFG_TRANS 0xC3 -#define TPS80031_VRTC_CFG_STATE 0xC4 -#define TPS80031_BIAS_CFG_TRANS 0xC6 -#define TPS80031_BIAS_CFG_STATE 0xC7 -#define TPS80031_VSYSMIN_HI_CFG_TRANS 0xC9 -#define TPS80031_VSYSMIN_HI_CFG_STATE 0xCA -#define TPS80031_RC6MHZ_CFG_TRANS 0xCC -#define TPS80031_RC6MHZ_CFG_STATE 0xCD -#define TPS80031_TMP_CFG_TRANS 0xCF -#define TPS80031_TMP_CFG_STATE 0xD0 - -/* PMC Slave Module resources assignment */ -#define TPS80031_PREQ1_RES_ASS_A 0xD7 -#define TPS80031_PREQ1_RES_ASS_B 0xD8 -#define TPS80031_PREQ1_RES_ASS_C 0xD9 -#define TPS80031_PREQ2_RES_ASS_A 0xDA -#define TPS80031_PREQ2_RES_ASS_B 0xDB -#define TPS80031_PREQ2_RES_ASS_C 0xDC -#define TPS80031_PREQ3_RES_ASS_A 0xDD -#define TPS80031_PREQ3_RES_ASS_B 0xDE -#define TPS80031_PREQ3_RES_ASS_C 0xDF - -/* PMC Slave Module Miscellaneous */ -#define TPS80031_SMPS_OFFSET 0xE0 -#define TPS80031_SMPS_MULT 0xE3 -#define TPS80031_MISC1 0xE4 -#define TPS80031_MISC2 0xE5 -#define TPS80031_BBSPOR_CFG 0xE6 -#define TPS80031_TMP_CFG 0xE7 - -/* Battery Charging Controller and 
Indicator LED */ -#define TPS80031_CONTROLLER_CTRL2 0xDA -#define TPS80031_CONTROLLER_VSEL_COMP 0xDB -#define TPS80031_CHARGERUSB_VSYSREG 0xDC -#define TPS80031_CHARGERUSB_VICHRG_PC 0xDD -#define TPS80031_LINEAR_CHRG_STS 0xDE -#define TPS80031_CONTROLLER_INT_MASK 0xE0 -#define TPS80031_CONTROLLER_CTRL1 0xE1 -#define TPS80031_CONTROLLER_WDG 0xE2 -#define TPS80031_CONTROLLER_STAT1 0xE3 -#define TPS80031_CHARGERUSB_INT_STATUS 0xE4 -#define TPS80031_CHARGERUSB_INT_MASK 0xE5 -#define TPS80031_CHARGERUSB_STATUS_INT1 0xE6 -#define TPS80031_CHARGERUSB_STATUS_INT2 0xE7 -#define TPS80031_CHARGERUSB_CTRL1 0xE8 -#define TPS80031_CHARGERUSB_CTRL2 0xE9 -#define TPS80031_CHARGERUSB_CTRL3 0xEA -#define TPS80031_CHARGERUSB_STAT1 0xEB -#define TPS80031_CHARGERUSB_VOREG 0xEC -#define TPS80031_CHARGERUSB_VICHRG 0xED -#define TPS80031_CHARGERUSB_CINLIMIT 0xEE -#define TPS80031_CHARGERUSB_CTRLLIMIT1 0xEF -#define TPS80031_CHARGERUSB_CTRLLIMIT2 0xF0 -#define TPS80031_LED_PWM_CTRL1 0xF4 -#define TPS80031_LED_PWM_CTRL2 0xF5 - -/* USB On-The-Go */ -#define TPS80031_BACKUP_REG 0xFA -#define TPS80031_USB_VENDOR_ID_LSB 0x00 -#define TPS80031_USB_VENDOR_ID_MSB 0x01 -#define TPS80031_USB_PRODUCT_ID_LSB 0x02 -#define TPS80031_USB_PRODUCT_ID_MSB 0x03 -#define TPS80031_USB_VBUS_CTRL_SET 0x04 -#define TPS80031_USB_VBUS_CTRL_CLR 0x05 -#define TPS80031_USB_ID_CTRL_SET 0x06 -#define TPS80031_USB_ID_CTRL_CLR 0x07 -#define TPS80031_USB_VBUS_INT_SRC 0x08 -#define TPS80031_USB_VBUS_INT_LATCH_SET 0x09 -#define TPS80031_USB_VBUS_INT_LATCH_CLR 0x0A -#define TPS80031_USB_VBUS_INT_EN_LO_SET 0x0B -#define TPS80031_USB_VBUS_INT_EN_LO_CLR 0x0C -#define TPS80031_USB_VBUS_INT_EN_HI_SET 0x0D -#define TPS80031_USB_VBUS_INT_EN_HI_CLR 0x0E -#define TPS80031_USB_ID_INT_SRC 0x0F -#define TPS80031_USB_ID_INT_LATCH_SET 0x10 -#define TPS80031_USB_ID_INT_LATCH_CLR 0x11 -#define TPS80031_USB_ID_INT_EN_LO_SET 0x12 -#define TPS80031_USB_ID_INT_EN_LO_CLR 0x13 -#define TPS80031_USB_ID_INT_EN_HI_SET 0x14 -#define TPS80031_USB_ID_INT_EN_HI_CLR 0x15 -#define TPS80031_USB_OTG_ADP_CTRL 0x16 -#define TPS80031_USB_OTG_ADP_HIGH 0x17 -#define TPS80031_USB_OTG_ADP_LOW 0x18 -#define TPS80031_USB_OTG_ADP_RISE 0x19 -#define TPS80031_USB_OTG_REVISION 0x1A - -/* Gas Gauge */ -#define TPS80031_FG_REG_00 0xC0 -#define TPS80031_FG_REG_01 0xC1 -#define TPS80031_FG_REG_02 0xC2 -#define TPS80031_FG_REG_03 0xC3 -#define TPS80031_FG_REG_04 0xC4 -#define TPS80031_FG_REG_05 0xC5 -#define TPS80031_FG_REG_06 0xC6 -#define TPS80031_FG_REG_07 0xC7 -#define TPS80031_FG_REG_08 0xC8 -#define TPS80031_FG_REG_09 0xC9 -#define TPS80031_FG_REG_10 0xCA -#define TPS80031_FG_REG_11 0xCB - -/* General Purpose ADC */ -#define TPS80031_GPADC_CTRL 0x2E -#define TPS80031_GPADC_CTRL2 0x2F -#define TPS80031_RTSELECT_LSB 0x32 -#define TPS80031_RTSELECT_ISB 0x33 -#define TPS80031_RTSELECT_MSB 0x34 -#define TPS80031_GPSELECT_ISB 0x35 -#define TPS80031_CTRL_P1 0x36 -#define TPS80031_RTCH0_LSB 0x37 -#define TPS80031_RTCH0_MSB 0x38 -#define TPS80031_RTCH1_LSB 0x39 -#define TPS80031_RTCH1_MSB 0x3A -#define TPS80031_GPCH0_LSB 0x3B -#define TPS80031_GPCH0_MSB 0x3C - -/* SIM, MMC and Battery Detection */ -#define TPS80031_SIMDEBOUNCING 0xEB -#define TPS80031_SIMCTRL 0xEC -#define TPS80031_MMCDEBOUNCING 0xED -#define TPS80031_MMCCTRL 0xEE -#define TPS80031_BATDEBOUNCING 0xEF - -/* Vibrator Driver and PWMs */ -#define TPS80031_VIBCTRL 0x9B -#define TPS80031_VIBMODE 0x9C -#define TPS80031_PWM1ON 0xBA -#define TPS80031_PWM1OFF 0xBB -#define TPS80031_PWM2ON 0xBD -#define TPS80031_PWM2OFF 0xBE - -/* Control 
Interface */ -#define TPS80031_INT_STS_A 0xD0 -#define TPS80031_INT_STS_B 0xD1 -#define TPS80031_INT_STS_C 0xD2 -#define TPS80031_INT_MSK_LINE_A 0xD3 -#define TPS80031_INT_MSK_LINE_B 0xD4 -#define TPS80031_INT_MSK_LINE_C 0xD5 -#define TPS80031_INT_MSK_STS_A 0xD6 -#define TPS80031_INT_MSK_STS_B 0xD7 -#define TPS80031_INT_MSK_STS_C 0xD8 -#define TPS80031_TOGGLE1 0x90 -#define TPS80031_TOGGLE2 0x91 -#define TPS80031_TOGGLE3 0x92 -#define TPS80031_PWDNSTATUS1 0x93 -#define TPS80031_PWDNSTATUS2 0x94 -#define TPS80031_VALIDITY0 0x17 -#define TPS80031_VALIDITY1 0x18 -#define TPS80031_VALIDITY2 0x19 -#define TPS80031_VALIDITY3 0x1A -#define TPS80031_VALIDITY4 0x1B -#define TPS80031_VALIDITY5 0x1C -#define TPS80031_VALIDITY6 0x1D -#define TPS80031_VALIDITY7 0x1E - -/* Version number related register */ -#define TPS80031_JTAGVERNUM 0x87 -#define TPS80031_EPROM_REV 0xDF - -/* GPADC Trimming Bits. */ -#define TPS80031_GPADC_TRIM0 0xCC -#define TPS80031_GPADC_TRIM1 0xCD -#define TPS80031_GPADC_TRIM2 0xCE -#define TPS80031_GPADC_TRIM3 0xCF -#define TPS80031_GPADC_TRIM4 0xD0 -#define TPS80031_GPADC_TRIM5 0xD1 -#define TPS80031_GPADC_TRIM6 0xD2 -#define TPS80031_GPADC_TRIM7 0xD3 -#define TPS80031_GPADC_TRIM8 0xD4 -#define TPS80031_GPADC_TRIM9 0xD5 -#define TPS80031_GPADC_TRIM10 0xD6 -#define TPS80031_GPADC_TRIM11 0xD7 -#define TPS80031_GPADC_TRIM12 0xD8 -#define TPS80031_GPADC_TRIM13 0xD9 -#define TPS80031_GPADC_TRIM14 0xDA -#define TPS80031_GPADC_TRIM15 0xDB -#define TPS80031_GPADC_TRIM16 0xDC -#define TPS80031_GPADC_TRIM17 0xDD -#define TPS80031_GPADC_TRIM18 0xDE - -/* TPS80031_CONTROLLER_STAT1 bit fields */ -#define TPS80031_CONTROLLER_STAT1_BAT_TEMP 0 -#define TPS80031_CONTROLLER_STAT1_BAT_REMOVED 1 -#define TPS80031_CONTROLLER_STAT1_VBUS_DET 2 -#define TPS80031_CONTROLLER_STAT1_VAC_DET 3 -#define TPS80031_CONTROLLER_STAT1_FAULT_WDG 4 -#define TPS80031_CONTROLLER_STAT1_LINCH_GATED 6 -/* TPS80031_CONTROLLER_INT_MASK bit filed */ -#define TPS80031_CONTROLLER_INT_MASK_MVAC_DET 0 -#define TPS80031_CONTROLLER_INT_MASK_MVBUS_DET 1 -#define TPS80031_CONTROLLER_INT_MASK_MBAT_TEMP 2 -#define TPS80031_CONTROLLER_INT_MASK_MFAULT_WDG 3 -#define TPS80031_CONTROLLER_INT_MASK_MBAT_REMOVED 4 -#define TPS80031_CONTROLLER_INT_MASK_MLINCH_GATED 5 - -#define TPS80031_CHARGE_CONTROL_SUB_INT_MASK 0x3F - -/* TPS80031_PHOENIX_DEV_ON bit field */ -#define TPS80031_DEVOFF 0x1 - -#define TPS80031_EXT_CONTROL_CFG_TRANS 0 -#define TPS80031_EXT_CONTROL_CFG_STATE 1 - -/* State register field */ -#define TPS80031_STATE_OFF 0x00 -#define TPS80031_STATE_ON 0x01 -#define TPS80031_STATE_MASK 0x03 - -/* Trans register field */ -#define TPS80031_TRANS_ACTIVE_OFF 0x00 -#define TPS80031_TRANS_ACTIVE_ON 0x01 -#define TPS80031_TRANS_ACTIVE_MASK 0x03 -#define TPS80031_TRANS_SLEEP_OFF 0x00 -#define TPS80031_TRANS_SLEEP_ON 0x04 -#define TPS80031_TRANS_SLEEP_MASK 0x0C -#define TPS80031_TRANS_OFF_OFF 0x00 -#define TPS80031_TRANS_OFF_ACTIVE 0x10 -#define TPS80031_TRANS_OFF_MASK 0x30 - -#define TPS80031_EXT_PWR_REQ (TPS80031_PWR_REQ_INPUT_PREQ1 | \ - TPS80031_PWR_REQ_INPUT_PREQ2 | \ - TPS80031_PWR_REQ_INPUT_PREQ3) - -/* TPS80031_BBSPOR_CFG bit field */ -#define TPS80031_BBSPOR_CHG_EN 0x8 -#define TPS80031_MAX_REGISTER 0xFF - -struct i2c_client; - -/* Supported chips */ -enum chips { - TPS80031 = 0x00000001, - TPS80032 = 0x00000002, -}; - -enum { - TPS80031_INT_PWRON, - TPS80031_INT_RPWRON, - TPS80031_INT_SYS_VLOW, - TPS80031_INT_RTC_ALARM, - TPS80031_INT_RTC_PERIOD, - TPS80031_INT_HOT_DIE, - TPS80031_INT_VXX_SHORT, - TPS80031_INT_SPDURATION, - 
TPS80031_INT_WATCHDOG, - TPS80031_INT_BAT, - TPS80031_INT_SIM, - TPS80031_INT_MMC, - TPS80031_INT_RES, - TPS80031_INT_GPADC_RT, - TPS80031_INT_GPADC_SW2_EOC, - TPS80031_INT_CC_AUTOCAL, - TPS80031_INT_ID_WKUP, - TPS80031_INT_VBUSS_WKUP, - TPS80031_INT_ID, - TPS80031_INT_VBUS, - TPS80031_INT_CHRG_CTRL, - TPS80031_INT_EXT_CHRG, - TPS80031_INT_INT_CHRG, - TPS80031_INT_RES2, - TPS80031_INT_BAT_TEMP_OVRANGE, - TPS80031_INT_BAT_REMOVED, - TPS80031_INT_VBUS_DET, - TPS80031_INT_VAC_DET, - TPS80031_INT_FAULT_WDG, - TPS80031_INT_LINCH_GATED, - - /* Last interrupt id to get the end number */ - TPS80031_INT_NR, -}; - -/* TPS80031 Slave IDs */ -#define TPS80031_NUM_SLAVES 4 -#define TPS80031_SLAVE_ID0 0 -#define TPS80031_SLAVE_ID1 1 -#define TPS80031_SLAVE_ID2 2 -#define TPS80031_SLAVE_ID3 3 - -/* TPS80031 I2C addresses */ -#define TPS80031_I2C_ID0_ADDR 0x12 -#define TPS80031_I2C_ID1_ADDR 0x48 -#define TPS80031_I2C_ID2_ADDR 0x49 -#define TPS80031_I2C_ID3_ADDR 0x4A - -enum { - TPS80031_REGULATOR_VIO, - TPS80031_REGULATOR_SMPS1, - TPS80031_REGULATOR_SMPS2, - TPS80031_REGULATOR_SMPS3, - TPS80031_REGULATOR_SMPS4, - TPS80031_REGULATOR_VANA, - TPS80031_REGULATOR_LDO1, - TPS80031_REGULATOR_LDO2, - TPS80031_REGULATOR_LDO3, - TPS80031_REGULATOR_LDO4, - TPS80031_REGULATOR_LDO5, - TPS80031_REGULATOR_LDO6, - TPS80031_REGULATOR_LDO7, - TPS80031_REGULATOR_LDOLN, - TPS80031_REGULATOR_LDOUSB, - TPS80031_REGULATOR_VBUS, - TPS80031_REGULATOR_REGEN1, - TPS80031_REGULATOR_REGEN2, - TPS80031_REGULATOR_SYSEN, - TPS80031_REGULATOR_MAX, -}; - -/* Different configurations for the rails */ -enum { - /* USBLDO input selection */ - TPS80031_USBLDO_INPUT_VSYS = 0x00000001, - TPS80031_USBLDO_INPUT_PMID = 0x00000002, - - /* LDO3 output mode */ - TPS80031_LDO3_OUTPUT_VIB = 0x00000004, - - /* VBUS configuration */ - TPS80031_VBUS_DISCHRG_EN_PDN = 0x00000004, - TPS80031_VBUS_SW_ONLY = 0x00000008, - TPS80031_VBUS_SW_N_ID = 0x00000010, -}; - -/* External controls requests */ -enum tps80031_ext_control { - TPS80031_PWR_REQ_INPUT_NONE = 0x00000000, - TPS80031_PWR_REQ_INPUT_PREQ1 = 0x00000001, - TPS80031_PWR_REQ_INPUT_PREQ2 = 0x00000002, - TPS80031_PWR_REQ_INPUT_PREQ3 = 0x00000004, - TPS80031_PWR_OFF_ON_SLEEP = 0x00000008, - TPS80031_PWR_ON_ON_SLEEP = 0x00000010, -}; - -enum tps80031_pupd_pins { - TPS80031_PREQ1 = 0, - TPS80031_PREQ2A, - TPS80031_PREQ2B, - TPS80031_PREQ2C, - TPS80031_PREQ3, - TPS80031_NRES_WARM, - TPS80031_PWM_FORCE, - TPS80031_CHRG_EXT_CHRG_STATZ, - TPS80031_SIM, - TPS80031_MMC, - TPS80031_GPADC_START, - TPS80031_DVSI2C_SCL, - TPS80031_DVSI2C_SDA, - TPS80031_CTLI2C_SCL, - TPS80031_CTLI2C_SDA, -}; - -enum tps80031_pupd_settings { - TPS80031_PUPD_NORMAL, - TPS80031_PUPD_PULLDOWN, - TPS80031_PUPD_PULLUP, -}; - -struct tps80031 { - struct device *dev; - unsigned long chip_info; - int es_version; - struct i2c_client *clients[TPS80031_NUM_SLAVES]; - struct regmap *regmap[TPS80031_NUM_SLAVES]; - struct regmap_irq_chip_data *irq_data; -}; - -struct tps80031_pupd_init_data { - int input_pin; - int setting; -}; - -/* - * struct tps80031_regulator_platform_data - tps80031 regulator platform data. - * - * @reg_init_data: The regulator init data. - * @ext_ctrl_flag: External control flag for sleep/power request control. - * @config_flags: Configuration flag to configure the rails. - * It should be ORed of config enums. 
- */ - -struct tps80031_regulator_platform_data { - struct regulator_init_data *reg_init_data; - unsigned int ext_ctrl_flag; - unsigned int config_flags; -}; - -struct tps80031_platform_data { - int irq_base; - bool use_power_off; - struct tps80031_pupd_init_data *pupd_init_data; - int pupd_init_data_size; - struct tps80031_regulator_platform_data - *regulator_pdata[TPS80031_REGULATOR_MAX]; -}; - -static inline int tps80031_write(struct device *dev, int sid, - int reg, uint8_t val) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return regmap_write(tps80031->regmap[sid], reg, val); -} - -static inline int tps80031_writes(struct device *dev, int sid, int reg, - int len, uint8_t *val) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return regmap_bulk_write(tps80031->regmap[sid], reg, val, len); -} - -static inline int tps80031_read(struct device *dev, int sid, - int reg, uint8_t *val) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - unsigned int ival; - int ret; - - ret = regmap_read(tps80031->regmap[sid], reg, &ival); - if (ret < 0) { - dev_err(dev, "failed reading from reg 0x%02x\n", reg); - return ret; - } - - *val = ival; - return ret; -} - -static inline int tps80031_reads(struct device *dev, int sid, - int reg, int len, uint8_t *val) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return regmap_bulk_read(tps80031->regmap[sid], reg, val, len); -} - -static inline int tps80031_set_bits(struct device *dev, int sid, - int reg, uint8_t bit_mask) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return regmap_update_bits(tps80031->regmap[sid], reg, - bit_mask, bit_mask); -} - -static inline int tps80031_clr_bits(struct device *dev, int sid, - int reg, uint8_t bit_mask) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return regmap_update_bits(tps80031->regmap[sid], reg, bit_mask, 0); -} - -static inline int tps80031_update(struct device *dev, int sid, - int reg, uint8_t val, uint8_t mask) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return regmap_update_bits(tps80031->regmap[sid], reg, mask, val); -} - -static inline unsigned long tps80031_get_chip_info(struct device *dev) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return tps80031->chip_info; -} - -static inline int tps80031_get_pmu_version(struct device *dev) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return tps80031->es_version; -} - -static inline int tps80031_irq_get_virq(struct device *dev, int irq) -{ - struct tps80031 *tps80031 = dev_get_drvdata(dev); - - return regmap_irq_get_virq(tps80031->irq_data, irq); -} - -extern int tps80031_ext_power_req_config(struct device *dev, - unsigned long ext_ctrl_flag, int preq_bit, - int state_reg_add, int trans_reg_add); -#endif /*__LINUX_MFD_TPS80031_H */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 0d2aeb9b0f66..4850cc5bf813 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -19,24 +19,7 @@ struct migration_target_control; */ #define MIGRATEPAGE_SUCCESS 0 -/* - * Keep sync with: - * - macro MIGRATE_REASON in include/trace/events/migrate.h - * - migrate_reason_names[MR_TYPES] in mm/debug.c - */ -enum migrate_reason { - MR_COMPACTION, - MR_MEMORY_FAILURE, - MR_MEMORY_HOTPLUG, - MR_SYSCALL, /* also applies to cpusets */ - MR_MEMPOLICY_MBIND, - MR_NUMA_MISPLACED, - MR_CONTIG_RANGE, - MR_LONGTERM_PIN, - MR_DEMOTION, - MR_TYPES -}; - +/* Defined in mm/debug.c: */ extern const char *migrate_reason_names[MR_TYPES]; #ifdef CONFIG_MIGRATION @@ -61,6 +44,8 @@ 
void folio_migrate_flags(struct folio *newfolio, struct folio *folio); void folio_migrate_copy(struct folio *newfolio, struct folio *folio); int folio_migrate_mapping(struct address_space *mapping, struct folio *newfolio, struct folio *folio, int extra_count); + +extern bool numa_demotion_enabled; #else static inline void putback_movable_pages(struct list_head *l) {} @@ -86,6 +71,8 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, { return -ENOSYS; } + +#define numa_demotion_enabled false #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_COMPACTION @@ -123,7 +110,6 @@ static inline int migrate_misplaced_page(struct page *page, */ #define MIGRATE_PFN_VALID (1UL << 0) #define MIGRATE_PFN_MIGRATE (1UL << 1) -#define MIGRATE_PFN_LOCKED (1UL << 2) #define MIGRATE_PFN_WRITE (1UL << 3) #define MIGRATE_PFN_SHIFT 6 diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h index 883c99249033..f37cc03f9369 100644 --- a/include/linux/migrate_mode.h +++ b/include/linux/migrate_mode.h @@ -19,4 +19,17 @@ enum migrate_mode { MIGRATE_SYNC_NO_COPY, }; +enum migrate_reason { + MR_COMPACTION, + MR_MEMORY_FAILURE, + MR_MEMORY_HOTPLUG, + MR_SYSCALL, /* also applies to cpusets */ + MR_MEMPOLICY_MBIND, + MR_NUMA_MISPLACED, + MR_CONTIG_RANGE, + MR_LONGTERM_PIN, + MR_DEMOTION, + MR_TYPES +}; + #endif /* MIGRATE_MODE_H_INCLUDED */ diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h index da2367e2ac1e..c238207d1615 100644 --- a/include/linux/misc_cgroup.h +++ b/include/linux/misc_cgroup.h @@ -36,7 +36,7 @@ struct misc_cg; struct misc_res { unsigned long max; atomic_long_t usage; - bool failed; + atomic_long_t events; }; /** @@ -46,6 +46,10 @@ struct misc_res { */ struct misc_cg { struct cgroup_subsys_state css; + + /* misc.events */ + struct cgroup_file events_file; + struct misc_res res[MISC_CG_RES_TYPES]; }; diff --git a/include/linux/mm.h b/include/linux/mm.h index 40ff114aaf9e..a7e4a9e7d807 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -794,40 +794,6 @@ static inline int is_vmalloc_or_module_addr(const void *x) } #endif -extern void *kvmalloc_node(size_t size, gfp_t flags, int node); -static inline void *kvmalloc(size_t size, gfp_t flags) -{ - return kvmalloc_node(size, flags, NUMA_NO_NODE); -} -static inline void *kvzalloc_node(size_t size, gfp_t flags, int node) -{ - return kvmalloc_node(size, flags | __GFP_ZERO, node); -} -static inline void *kvzalloc(size_t size, gfp_t flags) -{ - return kvmalloc(size, flags | __GFP_ZERO); -} - -static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags) -{ - size_t bytes; - - if (unlikely(check_mul_overflow(n, size, &bytes))) - return NULL; - - return kvmalloc(bytes, flags); -} - -static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) -{ - return kvmalloc_array(n, size, flags | __GFP_ZERO); -} - -extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, - gfp_t flags); -extern void kvfree(const void *addr); -extern void kvfree_sensitive(const void *addr, size_t len); - static inline int head_compound_mapcount(struct page *head) { return atomic_read(compound_mapcount_ptr(head)) + 1; @@ -904,6 +870,8 @@ void put_pages_list(struct list_head *pages); void split_page(struct page *page, unsigned int order); void folio_copy(struct folio *dst, struct folio *src); +unsigned long nr_free_buffer_pages(void); + /* * Compound pages have a destructor function. Provide a * prototype for that function and accessor functions. 
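Note on the mm.h hunk above: it only removes the kvmalloc()/kvzalloc()/kvmalloc_array()/kvcalloc()/kvfree() declarations from <linux/mm.h>; the helpers themselves are unchanged and are presumably picked up from <linux/slab.h> after this move. A minimal sketch of the usual calling pattern follows — the element type and function names (sample_entry, sample_alloc_table, sample_free_table) are made up for illustration and do not appear in the diff:

#include <linux/slab.h>    /* kvmalloc_array(), kvfree() after the move out of mm.h */
#include <linux/types.h>

struct sample_entry {      /* hypothetical element type, for illustration only */
	u64 key;
	u64 value;
};

static struct sample_entry *sample_alloc_table(size_t nr)
{
	/* Tries a contiguous kmalloc() first, falls back to vmalloc() if needed. */
	return kvmalloc_array(nr, sizeof(struct sample_entry),
			      GFP_KERNEL | __GFP_ZERO);
}

static void sample_free_table(struct sample_entry *tab)
{
	kvfree(tab);       /* correct for both kmalloc()ed and vmalloc()ed memory */
}

Because the allocation may come from either allocator, kvfree() (not kfree()) must be used to release it; only the header providing the declarations changes here, not the API.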
@@ -1861,12 +1829,24 @@ extern void user_shm_unlock(size_t, struct ucounts *); * Parameter block passed down to zap_pte_range in exceptional cases. */ struct zap_details { - struct address_space *check_mapping; /* Check page->mapping if set */ - pgoff_t first_index; /* Lowest page->index to unmap */ - pgoff_t last_index; /* Highest page->index to unmap */ + struct address_space *zap_mapping; /* Check page->mapping if set */ struct page *single_page; /* Locked page to be unmapped */ }; +/* + * We set details->zap_mappings when we want to unmap shared but keep private + * pages. Return true if skip zapping this page, false otherwise. + */ +static inline bool +zap_skip_check_mapping(struct zap_details *details, struct page *page) +{ + if (!details || !page) + return false; + + return details->zap_mapping && + (details->zap_mapping != page_rmapping(page)); +} + struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, @@ -2576,7 +2556,7 @@ static inline unsigned long get_num_physpages(void) * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn, * max_highmem_pfn}; * for_each_valid_physical_page_range() - * memblock_add_node(base, size, nid) + * memblock_add_node(base, size, nid, MEMBLOCK_NONE) * free_area_init(max_zone_pfns); */ void free_area_init(unsigned long *max_zone_pfn); @@ -2604,6 +2584,7 @@ extern void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int migratetype); extern void setup_per_zone_wmarks(void); +extern void calculate_min_free_kbytes(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); extern void __init mmap_init(void); @@ -2976,7 +2957,8 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */ #define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO * and return without waiting upon it */ -#define FOLL_POPULATE 0x40 /* fault in page */ +#define FOLL_POPULATE 0x40 /* fault in pages (with FOLL_MLOCK) */ +#define FOLL_NOFAULT 0x80 /* do not fault in pages */ #define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ #define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ #define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 8f3131477ec6..bb8c6f5f19bc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -114,10 +114,8 @@ struct page { struct page *next; #ifdef CONFIG_64BIT int pages; /* Nr of pages left */ - int pobjects; /* Approximate count */ #else short int pages; - short int pobjects; #endif }; }; @@ -454,17 +452,6 @@ struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; -struct core_thread { - struct task_struct *task; - struct core_thread *next; -}; - -struct core_state { - atomic_t nr_threads; - struct core_thread dumper; - struct completion startup; -}; - struct kioctx_table; struct mm_struct { struct { @@ -585,8 +572,6 @@ struct mm_struct { unsigned long flags; /* Must use atomic bitops to access */ - struct core_state *core_state; /* coredumping support */ - #ifdef CONFIG_AIO spinlock_t ioctx_lock; struct kioctx_table __rcu *ioctx_table; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6a1d79d84675..58e744b78c2c 100644 --- a/include/linux/mmzone.h +++ 
b/include/linux/mmzone.h @@ -199,6 +199,7 @@ enum node_stat_item { NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ + NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */ NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ @@ -272,6 +273,13 @@ enum lru_list { NR_LRU_LISTS }; +enum vmscan_throttle_state { + VMSCAN_THROTTLE_WRITEBACK, + VMSCAN_THROTTLE_ISOLATED, + VMSCAN_THROTTLE_NOPROGRESS, + NR_VMSCAN_THROTTLE, +}; + #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) @@ -841,6 +849,13 @@ typedef struct pglist_data { int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; + + /* workqueues for throttling reclaim for different reasons. */ + wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE]; + + atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */ + unsigned long nr_reclaim_start; /* nr pages written while throttled + * when throttling started. */ struct task_struct *kswapd; /* Protected by mem_hotplug_begin/end() */ int kswapd_order; @@ -1220,6 +1235,28 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, #define for_each_zone_zonelist(zone, z, zlist, highidx) \ for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL) +/* Whether the 'nodes' are all movable nodes */ +static inline bool movable_only_nodes(nodemask_t *nodes) +{ + struct zonelist *zonelist; + struct zoneref *z; + int nid; + + if (nodes_empty(*nodes)) + return false; + + /* + * We can chose arbitrary node from the nodemask to get a + * zonelist as they are interlinked. We just need to find + * at least one zone that can satisfy kernel allocations. + */ + nid = first_node(*nodes); + zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; + z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes); + return (!z->zone) ? 
true : false; +} + + #ifdef CONFIG_SPARSEMEM #include <asm/sparsemem.h> #endif @@ -1481,7 +1518,7 @@ static inline int pfn_valid(unsigned long pfn) if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; - ms = __nr_to_section(pfn_to_section_nr(pfn)); + ms = __pfn_to_section(pfn); if (!valid_section(ms)) return 0; /* @@ -1496,7 +1533,7 @@ static inline int pfn_in_present_section(unsigned long pfn) { if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) return 0; - return present_section(__nr_to_section(pfn_to_section_nr(pfn))); + return present_section(__pfn_to_section(pfn)); } static inline unsigned long next_present_section_nr(unsigned long section_nr) diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 88227044fc86..f5e7dfc2e4e9 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -72,8 +72,6 @@ struct mtd_oob_ops { uint8_t *oobbuf; }; -#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 -#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640 /** * struct mtd_oob_region - oob region definition * @offset: region offset diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h index 5fc6bb2fefad..7a09b040ac39 100644 --- a/include/linux/mux/consumer.h +++ b/include/linux/mux/consumer.h @@ -16,10 +16,25 @@ struct device; struct mux_control; unsigned int mux_control_states(struct mux_control *mux); -int __must_check mux_control_select(struct mux_control *mux, - unsigned int state); -int __must_check mux_control_try_select(struct mux_control *mux, - unsigned int state); +int __must_check mux_control_select_delay(struct mux_control *mux, + unsigned int state, + unsigned int delay_us); +int __must_check mux_control_try_select_delay(struct mux_control *mux, + unsigned int state, + unsigned int delay_us); + +static inline int __must_check mux_control_select(struct mux_control *mux, + unsigned int state) +{ + return mux_control_select_delay(mux, state, 0); +} + +static inline int __must_check mux_control_try_select(struct mux_control *mux, + unsigned int state) +{ + return mux_control_try_select_delay(mux, state, 0); +} + int mux_control_deselect(struct mux_control *mux); struct mux_control *mux_control_get(struct device *dev, const char *mux_name); diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h index 627a2c6bc02d..18824064f8c0 100644 --- a/include/linux/mux/driver.h +++ b/include/linux/mux/driver.h @@ -12,6 +12,7 @@ #include <dt-bindings/mux/mux.h> #include <linux/device.h> +#include <linux/ktime.h> #include <linux/semaphore.h> struct mux_chip; @@ -33,6 +34,7 @@ struct mux_control_ops { * @states: The number of mux controller states. * @idle_state: The mux controller state to use when inactive, or one * of MUX_IDLE_AS_IS and MUX_IDLE_DISCONNECT. + * @last_change: Timestamp of last change * * Mux drivers may only change @states and @idle_state, and may only do so * between allocation and registration of the mux controller. 
Specifically, @@ -47,6 +49,8 @@ struct mux_control { unsigned int states; int idle_state; + + ktime_t last_change; }; /** diff --git a/include/linux/nd.h b/include/linux/nd.h index ee9ad76afbba..8a8c63edb1b2 100644 --- a/include/linux/nd.h +++ b/include/linux/nd.h @@ -88,7 +88,7 @@ struct nd_namespace_pmem { struct nd_namespace_io nsio; unsigned long lbasize; char *alt_name; - u8 *uuid; + uuid_t *uuid; int id; }; @@ -105,7 +105,7 @@ struct nd_namespace_pmem { struct nd_namespace_blk { struct nd_namespace_common common; char *alt_name; - u8 *uuid; + uuid_t *uuid; int id; unsigned long lbasize; resource_size_t size; diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 15004c469807..5662d8be04eb 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -292,6 +292,10 @@ enum nfsstat4 { NFS4ERR_XATTR2BIG = 10096, }; +/* error codes for internal client use */ +#define NFS4ERR_RESET_TO_MDS 12001 +#define NFS4ERR_RESET_TO_PNFS 12002 + static inline bool seqid_mutating_err(u32 err) { /* See RFC 7530, section 9.1.7 */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b9a8b925db43..05f249f20f55 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -81,7 +81,7 @@ struct nfs_open_context { fl_owner_t flock_owner; struct dentry *dentry; const struct cred *cred; - struct rpc_cred *ll_cred; /* low-level cred - use to check for expiry */ + struct rpc_cred __rcu *ll_cred; /* low-level cred - use to check for expiry */ struct nfs4_state *state; fmode_t mode; @@ -103,6 +103,7 @@ struct nfs_open_dir_context { __be32 verf[NFS_DIR_VERIFIER_SIZE]; __u64 dir_cookie; __u64 dup_cookie; + pgoff_t page_index; signed char duped; }; @@ -154,36 +155,39 @@ struct nfs_inode { unsigned long attrtimeo_timestamp; unsigned long attr_gencount; - /* "Generation counter" for the attribute cache. This is - * bumped whenever we update the metadata on the - * server. - */ - unsigned long cache_change_attribute; struct rb_root access_cache; struct list_head access_cache_entry_lru; struct list_head access_cache_inode_lru; - /* - * This is the cookie verifier used for NFSv3 readdir - * operations - */ - __be32 cookieverf[NFS_DIR_VERIFIER_SIZE]; - - atomic_long_t nrequests; - struct nfs_mds_commit_info commit_info; + union { + /* Directory */ + struct { + /* "Generation counter" for the attribute cache. + * This is bumped whenever we update the metadata + * on the server. 
+ */ + unsigned long cache_change_attribute; + /* + * This is the cookie verifier used for NFSv3 readdir + * operations + */ + __be32 cookieverf[NFS_DIR_VERIFIER_SIZE]; + /* Readers: in-flight sillydelete RPC calls */ + /* Writers: rmdir */ + struct rw_semaphore rmdir_sem; + }; + /* Regular file */ + struct { + atomic_long_t nrequests; + struct nfs_mds_commit_info commit_info; + struct mutex commit_mutex; + }; + }; /* Open contexts for shared mmap writes */ struct list_head open_files; - /* Readers: in-flight sillydelete RPC calls */ - /* Writers: rmdir */ - struct rw_semaphore rmdir_sem; - struct mutex commit_mutex; - - /* track last access to cached pages */ - unsigned long page_index; - #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; /* NFSv4 state */ @@ -272,6 +276,7 @@ struct nfs4_copy_state { #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ +#define NFS_INO_FORCE_READDIR (7) /* force readdirplus */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ @@ -383,7 +388,7 @@ extern void nfs_zap_caches(struct inode *); extern void nfs_set_inode_stale(struct inode *inode); extern void nfs_invalidate_atime(struct inode *); extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *, - struct nfs_fattr *, struct nfs4_label *); + struct nfs_fattr *); struct inode *nfs_ilookup(struct super_block *sb, struct nfs_fattr *, struct nfs_fh *); extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *); extern int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr); @@ -404,8 +409,7 @@ extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *map extern int nfs_revalidate_mapping_rcu(struct inode *inode); extern int nfs_setattr(struct user_namespace *, struct dentry *, struct iattr *); extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, struct nfs_fattr *); -extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr, - struct nfs4_label *label); +extern void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr); extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); extern void put_nfs_open_context(struct nfs_open_context *ctx); extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, const struct cred *cred, fmode_t mode); @@ -421,9 +425,22 @@ extern void nfs_fattr_set_barrier(struct nfs_fattr *fattr); extern unsigned long nfs_inc_attr_generation_counter(void); extern struct nfs_fattr *nfs_alloc_fattr(void); +extern struct nfs_fattr *nfs_alloc_fattr_with_label(struct nfs_server *server); + +static inline void nfs4_label_free(struct nfs4_label *label) +{ +#ifdef CONFIG_NFS_V4_SECURITY_LABEL + if (label) { + kfree(label->label); + kfree(label); + } +#endif +} static inline void nfs_free_fattr(const struct nfs_fattr *fattr) { + if (fattr) + nfs4_label_free(fattr->label); kfree(fattr); } @@ -511,10 +528,9 @@ extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf); extern void nfs_clear_verifier_delegated(struct inode *inode); #endif /* IS_ENABLED(CONFIG_NFS_V4) */ extern struct dentry *nfs_add_or_obtain(struct dentry *dentry, - struct nfs_fh *fh, struct nfs_fattr *fattr, - struct nfs4_label *label); + struct nfs_fh *fh, struct nfs_fattr 
*fattr); extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, - struct nfs_fattr *fattr, struct nfs4_label *label); + struct nfs_fattr *fattr); extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags); extern void nfs_access_zap_cache(struct inode *inode); extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, @@ -569,11 +585,14 @@ extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); extern int nfs_commit_inode(struct inode *, int); extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail); extern void nfs_commit_free(struct nfs_commit_data *data); +bool nfs_commit_end(struct nfs_mds_commit_info *cinfo); static inline int nfs_have_writebacks(struct inode *inode) { - return atomic_long_read(&NFS_I(inode)->nrequests) != 0; + if (S_ISREG(inode->i_mode)) + return atomic_long_read(&NFS_I(inode)->nrequests) != 0; + return 0; } /* diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index e9698b6278a5..967a0098f0a9 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -488,7 +488,6 @@ struct nfs_openres { struct nfs4_change_info cinfo; __u32 rflags; struct nfs_fattr * f_attr; - struct nfs4_label *f_label; struct nfs_seqid * seqid; const struct nfs_server *server; fmode_t delegation_type; @@ -753,7 +752,6 @@ struct nfs_entry { int eof; struct nfs_fh * fh; struct nfs_fattr * fattr; - struct nfs4_label *label; unsigned char d_type; struct nfs_server * server; }; @@ -834,7 +832,6 @@ struct nfs_getaclres { struct nfs_setattrres { struct nfs4_sequence_res seq_res; struct nfs_fattr * fattr; - struct nfs4_label *label; const struct nfs_server * server; }; @@ -1041,7 +1038,6 @@ struct nfs4_create_res { const struct nfs_server * server; struct nfs_fh * fh; struct nfs_fattr * fattr; - struct nfs4_label *label; struct nfs4_change_info dir_cinfo; }; @@ -1066,7 +1062,6 @@ struct nfs4_getattr_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; - struct nfs4_label *label; }; struct nfs4_link_arg { @@ -1081,7 +1076,6 @@ struct nfs4_link_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; - struct nfs4_label *label; struct nfs4_change_info cinfo; struct nfs_fattr * dir_attr; }; @@ -1098,7 +1092,6 @@ struct nfs4_lookup_res { const struct nfs_server * server; struct nfs_fattr * fattr; struct nfs_fh * fh; - struct nfs4_label *label; }; struct nfs4_lookupp_arg { @@ -1112,7 +1105,6 @@ struct nfs4_lookupp_res { const struct nfs_server *server; struct nfs_fattr *fattr; struct nfs_fh *fh; - struct nfs4_label *label; }; struct nfs4_lookup_root_arg { @@ -1738,15 +1730,13 @@ struct nfs_rpc_ops { int (*submount) (struct fs_context *, struct nfs_server *); int (*try_get_tree) (struct fs_context *); int (*getattr) (struct nfs_server *, struct nfs_fh *, - struct nfs_fattr *, struct nfs4_label *, - struct inode *); + struct nfs_fattr *, struct inode *); int (*setattr) (struct dentry *, struct nfs_fattr *, struct iattr *); int (*lookup) (struct inode *, struct dentry *, - struct nfs_fh *, struct nfs_fattr *, - struct nfs4_label *); + struct nfs_fh *, struct nfs_fattr *); int (*lookupp) (struct inode *, struct nfs_fh *, - struct nfs_fattr *, struct nfs4_label *); + struct nfs_fattr *); int (*access) (struct inode *, struct nfs_access_entry *); int (*readlink)(struct inode *, struct page *, unsigned int, unsigned int); diff --git a/include/linux/node.h b/include/linux/node.h index 
8e5a29897936..bb21fd631b16 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -85,7 +85,7 @@ struct node { struct device dev; struct list_head access_list; -#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) +#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_HUGETLBFS) struct work_struct node_work; #endif #ifdef CONFIG_HMEM_REPORTING @@ -98,7 +98,7 @@ struct memory_block; extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); -#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) +#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA) void link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, enum meminit_context context); diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index 104505e9028f..98efb7b5660d 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -19,6 +19,9 @@ typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset, void *val, size_t bytes); typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset, void *val, size_t bytes); +/* used for vendor specific post processing of cell data */ +typedef int (*nvmem_cell_post_process_t)(void *priv, const char *id, unsigned int offset, + void *buf, size_t bytes); enum nvmem_type { NVMEM_TYPE_UNKNOWN = 0, @@ -62,6 +65,7 @@ struct nvmem_keepout { * @no_of_node: Device should not use the parent's of_node even if it's !NULL. * @reg_read: Callback to read data. * @reg_write: Callback to write data. + * @cell_post_process: Callback for vendor specific post processing of cell data * @size: Device size. * @word_size: Minimum read/write access granularity. * @stride: Minimum read/write access stride. @@ -92,6 +96,7 @@ struct nvmem_config { bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; + nvmem_cell_post_process_t cell_post_process; int size; int word_size; int stride; diff --git a/include/linux/of.h b/include/linux/of.h index 6f1c41f109bb..ff143a027abc 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -185,7 +185,7 @@ static inline bool of_node_is_root(const struct device_node *node) return node && (node->parent == NULL); } -static inline int of_node_check_flag(struct device_node *n, unsigned long flag) +static inline int of_node_check_flag(const struct device_node *n, unsigned long flag) { return test_bit(flag, &n->_flags); } @@ -353,6 +353,7 @@ extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); extern struct device_node *of_get_next_cpu_node(struct device_node *prev); extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, int index); +extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread); #define for_each_property_of_node(dn, pp) \ for (pp = dn->properties; pp != NULL; pp = pp->next) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 981341a3c3c4..52ec4b5e5615 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -245,7 +245,7 @@ static __always_inline int PageCompound(struct page *page) #define PAGE_POISON_PATTERN -1l static inline int PagePoisoned(const struct page *page) { - return page->flags == PAGE_POISON_PATTERN; + return READ_ONCE(page->flags) == PAGE_POISON_PATTERN; } #ifdef CONFIG_DEBUG_VM diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index 43c638c51c1f..119a0c9d2a8b 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -8,9 +8,9 @@ extern struct static_key_false 
page_owner_inited; extern struct page_ext_operations page_owner_ops; -extern void __reset_page_owner(struct page *page, unsigned int order); +extern void __reset_page_owner(struct page *page, unsigned short order); extern void __set_page_owner(struct page *page, - unsigned int order, gfp_t gfp_mask); + unsigned short order, gfp_t gfp_mask); extern void __split_page_owner(struct page *page, unsigned int nr); extern void __folio_copy_owner(struct folio *newfolio, struct folio *old); extern void __set_page_owner_migrate_reason(struct page *page, int reason); @@ -18,14 +18,14 @@ extern void __dump_page_owner(const struct page *page); extern void pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone); -static inline void reset_page_owner(struct page *page, unsigned int order) +static inline void reset_page_owner(struct page *page, unsigned short order) { if (static_branch_unlikely(&page_owner_inited)) __reset_page_owner(page, order); } static inline void set_page_owner(struct page *page, - unsigned int order, gfp_t gfp_mask) + unsigned short order, gfp_t gfp_mask) { if (static_branch_unlikely(&page_owner_inited)) __set_page_owner(page, order, gfp_mask); @@ -52,7 +52,7 @@ static inline void dump_page_owner(const struct page *page) __dump_page_owner(page); } #else -static inline void reset_page_owner(struct page *page, unsigned int order) +static inline void reset_page_owner(struct page *page, unsigned short order) { } static inline void set_page_owner(struct page *page, @@ -60,7 +60,7 @@ static inline void set_page_owner(struct page *page, { } static inline void split_page_owner(struct page *page, - unsigned int order) + unsigned short order) { } static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 013cdc90f5fd..6a30916b76e5 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -24,6 +24,56 @@ static inline bool mapping_empty(struct address_space *mapping) } /* + * mapping_shrinkable - test if page cache state allows inode reclaim + * @mapping: the page cache mapping + * + * This checks the mapping's cache state for the pupose of inode + * reclaim and LRU management. + * + * The caller is expected to hold the i_lock, but is not required to + * hold the i_pages lock, which usually protects cache state. That's + * because the i_lock and the list_lru lock that protect the inode and + * its LRU state don't nest inside the irq-safe i_pages lock. + * + * Cache deletions are performed under the i_lock, which ensures that + * when an inode goes empty, it will reliably get queued on the LRU. + * + * Cache additions do not acquire the i_lock and may race with this + * check, in which case we'll report the inode as shrinkable when it + * has cache pages. This is okay: the shrinker also checks the + * refcount and the referenced bit, which will be elevated or set in + * the process of adding new cache pages to an inode. + */ +static inline bool mapping_shrinkable(struct address_space *mapping) +{ + void *head; + + /* + * On highmem systems, there could be lowmem pressure from the + * inodes before there is highmem pressure from the page + * cache. Make inodes shrinkable regardless of cache state. + */ + if (IS_ENABLED(CONFIG_HIGHMEM)) + return true; + + /* Cache completely empty? Shrink away. 
*/ + head = rcu_access_pointer(mapping->i_pages.xa_head); + if (!head) + return true; + + /* + * The xarray stores single offset-0 entries directly in the + * head pointer, which allows non-resident page cache entries + * to escape the shadow shrinker's list of xarray nodes. The + * inode shrinker needs to pick them up under memory pressure. + */ + if (!xa_is_node(head) && xa_is_value(head)) + return true; + + return false; +} + +/* * Bits in mapping->flags. */ enum mapping_flags { @@ -824,61 +874,11 @@ int folio_wait_private_2_killable(struct folio *folio); void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter); /* - * Fault everything in given userspace address range in. + * Fault in userspace address range. */ -static inline int fault_in_pages_writeable(char __user *uaddr, size_t size) -{ - char __user *end = uaddr + size - 1; - - if (unlikely(size == 0)) - return 0; - - if (unlikely(uaddr > end)) - return -EFAULT; - /* - * Writing zeroes into userspace here is OK, because we know that if - * the zero gets there, we'll be overwriting it. - */ - do { - if (unlikely(__put_user(0, uaddr) != 0)) - return -EFAULT; - uaddr += PAGE_SIZE; - } while (uaddr <= end); - - /* Check whether the range spilled into the next page. */ - if (((unsigned long)uaddr & PAGE_MASK) == - ((unsigned long)end & PAGE_MASK)) - return __put_user(0, end); - - return 0; -} - -static inline int fault_in_pages_readable(const char __user *uaddr, size_t size) -{ - volatile char c; - const char __user *end = uaddr + size - 1; - - if (unlikely(size == 0)) - return 0; - - if (unlikely(uaddr > end)) - return -EFAULT; - - do { - if (unlikely(__get_user(c, uaddr) != 0)) - return -EFAULT; - uaddr += PAGE_SIZE; - } while (uaddr <= end); - - /* Check whether the range spilled into the next page. */ - if (((unsigned long)uaddr & PAGE_MASK) == - ((unsigned long)end & PAGE_MASK)) { - return __get_user(c, end); - } - - (void)c; - return 0; -} +size_t fault_in_writeable(char __user *uaddr, size_t size); +size_t fault_in_safe_writeable(const char __user *uaddr, size_t size); +size_t fault_in_readable(const char __user *uaddr, size_t size); int add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp); diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index f16de399d2de..078225b514d4 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -84,6 +84,14 @@ extern struct pci_bus *acpi_pci_root_create(struct acpi_pci_root *root, void acpi_pci_add_bus(struct pci_bus *bus); void acpi_pci_remove_bus(struct pci_bus *bus); +#ifdef CONFIG_PCI +void pci_acpi_setup(struct device *dev, struct acpi_device *adev); +void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev); +#else +static inline void pci_acpi_setup(struct device *dev, struct acpi_device *adev) {} +static inline void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev) {} +#endif + #ifdef CONFIG_ACPI_PCI_SLOT void acpi_pci_slot_init(void); void acpi_pci_slot_enumerate(struct pci_bus *bus); diff --git a/include/linux/pci.h b/include/linux/pci.h index cd8aa6fce204..138d764c1c35 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -900,7 +900,10 @@ struct pci_driver { struct pci_dynids dynids; }; -#define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) +static inline struct pci_driver *to_pci_driver(struct device_driver *drv) +{ + return drv ? 
container_of(drv, struct pci_driver, driver) : NULL; +} /** * PCI_DEVICE - macro used to describe a specific PCI device @@ -1130,6 +1133,7 @@ u16 pci_find_ext_capability(struct pci_dev *dev, int cap); u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 pos, int cap); struct pci_bus *pci_find_next_bus(const struct pci_bus *from); u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap); +u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec); u64 pci_get_dsn(struct pci_dev *dev); @@ -1350,6 +1354,8 @@ void pci_unlock_rescan_remove(void); /* Vital Product Data routines */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); +ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf); +ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx); @@ -1498,19 +1504,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, #define PCI_IRQ_ALL_TYPES \ (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) -/* kmem_cache style wrapper around pci_alloc_consistent() */ - #include <linux/dmapool.h> -#define pci_pool dma_pool -#define pci_pool_create(name, pdev, size, align, allocation) \ - dma_pool_create(name, &pdev->dev, size, align, allocation) -#define pci_pool_destroy(pool) dma_pool_destroy(pool) -#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) -#define pci_pool_zalloc(pool, flags, handle) \ - dma_pool_zalloc(pool, flags, handle) -#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) - struct msix_entry { u32 vector; /* Kernel uses to write allocated vector */ u16 entry; /* Driver uses to specify entry, OS writes */ @@ -2126,7 +2121,7 @@ void pcibios_disable_device(struct pci_dev *dev); void pcibios_set_master(struct pci_dev *dev); int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); -int pcibios_add_device(struct pci_dev *dev); +int pcibios_device_add(struct pci_dev *dev); void pcibios_release_device(struct pci_dev *dev); #ifdef CONFIG_PCI void pcibios_penalize_isa_irq(int irq, int active); diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 5e76af742c80..98a9371133f8 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -123,7 +123,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_populate_pte_fn_t populate_pte_fn); #endif -extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); +extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1); extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); extern bool is_kernel_percpu_address(unsigned long addr); @@ -131,8 +131,8 @@ extern bool is_kernel_percpu_address(unsigned long addr); extern void __init setup_per_cpu_areas(void); #endif -extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp); -extern void __percpu *__alloc_percpu(size_t size, size_t align); +extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1); +extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1); extern void free_percpu(void __percpu *__pdata); extern phys_addr_t per_cpu_ptr_to_phys(void *addr); diff --git a/include/linux/phylink.h 
b/include/linux/phylink.h index f037470b6fb3..3563820a1765 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -20,6 +20,29 @@ enum { MLO_AN_PHY = 0, /* Conventional PHY */ MLO_AN_FIXED, /* Fixed-link mode */ MLO_AN_INBAND, /* In-band protocol */ + + MAC_SYM_PAUSE = BIT(0), + MAC_ASYM_PAUSE = BIT(1), + MAC_10HD = BIT(2), + MAC_10FD = BIT(3), + MAC_10 = MAC_10HD | MAC_10FD, + MAC_100HD = BIT(4), + MAC_100FD = BIT(5), + MAC_100 = MAC_100HD | MAC_100FD, + MAC_1000HD = BIT(6), + MAC_1000FD = BIT(7), + MAC_1000 = MAC_1000HD | MAC_1000FD, + MAC_2500FD = BIT(8), + MAC_5000FD = BIT(9), + MAC_10000FD = BIT(10), + MAC_20000FD = BIT(11), + MAC_25000FD = BIT(12), + MAC_40000FD = BIT(13), + MAC_50000FD = BIT(14), + MAC_56000FD = BIT(15), + MAC_100000FD = BIT(16), + MAC_200000FD = BIT(17), + MAC_400000FD = BIT(18), }; static inline bool phylink_autoneg_inband(unsigned int mode) @@ -69,6 +92,7 @@ enum phylink_op_type { * if MAC link is at %MLO_AN_FIXED mode. * @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx * are supported by the MAC/PCS. + * @mac_capabilities: MAC pause/speed/duplex capabilities. */ struct phylink_config { struct device *dev; @@ -79,6 +103,7 @@ struct phylink_config { void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); DECLARE_PHY_INTERFACE_MASK(supported_interfaces); + unsigned long mac_capabilities; }; /** @@ -442,6 +467,12 @@ void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, phy_interface_t interface, int speed, int duplex); #endif +void phylink_get_linkmodes(unsigned long *linkmodes, phy_interface_t interface, + unsigned long mac_capabilities); +void phylink_generic_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state); + struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *, phy_interface_t iface, const struct phylink_mac_ops *mac_ops); diff --git a/include/linux/pid.h b/include/linux/pid.h index af308e15f174..343abf22092e 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -78,6 +78,7 @@ struct file; extern struct pid *pidfd_pid(const struct file *file); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); +struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags); int pidfd_create(struct pid *pid, unsigned int flags); static inline struct pid *get_pid(struct pid *pid) diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h index 02599687770c..df3c78c92ca2 100644 --- a/include/linux/platform_data/cros_ec_proto.h +++ b/include/linux/platform_data/cros_ec_proto.h @@ -205,7 +205,7 @@ struct cros_ec_dev { struct cros_ec_debugfs *debug_info; bool has_kb_wake_angle; u16 cmd_offset; - u32 features[2]; + struct ec_response_get_features features; }; #define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev) @@ -227,10 +227,13 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); -int cros_ec_check_features(struct cros_ec_dev *ec, int feature); +bool cros_ec_check_features(struct cros_ec_dev *ec, int feature); int cros_ec_get_sensor_count(struct cros_ec_dev *ec); +int cros_ec_command(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata, + int outsize, void *indata, int insize); + /** * cros_ec_get_time_ns() - Return time in ns. 
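Tying the phylink additions above together: a MAC driver can now describe its pause/speed/duplex abilities once in the new mac_capabilities field and let phylink derive the supported link modes, typically by using phylink_generic_validate() as its validate hook. A sketch under the assumption of a simple gigabit MAC wired over RGMII (names prefixed example_ are not from the patch):

#include <linux/phy.h>
#include <linux/phylink.h>

static void example_phylink_config_init(struct phylink_config *config)
{
        config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
                                   MAC_10 | MAC_100 | MAC_1000FD;

        __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
}

static const struct phylink_mac_ops example_mac_ops = {
        .validate       = phylink_generic_validate,
        /* .mac_config, .mac_link_up, ... unchanged from the driver */
};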
* diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index 101333fe2b8d..40185f9d7c14 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h @@ -25,12 +25,75 @@ enum mlxreg_wdt_type { }; /** + * enum mlxreg_hotplug_kind - kind of hotplug entry + * + * @MLXREG_HOTPLUG_DEVICE_NA: do not care; + * @MLXREG_HOTPLUG_LC_PRESENT: entry for line card presence in/out events; + * @MLXREG_HOTPLUG_LC_VERIFIED: entry for line card verification status events + * coming after line card security signature validation; + * @MLXREG_HOTPLUG_LC_POWERED: entry for line card power on/off events; + * @MLXREG_HOTPLUG_LC_SYNCED: entry for line card synchronization events, coming + * after hardware-firmware synchronization handshake; + * @MLXREG_HOTPLUG_LC_READY: entry for line card ready events, indicating line card + PHYs ready / unready state; + * @MLXREG_HOTPLUG_LC_ACTIVE: entry for line card active events, indicating firmware + * availability / unavailability for the ports on line card; + * @MLXREG_HOTPLUG_LC_THERMAL: entry for line card thermal shutdown events, positive + * event indicates that system should power off the line + * card for which this event has been received; + */ +enum mlxreg_hotplug_kind { + MLXREG_HOTPLUG_DEVICE_NA = 0, + MLXREG_HOTPLUG_LC_PRESENT = 1, + MLXREG_HOTPLUG_LC_VERIFIED = 2, + MLXREG_HOTPLUG_LC_POWERED = 3, + MLXREG_HOTPLUG_LC_SYNCED = 4, + MLXREG_HOTPLUG_LC_READY = 5, + MLXREG_HOTPLUG_LC_ACTIVE = 6, + MLXREG_HOTPLUG_LC_THERMAL = 7, +}; + +/** + * enum mlxreg_hotplug_device_action - hotplug device action required for + * driver's connectivity + * + * @MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: probe device for 'on' event, remove + * for 'off' event; + * @MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: probe platform device for 'on' + * event, remove for 'off' event; + * @MLXREG_HOTPLUG_DEVICE_NO_ACTION: no connectivity action is required; + */ +enum mlxreg_hotplug_device_action { + MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION = 0, + MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION = 1, + MLXREG_HOTPLUG_DEVICE_NO_ACTION = 2, +}; + +/** + * struct mlxreg_core_hotplug_notifier - hotplug notifier block: + * + * @identity: notifier identity name; + * @handle: user handle to be passed by user handler function; + * @user_handler: user handler function associated with the event; + */ +struct mlxreg_core_hotplug_notifier { + char identity[MLXREG_CORE_LABEL_MAX_SIZE]; + void *handle; + int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action); +}; + +/** * struct mlxreg_hotplug_device - I2C device data: * * @adapter: I2C device adapter; * @client: I2C device client; * @brdinfo: device board information; * @nr: I2C device adapter number, to which device is to be attached; + * @pdev: platform device, if device is instantiated as a platform device; + * @action: action to be performed upon event receiving; + * @handle: user handle to be passed by user handler function; + * @user_handler: user handler function associated with the event; + * @notifier: pointer to event notifier block; * * Structure represents I2C hotplug device static data (board topology) and * dynamic data (related kernel objects handles). 
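For the new mlxreg hotplug notifier block above, a consumer supplies an identity string, an opaque handle and a callback that receives the event kind and an action byte. A hedged sketch; the line-card state type, the handler and the meaning attached to the action value here are assumptions for illustration only:

struct example_lc_state {
        bool powered;
};

/* Assumed convention for this sketch: a non-zero action means the event
 * condition is asserted (e.g. line card powered on), zero means cleared. */
static int example_lc_event(void *handle, enum mlxreg_hotplug_kind kind,
                            u8 action)
{
        struct example_lc_state *lc = handle;

        if (kind == MLXREG_HOTPLUG_LC_POWERED)
                lc->powered = !!action;

        return 0;
}

static struct example_lc_state example_lc;

static struct mlxreg_core_hotplug_notifier example_lc_notifier = {
        .identity       = "lc1_powered",
        .handle         = &example_lc,
        .user_handler   = example_lc_event,
};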
@@ -40,6 +103,11 @@ struct mlxreg_hotplug_device { struct i2c_client *client; struct i2c_board_info *brdinfo; int nr; + struct platform_device *pdev; + enum mlxreg_hotplug_device_action action; + void *handle; + int (*user_handler)(void *handle, enum mlxreg_hotplug_kind kind, u8 action); + struct mlxreg_core_hotplug_notifier *notifier; }; /** @@ -51,12 +119,18 @@ struct mlxreg_hotplug_device { * @bit: attribute effective bit; * @capability: attribute capability register; * @reg_prsnt: attribute presence register; + * @reg_sync: attribute synch register; + * @reg_pwr: attribute power register; + * @reg_ena: attribute enable register; * @mode: access mode; * @np - pointer to node platform associated with attribute; * @hpdev - hotplug device data; + * @notifier: pointer to event notifier block; * @health_cntr: dynamic device health indication counter; * @attached: true if device has been attached after good health indication; * @regnum: number of registers occupied by multi-register attribute; + * @slot: slot number, at which device is located; + * @secured: if set indicates that entry access is secured; */ struct mlxreg_core_data { char label[MLXREG_CORE_LABEL_MAX_SIZE]; @@ -65,18 +139,25 @@ struct mlxreg_core_data { u32 bit; u32 capability; u32 reg_prsnt; + u32 reg_sync; + u32 reg_pwr; + u32 reg_ena; umode_t mode; struct device_node *np; struct mlxreg_hotplug_device hpdev; + struct mlxreg_core_hotplug_notifier *notifier; u32 health_cntr; bool attached; u8 regnum; + u8 slot; + u8 secured; }; /** * struct mlxreg_core_item - same type components controlled by the driver: * * @data: component data; + * @kind: kind of hotplug attribute; * @aggr_mask: group aggregation mask; * @reg: group interrupt status register; * @mask: group interrupt mask; @@ -89,6 +170,7 @@ struct mlxreg_core_data { */ struct mlxreg_core_item { struct mlxreg_core_data *data; + enum mlxreg_hotplug_kind kind; u32 aggr_mask; u32 reg; u32 mask; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index 9837fb011f2f..eb556f988d57 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -50,6 +50,9 @@ struct sysc_regbits { s8 emufree_shift; }; +#define SYSC_MODULE_QUIRK_OTG BIT(30) +#define SYSC_QUIRK_RESET_ON_CTX_LOST BIT(29) +#define SYSC_QUIRK_REINIT_ON_CTX_LOST BIT(28) #define SYSC_QUIRK_REINIT_ON_RESUME BIT(27) #define SYSC_QUIRK_GPMC_DEBUG BIT(26) #define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25) diff --git a/include/linux/platform_data/ux500_wdt.h b/include/linux/platform_data/ux500_wdt.h deleted file mode 100644 index de6a4ad41e76..000000000000 --- a/include/linux/platform_data/ux500_wdt.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST Ericsson SA 2011 - * - * STE Ux500 Watchdog platform data - */ -#ifndef __UX500_WDT_H -#define __UX500_WDT_H - -/** - * struct ux500_wdt_data - */ -struct ux500_wdt_data { - unsigned int timeout; - bool has_28_bits_resolution; -}; - -#endif /* __UX500_WDT_H */ diff --git a/include/linux/platform_data/x86/soc.h b/include/linux/platform_data/x86/soc.h new file mode 100644 index 000000000000..da05f425587a --- /dev/null +++ b/include/linux/platform_data/x86/soc.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Helpers for Intel SoC model detection + * + * Copyright (c) 2019, Intel Corporation. 
+ */ + +#ifndef __PLATFORM_DATA_X86_SOC_H +#define __PLATFORM_DATA_X86_SOC_H + +#if IS_ENABLED(CONFIG_X86) + +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +#define SOC_INTEL_IS_CPU(soc, type) \ +static inline bool soc_intel_is_##soc(void) \ +{ \ + static const struct x86_cpu_id soc##_cpu_ids[] = { \ + X86_MATCH_INTEL_FAM6_MODEL(type, NULL), \ + {} \ + }; \ + const struct x86_cpu_id *id; \ + \ + id = x86_match_cpu(soc##_cpu_ids); \ + if (id) \ + return true; \ + return false; \ +} + +SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT); +SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT); +SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT); +SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS); +SOC_INTEL_IS_CPU(cml, KABYLAKE_L); + +#else /* IS_ENABLED(CONFIG_X86) */ + +static inline bool soc_intel_is_byt(void) +{ + return false; +} + +static inline bool soc_intel_is_cht(void) +{ + return false; +} + +static inline bool soc_intel_is_apl(void) +{ + return false; +} + +static inline bool soc_intel_is_glk(void) +{ + return false; +} + +static inline bool soc_intel_is_cml(void) +{ + return false; +} +#endif /* IS_ENABLED(CONFIG_X86) */ + +#endif /* __PLATFORM_DATA_X86_SOC_H */ diff --git a/include/linux/plist.h b/include/linux/plist.h index 66bab1bca35c..0f352c1d3c80 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h @@ -73,8 +73,11 @@ #ifndef _LINUX_PLIST_H_ #define _LINUX_PLIST_H_ -#include <linux/kernel.h> +#include <linux/container_of.h> #include <linux/list.h> +#include <linux/types.h> + +#include <asm/bug.h> struct plist_head { struct list_head node_list; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 84150a22fd7c..879c138c7b8e 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -156,9 +156,9 @@ int devm_pm_opp_set_clkname(struct device *dev, const char *name); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); int devm_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); -struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); +struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs); void dev_pm_opp_detach_genpd(struct opp_table *opp_table); -int devm_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs); +int devm_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs); struct dev_pm_opp *dev_pm_opp_xlate_required_opp(struct opp_table *src_table, struct opp_table *dst_table, struct dev_pm_opp *src_opp); int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); @@ -376,7 +376,7 @@ static inline int devm_pm_opp_set_clkname(struct device *dev, const char *name) return -EOPNOTSUPP; } -static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char **names, struct device ***virt_devs) +static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, const char * const *names, struct device ***virt_devs) { return ERR_PTR(-EOPNOTSUPP); } @@ -384,7 +384,7 @@ static inline struct opp_table *dev_pm_opp_attach_genpd(struct device *dev, cons static inline void dev_pm_opp_detach_genpd(struct opp_table *opp_table) {} static inline int 
devm_pm_opp_attach_genpd(struct device *dev, - const char **names, + const char * const *names, struct device ***virt_devs) { return -EOPNOTSUPP; @@ -439,7 +439,9 @@ static inline int dev_pm_opp_sync_regulators(struct device *dev) #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_add_table_indexed(struct device *dev, int index); +int devm_pm_opp_of_add_table_indexed(struct device *dev, int index); int dev_pm_opp_of_add_table_noclk(struct device *dev, int index); +int devm_pm_opp_of_add_table_noclk(struct device *dev, int index); void dev_pm_opp_of_remove_table(struct device *dev); int devm_pm_opp_of_add_table(struct device *dev); int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); @@ -465,11 +467,21 @@ static inline int dev_pm_opp_of_add_table_indexed(struct device *dev, int index) return -EOPNOTSUPP; } +static inline int devm_pm_opp_of_add_table_indexed(struct device *dev, int index) +{ + return -EOPNOTSUPP; +} + static inline int dev_pm_opp_of_add_table_noclk(struct device *dev, int index) { return -EOPNOTSUPP; } +static inline int devm_pm_opp_of_add_table_noclk(struct device *dev, int index) +{ + return -EOPNOTSUPP; +} + static inline void dev_pm_opp_of_remove_table(struct device *dev) { } diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h index cd5b62db9084..e63a63aa47a3 100644 --- a/include/linux/pm_wakeirq.h +++ b/include/linux/pm_wakeirq.h @@ -17,8 +17,8 @@ #ifdef CONFIG_PM extern int dev_pm_set_wake_irq(struct device *dev, int irq); -extern int dev_pm_set_dedicated_wake_irq(struct device *dev, - int irq); +extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); +extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); extern void dev_pm_clear_wake_irq(struct device *dev); extern void dev_pm_enable_wake_irq(struct device *dev); extern void dev_pm_disable_wake_irq(struct device *dev); @@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) return 0; } +static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return 0; +} + static inline void dev_pm_clear_wake_irq(struct device *dev) { } diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h deleted file mode 100644 index 17d7d0d20eca..000000000000 --- a/include/linux/pnfs_osd_xdr.h +++ /dev/null @@ -1,317 +0,0 @@ -/* - * pNFS-osd on-the-wire data structures - * - * Copyright (C) 2007 Panasas Inc. [year of first publication] - * All rights reserved. - * - * Benny Halevy <bhalevy@panasas.com> - * Boaz Harrosh <ooo@electrozaur.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * See the file COPYING included with this distribution for more details. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the name of the Panasas company nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef __PNFS_OSD_XDR_H__ -#define __PNFS_OSD_XDR_H__ - -#include <linux/nfs_fs.h> - -/* - * draft-ietf-nfsv4-minorversion-22 - * draft-ietf-nfsv4-pnfs-obj-12 - */ - -/* Layout Structure */ - -enum pnfs_osd_raid_algorithm4 { - PNFS_OSD_RAID_0 = 1, - PNFS_OSD_RAID_4 = 2, - PNFS_OSD_RAID_5 = 3, - PNFS_OSD_RAID_PQ = 4 /* Reed-Solomon P+Q */ -}; - -/* struct pnfs_osd_data_map4 { - * uint32_t odm_num_comps; - * length4 odm_stripe_unit; - * uint32_t odm_group_width; - * uint32_t odm_group_depth; - * uint32_t odm_mirror_cnt; - * pnfs_osd_raid_algorithm4 odm_raid_algorithm; - * }; - */ -struct pnfs_osd_data_map { - u32 odm_num_comps; - u64 odm_stripe_unit; - u32 odm_group_width; - u32 odm_group_depth; - u32 odm_mirror_cnt; - u32 odm_raid_algorithm; -}; - -/* struct pnfs_osd_objid4 { - * deviceid4 oid_device_id; - * uint64_t oid_partition_id; - * uint64_t oid_object_id; - * }; - */ -struct pnfs_osd_objid { - struct nfs4_deviceid oid_device_id; - u64 oid_partition_id; - u64 oid_object_id; -}; - -/* For printout. 
I use: - * kprint("dev(%llx:%llx)", _DEVID_LO(pointer), _DEVID_HI(pointer)); - * BE style - */ -#define _DEVID_LO(oid_device_id) \ - (unsigned long long)be64_to_cpup((__be64 *)(oid_device_id)->data) - -#define _DEVID_HI(oid_device_id) \ - (unsigned long long)be64_to_cpup(((__be64 *)(oid_device_id)->data) + 1) - -enum pnfs_osd_version { - PNFS_OSD_MISSING = 0, - PNFS_OSD_VERSION_1 = 1, - PNFS_OSD_VERSION_2 = 2 -}; - -struct pnfs_osd_opaque_cred { - u32 cred_len; - void *cred; -}; - -enum pnfs_osd_cap_key_sec { - PNFS_OSD_CAP_KEY_SEC_NONE = 0, - PNFS_OSD_CAP_KEY_SEC_SSV = 1, -}; - -/* struct pnfs_osd_object_cred4 { - * pnfs_osd_objid4 oc_object_id; - * pnfs_osd_version4 oc_osd_version; - * pnfs_osd_cap_key_sec4 oc_cap_key_sec; - * opaque oc_capability_key<>; - * opaque oc_capability<>; - * }; - */ -struct pnfs_osd_object_cred { - struct pnfs_osd_objid oc_object_id; - u32 oc_osd_version; - u32 oc_cap_key_sec; - struct pnfs_osd_opaque_cred oc_cap_key; - struct pnfs_osd_opaque_cred oc_cap; -}; - -/* struct pnfs_osd_layout4 { - * pnfs_osd_data_map4 olo_map; - * uint32_t olo_comps_index; - * pnfs_osd_object_cred4 olo_components<>; - * }; - */ -struct pnfs_osd_layout { - struct pnfs_osd_data_map olo_map; - u32 olo_comps_index; - u32 olo_num_comps; - struct pnfs_osd_object_cred *olo_comps; -}; - -/* Device Address */ -enum pnfs_osd_targetid_type { - OBJ_TARGET_ANON = 1, - OBJ_TARGET_SCSI_NAME = 2, - OBJ_TARGET_SCSI_DEVICE_ID = 3, -}; - -/* union pnfs_osd_targetid4 switch (pnfs_osd_targetid_type4 oti_type) { - * case OBJ_TARGET_SCSI_NAME: - * string oti_scsi_name<>; - * - * case OBJ_TARGET_SCSI_DEVICE_ID: - * opaque oti_scsi_device_id<>; - * - * default: - * void; - * }; - * - * union pnfs_osd_targetaddr4 switch (bool ota_available) { - * case TRUE: - * netaddr4 ota_netaddr; - * case FALSE: - * void; - * }; - * - * struct pnfs_osd_deviceaddr4 { - * pnfs_osd_targetid4 oda_targetid; - * pnfs_osd_targetaddr4 oda_targetaddr; - * uint64_t oda_lun; - * opaque oda_systemid<>; - * pnfs_osd_object_cred4 oda_root_obj_cred; - * opaque oda_osdname<>; - * }; - */ -struct pnfs_osd_targetid { - u32 oti_type; - struct nfs4_string oti_scsi_device_id; -}; - -/* struct netaddr4 { - * // see struct rpcb in RFC1833 - * string r_netid<>; // network id - * string r_addr<>; // universal address - * }; - */ -struct pnfs_osd_net_addr { - struct nfs4_string r_netid; - struct nfs4_string r_addr; -}; - -struct pnfs_osd_targetaddr { - u32 ota_available; - struct pnfs_osd_net_addr ota_netaddr; -}; - -struct pnfs_osd_deviceaddr { - struct pnfs_osd_targetid oda_targetid; - struct pnfs_osd_targetaddr oda_targetaddr; - u8 oda_lun[8]; - struct nfs4_string oda_systemid; - struct pnfs_osd_object_cred oda_root_obj_cred; - struct nfs4_string oda_osdname; -}; - -/* LAYOUTCOMMIT: layoutupdate */ - -/* union pnfs_osd_deltaspaceused4 switch (bool dsu_valid) { - * case TRUE: - * int64_t dsu_delta; - * case FALSE: - * void; - * }; - * - * struct pnfs_osd_layoutupdate4 { - * pnfs_osd_deltaspaceused4 olu_delta_space_used; - * bool olu_ioerr_flag; - * }; - */ -struct pnfs_osd_layoutupdate { - u32 dsu_valid; - s64 dsu_delta; - u32 olu_ioerr_flag; -}; - -/* LAYOUTRETURN: I/O Rrror Report */ - -enum pnfs_osd_errno { - PNFS_OSD_ERR_EIO = 1, - PNFS_OSD_ERR_NOT_FOUND = 2, - PNFS_OSD_ERR_NO_SPACE = 3, - PNFS_OSD_ERR_BAD_CRED = 4, - PNFS_OSD_ERR_NO_ACCESS = 5, - PNFS_OSD_ERR_UNREACHABLE = 6, - PNFS_OSD_ERR_RESOURCE = 7 -}; - -/* struct pnfs_osd_ioerr4 { - * pnfs_osd_objid4 oer_component; - * length4 oer_comp_offset; - * length4 oer_comp_length; - * bool 
oer_iswrite; - * pnfs_osd_errno4 oer_errno; - * }; - */ -struct pnfs_osd_ioerr { - struct pnfs_osd_objid oer_component; - u64 oer_comp_offset; - u64 oer_comp_length; - u32 oer_iswrite; - u32 oer_errno; -}; - -/* OSD XDR Client API */ -/* Layout helpers */ -/* Layout decoding is done in two parts: - * 1. First Call pnfs_osd_xdr_decode_layout_map to read in only the header part - * of the layout. @iter members need not be initialized. - * Returned: - * @layout members are set. (@layout->olo_comps set to NULL). - * - * Zero on success, or negative error if passed xdr is broken. - * - * 2. 2nd Call pnfs_osd_xdr_decode_layout_comp() in a loop until it returns - * false, to decode the next component. - * Returned: - * true if there is more to decode or false if we are done or error. - * - * Example: - * struct pnfs_osd_xdr_decode_layout_iter iter; - * struct pnfs_osd_layout layout; - * struct pnfs_osd_object_cred comp; - * int status; - * - * status = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr); - * if (unlikely(status)) - * goto err; - * while(pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &status)) { - * // All of @comp strings point to inside the xdr_buffer - * // or scrach buffer. Copy them out to user memory eg. - * copy_single_comp(dest_comp++, &comp); - * } - * if (unlikely(status)) - * goto err; - */ - -struct pnfs_osd_xdr_decode_layout_iter { - unsigned total_comps; - unsigned decoded_comps; -}; - -extern int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout, - struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr); - -extern bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp, - struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr, - int *err); - -/* Device Info helpers */ - -/* Note: All strings inside @deviceaddr point to space inside @p. - * @p should stay valid while @deviceaddr is in use. 
- */ -extern void pnfs_osd_xdr_decode_deviceaddr( - struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p); - -/* layoutupdate (layout_commit) xdr helpers */ -extern int -pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr, - struct pnfs_osd_layoutupdate *lou); - -/* osd_ioerror encoding (layout_return) */ -extern __be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr); -extern void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr); - -#endif /* __PNFS_OSD_XDR_H__ */ diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index dd24756a8af7..c417abd2ab70 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -78,7 +78,7 @@ enum max17042_register { MAX17042_T_empty = 0x34, MAX17042_FullCAP0 = 0x35, - MAX17042_LAvg_empty = 0x36, + MAX17042_IAvg_empty = 0x36, MAX17042_FCTC = 0x37, MAX17042_RCOMP0 = 0x38, MAX17042_TempCo = 0x39, @@ -221,7 +221,7 @@ struct max17042_config_data { u16 fullcap; /* 0x10 */ u16 fullcapnom; /* 0x23 */ u16 socempty; /* 0x33 */ - u16 lavg_empty; /* 0x36 */ + u16 iavg_empty; /* 0x36 */ u16 dqacc; /* 0x45 */ u16 dpacc; /* 0x46 */ u16 qrtbl00; /* 0x12 */ diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index b5ebf6c01292..8aee2945ff08 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -362,29 +362,25 @@ static inline void user_single_step_report(struct pt_regs *regs) #ifndef arch_ptrace_stop_needed /** * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called - * @code: current->exit_code value ptrace will stop with - * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with * * This is called with the siglock held, to decide whether or not it's - * necessary to release the siglock and call arch_ptrace_stop() with the - * same @code and @info arguments. It can be defined to a constant if - * arch_ptrace_stop() is never required, or always is. On machines where - * this makes sense, it should be defined to a quick test to optimize out - * calling arch_ptrace_stop() when it would be superfluous. For example, - * if the thread has not been back to user mode since the last stop, the - * thread state might indicate that nothing needs to be done. + * necessary to release the siglock and call arch_ptrace_stop(). It can be + * defined to a constant if arch_ptrace_stop() is never required, or always + * is. On machines where this makes sense, it should be defined to a quick + * test to optimize out calling arch_ptrace_stop() when it would be + * superfluous. For example, if the thread has not been back to user mode + * since the last stop, the thread state might indicate that nothing needs + * to be done. * * This is guaranteed to be invoked once before a task stops for ptrace and * may include arch-specific operations necessary prior to a ptrace stop. */ -#define arch_ptrace_stop_needed(code, info) (0) +#define arch_ptrace_stop_needed() (0) #endif #ifndef arch_ptrace_stop /** * arch_ptrace_stop - Do machine-specific work before stopping for ptrace - * @code: current->exit_code value ptrace will stop with - * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with * * This is called with no locks held when arch_ptrace_stop_needed() has * just returned nonzero. It is allowed to block, e.g. for user memory @@ -394,7 +390,7 @@ static inline void user_single_step_report(struct pt_regs *regs) * we only do it when the arch requires it for this particular stop, as * indicated by arch_ptrace_stop_needed(). 
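Since the ptrace hooks above are now parameterless, an architecture that does need pre-stop work would override them roughly as follows. Both the thread flag and the flush helper in this sketch are invented for illustration; most architectures simply keep the default definitions:

/* Hypothetical arch override; TIF_EXAMPLE_FLUSH_PENDING and
 * example_flush_user_state() do not exist in any real architecture. */
#define arch_ptrace_stop_needed() \
        test_thread_flag(TIF_EXAMPLE_FLUSH_PENDING)

#define arch_ptrace_stop() \
        do { example_flush_user_state(current); } while (0)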
*/ -#define arch_ptrace_stop(code, info) do { } while (0) +#define arch_ptrace_stop() do { } while (0) #endif #ifndef current_pt_regs diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 725c9b784e60..e6dac95e4960 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -429,16 +429,19 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev, #else static inline struct pwm_device *pwm_request(int pwm_id, const char *label) { + might_sleep(); return ERR_PTR(-ENODEV); } static inline void pwm_free(struct pwm_device *pwm) { + might_sleep(); } static inline int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state) { + might_sleep(); return -ENOTSUPP; } @@ -450,6 +453,7 @@ static inline int pwm_adjust_config(struct pwm_device *pwm) static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns) { + might_sleep(); return -EINVAL; } @@ -462,11 +466,13 @@ static inline int pwm_capture(struct pwm_device *pwm, static inline int pwm_enable(struct pwm_device *pwm) { + might_sleep(); return -EINVAL; } static inline void pwm_disable(struct pwm_device *pwm) { + might_sleep(); } static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data) @@ -493,12 +499,14 @@ static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label) { + might_sleep(); return ERR_PTR(-ENODEV); } static inline struct pwm_device *pwm_get(struct device *dev, const char *consumer) { + might_sleep(); return ERR_PTR(-ENODEV); } @@ -506,16 +514,19 @@ static inline struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np, const char *con_id) { + might_sleep(); return ERR_PTR(-ENODEV); } static inline void pwm_put(struct pwm_device *pwm) { + might_sleep(); } static inline struct pwm_device *devm_pwm_get(struct device *dev, const char *consumer) { + might_sleep(); return ERR_PTR(-ENODEV); } @@ -523,6 +534,7 @@ static inline struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, const char *con_id) { + might_sleep(); return ERR_PTR(-ENODEV); } @@ -530,6 +542,7 @@ static inline struct pwm_device * devm_fwnode_pwm_get(struct device *dev, struct fwnode_handle *fwnode, const char *con_id) { + might_sleep(); return ERR_PTR(-ENODEV); } #endif diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 64ad900ac742..f7c1d21c2f39 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -9,8 +9,10 @@ #define _LINUX_RADIX_TREE_H #include <linux/bitops.h> -#include <linux/kernel.h> +#include <linux/gfp.h> #include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/math.h> #include <linux/percpu.h> #include <linux/preempt.h> #include <linux/rcupdate.h> diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 83c09ac36b13..e0600e1e5c17 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -684,18 +684,6 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, void *priv); int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine); -static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) -{ - return container_of(vdev->dev.parent, struct rproc_vdev, dev); -} - -static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) -{ - struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); - - return rvdev->rproc; -} - void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev); void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev); diff --git 
a/include/linux/rpmsg.h b/include/linux/rpmsg.h index d97dcd049f18..02fa9116cd60 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h @@ -186,6 +186,8 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, poll_table *wait); +ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept); + #else static inline int rpmsg_register_device(struct rpmsg_device *rpdev) @@ -231,7 +233,7 @@ static inline struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev /* This shouldn't be possible */ WARN_ON(1); - return ERR_PTR(-ENXIO); + return NULL; } static inline int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len) @@ -296,6 +298,14 @@ static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, return 0; } +static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept) +{ + /* This shouldn't be possible */ + WARN_ON(1); + + return -ENXIO; +} + #endif /* IS_ENABLED(CONFIG_RPMSG) */ /* use a macro to avoid include chaining to get THIS_MODULE */ diff --git a/include/linux/rtc.h b/include/linux/rtc.h index bd611e26291d..47fd1c2d3a57 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h @@ -66,6 +66,8 @@ struct rtc_class_ops { int (*alarm_irq_enable)(struct device *, unsigned int enabled); int (*read_offset)(struct device *, long *offset); int (*set_offset)(struct device *, long offset); + int (*param_get)(struct device *, struct rtc_param *param); + int (*param_set)(struct device *, struct rtc_param *param); }; struct rtc_device; @@ -80,6 +82,7 @@ struct rtc_timer { /* flags */ #define RTC_DEV_BUSY 0 +#define RTC_NO_CDEV 1 struct rtc_device { struct device dev; diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index 352c6127cb90..f9348769e558 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -11,7 +11,6 @@ #include <linux/linkage.h> #include <linux/types.h> -#include <linux/kernel.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/atomic.h> diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 4a6ff274335a..fc0357a6e19b 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -9,8 +9,17 @@ #ifndef __LINUX_SCALE_BITMAP_H #define __LINUX_SCALE_BITMAP_H -#include <linux/kernel.h> +#include <linux/atomic.h> +#include <linux/bitops.h> +#include <linux/cache.h> +#include <linux/list.h> +#include <linux/log2.h> +#include <linux/minmax.h> +#include <linux/percpu.h> #include <linux/slab.h> +#include <linux/smp.h> +#include <linux/types.h> +#include <linux/wait.h> struct seq_file; diff --git a/include/linux/sched.h b/include/linux/sched.h index 6f6f8f340a0f..78c351e35fec 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1661,6 +1661,7 @@ extern struct pid *cad_pid; #define PF_VCPU 0x00000001 /* I'm a virtual CPU */ #define PF_IDLE 0x00000002 /* I am an IDLE thread */ #define PF_EXITING 0x00000004 /* Getting shut down */ +#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */ #define PF_IO_WORKER 0x00000010 /* Task is an IO worker */ #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index e5f4ce622ee6..23505394ef70 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -72,6 +72,17 @@ struct multiprocess_signals { struct hlist_node node; }; +struct core_thread { + struct task_struct *task; + struct core_thread *next; +}; + 
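On the rpmsg change above: exporting rpmsg_get_mtu() lets a client driver size its messages to the transport instead of guessing. A short sketch, assuming a driver that holds a struct rpmsg_device *rpdev and leaves fragmentation to its caller:

#include <linux/rpmsg.h>

static int example_rpmsg_send(struct rpmsg_device *rpdev,
                              const void *data, size_t len)
{
        ssize_t mtu = rpmsg_get_mtu(rpdev->ept);

        if (mtu < 0)
                return mtu;             /* transport cannot report an MTU */

        if (len > (size_t)mtu)
                return -EMSGSIZE;       /* caller is expected to fragment */

        return rpmsg_send(rpdev->ept, (void *)data, len);
}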
+struct core_state { + atomic_t nr_threads; + struct core_thread dumper; + struct completion startup; +}; + /* * NOTE! "signal_struct" does not have its own * locking, because a shared signal_struct always @@ -110,6 +121,8 @@ struct signal_struct { int group_stop_count; unsigned int flags; /* see SIGNAL_* flags below */ + struct core_state *core_state; /* coredumping support */ + /* * PR_SET_CHILD_SUBREAPER marks a process, like a service * manager, to re-parent orphan (double-forking) child processes @@ -338,6 +351,7 @@ extern int kill_pid(struct pid *pid, int sig, int priv); extern __must_check bool do_notify_parent(struct task_struct *, int); extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); extern void force_sig(int); +extern void force_fatal_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); extern struct sigqueue *sigqueue_alloc(void); diff --git a/include/linux/security.h b/include/linux/security.h index 7e0ba63b5dde..06eac4e61a13 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -179,7 +179,7 @@ struct xfrm_policy; struct xfrm_state; struct xfrm_user_sec_ctx; struct seq_file; -struct sctp_endpoint; +struct sctp_association; #ifdef CONFIG_MMU extern unsigned long mmap_min_addr; @@ -1425,11 +1425,13 @@ int security_tun_dev_create(void); int security_tun_dev_attach_queue(void *security); int security_tun_dev_attach(struct sock *sk, void *security); int security_tun_dev_open(void *security); -int security_sctp_assoc_request(struct sctp_endpoint *ep, struct sk_buff *skb); +int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb); int security_sctp_bind_connect(struct sock *sk, int optname, struct sockaddr *address, int addrlen); -void security_sctp_sk_clone(struct sctp_endpoint *ep, struct sock *sk, +void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk); +void security_sctp_assoc_established(struct sctp_association *asoc, + struct sk_buff *skb); #else /* CONFIG_SECURITY_NETWORK */ static inline int security_unix_stream_connect(struct sock *sock, @@ -1631,7 +1633,7 @@ static inline int security_tun_dev_open(void *security) return 0; } -static inline int security_sctp_assoc_request(struct sctp_endpoint *ep, +static inline int security_sctp_assoc_request(struct sctp_association *asoc, struct sk_buff *skb) { return 0; @@ -1644,11 +1646,16 @@ static inline int security_sctp_bind_connect(struct sock *sk, int optname, return 0; } -static inline void security_sctp_sk_clone(struct sctp_endpoint *ep, +static inline void security_sctp_sk_clone(struct sctp_association *asoc, struct sock *sk, struct sock *newsk) { } + +static inline void security_sctp_assoc_established(struct sctp_association *asoc, + struct sk_buff *skb) +{ +} #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index dd99569595fd..72dbb44a4573 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -4,6 +4,7 @@ #include <linux/types.h> #include <linux/string.h> +#include <linux/string_helpers.h> #include <linux/bug.h> #include <linux/mutex.h> #include <linux/cpumask.h> @@ -135,7 +136,21 @@ static inline void seq_escape_str(struct seq_file *m, const char *src, seq_escape_mem(m, src, strlen(src), flags, esc); } -void seq_escape(struct seq_file *m, const char *s, const char *esc); +/** + * seq_escape - print string into buffer, escaping some characters 
+ * @m: target buffer + * @s: NULL-terminated string + * @esc: set of characters that need escaping + * + * Puts string into buffer, replacing each occurrence of character from + * @esc with usual octal escape. + * + * Use seq_has_overflowed() to check for errors. + */ +static inline void seq_escape(struct seq_file *m, const char *s, const char *esc) +{ + seq_escape_str(m, s, ESCAPE_OCTAL, esc); +} void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type, int rowsize, int groupsize, const void *buf, size_t len, @@ -194,7 +209,7 @@ static const struct file_operations __name ## _fops = { \ #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ - return single_open(file, __name ## _show, inode->i_private); \ + return single_open(file, __name ## _show, PDE_DATA(inode)); \ } \ \ static const struct proc_ops __name ## _proc_ops = { \ diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h deleted file mode 100644 index 3cca2b8fac43..000000000000 --- a/include/linux/seqno-fence.h +++ /dev/null @@ -1,109 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * seqno-fence, using a dma-buf to synchronize fencing - * - * Copyright (C) 2012 Texas Instruments - * Copyright (C) 2012 Canonical Ltd - * Authors: - * Rob Clark <robdclark@gmail.com> - * Maarten Lankhorst <maarten.lankhorst@canonical.com> - */ - -#ifndef __LINUX_SEQNO_FENCE_H -#define __LINUX_SEQNO_FENCE_H - -#include <linux/dma-fence.h> -#include <linux/dma-buf.h> - -enum seqno_fence_condition { - SEQNO_FENCE_WAIT_GEQUAL, - SEQNO_FENCE_WAIT_NONZERO -}; - -struct seqno_fence { - struct dma_fence base; - - const struct dma_fence_ops *ops; - struct dma_buf *sync_buf; - uint32_t seqno_ofs; - enum seqno_fence_condition condition; -}; - -extern const struct dma_fence_ops seqno_fence_ops; - -/** - * to_seqno_fence - cast a fence to a seqno_fence - * @fence: fence to cast to a seqno_fence - * - * Returns NULL if the fence is not a seqno_fence, - * or the seqno_fence otherwise. - */ -static inline struct seqno_fence * -to_seqno_fence(struct dma_fence *fence) -{ - if (fence->ops != &seqno_fence_ops) - return NULL; - return container_of(fence, struct seqno_fence, base); -} - -/** - * seqno_fence_init - initialize a seqno fence - * @fence: seqno_fence to initialize - * @lock: pointer to spinlock to use for fence - * @sync_buf: buffer containing the memory location to signal on - * @context: the execution context this fence is a part of - * @seqno_ofs: the offset within @sync_buf - * @seqno: the sequence # to signal on - * @cond: fence wait condition - * @ops: the fence_ops for operations on this seqno fence - * - * This function initializes a struct seqno_fence with passed parameters, - * and takes a reference on sync_buf which is released on fence destruction. - * - * A seqno_fence is a dma_fence which can complete in software when - * enable_signaling is called, but it also completes when - * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true - * - * The seqno_fence will take a refcount on the sync_buf until it's - * destroyed, but actual lifetime of sync_buf may be longer if one of the - * callers take a reference to it. - * - * Certain hardware have instructions to insert this type of wait condition - * in the command stream, so no intervention from software would be needed. - * This type of fence can be destroyed before completed, however a reference - * on the sync_buf dma-buf can be taken. 
It is encouraged to re-use the same - * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the - * device's vm can be expensive. - * - * It is recommended for creators of seqno_fence to call dma_fence_signal() - * before destruction. This will prevent possible issues from wraparound at - * time of issue vs time of check, since users can check dma_fence_is_signaled() - * before submitting instructions for the hardware to wait on the fence. - * However, when ops.enable_signaling is not called, it doesn't have to be - * done as soon as possible, just before there's any real danger of seqno - * wraparound. - */ -static inline void -seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, - struct dma_buf *sync_buf, uint32_t context, - uint32_t seqno_ofs, uint32_t seqno, - enum seqno_fence_condition cond, - const struct dma_fence_ops *ops) -{ - BUG_ON(!fence || !sync_buf || !ops); - BUG_ON(!ops->wait || !ops->enable_signaling || - !ops->get_driver_name || !ops->get_timeline_name); - - /* - * ops is used in dma_fence_init for get_driver_name, so needs to be - * initialized first - */ - fence->ops = ops; - dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); - get_dma_buf(sync_buf); - fence->sync_buf = sync_buf; - fence->seqno_ofs = seqno_ofs; - fence->condition = cond; -} - -#endif /* __LINUX_SEQNO_FENCE_H */ diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h index 9814fff58a69..76fbf92b04d9 100644 --- a/include/linux/shrinker.h +++ b/include/linux/shrinker.h @@ -93,4 +93,5 @@ extern void register_shrinker_prepared(struct shrinker *shrinker); extern int register_shrinker(struct shrinker *shrinker); extern void unregister_shrinker(struct shrinker *shrinker); extern void free_prealloced_shrinker(struct shrinker *shrinker); +extern void synchronize_shrinkers(void); #endif diff --git a/include/linux/signal.h b/include/linux/signal.h index 7d34105e20c6..a6db6f2ae113 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -126,7 +126,6 @@ static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2) #define sigmask(sig) (1UL << ((sig) - 1)) #ifndef __HAVE_ARCH_SIG_SETOPS -#include <linux/string.h> #define _SIG_SET_BINOP(name, op) \ static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ diff --git a/include/linux/signal_types.h b/include/linux/signal_types.h index 34cb28b8f16c..a70b2bdbf4d9 100644 --- a/include/linux/signal_types.h +++ b/include/linux/signal_types.h @@ -70,6 +70,9 @@ struct ksignal { int sig; }; +/* Used to kill the race between sigaction and forced signals */ +#define SA_IMMUTABLE 0x00800000 + #ifndef __ARCH_UAPI_SA_FLAGS #ifdef SA_RESTORER #define __ARCH_UAPI_SA_FLAGS SA_RESTORER diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0bd6520329f6..686a666d073d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -454,9 +454,15 @@ enum { * all frags to avoid possible bad checksum */ SKBFL_SHARED_FRAG = BIT(1), + + /* segment contains only zerocopy data and should not be + * charged to the kernel memory. + */ + SKBFL_PURE_ZEROCOPY = BIT(2), }; #define SKBFL_ZEROCOPY_FRAG (SKBFL_ZEROCOPY_ENABLE | SKBFL_SHARED_FRAG) +#define SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY) /* * The callback notifies userspace to release buffers when skb DMA is done in @@ -1464,6 +1470,17 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) return is_zcopy ? 
skb_uarg(skb) : NULL; } +static inline bool skb_zcopy_pure(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->flags & SKBFL_PURE_ZEROCOPY; +} + +static inline bool skb_pure_zcopy_same(const struct sk_buff *skb1, + const struct sk_buff *skb2) +{ + return skb_zcopy_pure(skb1) == skb_zcopy_pure(skb2); +} + static inline void net_zcopy_get(struct ubuf_info *uarg) { refcount_inc(&uarg->refcnt); @@ -1528,7 +1545,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) if (!skb_zcopy_is_nouarg(skb)) uarg->callback(skb, uarg, zerocopy_success); - skb_shinfo(skb)->flags &= ~SKBFL_ZEROCOPY_FRAG; + skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; } } @@ -1675,6 +1692,22 @@ static inline int skb_unclone(struct sk_buff *skb, gfp_t pri) return 0; } +/* This variant of skb_unclone() makes sure skb->truesize is not changed */ +static inline int skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) +{ + might_sleep_if(gfpflags_allow_blocking(pri)); + + if (skb_cloned(skb)) { + unsigned int save = skb->truesize; + int res; + + res = pskb_expand_head(skb, 0, 0, pri); + skb->truesize = save; + return res; + } + return 0; +} + /** * skb_header_cloned - is the header a clone * @skb: buffer to check diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index b4256847c707..584d94be9c8b 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -507,6 +507,18 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock) return !!psock->saved_data_ready; } +static inline bool sk_is_tcp(const struct sock *sk) +{ + return sk->sk_type == SOCK_STREAM && + sk->sk_protocol == IPPROTO_TCP; +} + +static inline bool sk_is_udp(const struct sock *sk) +{ + return sk->sk_type == SOCK_DGRAM && + sk->sk_protocol == IPPROTO_UDP; +} + #if IS_ENABLED(CONFIG_NET_SOCK_MSG) #define BPF_F_STRPARSER (1UL << 1) diff --git a/include/linux/slab.h b/include/linux/slab.h index 083f3ce550bc..181045148b06 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -142,8 +142,6 @@ struct mem_cgroup; void __init kmem_cache_init(void); bool slab_is_available(void); -extern bool usercopy_fallback; - struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, unsigned int align, slab_flags_t flags, void (*ctor)(void *)); @@ -152,8 +150,8 @@ struct kmem_cache *kmem_cache_create_usercopy(const char *name, slab_flags_t flags, unsigned int useroffset, unsigned int usersize, void (*ctor)(void *)); -void kmem_cache_destroy(struct kmem_cache *); -int kmem_cache_shrink(struct kmem_cache *); +void kmem_cache_destroy(struct kmem_cache *s); +int kmem_cache_shrink(struct kmem_cache *s); /* * Please use this macro to create slab caches. 
Simply specify the @@ -181,11 +179,11 @@ int kmem_cache_shrink(struct kmem_cache *); /* * Common kmalloc functions provided by all allocators */ -void * __must_check krealloc(const void *, size_t, gfp_t); -void kfree(const void *); -void kfree_sensitive(const void *); -size_t __ksize(const void *); -size_t ksize(const void *); +void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2); +void kfree(const void *objp); +void kfree_sensitive(const void *objp); +size_t __ksize(const void *objp); +size_t ksize(const void *objp); #ifdef CONFIG_PRINTK bool kmem_valid_obj(void *object); void kmem_dump_obj(void *object); @@ -425,9 +423,9 @@ static __always_inline unsigned int __kmalloc_index(size_t size, #define kmalloc_index(s) __kmalloc_index(s, true) #endif /* !CONFIG_SLOB */ -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc; -void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc; -void kmem_cache_free(struct kmem_cache *, void *); +void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); +void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc; +void kmem_cache_free(struct kmem_cache *s, void *objp); /* * Bulk allocation and freeing operations. These are accelerated in an @@ -436,8 +434,8 @@ void kmem_cache_free(struct kmem_cache *, void *); * * Note that interrupts must be enabled when calling these functions. */ -void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **); -int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **); +void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p); +int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p); /* * Caller must not use kfree_bulk() on memory not originally allocated @@ -449,10 +447,12 @@ static __always_inline void kfree_bulk(size_t size, void **p) } #ifdef CONFIG_NUMA -void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc; -void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc; +void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment + __alloc_size(1); +void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment + __malloc; #else -static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __kmalloc(size, flags); } @@ -464,25 +464,24 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t f #endif #ifdef CONFIG_TRACING -extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc; +extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size) + __assume_slab_alignment __alloc_size(3); #ifdef CONFIG_NUMA -extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, - gfp_t gfpflags, - int node, size_t size) __assume_slab_alignment __malloc; +extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, + int node, size_t size) __assume_slab_alignment + __alloc_size(4); #else -static __always_inline void * -kmem_cache_alloc_node_trace(struct kmem_cache *s, - gfp_t gfpflags, - int node, size_t size) +static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s, + gfp_t gfpflags, int node, size_t size) { 
return kmem_cache_alloc_trace(s, gfpflags, size); } #endif /* CONFIG_NUMA */ #else /* CONFIG_TRACING */ -static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, - gfp_t flags, size_t size) +static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s, + gfp_t flags, size_t size) { void *ret = kmem_cache_alloc(s, flags); @@ -490,10 +489,8 @@ static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, return ret; } -static __always_inline void * -kmem_cache_alloc_node_trace(struct kmem_cache *s, - gfp_t gfpflags, - int node, size_t size) +static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, + int node, size_t size) { void *ret = kmem_cache_alloc_node(s, gfpflags, node); @@ -502,19 +499,21 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; +extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment + __alloc_size(1); #ifdef CONFIG_TRACING -extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc; +extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) + __assume_page_alignment __alloc_size(1); #else -static __always_inline void * -kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) +static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags, + unsigned int order) { return kmalloc_order(size, flags, order); } #endif -static __always_inline void *kmalloc_large(size_t size, gfp_t flags) +static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags) { unsigned int order = get_order(size); return kmalloc_order_trace(size, flags, order); @@ -574,7 +573,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) * Try really hard to succeed the allocation but fail * eventually. */ -static __always_inline void *kmalloc(size_t size, gfp_t flags) +static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags) { if (__builtin_constant_p(size)) { #ifndef CONFIG_SLOB @@ -596,7 +595,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) return __kmalloc(size, flags); } -static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node) { #ifndef CONFIG_SLOB if (__builtin_constant_p(size) && @@ -620,7 +619,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ -static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) +static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags) { size_t bytes; @@ -638,8 +637,10 @@ static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) * @new_size: new size of a single member of the array * @flags: the type of memory to allocate (see kmalloc) */ -static __must_check inline void * -krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags) +static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p, + size_t new_n, + size_t new_size, + gfp_t flags) { size_t bytes; @@ -655,7 +656,7 @@ krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags) * @size: element size. 
* @flags: the type of memory to allocate (see kmalloc). */ -static inline void *kcalloc(size_t n, size_t size, gfp_t flags) +static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags) { return kmalloc_array(n, size, flags | __GFP_ZERO); } @@ -668,12 +669,13 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) * allocator where we care about the real place the memory allocation * request comes from. */ -extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); +extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) + __alloc_size(1); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) -static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, - int node) +static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, + int node) { size_t bytes; @@ -684,14 +686,15 @@ static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, return __kmalloc_node(bytes, flags, node); } -static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) +static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) { return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); } #ifdef CONFIG_NUMA -extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); +extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, + unsigned long caller) __alloc_size(1); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ _RET_IP_) @@ -716,7 +719,7 @@ static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). */ -static inline void *kzalloc(size_t size, gfp_t flags) +static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags) { return kmalloc(size, flags | __GFP_ZERO); } @@ -727,11 +730,45 @@ static inline void *kzalloc(size_t size, gfp_t flags) * @flags: the type of memory to allocate (see kmalloc). 
* @node: memory node from which to allocate */ -static inline void *kzalloc_node(size_t size, gfp_t flags, int node) +static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node) { return kmalloc_node(size, flags | __GFP_ZERO, node); } +extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1); +static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags) +{ + return kvmalloc_node(size, flags, NUMA_NO_NODE); +} +static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node) +{ + return kvmalloc_node(size, flags | __GFP_ZERO, node); +} +static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags) +{ + return kvmalloc(size, flags | __GFP_ZERO); +} + +static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags) +{ + size_t bytes; + + if (unlikely(check_mul_overflow(n, size, &bytes))) + return NULL; + + return kvmalloc(bytes, flags); +} + +static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags) +{ + return kvmalloc_array(n, size, flags | __GFP_ZERO); +} + +extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) + __alloc_size(3); +extern void kvfree(const void *addr); +extern void kvfree_sensitive(const void *addr, size_t len); + unsigned int kmem_cache_size(struct kmem_cache *s); void __init kmem_cache_init_late(void); diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 85499f0586b0..0fa751b946fa 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -99,6 +99,8 @@ struct kmem_cache { #ifdef CONFIG_SLUB_CPU_PARTIAL /* Number of per cpu partial objects to keep around */ unsigned int cpu_partial; + /* Number of per cpu partial pages to keep around */ + unsigned int cpu_partial_pages; #endif struct kmem_cache_order_objects oo; @@ -141,17 +143,6 @@ struct kmem_cache { struct kmem_cache_node *node[MAX_NUMNODES]; }; -#ifdef CONFIG_SLUB_CPU_PARTIAL -#define slub_cpu_partial(s) ((s)->cpu_partial) -#define slub_set_cpu_partial(s, n) \ -({ \ - slub_cpu_partial(s) = (n); \ -}) -#else -#define slub_cpu_partial(s) (0) -#define slub_set_cpu_partial(s, n) -#endif /* CONFIG_SLUB_CPU_PARTIAL */ - #ifdef CONFIG_SYSFS #define SLAB_SUPPORTS_SYSFS void sysfs_slab_unlink(struct kmem_cache *); diff --git a/include/linux/smp.h b/include/linux/smp.h index 510519e8a1eb..a80ab58ae3f1 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -108,7 +108,6 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func, #ifdef CONFIG_SMP #include <linux/preempt.h> -#include <linux/kernel.h> #include <linux/compiler.h> #include <linux/thread_info.h> #include <asm/smp.h> diff --git a/include/linux/soc/mediatek/mtk-mmsys.h b/include/linux/soc/mediatek/mtk-mmsys.h index 2228bf6133da..4bba275e235a 100644 --- a/include/linux/soc/mediatek/mtk-mmsys.h +++ b/include/linux/soc/mediatek/mtk-mmsys.h @@ -29,13 +29,16 @@ enum mtk_ddp_comp_id { DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_OVL_2L1, + DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_OVL1, + DDP_COMPONENT_POSTMASK0, DDP_COMPONENT_PWM0, DDP_COMPONENT_PWM1, DDP_COMPONENT_PWM2, DDP_COMPONENT_RDMA0, DDP_COMPONENT_RDMA1, DDP_COMPONENT_RDMA2, + DDP_COMPONENT_RDMA4, DDP_COMPONENT_UFOE, DDP_COMPONENT_WDMA0, DDP_COMPONENT_WDMA1, diff --git a/include/linux/soc/qcom/apr.h b/include/linux/soc/qcom/apr.h index 137f9f2ac4c3..23c5b30f3511 100644 --- a/include/linux/soc/qcom/apr.h +++ b/include/linux/soc/qcom/apr.h @@ -7,6 +7,7 @@ #include <linux/device.h> #include 
<linux/mod_devicetable.h> #include <dt-bindings/soc/qcom,apr.h> +#include <dt-bindings/soc/qcom,gpr.h> extern struct bus_type aprbus; @@ -75,10 +76,65 @@ struct apr_resp_pkt { int payload_size; }; +struct gpr_hdr { + uint32_t version:4; + uint32_t hdr_size:4; + uint32_t pkt_size:24; + uint32_t dest_domain:8; + uint32_t src_domain:8; + uint32_t reserved:16; + uint32_t src_port; + uint32_t dest_port; + uint32_t token; + uint32_t opcode; +} __packed; + +struct gpr_pkt { + struct gpr_hdr hdr; + uint32_t payload[]; +}; + +struct gpr_resp_pkt { + struct gpr_hdr hdr; + void *payload; + int payload_size; +}; + +#define GPR_HDR_SIZE sizeof(struct gpr_hdr) +#define GPR_PKT_VER 0x0 +#define GPR_PKT_HEADER_WORD_SIZE ((sizeof(struct gpr_pkt) + 3) >> 2) +#define GPR_PKT_HEADER_BYTE_SIZE (GPR_PKT_HEADER_WORD_SIZE << 2) + +#define GPR_BASIC_RSP_RESULT 0x02001005 + +struct gpr_ibasic_rsp_result_t { + uint32_t opcode; + uint32_t status; +}; + +#define GPR_BASIC_EVT_ACCEPTED 0x02001006 + +struct gpr_ibasic_rsp_accepted_t { + uint32_t opcode; +}; + /* Bits 0 to 15 -- Minor version, Bits 16 to 31 -- Major version */ #define APR_SVC_MAJOR_VERSION(v) ((v >> 16) & 0xFF) #define APR_SVC_MINOR_VERSION(v) (v & 0xFF) +typedef int (*gpr_port_cb) (struct gpr_resp_pkt *d, void *priv, int op); +struct packet_router; +struct pkt_router_svc { + struct device *dev; + gpr_port_cb callback; + struct packet_router *pr; + spinlock_t lock; + int id; + void *priv; +}; + +typedef struct pkt_router_svc gpr_port_t; + struct apr_device { struct device dev; uint16_t svc_id; @@ -86,21 +142,26 @@ struct apr_device { uint32_t version; char name[APR_NAME_SIZE]; const char *service_path; - spinlock_t lock; + struct pkt_router_svc svc; struct list_head node; }; +typedef struct apr_device gpr_device_t; + #define to_apr_device(d) container_of(d, struct apr_device, dev) +#define svc_to_apr_device(d) container_of(d, struct apr_device, svc) struct apr_driver { int (*probe)(struct apr_device *sl); int (*remove)(struct apr_device *sl); int (*callback)(struct apr_device *a, struct apr_resp_pkt *d); + int (*gpr_callback)(struct gpr_resp_pkt *d, void *data, int op); struct device_driver driver; const struct apr_device_id *id_table; }; +typedef struct apr_driver gpr_driver_t; #define to_apr_driver(d) container_of(d, struct apr_driver, driver) /* @@ -123,7 +184,14 @@ void apr_driver_unregister(struct apr_driver *drv); #define module_apr_driver(__apr_driver) \ module_driver(__apr_driver, apr_driver_register, \ apr_driver_unregister) +#define module_gpr_driver(__gpr_driver) module_apr_driver(__gpr_driver) int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt); +gpr_port_t *gpr_alloc_port(gpr_device_t *gdev, struct device *dev, + gpr_port_cb cb, void *priv); +void gpr_free_port(gpr_port_t *port); +int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt); +int gpr_send_pkt(gpr_device_t *gdev, struct gpr_pkt *pkt); + #endif /* __QCOM_APR_H_ */ diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h new file mode 100644 index 000000000000..3c2a82e606f8 --- /dev/null +++ b/include/linux/soc/qcom/qcom_aoss.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ */ + +#ifndef __QCOM_AOSS_H__ +#define __QCOM_AOSS_H__ + +#include <linux/err.h> +#include <linux/device.h> + +struct qmp; + +#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP) + +int qmp_send(struct qmp *qmp, const void *data, size_t len); +struct qmp *qmp_get(struct device *dev); +void qmp_put(struct qmp *qmp); + +#else + +static inline int qmp_send(struct qmp *qmp, const void *data, size_t len) +{ + return -ENODEV; +} + +static inline struct qmp *qmp_get(struct device *dev) +{ + return ERR_PTR(-ENODEV); +} + +static inline void qmp_put(struct qmp *qmp) +{ +} + +#endif + +#endif diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index 60e66fc9b6bf..860dd8cdf9f3 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -38,6 +38,8 @@ struct qcom_smd_rpm; #define QCOM_SMD_RPM_IPA_CLK 0x617069 #define QCOM_SMD_RPM_CE_CLK 0x6563 #define QCOM_SMD_RPM_AGGR_CLK 0x72676761 +#define QCOM_SMD_RPM_HWKM_CLK 0x6d6b7768 +#define QCOM_SMD_RPM_PKA_CLK 0x616b70 int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, int state, diff --git a/include/linux/soc/samsung/exynos-chipid.h b/include/linux/soc/samsung/exynos-chipid.h index 8bca6763f99c..62f0e2531068 100644 --- a/include/linux/soc/samsung/exynos-chipid.h +++ b/include/linux/soc/samsung/exynos-chipid.h @@ -9,10 +9,8 @@ #define __LINUX_SOC_EXYNOS_CHIPID_H #define EXYNOS_CHIPID_REG_PRO_ID 0x00 -#define EXYNOS_SUBREV_MASK (0xf << 4) -#define EXYNOS_MAINREV_MASK (0xf << 0) -#define EXYNOS_REV_MASK (EXYNOS_SUBREV_MASK | \ - EXYNOS_MAINREV_MASK) +#define EXYNOS_REV_PART_MASK 0xf +#define EXYNOS_REV_PART_SHIFT 4 #define EXYNOS_MASK 0xfffff000 #define EXYNOS_CHIPID_REG_PKG_ID 0x04 diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index 1a5eaef3b7f2..d424c1aadf38 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h @@ -1,17 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* linux/spi/ads7846.h */ -/* Touchscreen characteristics vary between boards and models. The - * platform_data for the device's "struct device" holds this information. - * - * It's OK if the min/max values are zero. - */ -enum ads7846_filter { - ADS7846_FILTER_OK, - ADS7846_FILTER_REPEAT, - ADS7846_FILTER_IGNORE, -}; - struct ads7846_platform_data { u16 model; /* 7843, 7845, 7846, 7873. 
*/ u16 vref_delay_usecs; /* 0 for external vref; etc */ @@ -51,10 +40,6 @@ struct ads7846_platform_data { int gpio_pendown_debounce; /* platform specific debounce time for * the gpio_pendown */ int (*get_pendown_state)(void); - int (*filter_init) (const struct ads7846_platform_data *pdata, - void **filter_data); - int (*filter) (void *filter_data, int data_idx, int *val); - void (*filter_cleanup)(void *filter_data); void (*wait_for_sync)(void); bool wakeup; unsigned long irq_flags; diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h index 21449067aedb..e392c53758bc 100644 --- a/include/linux/spi/max7301.h +++ b/include/linux/spi/max7301.h @@ -31,6 +31,6 @@ struct max7301_platform_data { u32 input_pullup_active; }; -extern int __max730x_remove(struct device *dev); +extern void __max730x_remove(struct device *dev); extern int __max730x_probe(struct max7301 *ts); #endif diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index f0447062eecd..b4e5ca23f840 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -57,7 +57,6 @@ #include <linux/compiler.h> #include <linux/irqflags.h> #include <linux/thread_info.h> -#include <linux/kernel.h> #include <linux/stringify.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h index 6bb4bc1a5f54..c34b55a6e554 100644 --- a/include/linux/stackdepot.h +++ b/include/linux/stackdepot.h @@ -11,15 +11,24 @@ #ifndef _LINUX_STACKDEPOT_H #define _LINUX_STACKDEPOT_H +#include <linux/gfp.h> + typedef u32 depot_stack_handle_t; +depot_stack_handle_t __stack_depot_save(unsigned long *entries, + unsigned int nr_entries, + gfp_t gfp_flags, bool can_alloc); + depot_stack_handle_t stack_depot_save(unsigned long *entries, unsigned int nr_entries, gfp_t gfp_flags); unsigned int stack_depot_fetch(depot_stack_handle_t handle, unsigned long **entries); -unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); +int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size, + int spaces); + +void stack_depot_print(depot_stack_handle_t stack); #ifdef CONFIG_STACKDEPOT int stack_depot_init(void); diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 9edecb494e9e..bef158815e83 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h @@ -21,6 +21,7 @@ unsigned int stack_trace_save_tsk(struct task_struct *task, unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store, unsigned int size, unsigned int skipnr); unsigned int stack_trace_save_user(unsigned long *store, unsigned int size); +unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries); /* Internal interfaces. Do not use in generic code */ #ifdef CONFIG_ARCH_STACKWALK diff --git a/include/linux/string.h b/include/linux/string.h index 5a36608144a9..b6572aeca2f5 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -253,23 +253,8 @@ static inline const char *kbasename(const char *path) #include <linux/fortify-string.h> #endif -/** - * memcpy_and_pad - Copy one buffer to another with padding - * @dest: Where to copy to - * @dest_len: The destination buffer size - * @src: Where to copy from - * @count: The number of bytes to copy - * @pad: Character to use for padding if space is left in destination. 
- */ -static inline void memcpy_and_pad(void *dest, size_t dest_len, - const void *src, size_t count, int pad) -{ - if (dest_len > count) { - memcpy(dest, src, count); - memset(dest + count, pad, dest_len - count); - } else - memcpy(dest, src, dest_len); -} +void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, + int pad); /** * memset_after - Set a value after a struct member to the end of a struct diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h index 68189c4a2eb1..4ba39e1403b2 100644 --- a/include/linux/string_helpers.h +++ b/include/linux/string_helpers.h @@ -4,6 +4,7 @@ #include <linux/bits.h> #include <linux/ctype.h> +#include <linux/string.h> #include <linux/types.h> struct file; diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index a4661646adc9..267b7aeaf1a6 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -40,6 +40,7 @@ struct rpc_clnt { unsigned int cl_clid; /* client id */ struct list_head cl_clients; /* Global list of clients */ struct list_head cl_tasks; /* List of tasks */ + atomic_t cl_pid; /* task PID counter */ spinlock_t cl_lock; /* spinlock */ struct rpc_xprt __rcu * cl_xprt; /* transport */ const struct rpc_procinfo *cl_procinfo; /* procedure info */ diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index a237b8dbf608..db964bb63912 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -150,25 +150,13 @@ struct rpc_task_setup { #define RPC_TASK_MSG_PIN_WAIT 5 #define RPC_TASK_SIGNALLED 6 -#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) -#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define rpc_test_and_set_running(t) \ test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) -#define rpc_clear_running(t) \ - do { \ - smp_mb__before_atomic(); \ - clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \ - smp_mb__after_atomic(); \ - } while (0) +#define rpc_clear_running(t) clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) #define RPC_IS_QUEUED(t) test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) #define rpc_set_queued(t) set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) -#define rpc_clear_queued(t) \ - do { \ - smp_mb__before_atomic(); \ - clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \ - smp_mb__after_atomic(); \ - } while (0) +#define rpc_clear_queued(t) clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate) #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 064c96157d1f..0ae28ae6caf2 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -443,10 +443,7 @@ struct svc_version { /* Need xprt with congestion control */ bool vs_need_cong_ctrl; - /* Override dispatch function (e.g. when caching replies). - * A return value of 0 means drop the request. - * vs_dispatch == NULL means use default dispatcher. 
- */ + /* Dispatch function */ int (*vs_dispatch)(struct svc_rqst *, __be32 *); }; @@ -457,9 +454,11 @@ struct svc_procedure { /* process the request: */ __be32 (*pc_func)(struct svc_rqst *); /* XDR decode args: */ - int (*pc_decode)(struct svc_rqst *, __be32 *data); + bool (*pc_decode)(struct svc_rqst *rqstp, + struct xdr_stream *xdr); /* XDR encode result: */ - int (*pc_encode)(struct svc_rqst *, __be32 *data); + bool (*pc_encode)(struct svc_rqst *rqstp, + struct xdr_stream *xdr); /* XDR free result: */ void (*pc_release)(struct svc_rqst *); unsigned int pc_argsize; /* argument struct size */ @@ -532,8 +531,7 @@ int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, - struct page **pages, - struct kvec *first, size_t total); + struct xdr_buf *payload); char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total); diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h index 068e1982ad37..74bfdffaf7b0 100644 --- a/include/linux/surface_aggregator/controller.h +++ b/include/linux/surface_aggregator/controller.h @@ -792,8 +792,8 @@ enum ssam_event_mask { #define SSAM_EVENT_REGISTRY_KIP \ SSAM_EVENT_REGISTRY(SSAM_SSH_TC_KIP, 0x02, 0x27, 0x28) -#define SSAM_EVENT_REGISTRY_REG \ - SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, 0x02, 0x01, 0x02) +#define SSAM_EVENT_REGISTRY_REG(tid)\ + SSAM_EVENT_REGISTRY(SSAM_SSH_TC_REG, tid, 0x01, 0x02) /** * enum ssam_event_notifier_flags - Flags for event notifiers. diff --git a/include/linux/swap.h b/include/linux/swap.h index cdf0957a88a4..d1ea44b31f19 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -341,7 +341,6 @@ void workingset_update_node(struct xa_node *node); /* linux/mm/page_alloc.c */ extern unsigned long totalreserve_pages; -extern unsigned long nr_free_buffer_pages(void); /* Definition of global_zone_page_state not available yet */ #define nr_free_pages() global_zone_page_state(NR_FREE_PAGES) diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index b0cb2a9973f4..569272871375 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -45,7 +45,8 @@ extern void __init swiotlb_update_mem_attributes(void); phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys, size_t mapping_size, size_t alloc_size, - enum dma_data_direction dir, unsigned long attrs); + unsigned int alloc_aligned_mask, enum dma_data_direction dir, + unsigned long attrs); extern void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index 082f1d51957a..be24056ac00f 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h @@ -19,6 +19,7 @@ #define SWITCHTEC_EVENT_EN_CLI BIT(2) #define SWITCHTEC_EVENT_EN_IRQ BIT(3) #define SWITCHTEC_EVENT_FATAL BIT(4) +#define SWITCHTEC_EVENT_NOT_SUPP BIT(31) #define SWITCHTEC_DMA_MRPC_EN BIT(0) diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 3ebfea0781f1..a1f03461369b 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -197,7 +197,11 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, * @num_pages: number of locked pages * @dmabuf: dmabuf used to for exporting to user space * @flags: defined by TEE_SHM_* in tee_drv.h - * @id: unique id of a shared memory object on this device + * @id: unique id of a shared memory object on this device, shared + * with user 
space + * @sec_world_id: + * secure world assigned id of this shared memory object, not + * used by all drivers * * This pool is only supposed to be accessed directly from the TEE * subsystem and from drivers that implements their own shm pool manager. @@ -213,6 +217,7 @@ struct tee_shm { struct dma_buf *dmabuf; u32 flags; int id; + u64 sec_world_id; }; /** diff --git a/include/linux/tty.h b/include/linux/tty.h index 168e57e40bbb..5dbd7c5afac7 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -252,20 +252,20 @@ static inline bool tty_throttled(struct tty_struct *tty) } #ifdef CONFIG_TTY -extern void tty_kref_put(struct tty_struct *tty); -extern struct pid *tty_get_pgrp(struct tty_struct *tty); -extern void tty_vhangup_self(void); -extern void disassociate_ctty(int priv); -extern dev_t tty_devnum(struct tty_struct *tty); -extern void proc_clear_tty(struct task_struct *p); -extern struct tty_struct *get_current_tty(void); +void tty_kref_put(struct tty_struct *tty); +struct pid *tty_get_pgrp(struct tty_struct *tty); +void tty_vhangup_self(void); +void disassociate_ctty(int priv); +dev_t tty_devnum(struct tty_struct *tty); +void proc_clear_tty(struct task_struct *p); +struct tty_struct *get_current_tty(void); /* tty_io.c */ -extern int __init tty_init(void); -extern const char *tty_name(const struct tty_struct *tty); -extern struct tty_struct *tty_kopen_exclusive(dev_t device); -extern struct tty_struct *tty_kopen_shared(dev_t device); -extern void tty_kclose(struct tty_struct *tty); -extern int tty_dev_name_to_number(const char *name, dev_t *number); +int __init tty_init(void); +const char *tty_name(const struct tty_struct *tty); +struct tty_struct *tty_kopen_exclusive(dev_t device); +struct tty_struct *tty_kopen_shared(dev_t device); +void tty_kclose(struct tty_struct *tty); +int tty_dev_name_to_number(const char *name, dev_t *number); #else static inline void tty_kref_put(struct tty_struct *tty) { } @@ -296,7 +296,7 @@ static inline int tty_dev_name_to_number(const char *name, dev_t *number) extern struct ktermios tty_std_termios; -extern int vcs_init(void); +int vcs_init(void); extern struct class *tty_class; @@ -316,34 +316,34 @@ static inline struct tty_struct *tty_kref_get(struct tty_struct *tty) return tty; } -extern const char *tty_driver_name(const struct tty_struct *tty); -extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); -extern void stop_tty(struct tty_struct *tty); -extern void start_tty(struct tty_struct *tty); -extern void tty_write_message(struct tty_struct *tty, char *msg); -extern int tty_send_xchar(struct tty_struct *tty, char ch); -extern int tty_put_char(struct tty_struct *tty, unsigned char c); -extern unsigned int tty_chars_in_buffer(struct tty_struct *tty); -extern unsigned int tty_write_room(struct tty_struct *tty); -extern void tty_driver_flush_buffer(struct tty_struct *tty); -extern void tty_unthrottle(struct tty_struct *tty); -extern int tty_throttle_safe(struct tty_struct *tty); -extern int tty_unthrottle_safe(struct tty_struct *tty); -extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws); -extern int tty_get_icount(struct tty_struct *tty, - struct serial_icounter_struct *icount); -extern int is_current_pgrp_orphaned(void); -extern void tty_hangup(struct tty_struct *tty); -extern void tty_vhangup(struct tty_struct *tty); -extern int tty_hung_up_p(struct file *filp); -extern void do_SAK(struct tty_struct *tty); -extern void __do_SAK(struct tty_struct *tty); -extern void no_tty(void); -extern speed_t 
tty_termios_baud_rate(struct ktermios *termios); -extern void tty_termios_encode_baud_rate(struct ktermios *termios, - speed_t ibaud, speed_t obaud); -extern void tty_encode_baud_rate(struct tty_struct *tty, - speed_t ibaud, speed_t obaud); +const char *tty_driver_name(const struct tty_struct *tty); +void tty_wait_until_sent(struct tty_struct *tty, long timeout); +void stop_tty(struct tty_struct *tty); +void start_tty(struct tty_struct *tty); +void tty_write_message(struct tty_struct *tty, char *msg); +int tty_send_xchar(struct tty_struct *tty, char ch); +int tty_put_char(struct tty_struct *tty, unsigned char c); +unsigned int tty_chars_in_buffer(struct tty_struct *tty); +unsigned int tty_write_room(struct tty_struct *tty); +void tty_driver_flush_buffer(struct tty_struct *tty); +void tty_unthrottle(struct tty_struct *tty); +int tty_throttle_safe(struct tty_struct *tty); +int tty_unthrottle_safe(struct tty_struct *tty); +int tty_do_resize(struct tty_struct *tty, struct winsize *ws); +int tty_get_icount(struct tty_struct *tty, + struct serial_icounter_struct *icount); +int is_current_pgrp_orphaned(void); +void tty_hangup(struct tty_struct *tty); +void tty_vhangup(struct tty_struct *tty); +int tty_hung_up_p(struct file *filp); +void do_SAK(struct tty_struct *tty); +void __do_SAK(struct tty_struct *tty); +void no_tty(void); +speed_t tty_termios_baud_rate(struct ktermios *termios); +void tty_termios_encode_baud_rate(struct ktermios *termios, speed_t ibaud, + speed_t obaud); +void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud, + speed_t obaud); /** * tty_get_baud_rate - get tty bit rates @@ -363,37 +363,36 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty) unsigned char tty_get_char_size(unsigned int cflag); unsigned char tty_get_frame_size(unsigned int cflag); -extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); -extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); -extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); +void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); +int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); +int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); -extern void tty_wakeup(struct tty_struct *tty); +void tty_wakeup(struct tty_struct *tty); -extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg); -extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg); -extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); -extern void tty_release_struct(struct tty_struct *tty, int idx); -extern void tty_init_termios(struct tty_struct *tty); -extern void tty_save_termios(struct tty_struct *tty); -extern int tty_standard_install(struct tty_driver *driver, +int tty_mode_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); +int tty_perform_flush(struct tty_struct *tty, unsigned long arg); +struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); +void tty_release_struct(struct tty_struct *tty, int idx); +void tty_init_termios(struct tty_struct *tty); +void tty_save_termios(struct tty_struct *tty); +int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); extern struct mutex tty_mutex; /* n_tty.c */ -extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); +void n_tty_inherit_ops(struct tty_ldisc_ops *ops); #ifdef CONFIG_TTY -extern void __init n_tty_init(void); +void __init n_tty_init(void); 
#else static inline void n_tty_init(void) { } #endif /* tty_audit.c */ #ifdef CONFIG_AUDIT -extern void tty_audit_exit(void); -extern void tty_audit_fork(struct signal_struct *sig); -extern int tty_audit_push(void); +void tty_audit_exit(void); +void tty_audit_fork(struct signal_struct *sig); +int tty_audit_push(void); #else static inline void tty_audit_exit(void) { @@ -408,24 +407,23 @@ static inline int tty_audit_push(void) #endif /* tty_ioctl.c */ -extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, - unsigned int cmd, unsigned long arg); +int n_tty_ioctl_helper(struct tty_struct *tty, unsigned int cmd, + unsigned long arg); /* vt.c */ -extern int vt_ioctl(struct tty_struct *tty, - unsigned int cmd, unsigned long arg); +int vt_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); -extern long vt_compat_ioctl(struct tty_struct *tty, - unsigned int cmd, unsigned long arg); +long vt_compat_ioctl(struct tty_struct *tty, unsigned int cmd, + unsigned long arg); /* tty_mutex.c */ /* functions for preparation of BKL removal */ -extern void tty_lock(struct tty_struct *tty); -extern int tty_lock_interruptible(struct tty_struct *tty); -extern void tty_unlock(struct tty_struct *tty); -extern void tty_lock_slave(struct tty_struct *tty); -extern void tty_unlock_slave(struct tty_struct *tty); -extern void tty_set_lock_subclass(struct tty_struct *tty); +void tty_lock(struct tty_struct *tty); +int tty_lock_interruptible(struct tty_struct *tty); +void tty_unlock(struct tty_struct *tty); +void tty_lock_slave(struct tty_struct *tty); +void tty_unlock_slave(struct tty_struct *tty); +void tty_set_lock_subclass(struct tty_struct *tty); #endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index c20431d8def8..795b94ccdeb6 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -327,11 +327,11 @@ struct tty_driver { extern struct list_head tty_drivers; -extern struct tty_driver *__tty_alloc_driver(unsigned int lines, - struct module *owner, unsigned long flags); -extern struct tty_driver *tty_find_polling_driver(char *name, int *line); +struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, + unsigned long flags); +struct tty_driver *tty_find_polling_driver(char *name, int *line); -extern void tty_driver_kref_put(struct tty_driver *driver); +void tty_driver_kref_put(struct tty_driver *driver); /* Use TTY_DRIVER_* flags below */ #define tty_alloc_driver(lines, flags) \ @@ -360,7 +360,7 @@ static inline void tty_set_operations(struct tty_driver *driver, * Used for PTY's, in particular. * * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will - * guarantee never not to set any special character handling + * guarantee never to set any special character handling * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || * !INPCK)). 
That is, if there is no reason for the driver to * send notifications of parity and break characters up to the diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 32284992b31a..9916acb5de49 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -7,16 +7,16 @@ struct tty_ldisc; -extern int tty_buffer_set_limit(struct tty_port *port, int limit); -extern unsigned int tty_buffer_space_avail(struct tty_port *port); -extern int tty_buffer_request_room(struct tty_port *port, size_t size); -extern int tty_insert_flip_string_flags(struct tty_port *port, +int tty_buffer_set_limit(struct tty_port *port, int limit); +unsigned int tty_buffer_space_avail(struct tty_port *port); +int tty_buffer_request_room(struct tty_port *port, size_t size); +int tty_insert_flip_string_flags(struct tty_port *port, const unsigned char *chars, const char *flags, size_t size); -extern int tty_insert_flip_string_fixed_flag(struct tty_port *port, +int tty_insert_flip_string_fixed_flag(struct tty_port *port, const unsigned char *chars, char flag, size_t size); -extern int tty_prepare_flip_string(struct tty_port *port, - unsigned char **chars, size_t size); -extern void tty_flip_buffer_push(struct tty_port *port); +int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, + size_t size); +void tty_flip_buffer_push(struct tty_port *port); void tty_schedule_flip(struct tty_port *port); int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag); @@ -45,7 +45,7 @@ static inline int tty_insert_flip_string(struct tty_port *port, int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, const char *f, int count); -extern void tty_buffer_lock_exclusive(struct tty_port *port); -extern void tty_buffer_unlock_exclusive(struct tty_port *port); +void tty_buffer_lock_exclusive(struct tty_port *port); +void tty_buffer_unlock_exclusive(struct tty_port *port); #endif /* _LINUX_TTY_FLIP_H */ diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index b1d812e902aa..b85d84fb5f49 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -146,7 +146,7 @@ struct ld_semaphore { #endif }; -extern void __init_ldsem(struct ld_semaphore *sem, const char *name, +void __init_ldsem(struct ld_semaphore *sem, const char *name, struct lock_class_key *key); #define init_ldsem(sem) \ @@ -157,18 +157,18 @@ do { \ } while (0) -extern int ldsem_down_read(struct ld_semaphore *sem, long timeout); -extern int ldsem_down_read_trylock(struct ld_semaphore *sem); -extern int ldsem_down_write(struct ld_semaphore *sem, long timeout); -extern int ldsem_down_write_trylock(struct ld_semaphore *sem); -extern void ldsem_up_read(struct ld_semaphore *sem); -extern void ldsem_up_write(struct ld_semaphore *sem); +int ldsem_down_read(struct ld_semaphore *sem, long timeout); +int ldsem_down_read_trylock(struct ld_semaphore *sem); +int ldsem_down_write(struct ld_semaphore *sem, long timeout); +int ldsem_down_write_trylock(struct ld_semaphore *sem); +void ldsem_up_read(struct ld_semaphore *sem); +void ldsem_up_write(struct ld_semaphore *sem); #ifdef CONFIG_DEBUG_LOCK_ALLOC -extern int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, - long timeout); -extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, - long timeout); +int ldsem_down_read_nested(struct ld_semaphore *sem, int subclass, + long timeout); +int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, + long timeout); #else # define ldsem_down_read_nested(sem, 
subclass, timeout) \ ldsem_down_read(sem, timeout) @@ -180,7 +180,6 @@ extern int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass, struct tty_ldisc_ops { char *name; int num; - int flags; /* * The following routines are called from above. @@ -200,7 +199,7 @@ struct tty_ldisc_ops { void (*set_termios)(struct tty_struct *tty, struct ktermios *old); __poll_t (*poll)(struct tty_struct *, struct file *, struct poll_table_struct *); - int (*hangup)(struct tty_struct *tty); + void (*hangup)(struct tty_struct *tty); /* * The following routines are called from below. @@ -220,8 +219,6 @@ struct tty_ldisc { struct tty_struct *tty; }; -#define LDISC_FLAG_DEFINED 0x00000001 - #define MODULE_ALIAS_LDISC(ldisc) \ MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) diff --git a/include/linux/uio.h b/include/linux/uio.h index 207101a9c5c3..6350354f97e9 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -35,6 +35,7 @@ struct iov_iter_state { struct iov_iter { u8 iter_type; + bool nofault; bool data_source; size_t iov_offset; size_t count; @@ -133,7 +134,8 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, struct iov_iter *i); void iov_iter_advance(struct iov_iter *i, size_t bytes); void iov_iter_revert(struct iov_iter *i, size_t bytes); -int iov_iter_fault_in_readable(const struct iov_iter *i, size_t bytes); +size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t bytes); +size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t bytes); size_t iov_iter_single_seg_count(const struct iov_iter *i); size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i); diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 2c1fc9212cf2..548a028f2dab 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -124,7 +124,6 @@ struct usb_hcd { #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ #define HCD_FLAG_DEAD 6 /* controller has died? */ #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ -#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */ /* The flags can be tested using these macros; they are likely to * be slightly faster than test_bit(). 
@@ -135,7 +134,6 @@ struct usb_hcd { #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) #define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING)) #define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD)) -#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER)) /* * Specifies if interfaces are authorized by default diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h index fd1c9f6a4e37..d3e65eb9e16f 100644 --- a/include/linux/usb/tegra_usb_phy.h +++ b/include/linux/usb/tegra_usb_phy.h @@ -18,6 +18,7 @@ #include <linux/clk.h> #include <linux/gpio.h> +#include <linux/regmap.h> #include <linux/reset.h> #include <linux/usb/otg.h> @@ -30,6 +31,7 @@ * enter host mode * requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level * and hsdiscon_level should be set for adequate signal quality + * requires_pmc_ao_power_up: true if USB AO is powered down by default */ struct tegra_phy_soc_config { @@ -37,6 +39,7 @@ struct tegra_phy_soc_config { bool has_hostpc; bool requires_usbmode_setup; bool requires_extra_tuning_parameters; + bool requires_pmc_ao_power_up; }; struct tegra_utmip_config { @@ -62,6 +65,7 @@ enum tegra_usb_phy_port_speed { struct tegra_xtal_freq; struct tegra_usb_phy { + int irq; int instance; const struct tegra_xtal_freq *freq; void __iomem *regs; @@ -70,6 +74,7 @@ struct tegra_usb_phy { struct clk *pll_u; struct clk *pad_clk; struct regulator *vbus; + struct regmap *pmc_regmap; enum usb_dr_mode mode; void *config; const struct tegra_phy_soc_config *soc_config; diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 3972ab765de1..c3011ccda430 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -6,6 +6,8 @@ #include <linux/device.h> #include <linux/interrupt.h> #include <linux/vhost_iotlb.h> +#include <linux/virtio_net.h> +#include <linux/if_ether.h> /** * struct vdpa_calllback - vDPA callback definition. @@ -63,6 +65,7 @@ struct vdpa_mgmt_dev; * @dev: underlying device * @dma_dev: the actual device that is performing DMA * @config: the configuration ops for this device. + * @cf_mutex: Protects get and set access to configuration layout. * @index: device index * @features_valid: were features initialized? 
for legacy guests * @use_va: indicate whether virtual address must be used by this device @@ -74,6 +77,7 @@ struct vdpa_device { struct device dev; struct device *dma_dev; const struct vdpa_config_ops *config; + struct mutex cf_mutex; /* Protects get/set config */ unsigned int index; bool features_valid; bool use_va; @@ -91,6 +95,14 @@ struct vdpa_iova_range { u64 last; }; +struct vdpa_dev_set_config { + struct { + u8 mac[ETH_ALEN]; + u16 mtu; + } net; + u64 mask; +}; + /** * Corresponding file area for device memory mapping * @file: vma->vm_file for the mapping @@ -171,6 +183,9 @@ struct vdpa_map_file { * @get_vq_num_max: Get the max size of virtqueue * @vdev: vdpa device * Returns u16: max size of virtqueue + * @get_vq_num_min: Get the min size of virtqueue (optional) + * @vdev: vdpa device + * Returns u16: min size of virtqueue * @get_device_id: Get virtio device id * @vdev: vdpa device * Returns u32: virtio device id @@ -257,7 +272,7 @@ struct vdpa_config_ops { struct vdpa_notification_area (*get_vq_notification)(struct vdpa_device *vdev, u16 idx); /* vq irq is not expected to be changed once DRIVER_OK is set */ - int (*get_vq_irq)(struct vdpa_device *vdv, u16 idx); + int (*get_vq_irq)(struct vdpa_device *vdev, u16 idx); /* Device ops */ u32 (*get_vq_align)(struct vdpa_device *vdev); @@ -266,6 +281,7 @@ struct vdpa_config_ops { void (*set_config_cb)(struct vdpa_device *vdev, struct vdpa_callback *cb); u16 (*get_vq_num_max)(struct vdpa_device *vdev); + u16 (*get_vq_num_min)(struct vdpa_device *vdev); u32 (*get_device_id)(struct vdpa_device *vdev); u32 (*get_vendor_id)(struct vdpa_device *vdev); u8 (*get_status)(struct vdpa_device *vdev); @@ -382,26 +398,16 @@ static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) return ops->set_features(vdev, features); } -static inline void vdpa_get_config(struct vdpa_device *vdev, - unsigned int offset, void *buf, - unsigned int len) -{ - const struct vdpa_config_ops *ops = vdev->config; - - /* - * Config accesses aren't supposed to trigger before features are set. - * If it does happen we assume a legacy guest. - */ - if (!vdev->features_valid) - vdpa_set_features(vdev, 0); - ops->get_config(vdev, offset, buf, len); -} - +void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, + void *buf, unsigned int len); +void vdpa_set_config(struct vdpa_device *dev, unsigned int offset, + const void *buf, unsigned int length); /** * struct vdpa_mgmtdev_ops - vdpa device ops * @dev_add: Add a vdpa device using alloc and register * @mdev: parent device to use for device addition * @name: name of the new vdpa device + * @config: config attributes to apply to the device under creation * Driver need to add a new device using _vdpa_register_device() * after fully initializing the vdpa device. Driver must return 0 * on success or appropriate error code. @@ -412,14 +418,25 @@ static inline void vdpa_get_config(struct vdpa_device *vdev, * _vdpa_unregister_device(). 
*/ struct vdpa_mgmtdev_ops { - int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name); + int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name, + const struct vdpa_dev_set_config *config); void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev); }; +/** + * struct vdpa_mgmt_dev - vdpa management device + * @device: Management parent device + * @ops: operations supported by management device + * @id_table: Pointer to device id table of supported ids + * @config_attr_mask: bit mask of attributes of type enum vdpa_attr that + * management device support during dev_add callback + * @list: list entry + */ struct vdpa_mgmt_dev { struct device *device; const struct vdpa_mgmtdev_ops *ops; - const struct virtio_device_id *id_table; /* supported ids */ + const struct virtio_device_id *id_table; + u64 config_attr_mask; struct list_head list; }; diff --git a/include/linux/vfio.h b/include/linux/vfio.h index b53a9557884a..76191d7abed1 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -71,68 +71,17 @@ struct vfio_device_ops { int (*match)(struct vfio_device *vdev, char *buf); }; -extern struct iommu_group *vfio_iommu_group_get(struct device *dev); -extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); - void vfio_init_group_dev(struct vfio_device *device, struct device *dev, const struct vfio_device_ops *ops); void vfio_uninit_group_dev(struct vfio_device *device); int vfio_register_group_dev(struct vfio_device *device); +int vfio_register_emulated_iommu_dev(struct vfio_device *device); void vfio_unregister_group_dev(struct vfio_device *device); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); int vfio_assign_device_set(struct vfio_device *device, void *set_id); -/* events for the backend driver notify callback */ -enum vfio_iommu_notify_type { - VFIO_IOMMU_CONTAINER_CLOSE = 0, -}; - -/** - * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks - */ -struct vfio_iommu_driver_ops { - char *name; - struct module *owner; - void *(*open)(unsigned long arg); - void (*release)(void *iommu_data); - ssize_t (*read)(void *iommu_data, char __user *buf, - size_t count, loff_t *ppos); - ssize_t (*write)(void *iommu_data, const char __user *buf, - size_t count, loff_t *size); - long (*ioctl)(void *iommu_data, unsigned int cmd, - unsigned long arg); - int (*mmap)(void *iommu_data, struct vm_area_struct *vma); - int (*attach_group)(void *iommu_data, - struct iommu_group *group); - void (*detach_group)(void *iommu_data, - struct iommu_group *group); - int (*pin_pages)(void *iommu_data, - struct iommu_group *group, - unsigned long *user_pfn, - int npage, int prot, - unsigned long *phys_pfn); - int (*unpin_pages)(void *iommu_data, - unsigned long *user_pfn, int npage); - int (*register_notifier)(void *iommu_data, - unsigned long *events, - struct notifier_block *nb); - int (*unregister_notifier)(void *iommu_data, - struct notifier_block *nb); - int (*dma_rw)(void *iommu_data, dma_addr_t user_iova, - void *data, size_t count, bool write); - struct iommu_domain *(*group_iommu_domain)(void *iommu_data, - struct iommu_group *group); - void (*notify)(void *iommu_data, - enum vfio_iommu_notify_type event); -}; - -extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops); - -extern void vfio_unregister_iommu_driver( - const struct vfio_iommu_driver_ops *ops); - /* * External user API */ diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 
41edbc01ffa4..44d0e09da2d9 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -152,6 +152,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev); * @feature_table_size: number of entries in the feature table array. * @feature_table_legacy: same as feature_table but when working in legacy mode. * @feature_table_size_legacy: number of entries in feature table legacy array. + * @suppress_used_validation: set to not have core validate used length * @probe: the function to call when a device is found. Returns 0 or -errno. * @scan: optional function to call after successful probe; intended * for virtio-scsi to invoke a scan. @@ -168,6 +169,7 @@ struct virtio_driver { unsigned int feature_table_size; const unsigned int *feature_table_legacy; unsigned int feature_table_size_legacy; + bool suppress_used_validation; int (*validate)(struct virtio_device *dev); int (*probe)(struct virtio_device *dev); void (*scan)(struct virtio_device *dev); diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 8519b3ae5d52..4d107ad31149 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -23,6 +23,8 @@ struct virtio_shm_region { * any of @get/@set, @get_status/@set_status, or @get_features/ * @finalize_features are NOT safe to be called from an atomic * context. + * @enable_cbs: enable the callbacks + * vdev: the virtio_device * @get: read the value of a configuration field * vdev: the virtio_device * offset: the offset of the configuration field @@ -75,6 +77,7 @@ struct virtio_shm_region { */ typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { + void (*enable_cbs)(struct virtio_device *vdev); void (*get)(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len); void (*set)(struct virtio_device *vdev, unsigned offset, @@ -229,6 +232,9 @@ void virtio_device_ready(struct virtio_device *dev) { unsigned status = dev->config->get_status(dev); + if (dev->config->enable_cbs) + dev->config->enable_cbs(dev); + BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK); dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK); } diff --git a/include/linux/virtio_pci_legacy.h b/include/linux/virtio_pci_legacy.h new file mode 100644 index 000000000000..e5d665faf00e --- /dev/null +++ b/include/linux/virtio_pci_legacy.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_VIRTIO_PCI_LEGACY_H +#define _LINUX_VIRTIO_PCI_LEGACY_H + +#include "linux/mod_devicetable.h" +#include <linux/pci.h> +#include <linux/virtio_pci.h> + +struct virtio_pci_legacy_device { + struct pci_dev *pci_dev; + + /* Where to read and clear interrupt */ + u8 __iomem *isr; + /* The IO mapping for the PCI config space (legacy mode only) */ + void __iomem *ioaddr; + + struct virtio_device_id id; +}; + +u64 vp_legacy_get_features(struct virtio_pci_legacy_device *ldev); +u64 vp_legacy_get_driver_features(struct virtio_pci_legacy_device *ldev); +void vp_legacy_set_features(struct virtio_pci_legacy_device *ldev, + u32 features); +u8 vp_legacy_get_status(struct virtio_pci_legacy_device *ldev); +void vp_legacy_set_status(struct virtio_pci_legacy_device *ldev, + u8 status); +u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev, + u16 idx, u16 vector); +u16 vp_legacy_config_vector(struct virtio_pci_legacy_device *ldev, + u16 vector); +void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev, + u16 index, u32 queue_pfn); +bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev, + u16 idx); +void 
vp_legacy_set_queue_size(struct virtio_pci_legacy_device *ldev, + u16 idx, u16 size); +u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev, + u16 idx); +int vp_legacy_probe(struct virtio_pci_legacy_device *ldev); +void vp_legacy_remove(struct virtio_pci_legacy_device *ldev); + +#endif diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 671d402c3778..6e022cc712e6 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -22,7 +22,7 @@ struct notifier_block; /* in notifier.h */ #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ #define VM_DMA_COHERENT 0x00000010 /* dma_alloc_coherent */ #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ -#define VM_NO_GUARD 0x00000040 /* don't add guard page */ +#define VM_NO_GUARD 0x00000040 /* ***DANGEROUS*** don't add guard page */ #define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ #define VM_FLUSH_RESET_PERMS 0x00000100 /* reset direct map and flush TLB on unmap, can't be freed in atomic context */ #define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */ @@ -136,21 +136,21 @@ static inline void vmalloc_init(void) static inline unsigned long vmalloc_nr_pages(void) { return 0; } #endif -extern void *vmalloc(unsigned long size); -extern void *vzalloc(unsigned long size); -extern void *vmalloc_user(unsigned long size); -extern void *vmalloc_node(unsigned long size, int node); -extern void *vzalloc_node(unsigned long size, int node); -extern void *vmalloc_32(unsigned long size); -extern void *vmalloc_32_user(unsigned long size); -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask); +extern void *vmalloc(unsigned long size) __alloc_size(1); +extern void *vzalloc(unsigned long size) __alloc_size(1); +extern void *vmalloc_user(unsigned long size) __alloc_size(1); +extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1); +extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1); +extern void *vmalloc_32(unsigned long size) __alloc_size(1); +extern void *vmalloc_32_user(unsigned long size) __alloc_size(1); +extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1); extern void *__vmalloc_node_range(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, - const void *caller); + const void *caller) __alloc_size(1); void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, - int node, const void *caller); -void *vmalloc_no_huge(unsigned long size); + int node, const void *caller) __alloc_size(1); +void *vmalloc_no_huge(unsigned long size) __alloc_size(1); extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 74d3c1efd9bb..7fee9b6cfede 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -469,7 +469,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); extern unsigned int work_busy(struct work_struct *work); extern __printf(1, 2) void set_worker_desc(const char *fmt, ...); extern void print_worker_info(const char *log_lvl, struct task_struct *task); -extern void show_workqueue_state(void); +extern void show_all_workqueues(void); +extern void show_one_workqueue(struct workqueue_struct *wq); extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task); /** |
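The include/linux/usb/tegra_usb_phy.h hunk above threads an interrupt number, a PMC regmap handle and a requires_pmc_ao_power_up SoC flag into struct tegra_usb_phy. Below is a minimal sketch of how a PHY driver might consume the new fields; the PMC register offset and field mask are placeholders invented for illustration, not values taken from this patch.

#include <linux/regmap.h>
#include <linux/usb/tegra_usb_phy.h>

/* Placeholder PMC register layout: the offset and mask are illustrative only. */
#define PMC_USB_AO              0xf0
#define PMC_USB_AO_PD_MASK      0x3

static int tegra_usb_phy_power_up_ao(struct tegra_usb_phy *phy)
{
        /* Only SoCs that power down the USB always-on domain need this step. */
        if (!phy->soc_config->requires_pmc_ao_power_up)
                return 0;

        /* Clear the power-down bits through the shared PMC regmap. */
        return regmap_update_bits(phy->pmc_regmap, PMC_USB_AO,
                                  PMC_USB_AO_PD_MASK, 0);
}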
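In include/linux/vdpa.h the inline vdpa_get_config() is dropped in favour of out-of-line vdpa_get_config()/vdpa_set_config(), and struct vdpa_device grows cf_mutex to serialize configuration access. The sketch below reconstructs a plausible shape for the out-of-line helpers from the removed inline body and the cf_mutex documentation; the actual core implementation may differ in detail.

#include <linux/mutex.h>
#include <linux/vdpa.h>

void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
                     void *buf, unsigned int len)
{
        const struct vdpa_config_ops *ops = vdev->config;

        mutex_lock(&vdev->cf_mutex);
        /*
         * Config accesses aren't supposed to trigger before features are set.
         * If it does happen we assume a legacy guest.
         */
        if (!vdev->features_valid)
                vdpa_set_features(vdev, 0);
        ops->get_config(vdev, offset, buf, len);
        mutex_unlock(&vdev->cf_mutex);
}

void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
                     const void *buf, unsigned int length)
{
        mutex_lock(&vdev->cf_mutex);
        vdev->config->set_config(vdev, offset, buf, length);
        mutex_unlock(&vdev->cf_mutex);
}

Moving the helpers out of line lets both paths take cf_mutex, so a driver updating the config space cannot race a reader that is still mid-copy.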
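Still in vdpa.h, dev_add() now receives a struct vdpa_dev_set_config and struct vdpa_mgmt_dev advertises the creation-time attributes it honours through config_attr_mask. A hedged sketch of a parent driver wiring both up follows; my_net_create() is a hypothetical helper standing in for device allocation plus _vdpa_register_device(), and the VDPA_ATTR_DEV_NET_CFG_MACADDR constant is assumed to come from the vdpa netlink uapi rather than the hunks shown here.

#include <linux/bits.h>
#include <linux/etherdevice.h>
#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>    /* enum vdpa_attr; VDPA_ATTR_DEV_NET_CFG_MACADDR assumed */

/* Hypothetical helper: allocates the device and calls _vdpa_register_device(). */
static int my_net_create(struct vdpa_mgmt_dev *mdev, const char *name,
                         const u8 *mac);

static int my_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                      const struct vdpa_dev_set_config *config)
{
        u8 mac[ETH_ALEN] = {};

        /* Only apply attributes the user explicitly set (tracked in config->mask). */
        if (config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR))
                ether_addr_copy(mac, config->net.mac);

        return my_net_create(mdev, name, mac);
}

static void my_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        _vdpa_unregister_device(dev);
}

static const struct vdpa_mgmtdev_ops my_mgmt_ops = {
        .dev_add = my_dev_add,
        .dev_del = my_dev_del,
};

static struct vdpa_mgmt_dev my_mgmt_dev = {
        .ops = &my_mgmt_ops,
        /* Declare which attributes my_dev_add() above can actually apply. */
        .config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR),
        /* .device and .id_table omitted for brevity. */
};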
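include/linux/virtio.h adds suppress_used_validation so a driver whose device reports unreliable used lengths can opt out of the core's validation, and virtio_config.h adds an optional enable_cbs() transport op that virtio_device_ready() now invokes before setting DRIVER_OK. A minimal sketch of a driver setting the new flag follows; the driver name, id table and callbacks are placeholders.

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/virtio.h>

static const struct virtio_device_id my_id_table[] = {
        { VIRTIO_DEV_ANY_ID, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static int my_probe(struct virtio_device *vdev)
{
        return 0;
}

static void my_remove(struct virtio_device *vdev)
{
}

static struct virtio_driver my_driver = {
        .driver.name              = "my-virtio-driver",
        .id_table                 = my_id_table,
        /* This device reports bogus used lengths; skip core validation. */
        .suppress_used_validation = true,
        .probe                    = my_probe,
        .remove                   = my_remove,
};
module_virtio_driver(my_driver);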
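The new include/linux/virtio_pci_legacy.h header exposes a small helper library around the legacy virtio-PCI transport. The sketch below condenses the probe and teardown flow into one function purely to show the call order; in a real transport the virtio_pci_legacy_device lives in the driver's private state and queue programming follows feature negotiation.

#include <linux/pci.h>
#include <linux/virtio_pci_legacy.h>

static int my_legacy_setup(struct pci_dev *pci_dev)
{
        struct virtio_pci_legacy_device ldev = { .pci_dev = pci_dev };
        u64 features;
        int err;

        err = vp_legacy_probe(&ldev);   /* sets up ldev->ioaddr and ldev->isr */
        if (err)
                return err;

        vp_legacy_set_status(&ldev, 0); /* writing status 0 resets the device */
        features = vp_legacy_get_features(&ldev);
        dev_info(&pci_dev->dev, "legacy device features: %llx\n", features);

        /*
         * A real transport would negotiate features here and then program
         * each queue via vp_legacy_set_queue_address() after reading its
         * size with vp_legacy_get_queue_size().
         */

        vp_legacy_remove(&ldev);
        return 0;
}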
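include/linux/vmalloc.h annotates the allocators with __alloc_size(1), telling the compiler which argument carries the allocation size. The illustration below is a sketch of the kind of checking this enables; whether an out-of-bounds length is actually diagnosed depends on the compiler and on options such as FORTIFY_SOURCE or -Warray-bounds.

#include <linux/string.h>
#include <linux/vmalloc.h>

static void alloc_size_example(void)
{
        char *buf = vmalloc(16);        /* the compiler now knows the object is 16 bytes */

        if (!buf)
                return;

        /*
         * In-bounds use. With __alloc_size() in place, an oversized length
         * here (for example 32) becomes visible to fortified string helpers
         * and array-bounds diagnostics instead of silently overflowing.
         */
        memset(buf, 0, 16);

        vfree(buf);
}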
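Finally, include/linux/workqueue.h replaces show_workqueue_state() with show_all_workqueues() and a per-workqueue show_one_workqueue(). A short sketch of how a debug path might adapt; the wrapper function is illustrative.

#include <linux/workqueue.h>

static void my_debug_dump(struct workqueue_struct *suspect_wq)
{
        if (suspect_wq)
                show_one_workqueue(suspect_wq); /* dump just the workqueue of interest */
        else
                show_all_workqueues();          /* old show_workqueue_state() behaviour */
}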