Diffstat (limited to 'include/linux')
180 files changed, 4404 insertions, 1997 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ed80f147bd50..87715f20b69a 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -101,7 +101,7 @@ static inline bool has_acpi_companion(struct device *dev) static inline void acpi_preset_companion(struct device *dev, struct acpi_device *parent, u64 addr) { - ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL)); + ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false)); } static inline const char *acpi_dev_name(struct acpi_device *adev) @@ -340,7 +340,14 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); bool acpi_isa_irq_available(int irq); +#ifdef CONFIG_PCI void acpi_penalize_sci_irq(int irq, int trigger, int polarity); +#else +static inline void acpi_penalize_sci_irq(int irq, int trigger, + int polarity) +{ +} +#endif void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); @@ -1054,6 +1061,17 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) } #endif +#if defined(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C) +bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, + struct acpi_resource_i2c_serialbus **i2c); +#else +static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, + struct acpi_resource_i2c_serialbus **i2c) +{ + return false; +} +#endif + /* Device properties */ #ifdef CONFIG_ACPI @@ -1313,4 +1331,14 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level) } #endif +#ifdef CONFIG_ACPI +extern int acpi_platform_notify(struct device *dev, enum kobject_action action); +#else +static inline int +acpi_platform_notify(struct device *dev, enum kobject_action action) +{ + return 0; +} +#endif + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/adxl.h b/include/linux/adxl.h index 2d29f55923e3..2a629acb4c3f 100644 --- a/include/linux/adxl.h +++ b/include/linux/adxl.h @@ -7,12 +7,7 @@ #ifndef _LINUX_ADXL_H #define _LINUX_ADXL_H -#ifdef CONFIG_ACPI_ADXL const char * const *adxl_get_component_names(void); int adxl_decode(u64 addr, u64 component_values[]); -#else -static inline const char * const *adxl_get_component_names(void) { return NULL; } -static inline int adxl_decode(u64 addr, u64 component_values[]) { return -EOPNOTSUPP; } -#endif #endif /* _LINUX_ADXL_H */ diff --git a/include/linux/audit.h b/include/linux/audit.h index 9334fbef7bae..a625c29a2ea2 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -115,8 +115,6 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall); struct filename; -extern void audit_log_session_info(struct audit_buffer *ab); - #define AUDIT_OFF 0 #define AUDIT_ON 1 #define AUDIT_LOCKED 2 @@ -153,8 +151,7 @@ extern void audit_log_link_denied(const char *operation); extern void audit_log_lost(const char *message); extern int audit_log_task_context(struct audit_buffer *ab); -extern void audit_log_task_info(struct audit_buffer *ab, - struct task_struct *tsk); +extern void audit_log_task_info(struct audit_buffer *ab); extern int audit_update_lsm_rules(void); @@ -202,8 +199,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab) { return 0; } -static inline void audit_log_task_info(struct audit_buffer *ab, - struct task_struct *tsk) +static inline void audit_log_task_info(struct audit_buffer *ab) { } #define audit_enabled AUDIT_OFF #endif /* CONFIG_AUDIT */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index b2488055fd1d..7605b5919c3a 100644 
--- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -171,7 +171,7 @@ struct virtchnl_msg { VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg); -/* Message descriptions and data structures.*/ +/* Message descriptions and data structures. */ /* VIRTCHNL_OP_VERSION * VF posts its version number to the PF. PF responds with its version number @@ -342,6 +342,8 @@ struct virtchnl_vsi_queue_config_info { struct virtchnl_queue_pair_info qpair[1]; }; +VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); + /* VIRTCHNL_OP_REQUEST_QUEUES * VF sends this message to request the PF to allocate additional queues to * this VF. Each VF gets a guaranteed number of queues on init but asking for @@ -357,8 +359,6 @@ struct virtchnl_vf_res_request { u16 num_queue_pairs; }; -VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); - /* VIRTCHNL_OP_CONFIG_IRQ_MAP * VF uses this message to map vectors to queues. * The rxq_map and txq_map fields are bitmaps used to indicate which queues @@ -819,8 +819,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, if (msglen >= valid_len) { struct virtchnl_tc_info *vti = (struct virtchnl_tc_info *)msg; - valid_len += vti->num_tc * - sizeof(struct virtchnl_channel_info); + valid_len += (vti->num_tc - 1) * + sizeof(struct virtchnl_channel_info); if (vti->num_tc == 0) err_msg_format = true; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 33014ae73103..e734f163bd0b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -23,6 +23,7 @@ struct bpf_prog; struct bpf_map; struct sock; struct seq_file; +struct btf; struct btf_type; /* map is generic key/value storage optionally accesible by eBPF programs */ @@ -52,6 +53,7 @@ struct bpf_map_ops { void (*map_seq_show_elem)(struct bpf_map *map, void *key, struct seq_file *m); int (*map_check_btf)(const struct bpf_map *map, + const struct btf *btf, const struct btf_type *key_type, const struct btf_type *value_type); }; @@ -126,6 +128,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map) } int map_check_no_btf(const struct bpf_map *map, + const struct btf *btf, const struct btf_type *key_type, const struct btf_type *value_type); @@ -268,15 +271,18 @@ struct bpf_prog_offload_ops { int (*insn_hook)(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int (*finalize)(struct bpf_verifier_env *env); + int (*prepare)(struct bpf_prog *prog); + int (*translate)(struct bpf_prog *prog); + void (*destroy)(struct bpf_prog *prog); }; struct bpf_prog_offload { struct bpf_prog *prog; struct net_device *netdev; + struct bpf_offload_dev *offdev; void *dev_priv; struct list_head offloads; bool dev_state; - const struct bpf_prog_offload_ops *dev_ops; void *jited_image; u32 jited_len; }; @@ -293,9 +299,11 @@ struct bpf_prog_aux { atomic_t refcnt; u32 used_map_cnt; u32 max_ctx_offset; + u32 max_pkt_offset; u32 stack_depth; u32 id; - u32 func_cnt; + u32 func_cnt; /* used by non-func prog as the number of func progs */ + u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ bool offload_requested; struct bpf_prog **func; void *jit_data; /* JIT specific data. arch dependent */ @@ -312,6 +320,30 @@ struct bpf_prog_aux { void *security; #endif struct bpf_prog_offload *offload; + struct btf *btf; + struct bpf_func_info *func_info; + /* bpf_line_info loaded from userspace. linfo->insn_off + * has the xlated insn offset. + * Both the main and sub prog share the same linfo. 
+ * The subprog can access its first linfo by + * using the linfo_idx. + */ + struct bpf_line_info *linfo; + /* jited_linfo is the jited addr of the linfo. It has a + * one to one mapping to linfo: + * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. + * Both the main and sub prog share the same jited_linfo. + * The subprog can access its first jited_linfo by + * using the linfo_idx. + */ + void **jited_linfo; + u32 func_info_cnt; + u32 nr_linfo; + /* subprog can use linfo_idx to access its first linfo and + * jited_linfo. + * main prog always has linfo_idx == 0 + */ + u32 linfo_idx; union { struct work_struct work; struct rcu_head rcu; @@ -523,7 +555,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) } /* verify correctness of eBPF program */ -int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, + union bpf_attr __user *uattr); void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); /* Map specifics */ @@ -691,7 +724,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map, bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); -struct bpf_offload_dev *bpf_offload_dev_create(void); +struct bpf_offload_dev * +bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops); void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, struct net_device *netdev); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index d93e89761a8b..c233efc106c6 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -38,6 +38,7 @@ enum bpf_reg_liveness { REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ + REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */ }; struct bpf_reg_state { @@ -203,6 +204,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) struct bpf_subprog_info { u32 start; /* insn idx of function entry point */ + u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max. 
stack depth used by this function */ }; @@ -223,6 +225,7 @@ struct bpf_verifier_env { bool allow_ptr_leaks; bool seen_direct_write; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ + const struct bpf_line_info *prev_linfo; struct bpf_verifier_log log; struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; u32 subprog_cnt; @@ -245,7 +248,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) return cur_func(env)->regs; } -int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); +int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx); int bpf_prog_offload_finalize(struct bpf_verifier_env *env); diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 949e9af8d9d6..9cd00a37b8d3 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -28,6 +28,7 @@ #define PHY_ID_BCM89610 0x03625cd0 #define PHY_ID_BCM7250 0xae025280 +#define PHY_ID_BCM7255 0xae025120 #define PHY_ID_BCM7260 0xae025190 #define PHY_ID_BCM7268 0xae025090 #define PHY_ID_BCM7271 0xae0253b0 diff --git a/include/linux/btf.h b/include/linux/btf.h index e076c4697049..12502e25e767 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -7,6 +7,7 @@ #include <linux/types.h> struct btf; +struct btf_member; struct btf_type; union bpf_attr; @@ -46,5 +47,24 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, struct seq_file *m); int btf_get_fd_by_id(u32 id); u32 btf_id(const struct btf *btf); +bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, + const struct btf_member *m, + u32 expected_offset, u32 expected_size); + +#ifdef CONFIG_BPF_SYSCALL +const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); +const char *btf_name_by_offset(const struct btf *btf, u32 offset); +#else +static inline const struct btf_type *btf_type_by_id(const struct btf *btf, + u32 type_id) +{ + return NULL; +} +static inline const char *btf_name_by_offset(const struct btf *btf, + u32 offset) +{ + return NULL; +} +#endif #endif diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index a83e1f632eb7..f01623aef2f7 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, unsigned int idx); +struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); void can_free_echo_skb(struct net_device *dev, unsigned int idx); diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index cb31683bbe15..8268811a697e 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload * int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight); int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg); int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload); -int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb); +int can_rx_offload_queue_sorted(struct can_rx_offload *offload, + struct sk_buff *skb, u32 timestamp); +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, + unsigned int idx, u32 
timestamp); +int can_rx_offload_queue_tail(struct can_rx_offload *offload, + struct sk_buff *skb); void can_rx_offload_reset(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); void can_rx_offload_enable(struct can_rx_offload *offload); diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 6b92b3395fa9..65a38c4a02a1 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \ CEPH_FEATURE_CEPHX_V2) -#define CEPH_FEATURES_REQUIRED_DEFAULT \ - (CEPH_FEATURE_NOSRCADDR | \ - CEPH_FEATURE_SUBSCRIBE2 | \ - CEPH_FEATURE_RECONNECT_SEQ | \ - CEPH_FEATURE_PGID64 | \ - CEPH_FEATURE_PGPOOL3 | \ - CEPH_FEATURE_OSDENC) +#define CEPH_FEATURES_REQUIRED_DEFAULT 0 #endif diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 60c51871b04b..e443fa9fa859 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -1,12 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* - * linux/include/linux/clk-provider.h - * * Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com> * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __LINUX_CLK_PROVIDER_H #define __LINUX_CLK_PROVIDER_H @@ -601,6 +596,12 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw); * @lock: register lock * * Clock with adjustable fractional divider affecting its output frequency. + * + * Flags: + * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator + * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED + * is set then the numerator and denominator are both the value read + * plus one. */ struct clk_fractional_divider { struct clk_hw hw; @@ -620,6 +621,8 @@ struct clk_fractional_divider { #define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) +#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0) + extern const struct clk_ops clk_fractional_divider_ops; struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h index e0c362363c38..85f8cf9d1226 100644 --- a/include/linux/clk/clk-conf.h +++ b/include/linux/clk/clk-conf.h @@ -1,10 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2014 Samsung Electronics Co., Ltd. * Sylwester Nawrocki <s.nawrocki@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/types.h> diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index c0f5db3a9621..2010493e1040 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -143,18 +143,6 @@ #define KASAN_ABI_VERSION 3 #endif -/* - * Because __no_sanitize_address conflicts with inlining: - * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 - * we do one or the other. 
- */ -#ifdef CONFIG_KASAN -#define __no_sanitize_address_or_inline \ - __no_sanitize_address __maybe_unused notrace -#else -#define __no_sanitize_address_or_inline inline -#endif - #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 18c80cfa4fc4..fc5004a4b07d 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, * unique, to convince GCC not to merge duplicate inline asm statements. */ #define annotate_reachable() ({ \ - asm volatile("ANNOTATE_REACHABLE counter=%c0" \ - : : "i" (__COUNTER__)); \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ }) #define annotate_unreachable() ({ \ - asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \ - : : "i" (__COUNTER__)); \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ }) +#define ASM_UNREACHABLE \ + "999:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" #else #define annotate_reachable() #define annotate_unreachable() @@ -189,7 +198,7 @@ void __read_once_size(const volatile void *p, void *res, int size) * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 * '__maybe_unused' allows us to avoid defined-but-not-used warnings. */ -# define __no_kasan_or_inline __no_sanitize_address __maybe_unused +# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused #else # define __no_kasan_or_inline __always_inline #endif @@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off) return (void *)((unsigned long)off + *off); } -#else /* __ASSEMBLY__ */ - -#ifdef __KERNEL__ -#ifndef LINKER_SCRIPT - -#ifdef CONFIG_STACK_VALIDATION -.macro ANNOTATE_UNREACHABLE counter:req -\counter: - .pushsection .discard.unreachable - .long \counter\()b -. - .popsection -.endm - -.macro ANNOTATE_REACHABLE counter:req -\counter: - .pushsection .discard.reachable - .long \counter\()b -. - .popsection -.endm - -.macro ASM_UNREACHABLE -999: - .pushsection .discard.unreachable - .long 999b - . - .popsection -.endm -#else /* CONFIG_STACK_VALIDATION */ -.macro ANNOTATE_UNREACHABLE counter:req -.endm - -.macro ANNOTATE_REACHABLE counter:req -.endm - -.macro ASM_UNREACHABLE -.endm -#endif /* CONFIG_STACK_VALIDATION */ - -#endif /* LINKER_SCRIPT */ -#endif /* __KERNEL__ */ #endif /* __ASSEMBLY__ */ /* Compile time object size, -1 for unknown */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 6b28c1b7310c..fe07b680dd4a 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -4,22 +4,26 @@ /* * The attributes in this file are unconditionally defined and they directly - * map to compiler attribute(s) -- except those that are optional. + * map to compiler attribute(s), unless one of the compilers does not support + * the attribute. In that case, __has_attribute is used to check for support + * and the reason is stated in its comment ("Optional: ..."). * * Any other "attributes" (i.e. those that depend on a configuration option, * on a compiler, on an architecture, on plugins, on other attributes...) * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h). 
+ * The intention is to keep this file as simple as possible, as well as + * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks). * * This file is meant to be sorted (by actual attribute name, * not by #define identifier). Use the __attribute__((__name__)) syntax * (i.e. with underscores) to avoid future collisions with other macros. - * If an attribute is optional, state the reason in the comment. + * Provide links to the documentation of each supported compiler, if it exists. */ /* - * To check for optional attributes, we use __has_attribute, which is supported - * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support - * 4.6 <= gcc < 5, we implement __has_attribute by hand. + * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. + * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute + * by hand. * * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ * depending on the compiler used to build it; however, these attributes have @@ -33,7 +37,6 @@ # define __GCC4_has_attribute___designated_init__ 0 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___noclone__ 1 -# define __GCC4_has_attribute___optimize__ 1 # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) #endif @@ -159,17 +162,11 @@ /* * Optional: not supported by clang - * Note: icc does not recognize gcc's no-tracer * * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute - * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute */ #if __has_attribute(__noclone__) -# if __has_attribute(__optimize__) -# define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) -# else -# define __noclone __attribute__((__noclone__)) -# endif +# define __noclone __attribute__((__noclone__)) #else # define __noclone #endif diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 3439d7d0249a..ba814f18cb4c 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -104,59 +104,6 @@ struct ftrace_likely_data { unsigned long constant; }; -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -/* - * The below symbols may be defined for one or more, but not ALL, of the above - * compilers. We don't consider that to be an error, so set them to nothing. - * For example, some of them are for compiler specific plugins. - */ -#ifndef __latent_entropy -# define __latent_entropy -#endif - -#ifndef __randomize_layout -# define __randomize_layout __designated_init -#endif - -#ifndef __no_randomize_layout -# define __no_randomize_layout -#endif - -#ifndef randomized_struct_fields_start -# define randomized_struct_fields_start -# define randomized_struct_fields_end -#endif - -/* Are two types/vars the same type (ignoring qualifiers)? */ -#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) - -/* Is this type a native word size -- useful for atomic operations */ -#define __native_word(t) \ - (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ - sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) - -/* Helpers for emitting diagnostics in pragmas. 
*/ -#ifndef __diag -#define __diag(string) -#endif - -#ifndef __diag_GCC -#define __diag_GCC(version, severity, string) -#endif - -#define __diag_push() __diag(push) -#define __diag_pop() __diag(pop) - -#define __diag_ignore(compiler, version, option, comment) \ - __diag_ ## compiler(version, ignore, option) -#define __diag_warn(compiler, version, option, comment) \ - __diag_ ## compiler(version, warn, option) -#define __diag_error(compiler, version, option, comment) \ - __diag_ ## compiler(version, error, option) - #ifdef CONFIG_ENABLE_MUST_CHECK #define __must_check __attribute__((__warn_unused_result__)) #else @@ -211,4 +158,61 @@ struct ftrace_likely_data { */ #define noinline_for_stack noinline +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +/* + * The below symbols may be defined for one or more, but not ALL, of the above + * compilers. We don't consider that to be an error, so set them to nothing. + * For example, some of them are for compiler specific plugins. + */ +#ifndef __latent_entropy +# define __latent_entropy +#endif + +#ifndef __randomize_layout +# define __randomize_layout __designated_init +#endif + +#ifndef __no_randomize_layout +# define __no_randomize_layout +#endif + +#ifndef randomized_struct_fields_start +# define randomized_struct_fields_start +# define randomized_struct_fields_end +#endif + +#ifndef asm_volatile_goto +#define asm_volatile_goto(x...) asm goto(x) +#endif + +/* Are two types/vars the same type (ignoring qualifiers)? */ +#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) + +/* Is this type a native word size -- useful for atomic operations */ +#define __native_word(t) \ + (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ + sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) + +/* Helpers for emitting diagnostics in pragmas. */ +#ifndef __diag +#define __diag(string) +#endif + +#ifndef __diag_GCC +#define __diag_GCC(version, severity, string) +#endif + +#define __diag_push() __diag(push) +#define __diag_pop() __diag(pop) + +#define __diag_ignore(compiler, version, option, comment) \ + __diag_ ## compiler(version, ignore, option) +#define __diag_warn(compiler, version, option, comment) \ + __diag_ ## compiler(version, warn, option) +#define __diag_error(compiler, version, option, comment) \ + __diag_ ## compiler(version, error, option) + #endif /* __LINUX_COMPILER_TYPES_H */ diff --git a/include/linux/cordic.h b/include/linux/cordic.h index cf68ca4a508c..3d656f54d64f 100644 --- a/include/linux/cordic.h +++ b/include/linux/cordic.h @@ -18,6 +18,15 @@ #include <linux/types.h> +#define CORDIC_ANGLE_GEN 39797 +#define CORDIC_PRECISION_SHIFT 16 +#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2) + +#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT)) +#define CORDIC_FLOAT(X) (((X) >= 0) \ + ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \ + : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1)) + /** * struct cordic_iq - i/q coordinate. 
* diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 882a9b9e34bc..c86d6d8bdfed 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -950,6 +950,14 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) } #endif +#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) +void sched_cpufreq_governor_change(struct cpufreq_policy *policy, + struct cpufreq_governor *old_gov); +#else +static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy, + struct cpufreq_governor *old_gov) { } +#endif + extern void arch_freq_prepare_all(void); extern unsigned int arch_freq_get_on_cpu(int cpu); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index e0cd2baa8380..fd586d0301e7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -164,6 +164,8 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_L2X0_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE, CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE, + CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, + CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index faed7a8977e8..4dff74f48d4b 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -33,6 +33,8 @@ struct cpuidle_state_usage { unsigned long long disable; unsigned long long usage; unsigned long long time; /* in US */ + unsigned long long above; /* Number of times it's been too deep */ + unsigned long long below; /* Number of times it's been too shallow */ #ifdef CONFIG_SUSPEND unsigned long long s2idle_usage; unsigned long long s2idle_time; /* in US */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 3634ad6fe202..902ec171fc6d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -49,7 +49,6 @@ #define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004 #define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 #define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005 -#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 #define CRYPTO_ALG_TYPE_KPP 0x00000008 #define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a #define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b @@ -77,12 +76,6 @@ #define CRYPTO_ALG_NEED_FALLBACK 0x00000100 /* - * This bit is set for symmetric key ciphers that have already been wrapped - * with a generic IV generator to prevent them from being wrapped again. - */ -#define CRYPTO_ALG_GENIV 0x00000200 - -/* * Set if the algorithm has passed automated run-time testing. Note that * if there is no run-time testing for a given algorithm it is considered * to have passed. @@ -157,7 +150,6 @@ struct crypto_async_request; struct crypto_blkcipher; struct crypto_tfm; struct crypto_type; -struct skcipher_givcrypt_request; typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); @@ -246,31 +238,16 @@ struct cipher_desc { * be called in parallel with the same transformation object. * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt * and the conditions are exactly the same. - * @givencrypt: Update the IV for encryption. With this function, a cipher - * implementation may provide the function on how to update the IV - * for encryption. - * @givdecrypt: Update the IV for decryption. This is the reverse of - * @givencrypt . - * @geniv: The transformation implementation may use an "IV generator" provided - * by the kernel crypto API. Several use cases have a predefined - * approach how IVs are to be updated. 
For such use cases, the kernel - * crypto API provides ready-to-use implementations that can be - * referenced with this variable. * @ivsize: IV size applicable for transformation. The consumer must provide an * IV of exactly that size to perform the encrypt or decrypt operation. * - * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are - * mandatory and must be filled. + * All fields except @ivsize are mandatory and must be filled. */ struct ablkcipher_alg { int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, unsigned int keylen); int (*encrypt)(struct ablkcipher_request *req); int (*decrypt)(struct ablkcipher_request *req); - int (*givencrypt)(struct skcipher_givcrypt_request *req); - int (*givdecrypt)(struct skcipher_givcrypt_request *req); - - const char *geniv; unsigned int min_keysize; unsigned int max_keysize; @@ -284,10 +261,9 @@ struct ablkcipher_alg { * @setkey: see struct ablkcipher_alg * @encrypt: see struct ablkcipher_alg * @decrypt: see struct ablkcipher_alg - * @geniv: see struct ablkcipher_alg * @ivsize: see struct ablkcipher_alg * - * All fields except @geniv and @ivsize are mandatory and must be filled. + * All fields except @ivsize are mandatory and must be filled. */ struct blkcipher_alg { int (*setkey)(struct crypto_tfm *tfm, const u8 *key, @@ -299,8 +275,6 @@ struct blkcipher_alg { struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes); - const char *geniv; - unsigned int min_keysize; unsigned int max_keysize; unsigned int ivsize; @@ -369,6 +343,115 @@ struct compress_alg { unsigned int slen, u8 *dst, unsigned int *dlen); }; +#ifdef CONFIG_CRYPTO_STATS +/* + * struct crypto_istat_aead - statistics for AEAD algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @err_cnt: number of error for AEAD requests + */ +struct crypto_istat_aead { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_akcipher - statistics for akcipher algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @verify_cnt: number of verify operation + * @sign_cnt: number of sign requests + * @err_cnt: number of error for akcipher requests + */ +struct crypto_istat_akcipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t verify_cnt; + atomic64_t sign_cnt; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_cipher - statistics for cipher algorithm + * @encrypt_cnt: number of encrypt requests + * @encrypt_tlen: total data size handled by encrypt requests + * @decrypt_cnt: number of decrypt requests + * @decrypt_tlen: total data size handled by decrypt requests + * @err_cnt: number of error for cipher requests + */ +struct crypto_istat_cipher { + atomic64_t encrypt_cnt; + atomic64_t encrypt_tlen; + atomic64_t decrypt_cnt; + atomic64_t decrypt_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_compress - statistics for compress algorithm + * @compress_cnt: number of compress requests + * @compress_tlen: total data size handled by compress requests + * @decompress_cnt: number of decompress requests + * @decompress_tlen: total data 
size handled by decompress requests + * @err_cnt: number of error for compress requests + */ +struct crypto_istat_compress { + atomic64_t compress_cnt; + atomic64_t compress_tlen; + atomic64_t decompress_cnt; + atomic64_t decompress_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_hash - statistics for has algorithm + * @hash_cnt: number of hash requests + * @hash_tlen: total data size hashed + * @err_cnt: number of error for hash requests + */ +struct crypto_istat_hash { + atomic64_t hash_cnt; + atomic64_t hash_tlen; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_kpp - statistics for KPP algorithm + * @setsecret_cnt: number of setsecrey operation + * @generate_public_key_cnt: number of generate_public_key operation + * @compute_shared_secret_cnt: number of compute_shared_secret operation + * @err_cnt: number of error for KPP requests + */ +struct crypto_istat_kpp { + atomic64_t setsecret_cnt; + atomic64_t generate_public_key_cnt; + atomic64_t compute_shared_secret_cnt; + atomic64_t err_cnt; +}; + +/* + * struct crypto_istat_rng: statistics for RNG algorithm + * @generate_cnt: number of RNG generate requests + * @generate_tlen: total data size of generated data by the RNG + * @seed_cnt: number of times the RNG was seeded + * @err_cnt: number of error for RNG requests + */ +struct crypto_istat_rng { + atomic64_t generate_cnt; + atomic64_t generate_tlen; + atomic64_t seed_cnt; + atomic64_t err_cnt; +}; +#endif /* CONFIG_CRYPTO_STATS */ #define cra_ablkcipher cra_u.ablkcipher #define cra_blkcipher cra_u.blkcipher @@ -454,32 +537,14 @@ struct compress_alg { * @cra_refcnt: internally used * @cra_destroy: internally used * - * All following statistics are for this crypto_alg - * @encrypt_cnt: number of encrypt requests - * @decrypt_cnt: number of decrypt requests - * @compress_cnt: number of compress requests - * @decompress_cnt: number of decompress requests - * @generate_cnt: number of RNG generate requests - * @seed_cnt: number of times the rng was seeded - * @hash_cnt: number of hash requests - * @sign_cnt: number of sign requests - * @setsecret_cnt: number of setsecrey operation - * @generate_public_key_cnt: number of generate_public_key operation - * @verify_cnt: number of verify operation - * @compute_shared_secret_cnt: number of compute_shared_secret operation - * @encrypt_tlen: total data size handled by encrypt requests - * @decrypt_tlen: total data size handled by decrypt requests - * @compress_tlen: total data size handled by compress requests - * @decompress_tlen: total data size handled by decompress requests - * @generate_tlen: total data size of generated data by the RNG - * @hash_tlen: total data size hashed - * @akcipher_err_cnt: number of error for akcipher requests - * @cipher_err_cnt: number of error for akcipher requests - * @compress_err_cnt: number of error for akcipher requests - * @aead_err_cnt: number of error for akcipher requests - * @hash_err_cnt: number of error for akcipher requests - * @rng_err_cnt: number of error for akcipher requests - * @kpp_err_cnt: number of error for akcipher requests + * @stats: union of all possible crypto_istat_xxx structures + * @stats.aead: statistics for AEAD algorithm + * @stats.akcipher: statistics for akcipher algorithm + * @stats.cipher: statistics for cipher algorithm + * @stats.compress: statistics for compress algorithm + * @stats.hash: statistics for hash algorithm + * @stats.rng: statistics for rng algorithm + * @stats.kpp: statistics for KPP algorithm * * The struct crypto_alg describes a generic 
Crypto API algorithm and is common * for all of the transformations. Any variable not documented here shall not @@ -515,46 +580,86 @@ struct crypto_alg { struct module *cra_module; +#ifdef CONFIG_CRYPTO_STATS union { - atomic_t encrypt_cnt; - atomic_t compress_cnt; - atomic_t generate_cnt; - atomic_t hash_cnt; - atomic_t setsecret_cnt; - }; - union { - atomic64_t encrypt_tlen; - atomic64_t compress_tlen; - atomic64_t generate_tlen; - atomic64_t hash_tlen; - }; - union { - atomic_t akcipher_err_cnt; - atomic_t cipher_err_cnt; - atomic_t compress_err_cnt; - atomic_t aead_err_cnt; - atomic_t hash_err_cnt; - atomic_t rng_err_cnt; - atomic_t kpp_err_cnt; - }; - union { - atomic_t decrypt_cnt; - atomic_t decompress_cnt; - atomic_t seed_cnt; - atomic_t generate_public_key_cnt; - }; - union { - atomic64_t decrypt_tlen; - atomic64_t decompress_tlen; - }; - union { - atomic_t verify_cnt; - atomic_t compute_shared_secret_cnt; - }; - atomic_t sign_cnt; + struct crypto_istat_aead aead; + struct crypto_istat_akcipher akcipher; + struct crypto_istat_cipher cipher; + struct crypto_istat_compress compress; + struct crypto_istat_hash hash; + struct crypto_istat_rng rng; + struct crypto_istat_kpp kpp; + } stats; +#endif /* CONFIG_CRYPTO_STATS */ } CRYPTO_MINALIGN_ATTR; +#ifdef CONFIG_CRYPTO_STATS +void crypto_stats_init(struct crypto_alg *alg); +void crypto_stats_get(struct crypto_alg *alg); +void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret); +void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg); +void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg); +void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg); +void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret); +void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret); +void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret); +void crypto_stats_rng_seed(struct crypto_alg *alg, int ret); +void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret); +void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg); +#else +static inline void crypto_stats_init(struct crypto_alg *alg) +{} +static inline void crypto_stats_get(struct crypto_alg *alg) +{} +static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret) +{} +static 
inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret) +{} +static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret) +{} +static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) +{} +static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg) +{} +#endif /* * A helper struct for waiting for completion of async crypto ops */ @@ -800,14 +905,14 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast( static inline u32 crypto_skcipher_type(u32 type) { - type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + type &= ~CRYPTO_ALG_TYPE_MASK; type |= CRYPTO_ALG_TYPE_BLKCIPHER; return type; } static inline u32 crypto_skcipher_mask(u32 mask) { - mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV); + mask &= ~CRYPTO_ALG_TYPE_MASK; mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK; return mask; } @@ -973,38 +1078,6 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( return __crypto_ablkcipher_cast(req->base.tfm); } -static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); - } else { - atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt); - atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen); - } -#endif -} - -static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req, - int ret) -{ -#ifdef CONFIG_CRYPTO_STATS - struct ablkcipher_tfm *crt = - crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); - - if (ret && ret != -EINPROGRESS && ret != -EBUSY) { - atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt); - } else { - atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt); - atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen); - } -#endif -} - /** * crypto_ablkcipher_encrypt() - encrypt plaintext * @req: reference to the ablkcipher_request handle that holds all information @@ -1020,10 +1093,13 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + struct crypto_alg *alg = crt->base->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = 
crt->encrypt(req); - crypto_stat_ablkcipher_encrypt(req, ret); + crypto_stats_ablkcipher_encrypt(nbytes, ret, alg); return ret; } @@ -1042,10 +1118,13 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) { struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req)); + struct crypto_alg *alg = crt->base->base.__crt_alg; + unsigned int nbytes = req->nbytes; int ret; + crypto_stats_get(alg); ret = crt->decrypt(req); - crypto_stat_ablkcipher_decrypt(req, ret); + crypto_stats_ablkcipher_decrypt(nbytes, ret, alg); return ret; } diff --git a/include/linux/dax.h b/include/linux/dax.h index 450b28db9533..0dd316a74a29 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -7,6 +7,8 @@ #include <linux/radix-tree.h> #include <asm/pgtable.h> +typedef unsigned long dax_entry_t; + struct iomap_ops; struct dax_device; struct dax_operations { @@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, struct block_device *bdev, struct writeback_control *wbc); struct page *dax_layout_busy_page(struct address_space *mapping); -bool dax_lock_mapping_entry(struct page *page); -void dax_unlock_mapping_entry(struct page *page); +dax_entry_t dax_lock_page(struct page *page); +void dax_unlock_page(struct page *page, dax_entry_t cookie); #else static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) @@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping, return -EOPNOTSUPP; } -static inline bool dax_lock_mapping_entry(struct page *page) +static inline dax_entry_t dax_lock_page(struct page *page) { if (IS_DAX(page->mapping->host)) - return true; - return false; + return ~0UL; + return 0; } -static inline void dax_unlock_mapping_entry(struct page *page) +static inline void dax_unlock_page(struct page *page, dax_entry_t cookie) { } #endif diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h deleted file mode 100644 index 92521471517f..000000000000 --- a/include/linux/dell-led.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __DELL_LED_H__ -#define __DELL_LED_H__ - -int dell_micmute_led_set(int on); - -#endif diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index e4963b0f45da..fbffa74bfc1b 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -131,6 +131,9 @@ struct devfreq_dev_profile { * @scaling_min_freq: Limit minimum frequency requested by OPP interface * @scaling_max_freq: Limit maximum frequency requested by OPP interface * @stop_polling: devfreq polling status of a device. + * @suspend_freq: frequency of a device set during suspend phase. + * @resume_freq: frequency of a device set in resume phase. + * @suspend_count: suspend requests counter for a device. 
* @total_trans: Number of devfreq transitions * @trans_table: Statistics of devfreq transitions * @time_in_state: Statistics of devfreq states @@ -167,6 +170,10 @@ struct devfreq { unsigned long scaling_max_freq; bool stop_polling; + unsigned long suspend_freq; + unsigned long resume_freq; + atomic_t suspend_count; + /* information for device frequency transition */ unsigned int total_trans; unsigned int *trans_table; @@ -198,6 +205,9 @@ extern void devm_devfreq_remove_device(struct device *dev, extern int devfreq_suspend_device(struct devfreq *devfreq); extern int devfreq_resume_device(struct devfreq *devfreq); +extern void devfreq_suspend(void); +extern void devfreq_resume(void); + /** * update_devfreq() - Reevaluate the device and configure frequency * @devfreq: the devfreq device @@ -324,6 +334,9 @@ static inline int devfreq_resume_device(struct devfreq *devfreq) return 0; } +static inline void devfreq_suspend(void) {} +static inline void devfreq_resume(void) {} + static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags) { diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index bd73e7a91410..9e66bfe369aa 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -5,7 +5,7 @@ #include <linux/dma-mapping.h> #include <linux/mem_encrypt.h> -#define DIRECT_MAPPING_ERROR 0 +#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0) #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include <asm/dma-direct.h> diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 02dba8cd033d..999e4b104410 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) return ret < 0 ? ret : 0; } +struct dma_fence *dma_fence_get_stub(void); u64 dma_fence_context_alloc(unsigned num); #define DMA_FENCE_TRACE(f, fmt, args...) \ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 15bd41447025..d327bdd53716 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -796,7 +796,7 @@ static inline void dmam_release_declared_memory(struct device *dev) static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp) { - unsigned long attrs = DMA_ATTR_NO_WARN; + unsigned long attrs = DMA_ATTR_WRITE_COMBINE; if (gfp & __GFP_NOWARN) attrs |= DMA_ATTR_NO_WARN; diff --git a/include/linux/edac.h b/include/linux/edac.h index 1d0c9ea8825d..342dabda9c7e 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -669,10 +669,4 @@ struct mem_ctl_info { bool fake_inject_ue; u16 fake_inject_count; }; - -/* - * Maximum number of memory controllers in the coherent fabric. 
- */ -#define EDAC_MAX_MCS 2 * MAX_NUMNODES - #endif diff --git a/include/linux/efi.h b/include/linux/efi.h index 845174e113ce..becd5d76a207 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1000,13 +1000,11 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec64 *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ #ifdef CONFIG_X86 -extern void efi_free_boot_services(void); extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, bool nonblocking); extern void efi_find_mirror(void); #else -static inline void efi_free_boot_services(void) {} static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, @@ -1046,7 +1044,6 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size); extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); -extern void efi_reserve_boot_services(void); extern int efi_get_fdt_params(struct efi_fdt_params *params); extern struct kobject *efi_kobj; @@ -1167,6 +1164,8 @@ static inline bool efi_enabled(int feature) extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); extern bool efi_is_table_address(unsigned long phys_addr); + +extern int efi_apply_persistent_mem_reservations(void); #else static inline bool efi_enabled(int feature) { @@ -1185,6 +1184,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr) { return false; } + +static inline int efi_apply_persistent_mem_reservations(void) +{ + return 0; +} #endif extern int efi_status_to_err(efi_status_t status); @@ -1708,9 +1712,19 @@ extern struct efi_runtime_work efi_rts_work; extern struct workqueue_struct *efi_rts_wq; struct linux_efi_memreserve { - phys_addr_t next; - phys_addr_t base; - phys_addr_t size; + int size; // allocated size of the array + atomic_t count; // number of entries used + phys_addr_t next; // pa of next struct instance + struct { + phys_addr_t base; + phys_addr_t size; + } entry[0]; }; +#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \ + (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + +#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \ + / sizeof(((struct linux_efi_memreserve *)0)->entry[0])) + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h new file mode 100644 index 000000000000..aa027f7bcb3e --- /dev/null +++ b/include/linux/energy_model.h @@ -0,0 +1,187 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_ENERGY_MODEL_H +#define _LINUX_ENERGY_MODEL_H +#include <linux/cpumask.h> +#include <linux/jump_label.h> +#include <linux/kobject.h> +#include <linux/rcupdate.h> +#include <linux/sched/cpufreq.h> +#include <linux/sched/topology.h> +#include <linux/types.h> + +#ifdef CONFIG_ENERGY_MODEL +/** + * em_cap_state - Capacity state of a performance domain + * @frequency: The CPU frequency in KHz, for consistency with CPUFreq + * @power: The power consumed by 1 CPU at this level, in milli-watts + * @cost: The cost coefficient associated with this level, used during + * energy calculation. 
Equal to: power * max_frequency / frequency + */ +struct em_cap_state { + unsigned long frequency; + unsigned long power; + unsigned long cost; +}; + +/** + * em_perf_domain - Performance domain + * @table: List of capacity states, in ascending order + * @nr_cap_states: Number of capacity states + * @cpus: Cpumask covering the CPUs of the domain + * + * A "performance domain" represents a group of CPUs whose performance is + * scaled together. All CPUs of a performance domain must have the same + * micro-architecture. Performance domains often have a 1-to-1 mapping with + * CPUFreq policies. + */ +struct em_perf_domain { + struct em_cap_state *table; + int nr_cap_states; + unsigned long cpus[0]; +}; + +#define EM_CPU_MAX_POWER 0xFFFF + +struct em_data_callback { + /** + * active_power() - Provide power at the next capacity state of a CPU + * @power : Active power at the capacity state in mW (modified) + * @freq : Frequency at the capacity state in kHz (modified) + * @cpu : CPU for which we do this operation + * + * active_power() must find the lowest capacity state of 'cpu' above + * 'freq' and update 'power' and 'freq' to the matching active power + * and frequency. + * + * The power is the one of a single CPU in the domain, expressed in + * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER] + * range. + * + * Return 0 on success. + */ + int (*active_power)(unsigned long *power, unsigned long *freq, int cpu); +}; +#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb } + +struct em_perf_domain *em_cpu_get(int cpu); +int em_register_perf_domain(cpumask_t *span, unsigned int nr_states, + struct em_data_callback *cb); + +/** + * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain + * @pd : performance domain for which energy has to be estimated + * @max_util : highest utilization among CPUs of the domain + * @sum_util : sum of the utilization of all CPUs in the domain + * + * Return: the sum of the energy consumed by the CPUs of the domain assuming + * a capacity state satisfying the max utilization of the domain. + */ +static inline unsigned long em_pd_energy(struct em_perf_domain *pd, + unsigned long max_util, unsigned long sum_util) +{ + unsigned long freq, scale_cpu; + struct em_cap_state *cs; + int i, cpu; + + /* + * In order to predict the capacity state, map the utilization of the + * most utilized CPU of the performance domain to a requested frequency, + * like schedutil. + */ + cpu = cpumask_first(to_cpumask(pd->cpus)); + scale_cpu = arch_scale_cpu_capacity(NULL, cpu); + cs = &pd->table[pd->nr_cap_states - 1]; + freq = map_util_freq(max_util, cs->frequency, scale_cpu); + + /* + * Find the lowest capacity state of the Energy Model above the + * requested frequency. + */ + for (i = 0; i < pd->nr_cap_states; i++) { + cs = &pd->table[i]; + if (cs->frequency >= freq) + break; + } + + /* + * The capacity of a CPU in the domain at that capacity state (cs) + * can be computed as: + * + * cs->freq * scale_cpu + * cs->cap = -------------------- (1) + * cpu_max_freq + * + * So, ignoring the costs of idle states (which are not available in + * the EM), the energy consumed by this CPU at that capacity state is + * estimated as: + * + * cs->power * cpu_util + * cpu_nrg = -------------------- (2) + * cs->cap + * + * since 'cpu_util / cs->cap' represents its percentage of busy time. 
+ * + * NOTE: Although the result of this computation actually is in + * units of power, it can be manipulated as an energy value + * over a scheduling period, since it is assumed to be + * constant during that interval. + * + * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product + * of two terms: + * + * cs->power * cpu_max_freq cpu_util + * cpu_nrg = ------------------------ * --------- (3) + * cs->freq scale_cpu + * + * The first term is static, and is stored in the em_cap_state struct + * as 'cs->cost'. + * + * Since all CPUs of the domain have the same micro-architecture, they + * share the same 'cs->cost', and the same CPU capacity. Hence, the + * total energy of the domain (which is the simple sum of the energy of + * all of its CPUs) can be factorized as: + * + * cs->cost * \Sum cpu_util + * pd_nrg = ------------------------ (4) + * scale_cpu + */ + return cs->cost * sum_util / scale_cpu; +} + +/** + * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain + * @pd : performance domain for which this must be done + * + * Return: the number of capacity states in the performance domain table + */ +static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +{ + return pd->nr_cap_states; +} + +#else +struct em_perf_domain {}; +struct em_data_callback {}; +#define EM_DATA_CB(_active_power_cb) { } + +static inline int em_register_perf_domain(cpumask_t *span, + unsigned int nr_states, struct em_data_callback *cb) +{ + return -EINVAL; +} +static inline struct em_perf_domain *em_cpu_get(int cpu) +{ + return NULL; +} +static inline unsigned long em_pd_energy(struct em_perf_domain *pd, + unsigned long max_util, unsigned long sum_util) +{ + return 0; +} +static inline int em_pd_nr_cap_states(struct em_perf_domain *pd) +{ + return 0; +} +#endif + +#endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 572e11bb8696..2c0af7b00715 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -32,6 +32,7 @@ struct device; int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr); unsigned char *arch_get_platform_mac_address(void); +int nvmem_get_mac_address(struct device *dev, void *addrbuf); u32 eth_get_headlen(void *data, unsigned int max_len); __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); extern const struct header_ops eth_header_ops; diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index a5a60691e48b..9e2142795335 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -37,10 +37,11 @@ /* Events that user can request to be notified on */ #define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \ - FAN_CLOSE | FAN_OPEN) + FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC) /* Events that require a permission response from user */ -#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM) +#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ + FAN_OPEN_EXEC_PERM) /* Extra flags that may be reported with event or control handling of events */ #define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR) diff --git a/include/linux/filter.h b/include/linux/filter.h index de629b706d1d..8c8544b375eb 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -449,6 +449,13 @@ struct sock_reuseport; offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ offsetof(TYPE, MEMBER1) ... 
offsetofend(TYPE, MEMBER2) - 1 +#if BITS_PER_LONG == 64 +# define bpf_ctx_range_ptr(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 +#else +# define bpf_ctx_range_ptr(TYPE, MEMBER) \ + offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 +#endif /* BITS_PER_LONG == 64 */ #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ ({ \ @@ -668,24 +675,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size) return size; } -static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access, - u32 size_default) -{ - size_default = bpf_ctx_off_adjust_machine(size_default); - size_access = bpf_ctx_off_adjust_machine(size_access); - -#ifdef __LITTLE_ENDIAN - return (off & (size_default - 1)) == 0; -#else - return (off & (size_default - 1)) + size_access == size_default; -#endif -} - static inline bool bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) { - return bpf_ctx_narrow_align_ok(off, size, size_default) && - size <= size_default && (size & (size - 1)) == 0; + return size <= size_default && (size & (size - 1)) == 0; } #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) @@ -732,6 +725,13 @@ void bpf_prog_free(struct bpf_prog *fp); bool bpf_opcode_in_insntable(u8 code); +void bpf_prog_free_linfo(struct bpf_prog *prog); +void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, + const u32 *insn_to_jit_off); +int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); +void bpf_prog_free_jited_linfo(struct bpf_prog *prog); +void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); + struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, gfp_t gfp_extra_flags); @@ -854,7 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, extern int bpf_jit_enable; extern int bpf_jit_harden; extern int bpf_jit_kallsyms; -extern int bpf_jit_limit; +extern long bpf_jit_limit; typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); @@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr); void bpf_jit_free(struct bpf_prog *fp); +int bpf_jit_get_func_addr(const struct bpf_prog *prog, + const struct bpf_insn *insn, bool extra_pass, + u64 *func_addr, bool *func_addr_fixed); + struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h index 29ada609de03..ebc55098faee 100644 --- a/include/linux/firmware/imx/sci.h +++ b/include/linux/firmware/imx/sci.h @@ -14,4 +14,5 @@ #include <linux/firmware/imx/types.h> #include <linux/firmware/imx/svc/misc.h> +#include <linux/firmware/imx/svc/pm.h> #endif /* _SC_SCI_H */ diff --git a/include/linux/firmware/imx/svc/pm.h b/include/linux/firmware/imx/svc/pm.h new file mode 100644 index 000000000000..1f6975dd37b0 --- /dev/null +++ b/include/linux/firmware/imx/svc/pm.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP + * + * Header file containing the public API for the System Controller (SC) + * Power Management (PM) function. This includes functions for power state + * control, clock control, reset control, and wake-up event control. + * + * PM_SVC (SVC) Power Management Service + * + * Module for the Power Management (PM) service. 
+ */ + +#ifndef _SC_PM_API_H +#define _SC_PM_API_H + +#include <linux/firmware/imx/sci.h> + +/* + * This type is used to indicate RPC PM function calls. + */ +enum imx_sc_pm_func { + IMX_SC_PM_FUNC_UNKNOWN = 0, + IMX_SC_PM_FUNC_SET_SYS_POWER_MODE = 19, + IMX_SC_PM_FUNC_SET_PARTITION_POWER_MODE = 1, + IMX_SC_PM_FUNC_GET_SYS_POWER_MODE = 2, + IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE = 3, + IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE = 4, + IMX_SC_PM_FUNC_REQ_LOW_POWER_MODE = 16, + IMX_SC_PM_FUNC_SET_CPU_RESUME_ADDR = 17, + IMX_SC_PM_FUNC_REQ_SYS_IF_POWER_MODE = 18, + IMX_SC_PM_FUNC_SET_CLOCK_RATE = 5, + IMX_SC_PM_FUNC_GET_CLOCK_RATE = 6, + IMX_SC_PM_FUNC_CLOCK_ENABLE = 7, + IMX_SC_PM_FUNC_SET_CLOCK_PARENT = 14, + IMX_SC_PM_FUNC_GET_CLOCK_PARENT = 15, + IMX_SC_PM_FUNC_RESET = 13, + IMX_SC_PM_FUNC_RESET_REASON = 10, + IMX_SC_PM_FUNC_BOOT = 8, + IMX_SC_PM_FUNC_REBOOT = 9, + IMX_SC_PM_FUNC_REBOOT_PARTITION = 12, + IMX_SC_PM_FUNC_CPU_START = 11, +}; + +/* + * Defines for ALL parameters + */ +#define IMX_SC_PM_CLK_ALL UINT8_MAX /* All clocks */ + +/* + * Defines for SC PM Power Mode + */ +#define IMX_SC_PM_PW_MODE_OFF 0 /* Power off */ +#define IMX_SC_PM_PW_MODE_STBY 1 /* Power in standby */ +#define IMX_SC_PM_PW_MODE_LP 2 /* Power in low-power */ +#define IMX_SC_PM_PW_MODE_ON 3 /* Power on */ + +/* + * Defines for SC PM CLK + */ +#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */ +#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */ +#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */ +#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */ +#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */ +#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */ +#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */ +#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */ +#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */ +#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */ +#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */ +#define IMX_SC_PM_CLK_PLL 4 /* PLL */ +#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */ + +/* + * Defines for SC PM CLK Parent + */ +#define IMX_SC_PM_PARENT_XTAL 0 /* Parent is XTAL. */ +#define IMX_SC_PM_PARENT_PLL0 1 /* Parent is PLL0 */ +#define IMX_SC_PM_PARENT_PLL1 2 /* Parent is PLL1 or PLL0/2 */ +#define IMX_SC_PM_PARENT_PLL2 3 /* Parent in PLL2 or PLL0/4 */ +#define IMX_SC_PM_PARENT_BYPS 4 /* Parent is a bypass clock. */ + +#endif /* _SC_PM_API_H */ diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h index 9cbf0c4a6069..80821100e85f 100644 --- a/include/linux/firmware/imx/types.h +++ b/include/linux/firmware/imx/types.h @@ -10,558 +10,6 @@ #define _SC_TYPES_H /* - * This type is used to indicate a resource. Resources include peripherals - * and bus masters (but not memory regions). Note items from list should - * never be changed or removed (only added to at the end of the list). 
- */ -enum imx_sc_rsrc { - IMX_SC_R_A53 = 0, - IMX_SC_R_A53_0 = 1, - IMX_SC_R_A53_1 = 2, - IMX_SC_R_A53_2 = 3, - IMX_SC_R_A53_3 = 4, - IMX_SC_R_A72 = 5, - IMX_SC_R_A72_0 = 6, - IMX_SC_R_A72_1 = 7, - IMX_SC_R_A72_2 = 8, - IMX_SC_R_A72_3 = 9, - IMX_SC_R_CCI = 10, - IMX_SC_R_DB = 11, - IMX_SC_R_DRC_0 = 12, - IMX_SC_R_DRC_1 = 13, - IMX_SC_R_GIC_SMMU = 14, - IMX_SC_R_IRQSTR_M4_0 = 15, - IMX_SC_R_IRQSTR_M4_1 = 16, - IMX_SC_R_SMMU = 17, - IMX_SC_R_GIC = 18, - IMX_SC_R_DC_0_BLIT0 = 19, - IMX_SC_R_DC_0_BLIT1 = 20, - IMX_SC_R_DC_0_BLIT2 = 21, - IMX_SC_R_DC_0_BLIT_OUT = 22, - IMX_SC_R_DC_0_CAPTURE0 = 23, - IMX_SC_R_DC_0_CAPTURE1 = 24, - IMX_SC_R_DC_0_WARP = 25, - IMX_SC_R_DC_0_INTEGRAL0 = 26, - IMX_SC_R_DC_0_INTEGRAL1 = 27, - IMX_SC_R_DC_0_VIDEO0 = 28, - IMX_SC_R_DC_0_VIDEO1 = 29, - IMX_SC_R_DC_0_FRAC0 = 30, - IMX_SC_R_DC_0_FRAC1 = 31, - IMX_SC_R_DC_0 = 32, - IMX_SC_R_GPU_2_PID0 = 33, - IMX_SC_R_DC_0_PLL_0 = 34, - IMX_SC_R_DC_0_PLL_1 = 35, - IMX_SC_R_DC_1_BLIT0 = 36, - IMX_SC_R_DC_1_BLIT1 = 37, - IMX_SC_R_DC_1_BLIT2 = 38, - IMX_SC_R_DC_1_BLIT_OUT = 39, - IMX_SC_R_DC_1_CAPTURE0 = 40, - IMX_SC_R_DC_1_CAPTURE1 = 41, - IMX_SC_R_DC_1_WARP = 42, - IMX_SC_R_DC_1_INTEGRAL0 = 43, - IMX_SC_R_DC_1_INTEGRAL1 = 44, - IMX_SC_R_DC_1_VIDEO0 = 45, - IMX_SC_R_DC_1_VIDEO1 = 46, - IMX_SC_R_DC_1_FRAC0 = 47, - IMX_SC_R_DC_1_FRAC1 = 48, - IMX_SC_R_DC_1 = 49, - IMX_SC_R_GPU_3_PID0 = 50, - IMX_SC_R_DC_1_PLL_0 = 51, - IMX_SC_R_DC_1_PLL_1 = 52, - IMX_SC_R_SPI_0 = 53, - IMX_SC_R_SPI_1 = 54, - IMX_SC_R_SPI_2 = 55, - IMX_SC_R_SPI_3 = 56, - IMX_SC_R_UART_0 = 57, - IMX_SC_R_UART_1 = 58, - IMX_SC_R_UART_2 = 59, - IMX_SC_R_UART_3 = 60, - IMX_SC_R_UART_4 = 61, - IMX_SC_R_EMVSIM_0 = 62, - IMX_SC_R_EMVSIM_1 = 63, - IMX_SC_R_DMA_0_CH0 = 64, - IMX_SC_R_DMA_0_CH1 = 65, - IMX_SC_R_DMA_0_CH2 = 66, - IMX_SC_R_DMA_0_CH3 = 67, - IMX_SC_R_DMA_0_CH4 = 68, - IMX_SC_R_DMA_0_CH5 = 69, - IMX_SC_R_DMA_0_CH6 = 70, - IMX_SC_R_DMA_0_CH7 = 71, - IMX_SC_R_DMA_0_CH8 = 72, - IMX_SC_R_DMA_0_CH9 = 73, - IMX_SC_R_DMA_0_CH10 = 74, - IMX_SC_R_DMA_0_CH11 = 75, - IMX_SC_R_DMA_0_CH12 = 76, - IMX_SC_R_DMA_0_CH13 = 77, - IMX_SC_R_DMA_0_CH14 = 78, - IMX_SC_R_DMA_0_CH15 = 79, - IMX_SC_R_DMA_0_CH16 = 80, - IMX_SC_R_DMA_0_CH17 = 81, - IMX_SC_R_DMA_0_CH18 = 82, - IMX_SC_R_DMA_0_CH19 = 83, - IMX_SC_R_DMA_0_CH20 = 84, - IMX_SC_R_DMA_0_CH21 = 85, - IMX_SC_R_DMA_0_CH22 = 86, - IMX_SC_R_DMA_0_CH23 = 87, - IMX_SC_R_DMA_0_CH24 = 88, - IMX_SC_R_DMA_0_CH25 = 89, - IMX_SC_R_DMA_0_CH26 = 90, - IMX_SC_R_DMA_0_CH27 = 91, - IMX_SC_R_DMA_0_CH28 = 92, - IMX_SC_R_DMA_0_CH29 = 93, - IMX_SC_R_DMA_0_CH30 = 94, - IMX_SC_R_DMA_0_CH31 = 95, - IMX_SC_R_I2C_0 = 96, - IMX_SC_R_I2C_1 = 97, - IMX_SC_R_I2C_2 = 98, - IMX_SC_R_I2C_3 = 99, - IMX_SC_R_I2C_4 = 100, - IMX_SC_R_ADC_0 = 101, - IMX_SC_R_ADC_1 = 102, - IMX_SC_R_FTM_0 = 103, - IMX_SC_R_FTM_1 = 104, - IMX_SC_R_CAN_0 = 105, - IMX_SC_R_CAN_1 = 106, - IMX_SC_R_CAN_2 = 107, - IMX_SC_R_DMA_1_CH0 = 108, - IMX_SC_R_DMA_1_CH1 = 109, - IMX_SC_R_DMA_1_CH2 = 110, - IMX_SC_R_DMA_1_CH3 = 111, - IMX_SC_R_DMA_1_CH4 = 112, - IMX_SC_R_DMA_1_CH5 = 113, - IMX_SC_R_DMA_1_CH6 = 114, - IMX_SC_R_DMA_1_CH7 = 115, - IMX_SC_R_DMA_1_CH8 = 116, - IMX_SC_R_DMA_1_CH9 = 117, - IMX_SC_R_DMA_1_CH10 = 118, - IMX_SC_R_DMA_1_CH11 = 119, - IMX_SC_R_DMA_1_CH12 = 120, - IMX_SC_R_DMA_1_CH13 = 121, - IMX_SC_R_DMA_1_CH14 = 122, - IMX_SC_R_DMA_1_CH15 = 123, - IMX_SC_R_DMA_1_CH16 = 124, - IMX_SC_R_DMA_1_CH17 = 125, - IMX_SC_R_DMA_1_CH18 = 126, - IMX_SC_R_DMA_1_CH19 = 127, - IMX_SC_R_DMA_1_CH20 = 128, - IMX_SC_R_DMA_1_CH21 = 129, - IMX_SC_R_DMA_1_CH22 = 130, - IMX_SC_R_DMA_1_CH23 = 131, - 
IMX_SC_R_DMA_1_CH24 = 132, - IMX_SC_R_DMA_1_CH25 = 133, - IMX_SC_R_DMA_1_CH26 = 134, - IMX_SC_R_DMA_1_CH27 = 135, - IMX_SC_R_DMA_1_CH28 = 136, - IMX_SC_R_DMA_1_CH29 = 137, - IMX_SC_R_DMA_1_CH30 = 138, - IMX_SC_R_DMA_1_CH31 = 139, - IMX_SC_R_UNUSED1 = 140, - IMX_SC_R_UNUSED2 = 141, - IMX_SC_R_UNUSED3 = 142, - IMX_SC_R_UNUSED4 = 143, - IMX_SC_R_GPU_0_PID0 = 144, - IMX_SC_R_GPU_0_PID1 = 145, - IMX_SC_R_GPU_0_PID2 = 146, - IMX_SC_R_GPU_0_PID3 = 147, - IMX_SC_R_GPU_1_PID0 = 148, - IMX_SC_R_GPU_1_PID1 = 149, - IMX_SC_R_GPU_1_PID2 = 150, - IMX_SC_R_GPU_1_PID3 = 151, - IMX_SC_R_PCIE_A = 152, - IMX_SC_R_SERDES_0 = 153, - IMX_SC_R_MATCH_0 = 154, - IMX_SC_R_MATCH_1 = 155, - IMX_SC_R_MATCH_2 = 156, - IMX_SC_R_MATCH_3 = 157, - IMX_SC_R_MATCH_4 = 158, - IMX_SC_R_MATCH_5 = 159, - IMX_SC_R_MATCH_6 = 160, - IMX_SC_R_MATCH_7 = 161, - IMX_SC_R_MATCH_8 = 162, - IMX_SC_R_MATCH_9 = 163, - IMX_SC_R_MATCH_10 = 164, - IMX_SC_R_MATCH_11 = 165, - IMX_SC_R_MATCH_12 = 166, - IMX_SC_R_MATCH_13 = 167, - IMX_SC_R_MATCH_14 = 168, - IMX_SC_R_PCIE_B = 169, - IMX_SC_R_SATA_0 = 170, - IMX_SC_R_SERDES_1 = 171, - IMX_SC_R_HSIO_GPIO = 172, - IMX_SC_R_MATCH_15 = 173, - IMX_SC_R_MATCH_16 = 174, - IMX_SC_R_MATCH_17 = 175, - IMX_SC_R_MATCH_18 = 176, - IMX_SC_R_MATCH_19 = 177, - IMX_SC_R_MATCH_20 = 178, - IMX_SC_R_MATCH_21 = 179, - IMX_SC_R_MATCH_22 = 180, - IMX_SC_R_MATCH_23 = 181, - IMX_SC_R_MATCH_24 = 182, - IMX_SC_R_MATCH_25 = 183, - IMX_SC_R_MATCH_26 = 184, - IMX_SC_R_MATCH_27 = 185, - IMX_SC_R_MATCH_28 = 186, - IMX_SC_R_LCD_0 = 187, - IMX_SC_R_LCD_0_PWM_0 = 188, - IMX_SC_R_LCD_0_I2C_0 = 189, - IMX_SC_R_LCD_0_I2C_1 = 190, - IMX_SC_R_PWM_0 = 191, - IMX_SC_R_PWM_1 = 192, - IMX_SC_R_PWM_2 = 193, - IMX_SC_R_PWM_3 = 194, - IMX_SC_R_PWM_4 = 195, - IMX_SC_R_PWM_5 = 196, - IMX_SC_R_PWM_6 = 197, - IMX_SC_R_PWM_7 = 198, - IMX_SC_R_GPIO_0 = 199, - IMX_SC_R_GPIO_1 = 200, - IMX_SC_R_GPIO_2 = 201, - IMX_SC_R_GPIO_3 = 202, - IMX_SC_R_GPIO_4 = 203, - IMX_SC_R_GPIO_5 = 204, - IMX_SC_R_GPIO_6 = 205, - IMX_SC_R_GPIO_7 = 206, - IMX_SC_R_GPT_0 = 207, - IMX_SC_R_GPT_1 = 208, - IMX_SC_R_GPT_2 = 209, - IMX_SC_R_GPT_3 = 210, - IMX_SC_R_GPT_4 = 211, - IMX_SC_R_KPP = 212, - IMX_SC_R_MU_0A = 213, - IMX_SC_R_MU_1A = 214, - IMX_SC_R_MU_2A = 215, - IMX_SC_R_MU_3A = 216, - IMX_SC_R_MU_4A = 217, - IMX_SC_R_MU_5A = 218, - IMX_SC_R_MU_6A = 219, - IMX_SC_R_MU_7A = 220, - IMX_SC_R_MU_8A = 221, - IMX_SC_R_MU_9A = 222, - IMX_SC_R_MU_10A = 223, - IMX_SC_R_MU_11A = 224, - IMX_SC_R_MU_12A = 225, - IMX_SC_R_MU_13A = 226, - IMX_SC_R_MU_5B = 227, - IMX_SC_R_MU_6B = 228, - IMX_SC_R_MU_7B = 229, - IMX_SC_R_MU_8B = 230, - IMX_SC_R_MU_9B = 231, - IMX_SC_R_MU_10B = 232, - IMX_SC_R_MU_11B = 233, - IMX_SC_R_MU_12B = 234, - IMX_SC_R_MU_13B = 235, - IMX_SC_R_ROM_0 = 236, - IMX_SC_R_FSPI_0 = 237, - IMX_SC_R_FSPI_1 = 238, - IMX_SC_R_IEE = 239, - IMX_SC_R_IEE_R0 = 240, - IMX_SC_R_IEE_R1 = 241, - IMX_SC_R_IEE_R2 = 242, - IMX_SC_R_IEE_R3 = 243, - IMX_SC_R_IEE_R4 = 244, - IMX_SC_R_IEE_R5 = 245, - IMX_SC_R_IEE_R6 = 246, - IMX_SC_R_IEE_R7 = 247, - IMX_SC_R_SDHC_0 = 248, - IMX_SC_R_SDHC_1 = 249, - IMX_SC_R_SDHC_2 = 250, - IMX_SC_R_ENET_0 = 251, - IMX_SC_R_ENET_1 = 252, - IMX_SC_R_MLB_0 = 253, - IMX_SC_R_DMA_2_CH0 = 254, - IMX_SC_R_DMA_2_CH1 = 255, - IMX_SC_R_DMA_2_CH2 = 256, - IMX_SC_R_DMA_2_CH3 = 257, - IMX_SC_R_DMA_2_CH4 = 258, - IMX_SC_R_USB_0 = 259, - IMX_SC_R_USB_1 = 260, - IMX_SC_R_USB_0_PHY = 261, - IMX_SC_R_USB_2 = 262, - IMX_SC_R_USB_2_PHY = 263, - IMX_SC_R_DTCP = 264, - IMX_SC_R_NAND = 265, - IMX_SC_R_LVDS_0 = 266, - IMX_SC_R_LVDS_0_PWM_0 = 267, - IMX_SC_R_LVDS_0_I2C_0 = 268, - 
IMX_SC_R_LVDS_0_I2C_1 = 269, - IMX_SC_R_LVDS_1 = 270, - IMX_SC_R_LVDS_1_PWM_0 = 271, - IMX_SC_R_LVDS_1_I2C_0 = 272, - IMX_SC_R_LVDS_1_I2C_1 = 273, - IMX_SC_R_LVDS_2 = 274, - IMX_SC_R_LVDS_2_PWM_0 = 275, - IMX_SC_R_LVDS_2_I2C_0 = 276, - IMX_SC_R_LVDS_2_I2C_1 = 277, - IMX_SC_R_M4_0_PID0 = 278, - IMX_SC_R_M4_0_PID1 = 279, - IMX_SC_R_M4_0_PID2 = 280, - IMX_SC_R_M4_0_PID3 = 281, - IMX_SC_R_M4_0_PID4 = 282, - IMX_SC_R_M4_0_RGPIO = 283, - IMX_SC_R_M4_0_SEMA42 = 284, - IMX_SC_R_M4_0_TPM = 285, - IMX_SC_R_M4_0_PIT = 286, - IMX_SC_R_M4_0_UART = 287, - IMX_SC_R_M4_0_I2C = 288, - IMX_SC_R_M4_0_INTMUX = 289, - IMX_SC_R_M4_0_SIM = 290, - IMX_SC_R_M4_0_WDOG = 291, - IMX_SC_R_M4_0_MU_0B = 292, - IMX_SC_R_M4_0_MU_0A0 = 293, - IMX_SC_R_M4_0_MU_0A1 = 294, - IMX_SC_R_M4_0_MU_0A2 = 295, - IMX_SC_R_M4_0_MU_0A3 = 296, - IMX_SC_R_M4_0_MU_1A = 297, - IMX_SC_R_M4_1_PID0 = 298, - IMX_SC_R_M4_1_PID1 = 299, - IMX_SC_R_M4_1_PID2 = 300, - IMX_SC_R_M4_1_PID3 = 301, - IMX_SC_R_M4_1_PID4 = 302, - IMX_SC_R_M4_1_RGPIO = 303, - IMX_SC_R_M4_1_SEMA42 = 304, - IMX_SC_R_M4_1_TPM = 305, - IMX_SC_R_M4_1_PIT = 306, - IMX_SC_R_M4_1_UART = 307, - IMX_SC_R_M4_1_I2C = 308, - IMX_SC_R_M4_1_INTMUX = 309, - IMX_SC_R_M4_1_SIM = 310, - IMX_SC_R_M4_1_WDOG = 311, - IMX_SC_R_M4_1_MU_0B = 312, - IMX_SC_R_M4_1_MU_0A0 = 313, - IMX_SC_R_M4_1_MU_0A1 = 314, - IMX_SC_R_M4_1_MU_0A2 = 315, - IMX_SC_R_M4_1_MU_0A3 = 316, - IMX_SC_R_M4_1_MU_1A = 317, - IMX_SC_R_SAI_0 = 318, - IMX_SC_R_SAI_1 = 319, - IMX_SC_R_SAI_2 = 320, - IMX_SC_R_IRQSTR_SCU2 = 321, - IMX_SC_R_IRQSTR_DSP = 322, - IMX_SC_R_UNUSED5 = 323, - IMX_SC_R_UNUSED6 = 324, - IMX_SC_R_AUDIO_PLL_0 = 325, - IMX_SC_R_PI_0 = 326, - IMX_SC_R_PI_0_PWM_0 = 327, - IMX_SC_R_PI_0_PWM_1 = 328, - IMX_SC_R_PI_0_I2C_0 = 329, - IMX_SC_R_PI_0_PLL = 330, - IMX_SC_R_PI_1 = 331, - IMX_SC_R_PI_1_PWM_0 = 332, - IMX_SC_R_PI_1_PWM_1 = 333, - IMX_SC_R_PI_1_I2C_0 = 334, - IMX_SC_R_PI_1_PLL = 335, - IMX_SC_R_SC_PID0 = 336, - IMX_SC_R_SC_PID1 = 337, - IMX_SC_R_SC_PID2 = 338, - IMX_SC_R_SC_PID3 = 339, - IMX_SC_R_SC_PID4 = 340, - IMX_SC_R_SC_SEMA42 = 341, - IMX_SC_R_SC_TPM = 342, - IMX_SC_R_SC_PIT = 343, - IMX_SC_R_SC_UART = 344, - IMX_SC_R_SC_I2C = 345, - IMX_SC_R_SC_MU_0B = 346, - IMX_SC_R_SC_MU_0A0 = 347, - IMX_SC_R_SC_MU_0A1 = 348, - IMX_SC_R_SC_MU_0A2 = 349, - IMX_SC_R_SC_MU_0A3 = 350, - IMX_SC_R_SC_MU_1A = 351, - IMX_SC_R_SYSCNT_RD = 352, - IMX_SC_R_SYSCNT_CMP = 353, - IMX_SC_R_DEBUG = 354, - IMX_SC_R_SYSTEM = 355, - IMX_SC_R_SNVS = 356, - IMX_SC_R_OTP = 357, - IMX_SC_R_VPU_PID0 = 358, - IMX_SC_R_VPU_PID1 = 359, - IMX_SC_R_VPU_PID2 = 360, - IMX_SC_R_VPU_PID3 = 361, - IMX_SC_R_VPU_PID4 = 362, - IMX_SC_R_VPU_PID5 = 363, - IMX_SC_R_VPU_PID6 = 364, - IMX_SC_R_VPU_PID7 = 365, - IMX_SC_R_VPU_UART = 366, - IMX_SC_R_VPUCORE = 367, - IMX_SC_R_VPUCORE_0 = 368, - IMX_SC_R_VPUCORE_1 = 369, - IMX_SC_R_VPUCORE_2 = 370, - IMX_SC_R_VPUCORE_3 = 371, - IMX_SC_R_DMA_4_CH0 = 372, - IMX_SC_R_DMA_4_CH1 = 373, - IMX_SC_R_DMA_4_CH2 = 374, - IMX_SC_R_DMA_4_CH3 = 375, - IMX_SC_R_DMA_4_CH4 = 376, - IMX_SC_R_ISI_CH0 = 377, - IMX_SC_R_ISI_CH1 = 378, - IMX_SC_R_ISI_CH2 = 379, - IMX_SC_R_ISI_CH3 = 380, - IMX_SC_R_ISI_CH4 = 381, - IMX_SC_R_ISI_CH5 = 382, - IMX_SC_R_ISI_CH6 = 383, - IMX_SC_R_ISI_CH7 = 384, - IMX_SC_R_MJPEG_DEC_S0 = 385, - IMX_SC_R_MJPEG_DEC_S1 = 386, - IMX_SC_R_MJPEG_DEC_S2 = 387, - IMX_SC_R_MJPEG_DEC_S3 = 388, - IMX_SC_R_MJPEG_ENC_S0 = 389, - IMX_SC_R_MJPEG_ENC_S1 = 390, - IMX_SC_R_MJPEG_ENC_S2 = 391, - IMX_SC_R_MJPEG_ENC_S3 = 392, - IMX_SC_R_MIPI_0 = 393, - IMX_SC_R_MIPI_0_PWM_0 = 394, - IMX_SC_R_MIPI_0_I2C_0 = 395, - 
IMX_SC_R_MIPI_0_I2C_1 = 396, - IMX_SC_R_MIPI_1 = 397, - IMX_SC_R_MIPI_1_PWM_0 = 398, - IMX_SC_R_MIPI_1_I2C_0 = 399, - IMX_SC_R_MIPI_1_I2C_1 = 400, - IMX_SC_R_CSI_0 = 401, - IMX_SC_R_CSI_0_PWM_0 = 402, - IMX_SC_R_CSI_0_I2C_0 = 403, - IMX_SC_R_CSI_1 = 404, - IMX_SC_R_CSI_1_PWM_0 = 405, - IMX_SC_R_CSI_1_I2C_0 = 406, - IMX_SC_R_HDMI = 407, - IMX_SC_R_HDMI_I2S = 408, - IMX_SC_R_HDMI_I2C_0 = 409, - IMX_SC_R_HDMI_PLL_0 = 410, - IMX_SC_R_HDMI_RX = 411, - IMX_SC_R_HDMI_RX_BYPASS = 412, - IMX_SC_R_HDMI_RX_I2C_0 = 413, - IMX_SC_R_ASRC_0 = 414, - IMX_SC_R_ESAI_0 = 415, - IMX_SC_R_SPDIF_0 = 416, - IMX_SC_R_SPDIF_1 = 417, - IMX_SC_R_SAI_3 = 418, - IMX_SC_R_SAI_4 = 419, - IMX_SC_R_SAI_5 = 420, - IMX_SC_R_GPT_5 = 421, - IMX_SC_R_GPT_6 = 422, - IMX_SC_R_GPT_7 = 423, - IMX_SC_R_GPT_8 = 424, - IMX_SC_R_GPT_9 = 425, - IMX_SC_R_GPT_10 = 426, - IMX_SC_R_DMA_2_CH5 = 427, - IMX_SC_R_DMA_2_CH6 = 428, - IMX_SC_R_DMA_2_CH7 = 429, - IMX_SC_R_DMA_2_CH8 = 430, - IMX_SC_R_DMA_2_CH9 = 431, - IMX_SC_R_DMA_2_CH10 = 432, - IMX_SC_R_DMA_2_CH11 = 433, - IMX_SC_R_DMA_2_CH12 = 434, - IMX_SC_R_DMA_2_CH13 = 435, - IMX_SC_R_DMA_2_CH14 = 436, - IMX_SC_R_DMA_2_CH15 = 437, - IMX_SC_R_DMA_2_CH16 = 438, - IMX_SC_R_DMA_2_CH17 = 439, - IMX_SC_R_DMA_2_CH18 = 440, - IMX_SC_R_DMA_2_CH19 = 441, - IMX_SC_R_DMA_2_CH20 = 442, - IMX_SC_R_DMA_2_CH21 = 443, - IMX_SC_R_DMA_2_CH22 = 444, - IMX_SC_R_DMA_2_CH23 = 445, - IMX_SC_R_DMA_2_CH24 = 446, - IMX_SC_R_DMA_2_CH25 = 447, - IMX_SC_R_DMA_2_CH26 = 448, - IMX_SC_R_DMA_2_CH27 = 449, - IMX_SC_R_DMA_2_CH28 = 450, - IMX_SC_R_DMA_2_CH29 = 451, - IMX_SC_R_DMA_2_CH30 = 452, - IMX_SC_R_DMA_2_CH31 = 453, - IMX_SC_R_ASRC_1 = 454, - IMX_SC_R_ESAI_1 = 455, - IMX_SC_R_SAI_6 = 456, - IMX_SC_R_SAI_7 = 457, - IMX_SC_R_AMIX = 458, - IMX_SC_R_MQS_0 = 459, - IMX_SC_R_DMA_3_CH0 = 460, - IMX_SC_R_DMA_3_CH1 = 461, - IMX_SC_R_DMA_3_CH2 = 462, - IMX_SC_R_DMA_3_CH3 = 463, - IMX_SC_R_DMA_3_CH4 = 464, - IMX_SC_R_DMA_3_CH5 = 465, - IMX_SC_R_DMA_3_CH6 = 466, - IMX_SC_R_DMA_3_CH7 = 467, - IMX_SC_R_DMA_3_CH8 = 468, - IMX_SC_R_DMA_3_CH9 = 469, - IMX_SC_R_DMA_3_CH10 = 470, - IMX_SC_R_DMA_3_CH11 = 471, - IMX_SC_R_DMA_3_CH12 = 472, - IMX_SC_R_DMA_3_CH13 = 473, - IMX_SC_R_DMA_3_CH14 = 474, - IMX_SC_R_DMA_3_CH15 = 475, - IMX_SC_R_DMA_3_CH16 = 476, - IMX_SC_R_DMA_3_CH17 = 477, - IMX_SC_R_DMA_3_CH18 = 478, - IMX_SC_R_DMA_3_CH19 = 479, - IMX_SC_R_DMA_3_CH20 = 480, - IMX_SC_R_DMA_3_CH21 = 481, - IMX_SC_R_DMA_3_CH22 = 482, - IMX_SC_R_DMA_3_CH23 = 483, - IMX_SC_R_DMA_3_CH24 = 484, - IMX_SC_R_DMA_3_CH25 = 485, - IMX_SC_R_DMA_3_CH26 = 486, - IMX_SC_R_DMA_3_CH27 = 487, - IMX_SC_R_DMA_3_CH28 = 488, - IMX_SC_R_DMA_3_CH29 = 489, - IMX_SC_R_DMA_3_CH30 = 490, - IMX_SC_R_DMA_3_CH31 = 491, - IMX_SC_R_AUDIO_PLL_1 = 492, - IMX_SC_R_AUDIO_CLK_0 = 493, - IMX_SC_R_AUDIO_CLK_1 = 494, - IMX_SC_R_MCLK_OUT_0 = 495, - IMX_SC_R_MCLK_OUT_1 = 496, - IMX_SC_R_PMIC_0 = 497, - IMX_SC_R_PMIC_1 = 498, - IMX_SC_R_SECO = 499, - IMX_SC_R_CAAM_JR1 = 500, - IMX_SC_R_CAAM_JR2 = 501, - IMX_SC_R_CAAM_JR3 = 502, - IMX_SC_R_SECO_MU_2 = 503, - IMX_SC_R_SECO_MU_3 = 504, - IMX_SC_R_SECO_MU_4 = 505, - IMX_SC_R_HDMI_RX_PWM_0 = 506, - IMX_SC_R_A35 = 507, - IMX_SC_R_A35_0 = 508, - IMX_SC_R_A35_1 = 509, - IMX_SC_R_A35_2 = 510, - IMX_SC_R_A35_3 = 511, - IMX_SC_R_DSP = 512, - IMX_SC_R_DSP_RAM = 513, - IMX_SC_R_CAAM_JR1_OUT = 514, - IMX_SC_R_CAAM_JR2_OUT = 515, - IMX_SC_R_CAAM_JR3_OUT = 516, - IMX_SC_R_VPU_DEC_0 = 517, - IMX_SC_R_VPU_ENC_0 = 518, - IMX_SC_R_CAAM_JR0 = 519, - IMX_SC_R_CAAM_JR0_OUT = 520, - IMX_SC_R_PMIC_2 = 521, - IMX_SC_R_DBLOGIC = 522, - IMX_SC_R_HDMI_PLL_1 = 523, - 
IMX_SC_R_BOARD_R0 = 524, - IMX_SC_R_BOARD_R1 = 525, - IMX_SC_R_BOARD_R2 = 526, - IMX_SC_R_BOARD_R3 = 527, - IMX_SC_R_BOARD_R4 = 528, - IMX_SC_R_BOARD_R5 = 529, - IMX_SC_R_BOARD_R6 = 530, - IMX_SC_R_BOARD_R7 = 531, - IMX_SC_R_MJPEG_DEC_MP = 532, - IMX_SC_R_MJPEG_ENC_MP = 533, - IMX_SC_R_VPU_TS_0 = 534, - IMX_SC_R_VPU_MU_0 = 535, - IMX_SC_R_VPU_MU_1 = 536, - IMX_SC_R_VPU_MU_2 = 537, - IMX_SC_R_VPU_MU_3 = 538, - IMX_SC_R_VPU_ENC_1 = 539, - IMX_SC_R_VPU = 540, - IMX_SC_R_LAST -}; - -/* NOTE - please add by replacing some of the UNUSED from above! */ - -/* * This type is used to indicate a control. */ enum imx_sc_ctrl { diff --git a/include/linux/fs.h b/include/linux/fs.h index c95c0807471f..26a8607b3c3c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1044,10 +1044,15 @@ bool opens_in_grace(struct net *); * Obviously, the last two criteria only matter for POSIX locks. */ struct file_lock { - struct file_lock *fl_next; /* singly linked list for this inode */ + struct file_lock *fl_blocker; /* The lock, that is blocking us */ struct list_head fl_list; /* link into file_lock_context */ struct hlist_node fl_link; /* node in global lists */ - struct list_head fl_block; /* circular list of blocked processes */ + struct list_head fl_blocked_requests; /* list of requests with + * ->fl_blocker pointing here + */ + struct list_head fl_blocked_member; /* node in + * ->fl_blocker->fl_blocked_requests + */ fl_owner_t fl_owner; unsigned int fl_flags; unsigned char fl_type; @@ -1119,7 +1124,7 @@ extern void locks_remove_file(struct file *); extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); -extern int posix_unblock_lock(struct file_lock *); +extern int locks_delete_block(struct file_lock *); extern int vfs_test_lock(struct file *, struct file_lock *); extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); @@ -1209,7 +1214,7 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl, return -ENOLCK; } -static inline int posix_unblock_lock(struct file_lock *waiter) +static inline int locks_delete_block(struct file_lock *waiter) { return -ENOENT; } diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 34cf0fdd7dc7..610815e3f1aa 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) static inline void fscache_retrieval_complete(struct fscache_retrieval *op, int n_pages) { - atomic_sub(n_pages, &op->n_pages); - if (atomic_read(&op->n_pages) <= 0) + if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0) fscache_op_complete(&op->op, false); } diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index fd1ce10553bf..2ccb08cb5d6a 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -26,30 +26,46 @@ static inline int fsnotify_parent(const struct path *path, struct dentry *dentry return __fsnotify_parent(path, dentry, mask); } -/* simple call site for access decisions */ +/* + * Simple wrapper to consolidate calls fsnotify_parent()/fsnotify() when + * an event is on a path. 
+ */ +static inline int fsnotify_path(struct inode *inode, const struct path *path, + __u32 mask) +{ + int ret = fsnotify_parent(path, NULL, mask); + + if (ret) + return ret; + return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); +} + +/* Simple call site for access decisions */ static inline int fsnotify_perm(struct file *file, int mask) { + int ret; const struct path *path = &file->f_path; struct inode *inode = file_inode(file); __u32 fsnotify_mask = 0; - int ret; if (file->f_mode & FMODE_NONOTIFY) return 0; if (!(mask & (MAY_READ | MAY_OPEN))) return 0; - if (mask & MAY_OPEN) + if (mask & MAY_OPEN) { fsnotify_mask = FS_OPEN_PERM; - else if (mask & MAY_READ) - fsnotify_mask = FS_ACCESS_PERM; - else - BUG(); - ret = fsnotify_parent(path, NULL, fsnotify_mask); - if (ret) - return ret; + if (file->f_flags & __FMODE_EXEC) { + ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM); - return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + if (ret) + return ret; + } + } else if (mask & MAY_READ) { + fsnotify_mask = FS_ACCESS_PERM; + } + + return fsnotify_path(inode, path, fsnotify_mask); } /* @@ -180,10 +196,8 @@ static inline void fsnotify_access(struct file *file) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - if (!(file->f_mode & FMODE_NONOTIFY)) { - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); - } + if (!(file->f_mode & FMODE_NONOTIFY)) + fsnotify_path(inode, path, mask); } /* @@ -198,10 +212,8 @@ static inline void fsnotify_modify(struct file *file) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - if (!(file->f_mode & FMODE_NONOTIFY)) { - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); - } + if (!(file->f_mode & FMODE_NONOTIFY)) + fsnotify_path(inode, path, mask); } /* @@ -215,9 +227,10 @@ static inline void fsnotify_open(struct file *file) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; + if (file->f_flags & __FMODE_EXEC) + mask |= FS_OPEN_EXEC; - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); + fsnotify_path(inode, path, mask); } /* @@ -233,10 +246,8 @@ static inline void fsnotify_close(struct file *file) if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; - if (!(file->f_mode & FMODE_NONOTIFY)) { - fsnotify_parent(path, NULL, mask); - fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0); - } + if (!(file->f_mode & FMODE_NONOTIFY)) + fsnotify_path(inode, path, mask); } /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 135b973e44d1..7639774e7475 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -38,6 +38,7 @@ #define FS_DELETE 0x00000200 /* Subfile was deleted */ #define FS_DELETE_SELF 0x00000400 /* Self was deleted */ #define FS_MOVE_SELF 0x00000800 /* Self was moved */ +#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */ #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ @@ -45,6 +46,7 @@ #define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ +#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ #define FS_ISDIR 0x40000000 /* event occurred against dir */ @@ -62,11 +64,13 @@ #define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ FS_CLOSE_WRITE | 
FS_CLOSE_NOWRITE | FS_OPEN |\ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ - FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM) + FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \ + FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) -#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM) +#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ + FS_OPEN_EXEC_PERM) /* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ @@ -74,7 +78,8 @@ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ - FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME) + FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \ + FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) /* Extra flags that may be reported with event or control handling of events */ #define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a397907e8d72..5c990e891d6a 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -420,6 +420,9 @@ enum { }; void arch_ftrace_update_code(int command); +void arch_ftrace_update_trampoline(struct ftrace_ops *ops); +void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec); +void arch_ftrace_trampoline_free(struct ftrace_ops *ops); struct ftrace_rec_iter; @@ -777,8 +780,8 @@ struct ftrace_ret_stack { extern void return_to_handler(void); extern int -ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, - unsigned long frame_pointer, unsigned long *retp); +function_graph_enter(unsigned long ret, unsigned long func, + unsigned long frame_pointer, unsigned long *retp); unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp); diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 76f8db0b0e71..0705164f928c 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) } extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, struct vm_area_struct *vma, unsigned long addr, - int node); + int node, bool hugepage); +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ + alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true) #else #define alloc_pages(gfp_mask, order) \ alloc_pages_node(numa_node_id(), gfp_mask, order) -#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\ +#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\ + alloc_pages(gfp_mask, order) +#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \ alloc_pages(gfp_mask, order) #endif #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) #define alloc_page_vma(gfp_mask, vma, addr) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) + alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false) #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ - alloc_pages_vma(gfp_mask, 0, vma, addr, node) + alloc_pages_vma(gfp_mask, 0, vma, addr, node, false) extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); extern unsigned long get_zeroed_page(gfp_t gfp_mask); diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index f2f887795d43..8aebcf822082 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -104,6 +104,7 @@ struct gpio_descs *__must_check devm_gpiod_get_array_optional(struct device *dev, const char *con_id, enum gpiod_flags 
flags); void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); +void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc); void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs); int gpiod_get_direction(struct gpio_desc *desc); @@ -172,6 +173,10 @@ int desc_to_gpio(const struct gpio_desc *desc); struct device_node; struct fwnode_handle; +struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label); struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, struct device_node *node, const char *propname, int index, @@ -245,6 +250,15 @@ static inline void gpiod_put(struct gpio_desc *desc) WARN_ON(1); } +static inline void devm_gpiod_unhinge(struct device *dev, + struct gpio_desc *desc) +{ + might_sleep(); + + /* GPIO can never have been requested */ + WARN_ON(1); +} + static inline void gpiod_put_array(struct gpio_descs *descs) { might_sleep(); @@ -518,6 +532,15 @@ struct device_node; struct fwnode_handle; static inline +struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, + const char *propname, int index, + enum gpiod_flags dflags, + const char *label) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, struct device_node *node, const char *propname, int index, diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index 4f3febc0f971..d2bacf502429 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h @@ -163,6 +163,9 @@ struct hdmi_avi_infoframe { int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame); ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame, + void *buffer, size_t size); +int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame); enum hdmi_spd_sdi { HDMI_SPD_SDI_UNKNOWN, @@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame, const char *vendor, const char *product); ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame, + void *buffer, size_t size); +int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame); enum hdmi_audio_coding_type { HDMI_AUDIO_CODING_TYPE_STREAM, @@ -272,6 +278,9 @@ struct hdmi_audio_infoframe { int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame); ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame, + void *buffer, size_t size); +int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame); enum hdmi_3d_structure { HDMI_3D_STRUCTURE_INVALID = -1, @@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe { int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame); ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, void *buffer, size_t size); +ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame, + void *buffer, size_t size); +int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame); union hdmi_vendor_any_infoframe { struct { @@ -330,10 +342,14 @@ union hdmi_infoframe { struct hdmi_audio_infoframe audio; }; -ssize_t -hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size); -int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void 
*buffer); +ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, + size_t size); +ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, + void *buffer, size_t size); +int hdmi_infoframe_check(union hdmi_infoframe *frame); +int hdmi_infoframe_unpack(union hdmi_infoframe *frame, + const void *buffer, size_t size); void hdmi_infoframe_log(const char *level, struct device *dev, - union hdmi_infoframe *frame); + const union hdmi_infoframe *frame); #endif /* _DRM_HDMI_H */ diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 331dc377c275..dc12f5c4b076 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, * @attr_usage_id: Attribute usage id as per spec * @report_id: Report id to look for * @flag: Synchronous or asynchronous read +* @is_signed: If true then fields < 32 bits will be sign-extended * * Issues a synchronous or asynchronous read request for an input attribute. * Returns data upto 32 bits. @@ -190,7 +191,8 @@ enum sensor_hub_read_flags { int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, u32 usage_id, u32 attr_usage_id, u32 report_id, - enum sensor_hub_read_flags flag + enum sensor_hub_read_flags flag, + bool is_signed ); /** diff --git a/include/linux/hid.h b/include/linux/hid.h index 2827b87590d8..a355d61940f2 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -722,8 +722,8 @@ struct hid_usage_id { * input will not be passed to raw_event unless hid_device_io_start is * called. * - * raw_event and event should return 0 on no action performed, 1 when no - * further processing should be done and negative on error + * raw_event and event should return negative on error, any other value will + * pass the event on to .event() typically return 0 for success. * * input_mapping shall return a negative value to completely ignore this usage * (e.g. doubled or invalid usage), zero to continue with parsing of this @@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report) int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt); - -/** - * struct hid_scroll_counter - Utility class for processing high-resolution - * scroll events. - * @dev: the input device for which events should be reported. - * @microns_per_hi_res_unit: the amount moved by the user's finger for each - * high-resolution unit reported by the mouse, in - * microns. - * @resolution_multiplier: the wheel's resolution in high-resolution mode as a - * multiple of its lower resolution. For example, if - * moving the wheel by one "notch" would result in a - * value of 1 in low-resolution mode but 8 in - * high-resolution, the multiplier is 8. - * @remainder: counts the number of high-resolution units moved since the last - * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should - * only be used by class methods. 
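The new *_check()/*_pack_only() prototypes split validation from serialization, so a caller can validate an infoframe once and then pack the now-constant structure into a preallocated buffer. A rough sketch of the intended call sequence for the AVI case follows; demo_pack_avi and the caller-supplied buffer size are illustrative assumptions, not code from the patch.

#include <linux/hdmi.h>

static ssize_t demo_pack_avi(void *buf, size_t size)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = hdmi_avi_infoframe_init(&frame);
	if (ret)
		return ret;

	/* ... fill in the frame from the selected display mode here ... */

	ret = hdmi_avi_infoframe_check(&frame);
	if (ret)
		return ret;

	/* Serialize into buf without modifying the frame again. */
	return hdmi_avi_infoframe_pack_only(&frame, buf, size);
}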
- */ -struct hid_scroll_counter { - struct input_dev *dev; - int microns_per_hi_res_unit; - int resolution_multiplier; - - int remainder; -}; - -void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter, - int hi_res_value); - /* HID quirks API */ unsigned long hid_lookup_quirk(const struct hid_device *hdev); int hid_quirks_init(char **quirks_param, __u16 bus, int count); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 3892e9c8b2de..2e8957eac4d4 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -1,6 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * include/linux/hrtimer.h - * * hrtimers - High-resolution kernel timers * * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> @@ -9,8 +8,6 @@ * data type definitions, declarations, prototypes * * Started by: Thomas Gleixner and Ingo Molnar - * - * For licencing details see kernel-base/COPYING */ #ifndef _LINUX_HRTIMER_H #define _LINUX_HRTIMER_H diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b3e24368930a..14131b6fae68 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -905,6 +905,13 @@ struct vmbus_channel { bool probe_done; + /* + * We must offload the handling of the primary/sub channels + * from the single-threaded vmbus_connection.work_queue to + * two different workqueue, otherwise we can block + * vmbus_connection.work_queue and hang: see vmbus_process_offer(). + */ + struct work_struct add_channel_work; }; static inline bool is_hvsock_channel(const struct vmbus_channel *c) diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h new file mode 100644 index 000000000000..73b0982cc519 --- /dev/null +++ b/include/linux/i3c/ccc.h @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon <boris.brezillon@bootlin.com> + */ + +#ifndef I3C_CCC_H +#define I3C_CCC_H + +#include <linux/bitops.h> +#include <linux/i3c/device.h> + +/* I3C CCC (Common Command Codes) related definitions */ +#define I3C_CCC_DIRECT BIT(7) + +#define I3C_CCC_ID(id, broadcast) \ + ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT)) + +/* Commands valid in both broadcast and unicast modes */ +#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast) +#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast) +#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast) +#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast) +#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast) +#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast) +#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98) +#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 
0x61 : 0xe0)) + +/* Broadcast-only commands */ +#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true) +#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true) +#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true) +#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true) + +/* Unicast-only commands */ +#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false) +#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false) +#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false) +#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false) +#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false) +#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false) +#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false) +#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false) +#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false) +#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false) +#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false) +#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false) +#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false) + +#define I3C_CCC_EVENT_SIR BIT(0) +#define I3C_CCC_EVENT_MR BIT(1) +#define I3C_CCC_EVENT_HJ BIT(3) + +/** + * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC + * + * @events: bitmask of I3C_CCC_EVENT_xxx events. + * + * Depending on the CCC command, the specific events coming from all devices + * (broadcast version) or a specific device (unicast version) will be + * enabled (ENEC) or disabled (DISEC). + */ +struct i3c_ccc_events { + u8 events; +}; + +/** + * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC + * + * @len: maximum write length in bytes + * + * The maximum write length is only applicable to SDR private messages or + * extended Write CCCs (like SETXTIME). + */ +struct i3c_ccc_mwl { + __be16 len; +}; + +/** + * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC + * + * @len: maximum read length in bytes + * @ibi_len: maximum IBI payload length + * + * The maximum read length is only applicable to SDR private messages or + * extended Read CCCs (like GETXTIME). + * The IBI length is only valid if the I3C slave is IBI capable + * (%I3C_BCR_IBI_REQ_CAP is set). + */ +struct i3c_ccc_mrl { + __be16 read_len; + u8 ibi_len; +} __packed; + +/** + * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS + * + * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is + * describing an I2C slave. + * @dcr: DCR value (not applicable to entries describing I2C devices) + * @lvr: LVR value (not applicable to entries describing I3C devices) + * @bcr: BCR value or 0 if this entry is describing an I2C slave + * @static_addr: static address or 0 if the device does not have a static + * address + * + * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc + * descriptors (one entry per I3C/I2C dev controlled by the master). + */ +struct i3c_ccc_dev_desc { + u8 dyn_addr; + union { + u8 dcr; + u8 lvr; + }; + u8 bcr; + u8 static_addr; +}; + +/** + * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC + * + * @count: number of dev descriptors + * @master: descriptor describing the current master + * @slaves: array of descriptors describing slaves controlled by the + * current master + * + * Information passed to the broadcast DEFSLVS to propagate device + * information to all masters currently acting as slaves on the bus. + * This is only meaningful if you have more than one master. 
+ */ +struct i3c_ccc_defslvs { + u8 count; + struct i3c_ccc_dev_desc master; + struct i3c_ccc_dev_desc slaves[0]; +} __packed; + +/** + * enum i3c_ccc_test_mode - enum listing all available test modes + * + * @I3C_CCC_EXIT_TEST_MODE: exit test mode + * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode + */ +enum i3c_ccc_test_mode { + I3C_CCC_EXIT_TEST_MODE, + I3C_CCC_VENDOR_TEST_MODE, +}; + +/** + * struct i3c_ccc_enttm - payload passed to ENTTM CCC + * + * @mode: one of the &enum i3c_ccc_test_mode modes + * + * Information passed to the ENTTM CCC to instruct an I3C device to enter a + * specific test mode. + */ +struct i3c_ccc_enttm { + u8 mode; +}; + +/** + * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs + * + * @addr: dynamic address to assign to an I3C device + * + * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the + * dynamic address of an I3C device. + */ +struct i3c_ccc_setda { + u8 addr; +}; + +/** + * struct i3c_ccc_getpid - payload passed to GETPID CCC + * + * @pid: 48 bits PID in big endian + */ +struct i3c_ccc_getpid { + u8 pid[6]; +}; + +/** + * struct i3c_ccc_getbcr - payload passed to GETBCR CCC + * + * @bcr: BCR (Bus Characteristic Register) value + */ +struct i3c_ccc_getbcr { + u8 bcr; +}; + +/** + * struct i3c_ccc_getdcr - payload passed to GETDCR CCC + * + * @dcr: DCR (Device Characteristic Register) value + */ +struct i3c_ccc_getdcr { + u8 dcr; +}; + +#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0)) +#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5) +#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \ + (((status) & GENMASK(7, 6)) >> 6) + +/** + * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC + * + * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more + * information). 
+ */ +struct i3c_ccc_getstatus { + __be16 status; +}; + +/** + * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC + * + * @newmaster: address of the master taking bus ownership + */ +struct i3c_ccc_getaccmst { + u8 newmaster; +}; + +/** + * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor + * + * @addr: dynamic address of the bridged device + * @id: ID of the slave device behind the bridge + */ +struct i3c_ccc_bridged_slave_desc { + u8 addr; + __be16 id; +} __packed; + +/** + * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC + * + * @count: number of bridged slaves + * @bslaves: bridged slave descriptors + */ +struct i3c_ccc_setbrgtgt { + u8 count; + struct i3c_ccc_bridged_slave_desc bslaves[0]; +} __packed; + +/** + * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers + */ +enum i3c_sdr_max_data_rate { + I3C_SDR0_FSCL_MAX, + I3C_SDR1_FSCL_8MHZ, + I3C_SDR2_FSCL_6MHZ, + I3C_SDR3_FSCL_4MHZ, + I3C_SDR4_FSCL_2MHZ, +}; + +/** + * enum i3c_tsco - clock to data turn-around + */ +enum i3c_tsco { + I3C_TSCO_8NS, + I3C_TSCO_9NS, + I3C_TSCO_10NS, + I3C_TSCO_11NS, + I3C_TSCO_12NS, +}; + +#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0) +#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK) + +/** + * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC + * + * @maxwr: write limitations + * @maxrd: read limitations + * @maxrdturn: maximum read turn-around expressed micro-seconds and + * little-endian formatted + */ +struct i3c_ccc_getmxds { + u8 maxwr; + u8 maxrd; + u8 maxrdturn[3]; +} __packed; + +#define I3C_CCC_HDR_MODE(mode) BIT(mode) + +/** + * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC + * + * @modes: bitmap of supported HDR modes + */ +struct i3c_ccc_gethdrcap { + u8 modes; +} __packed; + +/** + * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands + */ +enum i3c_ccc_setxtime_subcmd { + I3C_CCC_SETXTIME_ST = 0x7f, + I3C_CCC_SETXTIME_DT = 0xbf, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7, + I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb, + I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd, + I3C_CCC_SETXTIME_TPH = 0x3f, + I3C_CCC_SETXTIME_TU = 0x9f, + I3C_CCC_SETXTIME_ODR = 0x8f, +}; + +/** + * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC + * + * @subcmd: one of the sub-commands ddefined in &enum i3c_ccc_setxtime_subcmd + * @data: sub-command payload. Amount of data is determined by + * &i3c_ccc_setxtime->subcmd + */ +struct i3c_ccc_setxtime { + u8 subcmd; + u8 data[0]; +} __packed; + +#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0) +#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1) +#define I3C_CCC_GETXTIME_OVERFLOW BIT(7) + +/** + * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC + * + * @supported_modes: bitmap describing supported XTIME modes + * @state: current status (enabled mode and overflow status) + * @frequency: slave's internal oscillator frequency in 500KHz steps + * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps + */ +struct i3c_ccc_getxtime { + u8 supported_modes; + u8 state; + u8 frequency; + u8 inaccuracy; +} __packed; + +/** + * struct i3c_ccc_cmd_payload - CCC payload + * + * @len: payload length + * @data: payload data. 
This buffer must be DMA-able + */ +struct i3c_ccc_cmd_payload { + u16 len; + void *data; +}; + +/** + * struct i3c_ccc_cmd_dest - CCC command destination + * + * @addr: can be an I3C device address or the broadcast address if this is a + * broadcast CCC + * @payload: payload to be sent to this device or broadcasted + */ +struct i3c_ccc_cmd_dest { + u8 addr; + struct i3c_ccc_cmd_payload payload; +}; + +/** + * struct i3c_ccc_cmd - CCC command + * + * @rnw: true if the CCC should retrieve data from the device. Only valid for + * unicast commands + * @id: CCC command id + * @ndests: number of destinations. Should always be one for broadcast commands + * @dests: array of destinations and associated payload for this CCC. Most of + * the time, only one destination is provided + * @err: I3C error code + */ +struct i3c_ccc_cmd { + u8 rnw; + u8 id; + unsigned int ndests; + struct i3c_ccc_cmd_dest *dests; + enum i3c_error_code err; +}; + +#endif /* I3C_CCC_H */ diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h new file mode 100644 index 000000000000..5ecb055fd375 --- /dev/null +++ b/include/linux/i3c/device.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon <boris.brezillon@bootlin.com> + */ + +#ifndef I3C_DEV_H +#define I3C_DEV_H + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/i2c.h> +#include <linux/kconfig.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> + +/** + * enum i3c_error_code - I3C error codes + * + * These are the standard error codes as defined by the I3C specification. + * When -EIO is returned by the i3c_device_do_priv_xfers() or + * i3c_device_send_hdr_cmds() one can check the error code in + * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of + * what went wrong. + * + * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C + * related + * @I3C_ERROR_M0: M0 error + * @I3C_ERROR_M1: M1 error + * @I3C_ERROR_M2: M2 error + */ +enum i3c_error_code { + I3C_ERROR_UNKNOWN = 0, + I3C_ERROR_M0 = 1, + I3C_ERROR_M1, + I3C_ERROR_M2, +}; + +/** + * enum i3c_hdr_mode - HDR mode ids + * @I3C_HDR_DDR: DDR mode + * @I3C_HDR_TSP: TSP mode + * @I3C_HDR_TSL: TSL mode + */ +enum i3c_hdr_mode { + I3C_HDR_DDR, + I3C_HDR_TSP, + I3C_HDR_TSL, +}; + +/** + * struct i3c_priv_xfer - I3C SDR private transfer + * @rnw: encodes the transfer direction. true for a read, false for a write + * @len: transfer length in bytes of the transfer + * @data: input/output buffer + * @data.in: input buffer. Must point to a DMA-able buffer + * @data.out: output buffer. 
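To make the CCC command/destination/payload layering above concrete, here is a sketch that assembles a broadcast ENEC command re-enabling slave interrupt requests. Only structures and macros defined in this header are used; how the command is ultimately issued is controller specific, and demo_build_enec is a hypothetical helper.

#include <linux/i3c/ccc.h>

static void demo_build_enec(struct i3c_ccc_cmd *cmd,
			    struct i3c_ccc_cmd_dest *dest,
			    struct i3c_ccc_events *events)
{
	events->events = I3C_CCC_EVENT_SIR;

	dest->addr = 0x7e;			/* I3C broadcast address */
	dest->payload.len = sizeof(*events);
	dest->payload.data = events;		/* payload must be DMA-able */

	cmd->rnw = false;
	cmd->id = I3C_CCC_ENEC(true);		/* broadcast variant of ENEC */
	cmd->ndests = 1;
	cmd->dests = dest;
}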
Must point to a DMA-able buffer + * @err: I3C error code + */ +struct i3c_priv_xfer { + u8 rnw; + u16 len; + union { + void *in; + const void *out; + } data; + enum i3c_error_code err; +}; + +/** + * enum i3c_dcr - I3C DCR values + * @I3C_DCR_GENERIC_DEVICE: generic I3C device + */ +enum i3c_dcr { + I3C_DCR_GENERIC_DEVICE = 0, +}; + +#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33) +#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32))) +#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0)) +#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16) +#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12) +#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0)) + +#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6)) +#define I3C_BCR_I3C_SLAVE (0 << 6) +#define I3C_BCR_I3C_MASTER (1 << 6) +#define I3C_BCR_HDR_CAP BIT(5) +#define I3C_BCR_BRIDGE BIT(4) +#define I3C_BCR_OFFLINE_CAP BIT(3) +#define I3C_BCR_IBI_PAYLOAD BIT(2) +#define I3C_BCR_IBI_REQ_CAP BIT(1) +#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0) + +/** + * struct i3c_device_info - I3C device information + * @pid: Provisional ID + * @bcr: Bus Characteristic Register + * @dcr: Device Characteristic Register + * @static_addr: static/I2C address + * @dyn_addr: dynamic address + * @hdr_cap: supported HDR modes + * @max_read_ds: max read speed information + * @max_write_ds: max write speed information + * @max_ibi_len: max IBI payload length + * @max_read_turnaround: max read turn-around time in micro-seconds + * @max_read_len: max private SDR read length in bytes + * @max_write_len: max private SDR write length in bytes + * + * These are all basic information that should be advertised by an I3C device. + * Some of them are optional depending on the device type and device + * capabilities. + * For each I3C slave attached to a master with + * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command + * to retrieve these data. + */ +struct i3c_device_info { + u64 pid; + u8 bcr; + u8 dcr; + u8 static_addr; + u8 dyn_addr; + u8 hdr_cap; + u8 max_read_ds; + u8 max_write_ds; + u8 max_ibi_len; + u32 max_read_turnaround; + u16 max_read_len; + u16 max_write_len; +}; + +/* + * I3C device internals are kept hidden from I3C device users. It's just + * simpler to refactor things when everything goes through getter/setters, and + * I3C device drivers should not have to worry about internal representation + * anyway. + */ +struct i3c_device; + +/* These macros should be used to i3c_device_id entries. */ +#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART) + +#define I3C_DEVICE(_manufid, _partid, _drvdata) \ + { \ + .match_flags = I3C_MATCH_MANUF_AND_PART, \ + .manuf_id = _manufid, \ + .part_id = _partid, \ + .data = _drvdata, \ + } + +#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \ + { \ + .match_flags = I3C_MATCH_MANUF_AND_PART | \ + I3C_MATCH_EXTRA_INFO, \ + .manuf_id = _manufid, \ + .part_id = _partid, \ + .extra_info = _info, \ + .data = _drvdata, \ + } + +#define I3C_CLASS(_dcr, _drvdata) \ + { \ + .match_flags = I3C_MATCH_DCR, \ + .dcr = _dcr, \ + } + +/** + * struct i3c_driver - I3C device driver + * @driver: inherit from device_driver + * @probe: I3C device probe method + * @remove: I3C device remove method + * @id_table: I3C device match table. 
Will be used by the framework to decide + * which device to bind to this driver + */ +struct i3c_driver { + struct device_driver driver; + int (*probe)(struct i3c_device *dev); + int (*remove)(struct i3c_device *dev); + const struct i3c_device_id *id_table; +}; + +static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv) +{ + return container_of(drv, struct i3c_driver, driver); +} + +struct device *i3cdev_to_dev(struct i3c_device *i3cdev); +struct i3c_device *dev_to_i3cdev(struct device *dev); + +static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev, + void *data) +{ + struct device *dev = i3cdev_to_dev(i3cdev); + + dev_set_drvdata(dev, data); +} + +static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev) +{ + struct device *dev = i3cdev_to_dev(i3cdev); + + return dev_get_drvdata(dev); +} + +int i3c_driver_register_with_owner(struct i3c_driver *drv, + struct module *owner); +void i3c_driver_unregister(struct i3c_driver *drv); + +#define i3c_driver_register(__drv) \ + i3c_driver_register_with_owner(__drv, THIS_MODULE) + +/** + * module_i3c_driver() - Register a module providing an I3C driver + * @__drv: the I3C driver to register + * + * Provide generic init/exit functions that simply register/unregister an I3C + * driver. + * Should be used by any driver that does not require extra init/cleanup steps. + */ +#define module_i3c_driver(__drv) \ + module_driver(__drv, i3c_driver_register, i3c_driver_unregister) + +/** + * i3c_i2c_driver_register() - Register an i2c and an i3c driver + * @i3cdrv: the I3C driver to register + * @i2cdrv: the I2C driver to register + * + * This function registers both @i2cdev and @i3cdev, and fails if one of these + * registrations fails. This is mainly useful for devices that support both I2C + * and I3C modes. + * Note that when CONFIG_I3C is not enabled, this function only registers the + * I2C driver. + * + * Return: 0 if both registrations succeeds, a negative error code otherwise. + */ +static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv, + struct i2c_driver *i2cdrv) +{ + int ret; + + ret = i2c_add_driver(i2cdrv); + if (ret || !IS_ENABLED(CONFIG_I3C)) + return ret; + + ret = i3c_driver_register(i3cdrv); + if (ret) + i2c_del_driver(i2cdrv); + + return ret; +} + +/** + * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver + * @i3cdrv: the I3C driver to register + * @i2cdrv: the I2C driver to register + * + * This function unregisters both @i3cdrv and @i2cdrv. + * Note that when CONFIG_I3C is not enabled, this function only unregisters the + * @i2cdrv. + */ +static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv, + struct i2c_driver *i2cdrv) +{ + if (IS_ENABLED(CONFIG_I3C)) + i3c_driver_unregister(i3cdrv); + + i2c_del_driver(i2cdrv); +} + +/** + * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C + * driver + * @__i3cdrv: the I3C driver to register + * @__i2cdrv: the I3C driver to register + * + * Provide generic init/exit functions that simply register/unregister an I3C + * and an I2C driver. + * This macro can be used even if CONFIG_I3C is disabled, in this case, only + * the I2C driver will be registered. + * Should be used by any driver that does not require extra init/cleanup steps. 
+ */ +#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \ + module_driver(__i3cdrv, \ + i3c_i2c_driver_register, \ + i3c_i2c_driver_unregister) + +int i3c_device_do_priv_xfers(struct i3c_device *dev, + struct i3c_priv_xfer *xfers, + int nxfers); + +void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info); + +struct i3c_ibi_payload { + unsigned int len; + const void *data; +}; + +/** + * struct i3c_ibi_setup - IBI setup object + * @max_payload_len: maximum length of the payload associated to an IBI. If one + * IBI appears to have a payload that is bigger than this + * number, the IBI will be rejected. + * @num_slots: number of pre-allocated IBI slots. This should be chosen so that + * the system never runs out of IBI slots, otherwise you'll lose + * IBIs. + * @handler: IBI handler, called every time an IBI is received. This handler is called + * in a workqueue context. It is allowed to sleep and send new + * messages on the bus, though it's recommended to keep the + * processing done there as fast as possible to avoid delaying + * processing of other IBIs queued on the same workqueue. + * + * Temporary structure used to pass information to i3c_device_request_ibi(). + * This object can be allocated on the stack since i3c_device_request_ibi() + * copies every bit of information and does not use it after + * i3c_device_request_ibi() has returned. + */ +struct i3c_ibi_setup { + unsigned int max_payload_len; + unsigned int num_slots; + void (*handler)(struct i3c_device *dev, + const struct i3c_ibi_payload *payload); +}; + +int i3c_device_request_ibi(struct i3c_device *dev, + const struct i3c_ibi_setup *setup); +void i3c_device_free_ibi(struct i3c_device *dev); +int i3c_device_enable_ibi(struct i3c_device *dev); +int i3c_device_disable_ibi(struct i3c_device *dev); + +#endif /* I3C_DEV_H */ diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h new file mode 100644 index 000000000000..f13fd8b1dd79 --- /dev/null +++ b/include/linux/i3c/master.h @@ -0,0 +1,648 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Cadence Design Systems Inc. + * + * Author: Boris Brezillon <boris.brezillon@bootlin.com> + */ + +#ifndef I3C_MASTER_H +#define I3C_MASTER_H + +#include <asm/bitsperlong.h> + +#include <linux/bitops.h> +#include <linux/i2c.h> +#include <linux/i3c/ccc.h> +#include <linux/i3c/device.h> +#include <linux/rwsem.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#define I3C_HOT_JOIN_ADDR 0x2 +#define I3C_BROADCAST_ADDR 0x7e +#define I3C_MAX_ADDR GENMASK(6, 0) + +struct i3c_master_controller; +struct i3c_bus; +struct i2c_device; +struct i3c_device; + +/** + * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor + * @node: node element used to insert the slot into the I2C or I3C device + * list + * @master: I3C master that instantiated this device. Will be used to do + * I2C/I3C transfers + * @master_priv: master private data assigned to the device. Can be used to + * add master specific information + * + * This structure describes the common I3C/I2C device information.
+ */ +struct i3c_i2c_dev_desc { + struct list_head node; + struct i3c_master_controller *master; + void *master_priv; +}; + +#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5) +#define I3C_LVR_I2C_INDEX(x) ((x) << 5) +#define I3C_LVR_I2C_FM_MODE BIT(4) + +#define I2C_MAX_ADDR GENMASK(9, 0) + +/** + * struct i2c_dev_boardinfo - I2C device board information + * @node: used to insert the boardinfo object in the I2C boardinfo list + * @base: regular I2C board information + * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about + * the I2C device limitations + * + * This structure is used to attach board-level information to an I2C device. + * Each I2C device connected on the I3C bus should have one. + */ +struct i2c_dev_boardinfo { + struct list_head node; + struct i2c_board_info base; + u8 lvr; +}; + +/** + * struct i2c_dev_desc - I2C device descriptor + * @common: common part of the I2C device descriptor + * @boardinfo: pointer to the boardinfo attached to this I2C device + * @dev: I2C device object registered to the I2C framework + * + * Each I2C device connected on the bus will have an i2c_dev_desc. + * This object is created by the core and later attached to the controller + * using &struct_i3c_master_controller->ops->attach_i2c_dev(). + * + * &struct_i2c_dev_desc is the internal representation of an I2C device + * connected on an I3C bus. This object is also passed to all + * &struct_i3c_master_controller_ops hooks. + */ +struct i2c_dev_desc { + struct i3c_i2c_dev_desc common; + const struct i2c_dev_boardinfo *boardinfo; + struct i2c_client *dev; +}; + +/** + * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot + * @work: work associated to this slot. The IBI handler will be called from + * there + * @dev: the I3C device that has generated this IBI + * @len: length of the payload associated to this IBI + * @data: payload buffer + * + * An IBI slot is an object pre-allocated by the controller and used when an + * IBI comes in. + * Every time an IBI comes in, the I3C master driver should find a free IBI + * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using + * i3c_master_queue_ibi(). + * + * How IBI slots are allocated is left to the I3C master driver, though, for + * simple kmalloc-based allocation, the generic IBI slot pool can be used. + */ +struct i3c_ibi_slot { + struct work_struct work; + struct i3c_dev_desc *dev; + unsigned int len; + void *data; +}; + +/** + * struct i3c_device_ibi_info - IBI information attached to a specific device + * @all_ibis_handled: used to be informed when no more IBIs are waiting to be + * processed. Used by i3c_device_disable_ibi() to wait for + * all IBIs to be dequeued + * @pending_ibis: count the number of pending IBIs. Each pending IBI has its + * work element queued to the controller workqueue + * @max_payload_len: maximum payload length for an IBI coming from this device. + * this value is specified when calling + * i3c_device_request_ibi() and should not change at run + * time. All messages IBIs exceeding this limit should be + * rejected by the master + * @num_slots: number of IBI slots reserved for this device + * @enabled: reflect the IBI status + * @handler: IBI handler specified at i3c_device_request_ibi() call time. This + * handler will be called from the controller workqueue, and as such + * is allowed to sleep (though it is recommended to process the IBI + * as fast as possible to not stall processing of other IBIs queued + * on the same workqueue). 
+ * New I3C messages can be sent from the IBI handler + * + * The &struct_i3c_device_ibi_info object is allocated when + * i3c_device_request_ibi() is called and attached to a specific device. This + * object is here to manage IBIs coming from a specific I3C device. + * + * Note that this structure is the generic view of the IBI management + * infrastructure. I3C master drivers may have their own internal + * representation which they can associate to the device using + * controller-private data. + */ +struct i3c_device_ibi_info { + struct completion all_ibis_handled; + atomic_t pending_ibis; + unsigned int max_payload_len; + unsigned int num_slots; + unsigned int enabled; + void (*handler)(struct i3c_device *dev, + const struct i3c_ibi_payload *payload); +}; + +/** + * struct i3c_dev_boardinfo - I3C device board information + * @node: used to insert the boardinfo object in the I3C boardinfo list + * @init_dyn_addr: initial dynamic address requested by the FW. We provide no + * guarantee that the device will end up using this address, + * but try our best to assign this specific address to the + * device + * @static_addr: static address the I3C device listen on before it's been + * assigned a dynamic address by the master. Will be used during + * bus initialization to assign it a specific dynamic address + * before starting DAA (Dynamic Address Assignment) + * @pid: I3C Provisional ID exposed by the device. This is a unique identifier + * that may be used to attach boardinfo to i3c_dev_desc when the device + * does not have a static address + * @of_node: optional DT node in case the device has been described in the DT + * + * This structure is used to attach board-level information to an I3C device. + * Not all I3C devices connected on the bus will have a boardinfo. It's only + * needed if you want to attach extra resources to a device or assign it a + * specific dynamic address. + */ +struct i3c_dev_boardinfo { + struct list_head node; + u8 init_dyn_addr; + u8 static_addr; + u64 pid; + struct device_node *of_node; +}; + +/** + * struct i3c_dev_desc - I3C device descriptor + * @common: common part of the I3C device descriptor + * @info: I3C device information. Will be automatically filled when you create + * your device with i3c_master_add_i3c_dev_locked() + * @ibi_lock: lock used to protect the &struct_i3c_device->ibi + * @ibi: IBI info attached to a device. Should be NULL until + * i3c_device_request_ibi() is called + * @dev: pointer to the I3C device object exposed to I3C device drivers. This + * should never be accessed from I3C master controller drivers. Only core + * code should manipulate it in when updating the dev <-> desc link or + * when propagating IBI events to the driver + * @boardinfo: pointer to the boardinfo attached to this I3C device + * + * Internal representation of an I3C device. This object is only used by the + * core and passed to I3C master controller drivers when they're requested to + * do some operations on the device. + * The core maintains the link between the internal I3C dev descriptor and the + * object exposed to the I3C device drivers (&struct_i3c_device). + */ +struct i3c_dev_desc { + struct i3c_i2c_dev_desc common; + struct i3c_device_info info; + struct mutex ibi_lock; + struct i3c_device_ibi_info *ibi; + struct i3c_device *dev; + const struct i3c_dev_boardinfo *boardinfo; +}; + +/** + * struct i3c_device - I3C device object + * @dev: device object to register the I3C dev to the device model + * @desc: pointer to an i3c device descriptor object. 
This link is updated + * every time the I3C device is rediscovered with a different dynamic + * address assigned + * @bus: I3C bus this device is attached to + * + * I3C device object exposed to I3C device drivers. The takes care of linking + * this object to the relevant &struct_i3c_dev_desc one. + * All I3C devs on the I3C bus are represented, including I3C masters. For each + * of them, we have an instance of &struct i3c_device. + */ +struct i3c_device { + struct device dev; + struct i3c_dev_desc *desc; + struct i3c_bus *bus; +}; + +/* + * The I3C specification says the maximum number of devices connected on the + * bus is 11, but this number depends on external parameters like trace length, + * capacitive load per Device, and the types of Devices present on the Bus. + * I3C master can also have limitations, so this number is just here as a + * reference and should be adjusted on a per-controller/per-board basis. + */ +#define I3C_BUS_MAX_DEVS 11 + +#define I3C_BUS_MAX_I3C_SCL_RATE 12900000 +#define I3C_BUS_TYP_I3C_SCL_RATE 12500000 +#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000 +#define I3C_BUS_I2C_FM_SCL_RATE 400000 +#define I3C_BUS_TLOW_OD_MIN_NS 200 + +/** + * enum i3c_bus_mode - I3C bus mode + * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation + * expected + * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on + * the bus. The only impact in this mode is that the + * high SCL pulse has to stay below 50ns to trick I2C + * devices when transmitting I3C frames + * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present + * on the bus + */ +enum i3c_bus_mode { + I3C_BUS_MODE_PURE, + I3C_BUS_MODE_MIXED_FAST, + I3C_BUS_MODE_MIXED_SLOW, +}; + +/** + * enum i3c_addr_slot_status - I3C address slot status + * @I3C_ADDR_SLOT_FREE: address is free + * @I3C_ADDR_SLOT_RSVD: address is reserved + * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device + * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device + * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask + * + * On an I3C bus, addresses are assigned dynamically, and we need to know which + * addresses are free to use and which ones are already assigned. + * + * Addresses marked as reserved are those reserved by the I3C protocol + * (broadcast address, ...). + */ +enum i3c_addr_slot_status { + I3C_ADDR_SLOT_FREE, + I3C_ADDR_SLOT_RSVD, + I3C_ADDR_SLOT_I2C_DEV, + I3C_ADDR_SLOT_I3C_DEV, + I3C_ADDR_SLOT_STATUS_MASK = 3, +}; + +/** + * struct i3c_bus - I3C bus object + * @cur_master: I3C master currently driving the bus. Since I3C is multi-master + * this can change over the time. Will be used to let a master + * know whether it needs to request bus ownership before sending + * a frame or not + * @id: bus ID. 
Assigned by the framework when the bus is registered + * @addrslots: a bitmap with 2 bits per slot to encode the address status and + * ease the DAA (Dynamic Address Assignment) procedure (see + * &enum i3c_addr_slot_status) + * @mode: bus mode (see &enum i3c_bus_mode) + * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv + * transfers + * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers + * @scl_rate: SCL signal rate for I3C and I2C mode + * @devs.i3c: contains a list of I3C device descriptors representing I3C + * devices connected on the bus and successfully attached to the + * I3C master + * @devs.i2c: contains a list of I2C device descriptors representing I2C + * devices connected on the bus and successfully attached to the + * I3C master + * @devs: 2 lists containing all I3C/I2C devices connected to the bus + * @lock: read/write lock on the bus. This is needed to protect against + * operations that have an impact on the whole bus and the devices + * connected to it. For example, when asking slaves to drop their + * dynamic address (RSTDAA CCC), we need to make sure no one is trying + * to send I3C frames to these devices. + * Note that this lock does not protect against concurrency between + * devices: several drivers can send different I3C/I2C frames through + * the same master in parallel. It is the responsibility of the + * master to guarantee that frames are actually sent sequentially and + * not interleaved + * + * The I3C bus is represented with its own object and not implicitly described + * by the I3C master to cope with the multi-master functionality, where one bus + * can be shared amongst several masters, each of them requesting bus ownership + * when they need to. + */ +struct i3c_bus { + struct i3c_dev_desc *cur_master; + int id; + unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG]; + enum i3c_bus_mode mode; + struct { + unsigned long i3c; + unsigned long i2c; + } scl_rate; + struct { + struct list_head i3c; + struct list_head i2c; + } devs; + struct rw_semaphore lock; +}; + +/** + * struct i3c_master_controller_ops - I3C master methods + * @bus_init: hook responsible for the I3C bus initialization. You should at + * least call i3c_master_set_info() from there and set the bus mode. + * You can also put controller specific initialization in there. + * This method is mandatory. + * @bus_cleanup: cleanup everything done in + * &i3c_master_controller_ops->bus_init(). + * This method is optional. + * @attach_i3c_dev: called every time an I3C device is attached to the bus. It + * can be after a DAA or when a device is statically declared + * by the FW, in which case it will only have a static address + * and the dynamic address will be 0. + * When this function is called, device information has not + * been retrieved yet. + * This is a good place to attach master controller specific + * data to I3C devices. + * This method is optional. + * @reattach_i3c_dev: called every time an I3C device has its address + * changed. It can be because the device has been powered + * down and has lost its address, or it can happen when a + * device had a static address and has been assigned a + * dynamic address with SETDASA. + * This method is optional. + * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually + * happens when the master device is unregistered. + * This method is optional. + * @do_daa: do a DAA (Dynamic Address Assignment) procedure.
This procedure + * should send an ENTDAA CCC command and then add all devices + * discovered during the DAA using i3c_master_add_i3c_dev_locked(). + * All devices added with i3c_master_add_i3c_dev_locked() will then be + * attached or re-attached to the controller. + * This method is mandatory. + * @supports_ccc_cmd: should return true if the CCC command is supported, false + * otherwise. + * This method is optional; if not provided, the core assumes + * all CCC commands are supported. + * @send_ccc_cmd: send a CCC command. + * This method is mandatory. + * @priv_xfers: do one or several private I3C SDR transfers. + * This method is mandatory. + * @attach_i2c_dev: called every time an I2C device is attached to the bus. + * This is a good place to attach master controller specific + * data to I2C devices. + * This method is optional. + * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually + * happens when the master device is unregistered. + * This method is optional. + * @i2c_xfers: do one or several I2C transfers. Note that, unlike I3C + * transfers, the core does not guarantee that buffers attached to + * the transfers are DMA-safe. If drivers want to have DMA-safe + * buffers, they should use the i2c_get_dma_safe_msg_buf() + * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C + * framework. + * This method is mandatory. + * @i2c_funcs: expose the supported I2C functionalities. + * This method is mandatory. + * @request_ibi: attach an IBI handler to an I3C device. This implies defining + * an IBI handler and the constraints of the IBI (maximum payload + * length and number of pre-allocated slots). + * Some controllers support fewer IBI-capable devices than regular + * devices, so this method might return -%EBUSY if there's no + * more space for an extra IBI registration. + * This method is optional. + * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI + * should have been disabled with ->disable_ibi() prior to that. + * This method is mandatory only if ->request_ibi is not NULL. + * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called + * prior to ->enable_ibi(). The controller should first enable + * the IBI on the controller end (for example, unmask the hardware + * IRQ) and then send the ENEC CCC command (with the IBI flag set) + * to the I3C device. + * This method is mandatory only if ->request_ibi is not NULL. + * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI + * flag set and then deactivate the hardware IRQ on the + * controller end. + * This method is mandatory only if ->request_ibi is not NULL. + * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been + * processed by its handler. The IBI slot should be put back + * in the IBI slot pool so that the controller can re-use it + * for a future IBI. + * This method is mandatory only if ->request_ibi is not + * NULL.
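+ *
+ * As a rough sketch, a controller driver implementing only the mandatory
+ * hooks would fill the structure along these lines (all foo_* callbacks are
+ * hypothetical):
+ *
+ *	static const struct i3c_master_controller_ops foo_ops = {
+ *		.bus_init = foo_bus_init,
+ *		.do_daa = foo_do_daa,
+ *		.send_ccc_cmd = foo_send_ccc_cmd,
+ *		.priv_xfers = foo_priv_xfers,
+ *		.i2c_xfers = foo_i2c_xfers,
+ *		.i2c_funcs = foo_i2c_funcs,
+ *	};
+ *
+ * and pass it to i3c_master_register().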
+ */ +struct i3c_master_controller_ops { + int (*bus_init)(struct i3c_master_controller *master); + void (*bus_cleanup)(struct i3c_master_controller *master); + int (*attach_i3c_dev)(struct i3c_dev_desc *dev); + int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr); + void (*detach_i3c_dev)(struct i3c_dev_desc *dev); + int (*do_daa)(struct i3c_master_controller *master); + bool (*supports_ccc_cmd)(struct i3c_master_controller *master, + const struct i3c_ccc_cmd *cmd); + int (*send_ccc_cmd)(struct i3c_master_controller *master, + struct i3c_ccc_cmd *cmd); + int (*priv_xfers)(struct i3c_dev_desc *dev, + struct i3c_priv_xfer *xfers, + int nxfers); + int (*attach_i2c_dev)(struct i2c_dev_desc *dev); + void (*detach_i2c_dev)(struct i2c_dev_desc *dev); + int (*i2c_xfers)(struct i2c_dev_desc *dev, + const struct i2c_msg *xfers, int nxfers); + u32 (*i2c_funcs)(struct i3c_master_controller *master); + int (*request_ibi)(struct i3c_dev_desc *dev, + const struct i3c_ibi_setup *req); + void (*free_ibi)(struct i3c_dev_desc *dev); + int (*enable_ibi)(struct i3c_dev_desc *dev); + int (*disable_ibi)(struct i3c_dev_desc *dev); + void (*recycle_ibi_slot)(struct i3c_dev_desc *dev, + struct i3c_ibi_slot *slot); +}; + +/** + * struct i3c_master_controller - I3C master controller object + * @dev: device to be registered to the device-model + * @this: an I3C device object representing this master. This device will be + * added to the list of I3C devs available on the bus + * @i2c: I2C adapter used for backward compatibility. This adapter is + * registered to the I2C subsystem to be as transparent as possible to + * existing I2C drivers + * @ops: master operations. See &struct i3c_master_controller_ops + * @secondary: true if the master is a secondary master + * @init_done: true when the bus initialization is done + * @boardinfo.i3c: list of I3C boardinfo objects + * @boardinfo.i2c: list of I2C boardinfo objects + * @boardinfo: board-level information attached to devices connected on the bus + * @bus: I3C bus exposed by this master + * @wq: workqueue used to execute IBI handlers. Can also be used by master + * drivers if they need to postpone operations that need to take place + * in a thread context. Typical examples are Hot Join processing which + * requires taking the bus lock in maintenance, which in turn, can only + * be done from a sleep-able context + * + * A &struct i3c_master_controller has to be registered to the I3C subsystem + * through i3c_master_register(). None of &struct i3c_master_controller fields + * should be set manually, just pass appropriate values to + * i3c_master_register(). + */ +struct i3c_master_controller { + struct device dev; + struct i3c_dev_desc *this; + struct i2c_adapter i2c; + const struct i3c_master_controller_ops *ops; + unsigned int secondary : 1; + unsigned int init_done : 1; + struct { + struct list_head i3c; + struct list_head i2c; + } boardinfo; + struct i3c_bus bus; + struct workqueue_struct *wq; +}; + +/** + * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus + * @bus: the I3C bus + * @dev: an I2C device descriptor pointer updated to point to the current slot + * at each iteration of the loop + * + * Iterate over all I2C devs present on the bus. 
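+ *
+ * Typical use in a controller driver might look like this (sketch, the
+ * foo_setup_i2c_slot() helper is hypothetical):
+ *
+ *	struct i2c_dev_desc *i2cdev;
+ *
+ *	i3c_bus_for_each_i2cdev(i3c_master_get_bus(master), i2cdev)
+ *		foo_setup_i2c_slot(master, i2cdev);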
+ */ +#define i3c_bus_for_each_i2cdev(bus, dev) \ + list_for_each_entry(dev, &(bus)->devs.i2c, common.node) + +/** + * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus + * @bus: the I3C bus + * @dev: an I3C device descriptor pointer updated to point to the current slot + * at each iteration of the loop + * + * Iterate over all I3C devs present on the bus. + */ +#define i3c_bus_for_each_i3cdev(bus, dev) \ + list_for_each_entry(dev, &(bus)->devs.i3c, common.node) + +int i3c_master_do_i2c_xfers(struct i3c_master_controller *master, + const struct i2c_msg *xfers, + int nxfers); + +int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr, + u8 evts); +int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr, + u8 evts); +int i3c_master_entdaa_locked(struct i3c_master_controller *master); +int i3c_master_defslvs_locked(struct i3c_master_controller *master); + +int i3c_master_get_free_addr(struct i3c_master_controller *master, + u8 start_addr); + +int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, + u8 addr); +int i3c_master_do_daa(struct i3c_master_controller *master); + +int i3c_master_set_info(struct i3c_master_controller *master, + const struct i3c_device_info *info); + +int i3c_master_register(struct i3c_master_controller *master, + struct device *parent, + const struct i3c_master_controller_ops *ops, + bool secondary); +int i3c_master_unregister(struct i3c_master_controller *master); + +/** + * i3c_dev_get_master_data() - get master private data attached to an I3C + * device descriptor + * @dev: the I3C device descriptor to get private data from + * + * Return: the private data previously attached with i3c_dev_set_master_data() + * or NULL if no data has been attached to the device. + */ +static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev) +{ + return dev->common.master_priv; +} + +/** + * i3c_dev_set_master_data() - attach master private data to an I3C device + * descriptor + * @dev: the I3C device descriptor to attach private data to + * @data: private data + * + * This function allows a master controller to attach per-device private data + * which can then be retrieved with i3c_dev_get_master_data(). + */ +static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev, + void *data) +{ + dev->common.master_priv = data; +} + +/** + * i2c_dev_get_master_data() - get master private data attached to an I2C + * device descriptor + * @dev: the I2C device descriptor to get private data from + * + * Return: the private data previously attached with i2c_dev_set_master_data() + * or NULL if no data has been attached to the device. + */ +static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev) +{ + return dev->common.master_priv; +} + +/** + * i2c_dev_set_master_data() - attach master private data to an I2C device + * descriptor + * @dev: the I2C device descriptor to attach private data to + * @data: private data + * + * This function allows a master controller to attach per-device private data + * which can then be retrieved with i2c_dev_get_master_data().
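+ *
+ * A minimal sketch of the usual pairing in a controller driver (the foo_slot
+ * structure is hypothetical):
+ *
+ *	static int foo_attach_i2c_dev(struct i2c_dev_desc *dev)
+ *	{
+ *		struct foo_slot *slot = kzalloc(sizeof(*slot), GFP_KERNEL);
+ *
+ *		if (!slot)
+ *			return -ENOMEM;
+ *
+ *		i2c_dev_set_master_data(dev, slot);
+ *		return 0;
+ *	}
+ *
+ * with i2c_dev_get_master_data() used later, for instance in ->i2c_xfers()
+ * and ->detach_i2c_dev(), to retrieve and free the slot.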
+ */ +static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev, + void *data) +{ + dev->common.master_priv = data; +} + +/** + * i3c_dev_get_master() - get master used to communicate with a device + * @dev: I3C dev + * + * Return: the master controller driving @dev + */ +static inline struct i3c_master_controller * +i3c_dev_get_master(struct i3c_dev_desc *dev) +{ + return dev->common.master; +} + +/** + * i2c_dev_get_master() - get master used to communicate with a device + * @dev: I2C dev + * + * Return: the master controller driving @dev + */ +static inline struct i3c_master_controller * +i2c_dev_get_master(struct i2c_dev_desc *dev) +{ + return dev->common.master; +} + +/** + * i3c_master_get_bus() - get the bus attached to a master + * @master: master object + * + * Return: the I3C bus @master is connected to + */ +static inline struct i3c_bus * +i3c_master_get_bus(struct i3c_master_controller *master) +{ + return &master->bus; +} + +struct i3c_generic_ibi_pool; + +struct i3c_generic_ibi_pool * +i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev, + const struct i3c_ibi_setup *req); +void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool); + +struct i3c_ibi_slot * +i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool); +void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool, + struct i3c_ibi_slot *slot); + +void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot); + +struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev); + +#endif /* I3C_MASTER_H */ diff --git a/include/linux/i8253.h b/include/linux/i8253.h index e6bb36a97519..8336b2f6f834 100644 --- a/include/linux/i8253.h +++ b/include/linux/i8253.h @@ -21,6 +21,7 @@ #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) extern raw_spinlock_t i8253_lock; +extern bool i8253_clear_counter_on_shutdown; extern struct clock_event_device i8253_clockevent; extern void clockevent_i8253_init(bool oneshot); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 0ef67f837ae1..3b04e72315e1 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -812,6 +812,8 @@ enum mesh_config_capab_flags { IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40, }; +#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1 + /** * mesh channel switch parameters element's flag indicator * @@ -1617,7 +1619,7 @@ struct ieee80211_he_mcs_nss_supp { * struct ieee80211_he_operation - HE capabilities element * * This structure is the "HE operation element" fields as - * described in P802.11ax_D2.0 section 9.4.2.238 + * described in P802.11ax_D3.0 section 9.4.2.238 */ struct ieee80211_he_operation { __le32 he_oper_params; @@ -2009,17 +2011,17 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) } /* HE Operation defines */ -#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f -#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0 -#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6 -#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200 -#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00 -#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10 -#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000 -#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000 -#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000 -#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000 -#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000 +#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 +#define 
IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 +#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 +#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000 +#define IEEE80211_HE_OPERATION_CO_LOCATED_BSS 0x00008000 +#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 +#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 +#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 /* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size @@ -2044,7 +2046,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) he_oper_params = le32_to_cpu(he_oper->he_oper_params); if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) oper_len += 3; - if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP) + if (he_oper_params & IEEE80211_HE_OPERATION_CO_LOCATED_BSS) oper_len++; /* Add the first byte (extension ID) to the total length */ @@ -2685,6 +2687,10 @@ enum ieee80211_tdls_actioncode { */ #define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7) +/* Defines support for TWT Requester and TWT Responder */ +#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) +#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) + /* TDLS specific payload type in the LLC/SNAP header */ #define WLAN_TDLS_SNAP_RFTYPE 0x2 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index c20c7e197d07..627b788ba0ff 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -119,6 +119,8 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, struct net_device *br_fdb_find_port(const struct net_device *br_dev, const unsigned char *addr, __u16 vid); +void br_fdb_clear_offload(const struct net_device *dev, u16 vid); +bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, @@ -127,6 +129,16 @@ br_fdb_find_port(const struct net_device *br_dev, { return NULL; } + +static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid) +{ +} + +static inline bool +br_port_flag_is_set(const struct net_device *dev, unsigned long flag) +{ + return false; +} #endif #endif diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 83ea4df6ab81..4cca4da7a6de 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -65,8 +65,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ #define VLAN_PRIO_SHIFT 13 -#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ -#define VLAN_TAG_PRESENT VLAN_CFI_MASK +#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */ #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ #define VLAN_N_VID 4096 @@ -78,10 +77,11 @@ static inline bool is_vlan_dev(const struct net_device *dev) return dev->priv_flags & IFF_802_1Q_VLAN; } -#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) -#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) +#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present) +#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) -#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) +#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK)) +#define 
skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT) static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev) { @@ -133,6 +133,9 @@ struct vlan_pcpu_stats { extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev, __be16 vlan_proto, u16 vlan_id); +extern int vlan_for_each(struct net_device *dev, + int (*action)(struct net_device *dev, int vid, + void *arg), void *arg); extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); extern u16 vlan_dev_vlan_id(const struct net_device *dev); extern __be16 vlan_dev_vlan_proto(const struct net_device *dev); @@ -236,6 +239,14 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev, return NULL; } +static inline int +vlan_for_each(struct net_device *dev, + int (*action)(struct net_device *dev, int vid, void *arg), + void *arg) +{ + return 0; +} + static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) { BUG(); @@ -461,6 +472,31 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb, return skb; } +/** + * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info + * @skb: skbuff to clear + * + * Clears the VLAN information from @skb + */ +static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb) +{ + skb->vlan_present = 0; +} + +/** + * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb + * @dst: skbuff to copy to + * @src: skbuff to copy from + * + * Copies VLAN information from @src to @dst (for branchless code) + */ +static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src) +{ + dst->vlan_present = src->vlan_present; + dst->vlan_proto = src->vlan_proto; + dst->vlan_tci = src->vlan_tci; +} + /* * __vlan_hwaccel_push_inside - pushes vlan tag to the payload * @skb: skbuff to tag @@ -475,7 +511,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb) skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, skb_vlan_tag_get(skb)); if (likely(skb)) - skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(skb); return skb; } @@ -491,7 +527,8 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) { skb->vlan_proto = vlan_proto; - skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; + skb->vlan_tci = vlan_tci; + skb->vlan_present = 1; } /** @@ -531,8 +568,6 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb, } } -#define HAVE_VLAN_GET_TAG - /** * vlan_get_tag - get the VLAN ID from the skb * @skb: skbuff to query diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h new file mode 100644 index 000000000000..00d7e8e919c6 --- /dev/null +++ b/include/linux/indirect_call_wrapper.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H +#define _LINUX_INDIRECT_CALL_WRAPPER_H + +#ifdef CONFIG_RETPOLINE + +/* + * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtin + * @f: function pointer + * @f$NR: builtin functions names, up to $NR of them + * @__VA_ARGS__: arguments for @f + * + * Avoid retpoline overhead for known builtin, checking @f vs each of them and + * eventually invoking directly the builtin function. The functions are check + * in the given order. Fallback to the indirect call. + */ +#define INDIRECT_CALL_1(f, f1, ...) \ + ({ \ + likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \ + }) +#define INDIRECT_CALL_2(f, f2, f1, ...) \ + ({ \ + likely(f == f2) ? 
f2(__VA_ARGS__) : \ + INDIRECT_CALL_1(f, f1, __VA_ARGS__); \ + }) + +#define INDIRECT_CALLABLE_DECLARE(f) f +#define INDIRECT_CALLABLE_SCOPE + +#else +#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__) +#define INDIRECT_CALLABLE_DECLARE(f) +#define INDIRECT_CALLABLE_SCOPE static +#endif + +/* + * We can use INDIRECT_CALL_$NR for ipv6 related functions only if ipv6 is + * builtin, this macro simplify dealing with indirect calls with only ipv4/ipv6 + * alternatives + */ +#if IS_BUILTIN(CONFIG_IPV6) +#define INDIRECT_CALL_INET(f, f2, f1, ...) \ + INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__) +#elif IS_ENABLED(CONFIG_INET) +#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__) +#else +#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__) +#endif + +#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 1d6711c28271..c672f34235e7 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -247,10 +247,23 @@ struct irq_affinity_notify { * the MSI(-X) vector space * @post_vectors: Don't apply affinity to @post_vectors at end of * the MSI(-X) vector space + * @nr_sets: Length of passed in *sets array + * @sets: Number of affinitized sets */ struct irq_affinity { int pre_vectors; int post_vectors; + int nr_sets; + int *sets; +}; + +/** + * struct irq_affinity_desc - Interrupt affinity descriptor + * @mask: cpumask to hold the affinity assignment + */ +struct irq_affinity_desc { + struct cpumask mask; + unsigned int is_managed : 1; }; #if defined(CONFIG_SMP) @@ -299,7 +312,9 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m); extern int irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); -struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); +struct irq_affinity_desc * +irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); + int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); #else /* CONFIG_SMP */ @@ -333,7 +348,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) return 0; } -static inline struct cpumask * +static inline struct irq_affinity_desc * irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) { return NULL; diff --git a/include/linux/irq.h b/include/linux/irq.h index c9bffda04a45..def2b2aac8b1 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -27,6 +27,7 @@ struct seq_file; struct module; struct msi_msg; +struct irq_affinity_desc; enum irqchip_irq_state; /* @@ -834,11 +835,12 @@ struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) unsigned int arch_dynirq_lower_bound(unsigned int from); int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, - struct module *owner, const struct cpumask *affinity); + struct module *owner, + const struct irq_affinity_desc *affinity); int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, - const struct cpumask *affinity); + const struct irq_affinity_desc *affinity); /* use macros to avoid needing export.h for THIS_MODULE */ #define irq_alloc_descs(irq, from, cnt, node) \ diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h index 630a57e55db6..4500d453a63e 100644 --- a/include/linux/irq_sim.h +++ b/include/linux/irq_sim.h @@ -16,7 +16,7 @@ struct irq_sim_work_ctx { struct irq_work work; - int irq; + unsigned long *pending; }; 
struct irq_sim_irq_ctx { diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h index 89c34b200671..950e4b2458f0 100644 --- a/include/linux/irqchip.h +++ b/include/linux/irqchip.h @@ -19,7 +19,7 @@ * the association between their DT compatible string and their * initialization function. * - * @name: name that must be unique accross all IRQCHIP_DECLARE of the + * @name: name that must be unique across all IRQCHIP_DECLARE of the * same file. * @compstr: compatible string of the irqchip driver * @fn: initialization function @@ -30,7 +30,7 @@ * This macro must be used by the different irqchip drivers to declare * the association between their version and their initialization function. * - * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the + * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the * same file. * @subtable: Subtable to be identified in MADT * @validate: Function to be called on that subtable to check its validity. diff --git a/include/linux/irqchip/irq-madera.h b/include/linux/irqchip/irq-madera.h new file mode 100644 index 000000000000..1160fa3769ae --- /dev/null +++ b/include/linux/irqchip/irq-madera.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Interrupt support for Cirrus Logic Madera codecs + * + * Copyright (C) 2016-2018 Cirrus Logic, Inc. and + * Cirrus Logic International Semiconductor Ltd. + */ + +#ifndef IRQCHIP_MADERA_H +#define IRQCHIP_MADERA_H + +#include <linux/interrupt.h> +#include <linux/mfd/madera/core.h> + +#define MADERA_IRQ_FLL1_LOCK 0 +#define MADERA_IRQ_FLL2_LOCK 1 +#define MADERA_IRQ_FLL3_LOCK 2 +#define MADERA_IRQ_FLLAO_LOCK 3 +#define MADERA_IRQ_CLK_SYS_ERR 4 +#define MADERA_IRQ_CLK_ASYNC_ERR 5 +#define MADERA_IRQ_CLK_DSP_ERR 6 +#define MADERA_IRQ_HPDET 7 +#define MADERA_IRQ_MICDET1 8 +#define MADERA_IRQ_MICDET2 9 +#define MADERA_IRQ_JD1_RISE 10 +#define MADERA_IRQ_JD1_FALL 11 +#define MADERA_IRQ_JD2_RISE 12 +#define MADERA_IRQ_JD2_FALL 13 +#define MADERA_IRQ_MICD_CLAMP_RISE 14 +#define MADERA_IRQ_MICD_CLAMP_FALL 15 +#define MADERA_IRQ_DRC2_SIG_DET 16 +#define MADERA_IRQ_DRC1_SIG_DET 17 +#define MADERA_IRQ_ASRC1_IN1_LOCK 18 +#define MADERA_IRQ_ASRC1_IN2_LOCK 19 +#define MADERA_IRQ_ASRC2_IN1_LOCK 20 +#define MADERA_IRQ_ASRC2_IN2_LOCK 21 +#define MADERA_IRQ_DSP_IRQ1 22 +#define MADERA_IRQ_DSP_IRQ2 23 +#define MADERA_IRQ_DSP_IRQ3 24 +#define MADERA_IRQ_DSP_IRQ4 25 +#define MADERA_IRQ_DSP_IRQ5 26 +#define MADERA_IRQ_DSP_IRQ6 27 +#define MADERA_IRQ_DSP_IRQ7 28 +#define MADERA_IRQ_DSP_IRQ8 29 +#define MADERA_IRQ_DSP_IRQ9 30 +#define MADERA_IRQ_DSP_IRQ10 31 +#define MADERA_IRQ_DSP_IRQ11 32 +#define MADERA_IRQ_DSP_IRQ12 33 +#define MADERA_IRQ_DSP_IRQ13 34 +#define MADERA_IRQ_DSP_IRQ14 35 +#define MADERA_IRQ_DSP_IRQ15 36 +#define MADERA_IRQ_DSP_IRQ16 37 +#define MADERA_IRQ_HP1L_SC 38 +#define MADERA_IRQ_HP1R_SC 39 +#define MADERA_IRQ_HP2L_SC 40 +#define MADERA_IRQ_HP2R_SC 41 +#define MADERA_IRQ_HP3L_SC 42 +#define MADERA_IRQ_HP3R_SC 43 +#define MADERA_IRQ_SPKOUTL_SC 44 +#define MADERA_IRQ_SPKOUTR_SC 45 +#define MADERA_IRQ_HP1L_ENABLE_DONE 46 +#define MADERA_IRQ_HP1R_ENABLE_DONE 47 +#define MADERA_IRQ_HP2L_ENABLE_DONE 48 +#define MADERA_IRQ_HP2R_ENABLE_DONE 49 +#define MADERA_IRQ_HP3L_ENABLE_DONE 50 +#define MADERA_IRQ_HP3R_ENABLE_DONE 51 +#define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52 +#define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53 +#define MADERA_IRQ_SPK_SHUTDOWN 54 +#define MADERA_IRQ_SPK_OVERHEAT 55 +#define MADERA_IRQ_SPK_OVERHEAT_WARN 56 +#define MADERA_IRQ_GPIO1 57 +#define MADERA_IRQ_GPIO2 58 
+#define MADERA_IRQ_GPIO3 59 +#define MADERA_IRQ_GPIO4 60 +#define MADERA_IRQ_GPIO5 61 +#define MADERA_IRQ_GPIO6 62 +#define MADERA_IRQ_GPIO7 63 +#define MADERA_IRQ_GPIO8 64 +#define MADERA_IRQ_DSP1_BUS_ERR 65 +#define MADERA_IRQ_DSP2_BUS_ERR 66 +#define MADERA_IRQ_DSP3_BUS_ERR 67 +#define MADERA_IRQ_DSP4_BUS_ERR 68 +#define MADERA_IRQ_DSP5_BUS_ERR 69 +#define MADERA_IRQ_DSP6_BUS_ERR 70 +#define MADERA_IRQ_DSP7_BUS_ERR 71 + +#define MADERA_NUM_IRQ 72 + +/* + * These wrapper functions are for use by other child drivers of the + * same parent MFD. + */ +static inline int madera_get_irq_mapping(struct madera *madera, int irq) +{ + if (!madera->irq_dev) + return -ENODEV; + + return regmap_irq_get_virq(madera->irq_data, irq); +} + +static inline int madera_request_irq(struct madera *madera, int irq, + const char *name, + irq_handler_t handler, void *data) +{ + irq = madera_get_irq_mapping(madera, irq); + if (irq < 0) + return irq; + + return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name, + data); +} + +static inline void madera_free_irq(struct madera *madera, int irq, void *data) +{ + irq = madera_get_irq_mapping(madera, irq); + if (irq < 0) + return; + + free_irq(irq, data); +} + +static inline int madera_set_irq_wake(struct madera *madera, int irq, int on) +{ + irq = madera_get_irq_mapping(madera, irq); + if (irq < 0) + return irq; + + return irq_set_irq_wake(irq, on); +} + +#endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 068aa46f0d55..35965f41d7be 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -43,6 +43,7 @@ struct irq_chip; struct irq_data; struct cpumask; struct seq_file; +struct irq_affinity_desc; /* Number of irqs reserved for a legacy isa controller */ #define NUM_ISA_INTERRUPTS 16 @@ -266,7 +267,7 @@ extern bool irq_domain_check_msi_remap(void); extern void irq_set_default_host(struct irq_domain *host); extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, irq_hw_number_t hwirq, int node, - const struct cpumask *affinity); + const struct irq_affinity_desc *affinity); static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) { @@ -449,7 +450,8 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, unsigned int nr_irqs, int node, void *arg, - bool realloc, const struct cpumask *affinity); + bool realloc, + const struct irq_affinity_desc *affinity); extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early); extern void irq_domain_deactivate_irq(struct irq_data *irq_data); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index b708e5169d1d..0f919d5fe84f 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -575,6 +575,7 @@ struct transaction_s enum { T_RUNNING, T_LOCKED, + T_SWITCH, T_FLUSH, T_COMMIT, T_COMMIT_DFLUSH, @@ -662,13 +663,13 @@ struct transaction_s /* * Number of outstanding updates running on this transaction - * [t_handle_lock] + * [none] */ atomic_t t_updates; /* * Number of buffers reserved for use by all handles in this transaction - * handle but not yet modified. [t_handle_lock] + * handle but not yet modified. [none] */ atomic_t t_outstanding_credits; @@ -690,7 +691,7 @@ struct transaction_s ktime_t t_start_time; /* - * How many handles used this transaction? [t_handle_lock] + * How many handles used this transaction? 
[none] */ atomic_t t_handle_count; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 9e4e638fb505..b9b1bc5f9669 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -143,6 +143,15 @@ extern const struct kexec_file_ops * const kexec_file_loaders[]; int kexec_image_probe_default(struct kimage *image, void *buf, unsigned long buf_len); +int kexec_image_post_load_cleanup_default(struct kimage *image); + +/* + * If kexec_buf.mem is set to this value, kexec_locate_mem_hole() + * will try to allocate free memory. Arch may overwrite it. + */ +#ifndef KEXEC_BUF_MEM_UNKNOWN +#define KEXEC_BUF_MEM_UNKNOWN 0 +#endif /** * struct kexec_buf - parameters for finding a place for a buffer in memory @@ -174,6 +183,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, bool get_value); void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name); +void * __weak arch_kexec_kernel_image_load(struct kimage *image); int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, const Elf_Shdr *relsec, @@ -183,8 +193,6 @@ int __weak arch_kexec_apply_relocations(struct purgatory_info *pi, const Elf_Shdr *relsec, const Elf_Shdr *symtab); -int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf, - int (*func)(struct resource *, void *)); extern int kexec_add_buffer(struct kexec_buf *kbuf); int kexec_locate_mem_hole(struct kexec_buf *kbuf); diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e909413e4e38..e07e91daaacc 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -242,10 +242,13 @@ extern int arch_init_kprobes(void); extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); extern bool arch_within_kprobe_blacklist(unsigned long addr); +extern int arch_populate_kprobe_blacklist(void); extern bool arch_kprobe_on_func_entry(unsigned long offset); extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset); extern bool within_kprobe_blacklist(unsigned long addr); +extern int kprobe_add_ksym_blacklist(unsigned long entry); +extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end); struct kprobe_insn_cache { struct mutex mutex; @@ -379,6 +382,9 @@ int enable_kprobe(struct kprobe *kp); void dump_kprobe(struct kprobe *kp); +void *alloc_insn_page(void); +void free_insn_page(void *page); + #else /* !CONFIG_KPROBES: */ static inline int kprobes_built_in(void) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c926698040e0..c38cc5eb7e73 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -449,6 +449,7 @@ struct kvm { #endif long tlbs_dirty; struct list_head devices; + bool manual_dirty_log_protect; struct dentry *debugfs_dentry; struct kvm_stat_data **debugfs_stat_data; struct srcu_struct srcu; @@ -694,7 +695,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - void *data, int offset, unsigned long len); + void *data, unsigned int offset, + unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, gpa_t gpa, unsigned long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); @@ -753,7 +755,9 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty); int 
kvm_get_dirty_log_protect(struct kvm *kvm, - struct kvm_dirty_log *log, bool *is_dirty); + struct kvm_dirty_log *log, bool *flush); +int kvm_clear_dirty_log_protect(struct kvm *kvm, + struct kvm_clear_dirty_log *log, bool *flush); void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, @@ -762,9 +766,13 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log); +int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, + struct kvm_clear_dirty_log *log); int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status); +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, + struct kvm_enable_cap *cap); long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); diff --git a/include/linux/leds.h b/include/linux/leds.h index 7393a316d9fa..5263f87e1d2c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -51,6 +51,7 @@ struct led_classdev { #define LED_PANIC_INDICATOR BIT(20) #define LED_BRIGHT_HW_CHANGED BIT(21) #define LED_RETAIN_AT_SHUTDOWN BIT(22) +#define LED_INIT_DEFAULT_TRIGGER BIT(23) /* set_brightness_work / blink_timer flags, atomic, private. */ unsigned long work_flags; @@ -487,4 +488,24 @@ struct led_pattern { int brightness; }; +enum led_audio { + LED_AUDIO_MUTE, /* master mute LED */ + LED_AUDIO_MICMUTE, /* mic mute LED */ + NUM_AUDIO_LEDS +}; + +#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO) +enum led_brightness ledtrig_audio_get(enum led_audio type); +void ledtrig_audio_set(enum led_audio type, enum led_brightness state); +#else +static inline enum led_brightness ledtrig_audio_get(enum led_audio type) +{ + return LED_OFF; +} +static inline void ledtrig_audio_set(enum led_audio type, + enum led_brightness state) +{ +} +#endif + #endif /* __LINUX_LEDS_H_INCLUDED */ diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 7c47b1a471d4..7e020782ade2 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -79,6 +79,12 @@ #define ALIGN __ALIGN #define ALIGN_STR __ALIGN_STR +#ifndef GLOBAL +#define GLOBAL(name) \ + .globl name ASM_NL \ + name: +#endif + #ifndef ENTRY #define ENTRY(name) \ .globl name ASM_NL \ diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index 22443d7fb5cd..a99c58866860 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -57,6 +57,15 @@ static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) __clear_bit(nr, addr); } +static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, + int set) +{ + if (set) + linkmode_set_bit(nr, addr); + else + linkmode_clear_bit(nr, addr); +} + static inline void linkmode_change_bit(int nr, volatile unsigned long *addr) { __change_bit(nr, addr); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 1fd82ff99c65..c5335df2372f 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -97,8 +97,6 @@ struct lock_class { * Generation counter, when doing certain classes of graph walking, * to ensure that we check one node only once: */ - unsigned int version; - int name_version; const char *name; diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index bac395f1d00a..5228c62af416 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct 
*vma, unsigned long addr); -struct mempolicy *get_vma_policy(struct vm_area_struct *vma, - unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 517e60eecbcb..1293695245df 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -35,7 +35,7 @@ enum axp20x_variants { #define AXP152_ALDO_OP_MODE 0x13 #define AXP152_LDO0_CTRL 0x15 #define AXP152_DCDC2_V_OUT 0x23 -#define AXP152_DCDC2_V_SCAL 0x25 +#define AXP152_DCDC2_V_RAMP 0x25 #define AXP152_DCDC1_V_OUT 0x26 #define AXP152_DCDC3_V_OUT 0x27 #define AXP152_ALDO12_V_OUT 0x28 @@ -53,7 +53,7 @@ enum axp20x_variants { #define AXP20X_USB_OTG_STATUS 0x02 #define AXP20X_PWR_OUT_CTRL 0x12 #define AXP20X_DCDC2_V_OUT 0x23 -#define AXP20X_DCDC2_LDO3_V_SCAL 0x25 +#define AXP20X_DCDC2_LDO3_V_RAMP 0x25 #define AXP20X_DCDC3_V_OUT 0x27 #define AXP20X_LDO24_V_OUT 0x28 #define AXP20X_LDO3_V_OUT 0x29 diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h index b19c370fe81a..f346167c0e00 100644 --- a/include/linux/mfd/wm8994/pdata.h +++ b/include/linux/mfd/wm8994/pdata.h @@ -20,9 +20,6 @@ #define WM8994_NUM_AIF 3 struct wm8994_ldo_pdata { - /** GPIOs to enable regulator, 0 or less if not available */ - int enable; - const struct regulator_init_data *init_data; }; diff --git a/include/linux/mii.h b/include/linux/mii.h index 2da85b02e1c0..6fee8b1a4400 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -209,7 +209,7 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) /** * linkmode_adv_to_mii_ctrl1000_t - * advertising: the linkmode advertisement settings + * @advertising: the linkmode advertisement settings * * A small helper function that translates linkmode advertisement * settings to phy autonegotiation advertisements for the @@ -288,6 +288,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) } /** + * mii_stat1000_mod_linkmode_lpa_t + * @advertising: target the linkmode advertisement settings + * @adv: value of the MII_STAT1000 register + * + * A small helper function that translates MII_STAT1000 bits, when in + * 1000Base-T mode, to linkmode advertisement settings. Other bits in + * advertising are not changes. + */ +static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising, + u32 lpa) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + advertising, lpa & LPA_1000HALF); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising, lpa & LPA_1000FULL); +} + +/** * ethtool_adv_to_mii_adv_x * @ethadv: the ethtool advertisement settings * @@ -354,50 +373,104 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa) } /** + * mii_adv_mod_linkmode_adv_t + * @advertising:pointer to destination link mode. + * @adv: value of the MII_ADVERTISE register + * + * A small helper function that translates MII_ADVERTISE bits to + * linkmode advertisement settings. Leaves other bits unchanged. 
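+ *
+ * For example, a PHY driver could translate the MII_ADVERTISE register into
+ * a link-mode bitmap like this (sketch, error handling kept minimal):
+ *
+ *	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = { 0, };
+ *	int val = phy_read(phydev, MII_ADVERTISE);
+ *
+ *	if (val >= 0)
+ *		mii_adv_mod_linkmode_adv_t(adv, val);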
+ */ +static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising, + u32 adv) +{ + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + advertising, adv & ADVERTISE_10HALF); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising, adv & ADVERTISE_10FULL); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + advertising, adv & ADVERTISE_100HALF); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising, adv & ADVERTISE_100FULL); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising, + adv & ADVERTISE_PAUSE_CAP); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + advertising, adv & ADVERTISE_PAUSE_ASYM); +} + +/** * mii_adv_to_linkmode_adv_t * @advertising:pointer to destination link mode. * @adv: value of the MII_ADVERTISE register * * A small helper function that translates MII_ADVERTISE bits - * to linkmode advertisement settings. + * to linkmode advertisement settings. Clears the old value + * of advertising. */ static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising, u32 adv) { linkmode_zero(advertising); - if (adv & ADVERTISE_10HALF) - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, - advertising); - if (adv & ADVERTISE_10FULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, - advertising); - if (adv & ADVERTISE_100HALF) - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, - advertising); - if (adv & ADVERTISE_100FULL) - linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, - advertising); - if (adv & ADVERTISE_PAUSE_CAP) - linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising); - if (adv & ADVERTISE_PAUSE_ASYM) - linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); + mii_adv_mod_linkmode_adv_t(advertising, adv); +} + +/** + * mii_lpa_to_linkmode_lpa_t + * @lp_advertising: pointer to destination link mode. + * @lpa: value of the MII_LPA register + * + * A small helper function that translates MII_LPA bits to linkmode LP + * advertisement settings. Clears the old value of lp_advertising. + */ +static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising, + u32 lpa) +{ + mii_adv_to_linkmode_adv_t(lp_advertising, lpa); + + if (lpa & LPA_LPACK) + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lp_advertising); + +} + +/** + * mii_lpa_mod_linkmode_lpa_t + * @lp_advertising: pointer to destination link mode. + * @lpa: value of the MII_LPA register + * + * A small helper function that translates MII_LPA bits to linkmode LP + * advertisement settings. Leaves other bits unchanged. + */ +static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising, + u32 lpa) +{ + mii_adv_mod_linkmode_adv_t(lp_advertising, lpa); + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + lp_advertising, lpa & LPA_LPACK); } /** - * ethtool_adv_to_lcl_adv_t - * @advertising:pointer to ethtool advertising + * linkmode_adv_to_lcl_adv_t + * @advertising:pointer to linkmode advertising * - * A small helper function that translates ethtool advertising to LVL + * A small helper function that translates linkmode advertising to LVL * pause capabilities.
*/ -static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising) +static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) { u32 lcl_adv = 0; - if (advertising & ADVERTISED_Pause) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + advertising)) lcl_adv |= ADVERTISE_PAUSE_CAP; - if (advertising & ADVERTISED_Asym_Pause) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + advertising)) lcl_adv |= ADVERTISE_PAUSE_ASYM; return lcl_adv; diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index dca6ab4eaa99..36e412c3d657 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -226,6 +226,7 @@ enum { MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37, MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38, MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39, + MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40, }; enum { @@ -1136,7 +1137,8 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, - unsigned vector, int collapsed, int timestamp_en); + unsigned int vector, int collapsed, int timestamp_en, + void *buf_addr, bool user_cq); void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base, u8 flags, u8 usage); diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 31a750570c38..612c8c2f2466 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -60,7 +60,7 @@ struct mlx5_core_cq { } tasklet_ctx; int reset_notify_added; struct list_head reset_notify; - struct mlx5_eq *eq; + struct mlx5_eq_comp *eq; u16 uid; }; @@ -125,9 +125,9 @@ struct mlx5_cq_modify_params { }; enum { - CQE_SIZE_64 = 0, - CQE_SIZE_128 = 1, - CQE_SIZE_128_PAD = 2, + CQE_STRIDE_64 = 0, + CQE_STRIDE_128 = 1, + CQE_STRIDE_128_PAD = 2, }; #define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1) @@ -135,8 +135,8 @@ enum { static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en) { - return padding_128_en ? CQE_SIZE_128_PAD : - size == 64 ? CQE_SIZE_64 : CQE_SIZE_128; + return padding_128_en ? CQE_STRIDE_128_PAD : + size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128; } static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index b4c0457fbebd..8c4a820bd4c1 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -212,6 +212,13 @@ enum { MLX5_PFAULT_SUBTYPE_RDMA = 1, }; +enum wqe_page_fault_type { + MLX5_WQE_PF_TYPE_RMP = 0, + MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1, + MLX5_WQE_PF_TYPE_RESP = 2, + MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3, +}; + enum { MLX5_PERM_LOCAL_READ = 1 << 2, MLX5_PERM_LOCAL_WRITE = 1 << 3, @@ -294,9 +301,15 @@ enum { MLX5_EVENT_QUEUE_TYPE_DCT = 6, }; +/* mlx5 components can subscribe to any one of these events via + * the mlx5_eq_notifier_register API.
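+ * For example, a component interested in temperature warnings could
+ * register a handler like this (sketch only; the priv/temp_nb names and
+ * the handler are illustrative, not part of this patch):
+ *
+ *	MLX5_NB_INIT(&priv->temp_nb, temp_warn_handler, TEMP_WARN_EVENT);
+ *	mlx5_eq_notifier_register(dev, &priv->temp_nb);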
+ */ enum mlx5_event { + /* Special value to subscribe to any event */ + MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0, + /* HW events enum start: comp events are not subscribable */ MLX5_EVENT_TYPE_COMP = 0x0, - + /* HW Async events enum start: subscribable events */ MLX5_EVENT_TYPE_PATH_MIG = 0x01, MLX5_EVENT_TYPE_COMM_EST = 0x02, MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, @@ -317,6 +330,7 @@ enum mlx5_event { MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17, MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22, + MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24, MLX5_EVENT_TYPE_PPS_EVENT = 0x25, MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, @@ -334,6 +348,8 @@ enum mlx5_event { MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, + + MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1, }; enum { @@ -405,6 +421,7 @@ enum { MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, MLX5_OPCODE_BIND_MW = 0x18, MLX5_OPCODE_CONFIG_CMD = 0x1f, + MLX5_OPCODE_ENHANCED_MPSW = 0x29, MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, MLX5_RECV_OPCODE_SEND = 0x01, @@ -766,6 +783,11 @@ static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) return (cqe->op_own >> 2) & 0x3; } +static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe) +{ + return cqe->op_own >> 4; +} + static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) { return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index aa5963b5d38e..4d16ba04790e 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -46,10 +46,11 @@ #include <linux/mempool.h> #include <linux/interrupt.h> #include <linux/idr.h> +#include <linux/notifier.h> #include <linux/mlx5/device.h> #include <linux/mlx5/doorbell.h> -#include <linux/mlx5/srq.h> +#include <linux/mlx5/eq.h> #include <linux/timecounter.h> #include <linux/ptp_clock_kernel.h> @@ -85,18 +86,6 @@ enum { }; enum { - MLX5_EQ_VEC_PAGES = 0, - MLX5_EQ_VEC_CMD = 1, - MLX5_EQ_VEC_ASYNC = 2, - MLX5_EQ_VEC_PFAULT = 3, - MLX5_EQ_VEC_COMP_BASE, -}; - -enum { - MLX5_MAX_IRQ_NAME = 32 -}; - -enum { MLX5_ATOMIC_MODE_OFFSET = 16, MLX5_ATOMIC_MODE_IB_COMP = 1, MLX5_ATOMIC_MODE_CX = 2, @@ -205,16 +194,7 @@ struct mlx5_rsc_debug { }; enum mlx5_dev_event { - MLX5_DEV_EVENT_SYS_ERROR, - MLX5_DEV_EVENT_PORT_UP, - MLX5_DEV_EVENT_PORT_DOWN, - MLX5_DEV_EVENT_PORT_INITIALIZED, - MLX5_DEV_EVENT_LID_CHANGE, - MLX5_DEV_EVENT_PKEY_CHANGE, - MLX5_DEV_EVENT_GUID_CHANGE, - MLX5_DEV_EVENT_CLIENT_REREG, - MLX5_DEV_EVENT_PPS, - MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, + MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ }; enum mlx5_port_status { @@ -222,14 +202,6 @@ enum mlx5_port_status { MLX5_PORT_DOWN = 2, }; -enum mlx5_eq_type { - MLX5_EQ_TYPE_COMP, - MLX5_EQ_TYPE_ASYNC, -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - MLX5_EQ_TYPE_PF, -#endif -}; - struct mlx5_bfreg_info { u32 *sys_pages; int num_low_latency_bfregs; @@ -297,6 +269,8 @@ struct mlx5_cmd_stats { }; struct mlx5_cmd { + struct mlx5_nb nb; + void *cmd_alloc_buf; dma_addr_t alloc_dma; int alloc_size; @@ -366,51 +340,6 @@ struct mlx5_frag_buf_ctrl { u8 log_frag_strides; }; -struct mlx5_eq_tasklet { - struct list_head list; - struct list_head process_list; - struct tasklet_struct task; - /* lock on completion tasklet list */ - spinlock_t lock; -}; - -struct mlx5_eq_pagefault { - struct work_struct work; - /* Pagefaults lock */ - spinlock_t lock; - struct workqueue_struct *wq; - mempool_t *pool; -}; - -struct mlx5_cq_table { - /* protect radix tree */ - spinlock_t lock; - struct radix_tree_root tree; -}; - -struct 
mlx5_eq { - struct mlx5_core_dev *dev; - struct mlx5_cq_table cq_table; - __be32 __iomem *doorbell; - u32 cons_index; - struct mlx5_frag_buf buf; - int size; - unsigned int irqn; - u8 eqn; - int nent; - u64 mask; - struct list_head list; - int index; - struct mlx5_rsc_debug *dbg; - enum mlx5_eq_type type; - union { - struct mlx5_eq_tasklet tasklet_ctx; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - struct mlx5_eq_pagefault pf_ctx; -#endif - }; -}; - struct mlx5_core_psv { u32 psv_idx; struct psv_layout { @@ -463,36 +392,6 @@ struct mlx5_core_rsc_common { struct completion free; }; -struct mlx5_core_srq { - struct mlx5_core_rsc_common common; /* must be first */ - u32 srqn; - int max; - size_t max_gs; - size_t max_avail_gather; - int wqe_shift; - void (*event) (struct mlx5_core_srq *, enum mlx5_event); - - atomic_t refcount; - struct completion free; - u16 uid; -}; - -struct mlx5_eq_table { - void __iomem *update_ci; - void __iomem *update_arm_ci; - struct list_head comp_eqs_list; - struct mlx5_eq pages_eq; - struct mlx5_eq async_eq; - struct mlx5_eq cmd_eq; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - struct mlx5_eq pfault_eq; -#endif - int num_comp_vectors; - /* protect EQs list - */ - spinlock_t lock; -}; - struct mlx5_uars_page { void __iomem *map; bool wc; @@ -542,13 +441,8 @@ struct mlx5_core_health { }; struct mlx5_qp_table { - /* protect radix tree - */ - spinlock_t lock; - struct radix_tree_root tree; -}; + struct notifier_block nb; -struct mlx5_srq_table { /* protect radix tree */ spinlock_t lock; @@ -575,11 +469,6 @@ struct mlx5_core_sriov { int enabled_vfs; }; -struct mlx5_irq_info { - cpumask_var_t mask; - char name[MLX5_MAX_IRQ_NAME]; -}; - struct mlx5_fc_stats { spinlock_t counters_idr_lock; /* protects counters_idr */ struct idr counters_idr; @@ -593,10 +482,12 @@ struct mlx5_fc_stats { unsigned long sampling_interval; /* jiffies */ }; +struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; struct mlx5_lag; -struct mlx5_pagefault; +struct mlx5_devcom; +struct mlx5_eq_table; struct mlx5_rate_limit { u32 rate; @@ -619,37 +510,12 @@ struct mlx5_rl_table { struct mlx5_rl_entry *rl_entry; }; -enum port_module_event_status_type { - MLX5_MODULE_STATUS_PLUGGED = 0x1, - MLX5_MODULE_STATUS_UNPLUGGED = 0x2, - MLX5_MODULE_STATUS_ERROR = 0x3, - MLX5_MODULE_STATUS_NUM = 0x3, -}; - -enum port_module_event_error_type { - MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED, - MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE, - MLX5_MODULE_EVENT_ERROR_BUS_STUCK, - MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT, - MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST, - MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER, - MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE, - MLX5_MODULE_EVENT_ERROR_BAD_CABLE, - MLX5_MODULE_EVENT_ERROR_UNKNOWN, - MLX5_MODULE_EVENT_ERROR_NUM, -}; - -struct mlx5_port_module_event_stats { - u64 status_counters[MLX5_MODULE_STATUS_NUM]; - u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; -}; - struct mlx5_priv { char name[MLX5_MAX_NAME_LEN]; - struct mlx5_eq_table eq_table; - struct mlx5_irq_info *irq_info; + struct mlx5_eq_table *eq_table; /* pages stuff */ + struct mlx5_nb pg_nb; struct workqueue_struct *pg_wq; struct rb_root page_root; int fw_pages; @@ -659,8 +525,6 @@ struct mlx5_priv { struct mlx5_core_health health; - struct mlx5_srq_table srq_table; - /* start: qp staff */ struct mlx5_qp_table qp_table; struct dentry *qp_debugfs; @@ -690,28 +554,18 @@ struct mlx5_priv { struct list_head dev_list; struct list_head ctx_list; spinlock_t ctx_lock; - - struct list_head 
waiting_events_list; - bool is_accum_events; + struct mlx5_events *events; struct mlx5_flow_steering *steering; struct mlx5_mpfs *mpfs; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; + struct mlx5_devcom *devcom; unsigned long pci_dev_data; struct mlx5_fc_stats fc_stats; struct mlx5_rl_table rl_table; - struct mlx5_port_module_event_stats pme_stats; - -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - void (*pfault)(struct mlx5_core_dev *dev, - void *context, - struct mlx5_pagefault *pfault); - void *pfault_ctx; - struct srcu_struct pfault_srcu; -#endif struct mlx5_bfreg_data bfregs; struct mlx5_uars_page *uar; }; @@ -736,44 +590,6 @@ enum mlx5_pagefault_type_flags { MLX5_PFAULT_RDMA = 1 << 2, }; -/* Contains the details of a pagefault. */ -struct mlx5_pagefault { - u32 bytes_committed; - u32 token; - u8 event_subtype; - u8 type; - union { - /* Initiator or send message responder pagefault details. */ - struct { - /* Received packet size, only valid for responders. */ - u32 packet_size; - /* - * Number of resource holding WQE, depends on type. - */ - u32 wq_num; - /* - * WQE index. Refers to either the send queue or - * receive queue, according to event_subtype. - */ - u16 wqe_index; - } wqe; - /* RDMA responder pagefault details */ - struct { - u32 r_key; - /* - * Received packet size, minimal size page fault - * resolution required for forward progress. - */ - u32 packet_size; - u32 rdma_op_len; - u64 rdma_va; - } rdma; - }; - - struct mlx5_eq *eq; - struct work_struct work; -}; - struct mlx5_td { struct list_head tirs_list; u32 tdn; @@ -803,6 +619,8 @@ struct mlx5_pps { }; struct mlx5_clock { + struct mlx5_core_dev *mdev; + struct mlx5_nb pps_nb; seqlock_t lock; struct cyclecounter cycles; struct timecounter tc; @@ -810,7 +628,6 @@ struct mlx5_clock { u32 nominal_c_mult; unsigned long overflow_period; struct delayed_work overflow_work; - struct mlx5_core_dev *mdev; struct ptp_clock *ptp; struct ptp_clock_info ptp_info; struct mlx5_pps pps_info; @@ -843,9 +660,6 @@ struct mlx5_core_dev { /* sync interface state */ struct mutex intf_state_mutex; unsigned long intf_state; - void (*event) (struct mlx5_core_dev *dev, - enum mlx5_dev_event event, - unsigned long param); struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; @@ -859,9 +673,6 @@ struct mlx5_core_dev { #ifdef CONFIG_MLX5_FPGA struct mlx5_fpga_device *fpga; #endif -#ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rmap; -#endif struct mlx5_clock clock; struct mlx5_ib_clock_info *clock_info; struct page *clock_info_page; @@ -1070,13 +881,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev, gfp_t flags, int npages); void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev, struct mlx5_cmd_mailbox *head); -int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in); -int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq); -int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *out); -int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - u16 lwm, int is_srq); void mlx5_init_mkey_table(struct mlx5_core_dev *dev); void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, @@ -1095,9 +899,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const 
void *inb, void *outb, u16 opmod, u8 port); -void mlx5_pagealloc_init(struct mlx5_core_dev *dev); +int mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); -int mlx5_pagealloc_start(struct mlx5_core_dev *dev); +void mlx5_pagealloc_start(struct mlx5_core_dev *dev); void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, s32 npages); @@ -1108,9 +912,6 @@ void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); -void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); -struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, unsigned int *irqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1155,6 +956,9 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg, bool map_wc, bool fast_path); void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); +unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev); +struct cpumask * +mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector); unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, u8 roce_version, u8 roce_l3_type, const u8 *gid, @@ -1202,23 +1006,21 @@ struct mlx5_interface { void (*remove)(struct mlx5_core_dev *dev, void *context); int (*attach)(struct mlx5_core_dev *dev, void *context); void (*detach)(struct mlx5_core_dev *dev, void *context); - void (*event)(struct mlx5_core_dev *dev, void *context, - enum mlx5_dev_event event, unsigned long param); - void (*pfault)(struct mlx5_core_dev *dev, - void *context, - struct mlx5_pagefault *pfault); - void * (*get_dev)(void *context); int protocol; struct list_head list; }; -void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol); int mlx5_register_interface(struct mlx5_interface *intf); void mlx5_unregister_interface(struct mlx5_interface *intf); +int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); +int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb); + int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id); int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); +bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); +bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, @@ -1306,10 +1108,4 @@ enum { MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, }; -static inline const struct cpumask * -mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector) -{ - return dev->priv.irq_info[vector].mask; -} - #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h new file mode 100644 index 000000000000..00045cc4ea11 --- /dev/null +++ b/include/linux/mlx5/eq.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies. 
*/ + +#ifndef MLX5_CORE_EQ_H +#define MLX5_CORE_EQ_H + +enum { + MLX5_EQ_PAGEREQ_IDX = 0, + MLX5_EQ_CMD_IDX = 1, + MLX5_EQ_ASYNC_IDX = 2, + /* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */ + MLX5_EQ_PFAULT_IDX = 3, + MLX5_EQ_MAX_ASYNC_EQS, + /* completion eqs vector indices start here */ + MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS, +}; + +#define MLX5_NUM_CMD_EQE (32) +#define MLX5_NUM_ASYNC_EQE (0x1000) +#define MLX5_NUM_SPARE_EQE (0x80) + +struct mlx5_eq; +struct mlx5_core_dev; + +struct mlx5_eq_param { + u8 index; + int nent; + u64 mask; + void *context; + irq_handler_t handler; +}; + +struct mlx5_eq * +mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name, + struct mlx5_eq_param *param); +int +mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq); + +struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc); +void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm); + +/* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create EQs with MLX5_NUM_SPARE_EQE extra entries, + * so we must update our consumer index at + * least that often. + * + * mlx5_eq_update_cc must be called on every EQE @EQ irq handler + */ +static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc) +{ + if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) { + mlx5_eq_update_ci(eq, cc, 0); + cc = 0; + } + return cc; +} + +struct mlx5_nb { + struct notifier_block nb; + u8 event_type; +}; + +#define mlx5_nb_cof(ptr, type, member) \ + (container_of(container_of(ptr, struct mlx5_nb, nb), type, member)) + +#define MLX5_NB_INIT(name, handler, event) do { \ + (name)->nb.notifier_call = handler; \ + (name)->event_type = MLX5_EVENT_TYPE_##event; \ +} while (0) + +#endif /* MLX5_CORE_EQ_H */ diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 5660f07d3be0..9df51da04621 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -86,6 +86,11 @@ struct mlx5_flow_spec { u32 match_value[MLX5_ST_SZ_DW(fte_match_param)]; }; +enum { + MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0), + MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1), +}; + struct mlx5_flow_destination { enum mlx5_flow_destination_type type; union { @@ -96,7 +101,8 @@ struct mlx5_flow_destination { struct { u16 num; u16 vhca_id; - bool vhca_id_valid; + u32 reformat_id; + u8 flags; } vport; }; }; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index dbff9ff28f2c..821b751485fb 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -161,6 +161,8 @@ enum { MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, + MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774, + MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775, MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780, MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781, MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782, @@ -349,7 +351,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reformat_l3_tunnel_to_l2[0x1]; u8 reformat_l2_to_l3_tunnel[0x1]; u8 reformat_and_modify_action[0x1]; - u8 reserved_at_14[0xb]; + u8 reserved_at_15[0xb]; u8 reserved_at_20[0x2]; u8 log_max_ft_size[0x6]; u8 log_max_modify_header_context[0x8]; @@ -421,6 +423,16 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6; }; +struct mlx5_ifc_nvgre_key_bits { + u8 hi[0x18]; + u8 lo[0x8]; +}; + +union mlx5_ifc_gre_key_bits { + struct mlx5_ifc_nvgre_key_bits nvgre; + u8 key[0x20]; +}; + struct mlx5_ifc_fte_match_set_misc_bits { 
u8 reserved_at_0[0x8]; u8 source_sqn[0x18]; @@ -442,8 +454,7 @@ struct mlx5_ifc_fte_match_set_misc_bits { u8 reserved_at_64[0xc]; u8 gre_protocol[0x10]; - u8 gre_key_h[0x18]; - u8 gre_key_l[0x8]; + union mlx5_ifc_gre_key_bits gre_key; u8 vxlan_vni[0x18]; u8 reserved_at_b8[0x8]; @@ -582,11 +593,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits { }; struct mlx5_ifc_flow_table_eswitch_cap_bits { - u8 reserved_at_0[0x1c]; - u8 fdb_multi_path_to_table[0x1]; - u8 reserved_at_1d[0x1]; + u8 reserved_at_0[0x1a]; u8 multi_fdb_encap[0x1]; - u8 reserved_at_1e[0x1e1]; + u8 reserved_at_1b[0x1]; + u8 fdb_multi_path_to_table[0x1]; + u8 reserved_at_1d[0x3]; + + u8 reserved_at_20[0x1e0]; struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb; @@ -597,20 +610,28 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 reserved_at_800[0x7800]; }; +enum { + MLX5_COUNTER_SOURCE_ESWITCH = 0x0, + MLX5_COUNTER_FLOW_ESWITCH = 0x1, +}; + struct mlx5_ifc_e_switch_cap_bits { u8 vport_svlan_strip[0x1]; u8 vport_cvlan_strip[0x1]; u8 vport_svlan_insert[0x1]; u8 vport_cvlan_insert_if_not_exist[0x1]; u8 vport_cvlan_insert_overwrite[0x1]; - u8 reserved_at_5[0x18]; + u8 reserved_at_5[0x17]; + u8 counter_eswitch_affinity[0x1]; u8 merged_eswitch[0x1]; u8 nic_vport_node_guid_modify[0x1]; u8 nic_vport_port_guid_modify[0x1]; u8 vxlan_encap_decap[0x1]; u8 nvgre_encap_decap[0x1]; - u8 reserved_at_22[0x9]; + u8 reserved_at_22[0x1]; + u8 log_max_fdb_encap_uplink[0x5]; + u8 reserved_at_21[0x3]; u8 log_max_packet_reformat_context[0x5]; u8 reserved_2b[0x6]; u8 max_encap_header_size[0xa]; @@ -829,7 +850,7 @@ struct mlx5_ifc_vector_calc_cap_bits { struct mlx5_ifc_calc_op calc2; struct mlx5_ifc_calc_op calc3; - u8 reserved_at_e0[0x720]; + u8 reserved_at_c0[0x720]; }; enum { @@ -883,6 +904,10 @@ enum { MLX5_CAP_UMR_FENCE_NONE = 0x2, }; +enum { + MLX5_UCTX_CAP_RAW_TX = 1UL << 0, +}; + struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_0[0x30]; u8 vhca_id[0x10]; @@ -1043,7 +1068,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 vector_calc[0x1]; u8 umr_ptr_rlky[0x1]; u8 imaicl[0x1]; - u8 reserved_at_232[0x4]; + u8 qp_packet_based[0x1]; + u8 reserved_at_233[0x3]; u8 qkv[0x1]; u8 pkv[0x1]; u8 set_deth_sqpn[0x1]; @@ -1193,7 +1219,19 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 num_vhca_ports[0x8]; u8 reserved_at_618[0x6]; u8 sw_owner_id[0x1]; - u8 reserved_at_61f[0x1e1]; + u8 reserved_at_61f[0x1]; + + u8 max_num_of_monitor_counters[0x10]; + u8 num_ppcnt_monitor_counters[0x10]; + + u8 reserved_at_640[0x10]; + u8 num_q_monitor_counters[0x10]; + + u8 reserved_at_660[0x40]; + + u8 uctx_cap[0x20]; + + u8 reserved_at_6c0[0x140]; }; enum mlx5_flow_destination_type { @@ -1209,8 +1247,10 @@ enum mlx5_flow_destination_type { struct mlx5_ifc_dest_format_struct_bits { u8 destination_type[0x8]; u8 destination_id[0x18]; + u8 destination_eswitch_owner_vhca_id_valid[0x1]; - u8 reserved_at_21[0xf]; + u8 packet_reformat[0x1]; + u8 reserved_at_22[0xe]; u8 destination_eswitch_owner_vhca_id[0x10]; }; @@ -1220,6 +1260,14 @@ struct mlx5_ifc_flow_counter_list_bits { u8 reserved_at_20[0x20]; }; +struct mlx5_ifc_extended_dest_format_bits { + struct mlx5_ifc_dest_format_struct_bits destination_entry; + + u8 packet_reformat_id[0x20]; + + u8 reserved_at_60[0x20]; +}; + union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { struct mlx5_ifc_dest_format_struct_bits dest_format_struct; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; @@ -2249,7 +2297,8 @@ struct mlx5_ifc_qpc_bits { u8 st[0x8]; u8 reserved_at_10[0x3]; u8 pm_state[0x2]; - u8 
reserved_at_15[0x3]; + u8 reserved_at_15[0x1]; + u8 req_e2e_credit_mode[0x2]; u8 offload_type[0x4]; u8 end_padding_mode[0x2]; u8 reserved_at_1e[0x2]; @@ -2440,7 +2489,8 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_60[0x10]; u8 action[0x10]; - u8 reserved_at_80[0x8]; + u8 extended_destination[0x1]; + u8 reserved_at_80[0x7]; u8 destination_list_size[0x18]; u8 reserved_at_a0[0x8]; @@ -2473,14 +2523,15 @@ struct mlx5_ifc_xrc_srqc_bits { u8 wq_signature[0x1]; u8 cont_srq[0x1]; - u8 dbr_umem_valid[0x1]; + u8 reserved_at_22[0x1]; u8 rlky[0x1]; u8 basic_cyclic_rcv_wqe[0x1]; u8 log_rq_stride[0x3]; u8 xrcd[0x18]; u8 page_offset[0x6]; - u8 reserved_at_46[0x2]; + u8 reserved_at_46[0x1]; + u8 dbr_umem_valid[0x1]; u8 cqn[0x18]; u8 reserved_at_60[0x20]; @@ -3795,6 +3846,83 @@ enum { MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, }; +struct mlx5_ifc_arm_monitor_counter_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x20]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_arm_monitor_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +enum { + MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0, + MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1, +}; + +enum mlx5_monitor_counter_ppcnt { + MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0, + MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1, + MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2, + MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3, + MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4, + MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5, +}; + +enum { + MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4, +}; + +struct mlx5_ifc_monitor_counter_output_bits { + u8 reserved_at_0[0x4]; + u8 type[0x4]; + u8 reserved_at_8[0x8]; + u8 counter[0x10]; + + u8 counter_group_id[0x20]; +}; + +#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6) +#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1) +#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\ + MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1) + +struct mlx5_ifc_set_monitor_counter_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x10]; + u8 num_of_counters[0x10]; + + u8 reserved_at_60[0x20]; + + struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER]; +}; + +struct mlx5_ifc_set_monitor_counter_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + struct mlx5_ifc_query_vport_state_in_bits { u8 opcode[0x10]; u8 reserved_at_10[0x10]; @@ -4660,7 +4788,7 @@ enum { MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1, MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2, - MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3, + MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3, }; struct mlx5_ifc_query_flow_group_out_bits { @@ -5566,7 +5694,7 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { struct mlx5_ifc_modify_nic_vport_field_select_bits { u8 reserved_at_0[0x12]; u8 affiliation[0x1]; - u8 reserved_at_e[0x1]; + u8 reserved_at_13[0x1]; u8 disable_uc_local_lb[0x1]; u8 disable_mc_local_lb[0x1]; u8 node_guid[0x1]; @@ -6689,9 +6817,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits { struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry; - u8 
reserved_at_280[0x40]; + u8 reserved_at_280[0x60]; + u8 xrc_srq_umem_valid[0x1]; - u8 reserved_at_2c1[0x5bf]; + u8 reserved_at_2e1[0x1f]; + + u8 reserved_at_300[0x580]; u8 pas[0][0x40]; }; @@ -8160,7 +8291,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits { u8 port_access_reg_cap_mask_31_to_13[0x13]; u8 pbmc[0x1]; u8 pptb[0x1]; - u8 port_access_reg_cap_mask_10_to_0[0xb]; + u8 port_access_reg_cap_mask_10_to_09[0x2]; + u8 ppcnt[0x1]; + u8 port_access_reg_cap_mask_07_to_00[0x8]; }; struct mlx5_ifc_pcam_reg_bits { @@ -9024,7 +9157,7 @@ struct mlx5_ifc_dcbx_param_bits { u8 dcbx_cee_cap[0x1]; u8 dcbx_ieee_cap[0x1]; u8 dcbx_standby_cap[0x1]; - u8 reserved_at_0[0x5]; + u8 reserved_at_3[0x5]; u8 port_number[0x8]; u8 reserved_at_10[0xa]; u8 max_application_table_size[6]; @@ -9272,7 +9405,9 @@ struct mlx5_ifc_umem_bits { struct mlx5_ifc_uctx_bits { u8 modify_field_select[0x40]; - u8 reserved_at_40[0x1c0]; + u8 cap[0x20]; + + u8 reserved_at_60[0x1a0]; }; struct mlx5_ifc_create_umem_in_bits { diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 34aed6032f86..bf4bc01ffb0c 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -107,9 +107,6 @@ enum mlx5e_connector_type { #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) -#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF -#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF - int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index fbe322c966bc..b26ea9077384 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -596,6 +596,11 @@ int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id); int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, int reset, void *out, int out_size); +struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, + int res_num, + enum mlx5_res_type res_type); +void mlx5_core_res_put(struct mlx5_core_rsc_common *res); + static inline const char *mlx5_qp_type_str(int type) { switch (type) { diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h deleted file mode 100644 index 1b1f3c20c6a3..000000000000 --- a/include/linux/mlx5/srq.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef MLX5_SRQ_H -#define MLX5_SRQ_H - -#include <linux/mlx5/driver.h> - -enum { - MLX5_SRQ_FLAG_ERR = (1 << 0), - MLX5_SRQ_FLAG_WQ_SIG = (1 << 1), - MLX5_SRQ_FLAG_RNDV = (1 << 2), -}; - -struct mlx5_srq_attr { - u32 type; - u32 flags; - u32 log_size; - u32 wqe_shift; - u32 log_page_size; - u32 wqe_cnt; - u32 srqn; - u32 xrcd; - u32 page_offset; - u32 cqn; - u32 pd; - u32 lwm; - u32 user_index; - u64 db_record; - __be64 *pas; - u32 tm_log_list_size; - u32 tm_next_tag; - u32 tm_hw_phase_cnt; - u32 tm_sw_phase_cnt; - u16 uid; -}; - -struct mlx5_core_dev; - -void mlx5_init_srq_table(struct mlx5_core_dev *dev); -void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev); - -#endif /* MLX5_SRQ_H */ diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 7f5ca2cd3a32..a261d5528ff7 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h @@ -58,17 +58,6 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in, int inlen); void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn); -int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rmpn); -int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen); -int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out); -int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); -int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rmpn); -int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn); -int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm); - int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, diff --git a/include/linux/mm.h b/include/linux/mm.h index fcf9cc9d535f..5411de93a363 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address); static inline void mm_inc_nr_puds(struct mm_struct *mm) { + if (mm_pud_folded(mm)) + return; atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); } static inline void mm_dec_nr_puds(struct mm_struct *mm) { + if (mm_pud_folded(mm)) + return; atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes); } #endif @@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); static inline void mm_inc_nr_pmds(struct mm_struct *mm) { + if (mm_pmd_folded(mm)) + return; atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); } static inline void mm_dec_nr_pmds(struct mm_struct *mm) { + if (mm_pmd_folded(mm)) + return; atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes); } #endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5ed8f6292a53..2c471a2c43fa 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -206,6 +206,11 @@ struct page { #endif } _struct_page_alignment; +/* + * Used for sizing the vmemmap region on some architectures + */ +#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) + #define 
PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 4224902a8e22..4332199c71c2 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -42,6 +42,7 @@ #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 #define SDIO_DEVICE_ID_CYPRESS_4373 0x4373 +#define SDIO_DEVICE_ID_CYPRESS_43012 43012 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 847705a6d0ec..077d797d1f60 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -314,7 +314,7 @@ enum zone_type { * Architecture Limit * --------------------------- * parisc, ia64, sparc <4G - * s390 <2G + * s390, powerpc <2G * arm Various * alpha Unlimited or 0-16MB. * @@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end); static inline void memory_present(int nid, unsigned long start, unsigned long end) {} #endif +#if defined(CONFIG_SPARSEMEM) +void memblocks_present(void); +#else +static inline void memblocks_present(void) {} +#endif + #ifdef CONFIG_HAVE_MEMORYLESS_NODES int local_memory_node(int node_id); #else diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 01797cb4587e..f9bd2f34b99f 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -448,6 +448,23 @@ struct pci_epf_device_id { kernel_ulong_t driver_data; }; +/* i3c */ + +#define I3C_MATCH_DCR 0x1 +#define I3C_MATCH_MANUF 0x2 +#define I3C_MATCH_PART 0x4 +#define I3C_MATCH_EXTRA_INFO 0x8 + +struct i3c_device_id { + __u8 match_flags; + __u8 dcr; + __u16 manuf_id; + __u16 part_id; + __u16 extra_info; + + const void *data; +}; + /* spi */ #define SPI_NAME_SIZE 32 @@ -565,7 +582,7 @@ struct platform_device_id { /** * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus * @phy_id: The result of - * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask + * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask * for this PHY type * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0 * is used to terminate an array of struct mdio_device_id. 
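As a rough illustration of the @phy_id / @phy_id_mask convention documented above, a PHY driver's MDIO match table could look like the sketch below; the ID value, the mask and the table name are made up for illustration and are not part of these patches:

	#include <linux/mod_devicetable.h>
	#include <linux/module.h>

	static const struct mdio_device_id example_phy_tbl[] = {
		/* (MII_PHYSID1 << 16 | MII_PHYSID2) & phy_id_mask */
		{ 0x00112230, 0xfffffff0 },
		{ }	/* a zero phy_id_mask terminates the table */
	};
	MODULE_DEVICE_TABLE(mdio, example_phy_tbl);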
diff --git a/include/linux/module.h b/include/linux/module.h index fce6b4335e36..d5453eb5a68b 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -432,6 +432,10 @@ struct module { unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; #endif +#ifdef CONFIG_BPF_EVENTS + unsigned int num_bpf_raw_events; + struct bpf_raw_event_map *bpf_raw_events; +#endif #ifdef HAVE_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; @@ -486,6 +490,13 @@ struct module { #define MODULE_ARCH_INIT {} #endif +#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE +static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym) +{ + return sym->st_value; +} +#endif + extern struct mutex module_mutex; /* FIXME: It'd be nice to isolate modules during init, too, so they diff --git a/include/linux/msi.h b/include/linux/msi.h index 0e9c50052ff3..784fb52b9900 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -76,7 +76,7 @@ struct msi_desc { unsigned int nvec_used; struct device *dev; struct msi_msg msg; - struct cpumask *affinity; + struct irq_affinity_desc *affinity; union { /* PCI MSI/X specific data */ @@ -116,6 +116,8 @@ struct msi_desc { list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) #define for_each_msi_entry(desc, dev) \ list_for_each_entry((desc), dev_to_msi_list((dev)), list) +#define for_each_msi_entry_safe(desc, tmp, dev) \ + list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list) #ifdef CONFIG_PCI_MSI #define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) @@ -136,7 +138,7 @@ static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) #endif /* CONFIG_PCI_MSI */ struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, - const struct cpumask *affinity); + const struct irq_affinity_desc *affinity); void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 9b57a9b1b081..cbf77168658c 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h @@ -377,6 +377,7 @@ struct cfi_fixup { #define CFI_MFR_SHARP 0x00B0 #define CFI_MFR_SST 0x00BF #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_MICRON 0x002C /* Micron */ #define CFI_MFR_TOSHIBA 0x0098 #define CFI_MFR_WINBOND 0x00DA diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index cd0be91bdefa..ba8fa9072aca 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -207,6 +207,7 @@ struct mtd_debug_info { struct mtd_info { u_char type; uint32_t flags; + uint32_t orig_flags; /* Flags as before running mtd checks */ uint64_t size; // Total size of the MTD /* "Major" erase size for the device. Naïve users may take this @@ -386,7 +387,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd) return dev_of_node(&mtd->dev); } -static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) +static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) { return ops->mode == MTD_OPS_AUTO_OOB ? 
mtd->oobavail : mtd->oobsize; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index abe975c87b90..7f53ece2c039 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand) */ static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) { - return (u64)nand->memorg.luns_per_target * - nand->memorg.eraseblocks_per_lun * - nand->memorg.pages_per_eraseblock; + return nand->memorg.ntargets * nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun; } /** @@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, } /** - * nanddev_pos_next_eraseblock() - Move a position to the next page + * nanddev_pos_next_page() - Move a position to the next page * @nand: NAND device * @pos: the position to update * diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index e10b126e148f..33e240acdc6d 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -203,9 +203,12 @@ enum nand_ecc_algo { */ #define NAND_IS_BOOT_MEDIUM 0x00400000 -/* Options set by nand scan */ -/* Nand scan has allocated controller struct */ -#define NAND_CONTROLLER_ALLOC 0x80000000 +/* + * Do not try to tweak the timings at runtime. This is needed when the + * controller initializes the timings on itself or when it relies on + * configuration done by the bootloader. + */ +#define NAND_KEEP_TIMINGS 0x00800000 /* Cell info constants */ #define NAND_CI_CHIPNR_MSK 0x03 @@ -245,49 +248,6 @@ struct nand_id { }; /** - * struct nand_controller_ops - Controller operations - * - * @attach_chip: this method is called after the NAND detection phase after - * flash ID and MTD fields such as erase size, page size and OOB - * size have been set up. ECC requirements are available if - * provided by the NAND chip or device tree. Typically used to - * choose the appropriate ECC configuration and allocate - * associated resources. - * This hook is optional. - * @detach_chip: free all resources allocated/claimed in - * nand_controller_ops->attach_chip(). - * This hook is optional. - */ -struct nand_controller_ops { - int (*attach_chip)(struct nand_chip *chip); - void (*detach_chip)(struct nand_chip *chip); -}; - -/** - * struct nand_controller - Structure used to describe a NAND controller - * - * @lock: protection lock - * @active: the mtd device which holds the controller currently - * @wq: wait queue to sleep on if a NAND operation is in - * progress used instead of the per chip wait queue - * when a hw controller is available. - * @ops: NAND controller operations. - */ -struct nand_controller { - spinlock_t lock; - struct nand_chip *active; - wait_queue_head_t wq; - const struct nand_controller_ops *ops; -}; - -static inline void nand_controller_init(struct nand_controller *nfc) -{ - nfc->active = NULL; - spin_lock_init(&nfc->lock); - init_waitqueue_head(&nfc->wq); -} - -/** * struct nand_ecc_step_info - ECC step information of ECC engine * @stepsize: data bytes per ECC step * @strengths: array of supported strengths @@ -879,18 +839,21 @@ struct nand_op_parser { /** * struct nand_operation - NAND operation descriptor + * @cs: the CS line to select for this NAND operation * @instrs: array of instructions to execute * @ninstrs: length of the @instrs array * * The actual operation structure that will be passed to chip->exec_op(). 
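 *
 * For instance, a reset sequence could be described as below before being
 * handed to the controller's ->exec_op() hook (sketch only; the instruction
 * list and the 200ms timeout are illustrative, not part of this patch):
 *
 *	const struct nand_op_instr instrs[] = {
 *		NAND_OP_CMD(NAND_CMD_RESET, 0),
 *		NAND_OP_WAIT_RDY(200, 0),
 *	};
 *	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);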
*/ struct nand_operation { + unsigned int cs; const struct nand_op_instr *instrs; unsigned int ninstrs; }; -#define NAND_OPERATION(_instrs) \ +#define NAND_OPERATION(_cs, _instrs) \ { \ + .cs = _cs, \ .instrs = _instrs, \ .ninstrs = ARRAY_SIZE(_instrs), \ } @@ -898,11 +861,68 @@ struct nand_operation { int nand_op_parser_exec_op(struct nand_chip *chip, const struct nand_op_parser *parser, const struct nand_operation *op, bool check_only); +/** + * struct nand_controller_ops - Controller operations + * + * @attach_chip: this method is called after the NAND detection phase after + * flash ID and MTD fields such as erase size, page size and OOB + * size have been set up. ECC requirements are available if + * provided by the NAND chip or device tree. Typically used to + * choose the appropriate ECC configuration and allocate + * associated resources. + * This hook is optional. + * @detach_chip: free all resources allocated/claimed in + * nand_controller_ops->attach_chip(). + * This hook is optional. + * @exec_op: controller specific method to execute NAND operations. + * This method replaces chip->legacy.cmdfunc(), + * chip->legacy.{read,write}_{buf,byte,word}(), + * chip->legacy.dev_ready() and chip->legacy.waifunc(). + * @setup_data_interface: setup the data interface and timing. If + * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this + * means the configuration should not be applied but + * only checked. + * This hook is optional. + */ +struct nand_controller_ops { + int (*attach_chip)(struct nand_chip *chip); + void (*detach_chip)(struct nand_chip *chip); + int (*exec_op)(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only); + int (*setup_data_interface)(struct nand_chip *chip, int chipnr, + const struct nand_data_interface *conf); +}; + +/** + * struct nand_controller - Structure used to describe a NAND controller + * + * @lock: protection lock + * @active: the mtd device which holds the controller currently + * @wq: wait queue to sleep on if a NAND operation is in + * progress used instead of the per chip wait queue + * when a hw controller is available. + * @ops: NAND controller operations. + */ +struct nand_controller { + spinlock_t lock; + struct nand_chip *active; + wait_queue_head_t wq; + const struct nand_controller_ops *ops; +}; + +static inline void nand_controller_init(struct nand_controller *nfc) +{ + nfc->active = NULL; + spin_lock_init(&nfc->lock); + init_waitqueue_head(&nfc->wq); +} /** * struct nand_legacy - NAND chip legacy fields/hooks * @IO_ADDR_R: address to read the 8 I/O lines of the flash device * @IO_ADDR_W: address to write the 8 I/O lines of the flash device + * @select_chip: select/deselect a specific target/die * @read_byte: read one byte from the chip * @write_byte: write a single byte to the chip on the low 8 I/O lines * @write_buf: write data from the buffer to the chip @@ -921,6 +941,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip, * @get_features: get the NAND chip features * @chip_delay: chip dependent delay for transferring data from array to read * regs (tR). + * @dummy_controller: dummy controller implementation for drivers that can + * only control a single chip * * If you look at this structure you're already wrong. These fields/hooks are * all deprecated. 
@@ -928,6 +950,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip, struct nand_legacy { void __iomem *IO_ADDR_R; void __iomem *IO_ADDR_W; + void (*select_chip)(struct nand_chip *chip, int cs); u8 (*read_byte)(struct nand_chip *chip); void (*write_byte)(struct nand_chip *chip, u8 byte); void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len); @@ -945,6 +968,7 @@ struct nand_legacy { int (*get_features)(struct nand_chip *chip, int feature_addr, u8 *subfeature_para); int chip_delay; + struct nand_controller dummy_controller; }; /** @@ -955,17 +979,10 @@ struct nand_legacy { * you're modifying an existing driver that is using those * fields/hooks, you should consider reworking the driver * avoid using them. - * @select_chip: [REPLACEABLE] select chip nr - * @exec_op: controller specific method to execute NAND operations. - * This method replaces ->cmdfunc(), - * ->legacy.{read,write}_{buf,byte,word}(), - * ->legacy.dev_ready() and ->waifunc(). * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for * setting the read-retry mode. Mostly needed for MLC NAND. * @ecc: [BOARDSPECIFIC] ECC control structure * @buf_align: minimum buffer alignment required by a platform - * @dummy_controller: dummy controller implementation for drivers that can - * only control a single chip * @state: [INTERN] the current state of the NAND device * @oob_poi: "poison value buffer," used for laying out OOB data * before writing @@ -1012,11 +1029,11 @@ struct nand_legacy { * this nand device will encounter their life times. * @blocks_per_die: [INTERN] The number of PEBs in a die * @data_interface: [INTERN] NAND interface timing information + * @cur_cs: currently selected target. -1 means no target selected, + * otherwise we should always have cur_cs >= 0 && + * cur_cs < numchips. NAND Controller drivers should not + * modify this value, but they're allowed to read it. * @read_retries: [INTERN] the number of read retry modes supported - * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If - * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this - * means the configuration should not be applied but - * only checked. * @bbt: [INTERN] bad block table pointer * @bbt_td: [REPLACEABLE] bad block table descriptor for flash * lookup. @@ -1037,13 +1054,7 @@ struct nand_chip { struct nand_legacy legacy; - void (*select_chip)(struct nand_chip *chip, int cs); - int (*exec_op)(struct nand_chip *chip, - const struct nand_operation *op, - bool check_only); int (*setup_read_retry)(struct nand_chip *chip, int retry_mode); - int (*setup_data_interface)(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf); unsigned int options; unsigned int bbt_options; @@ -1073,6 +1084,8 @@ struct nand_chip { struct nand_data_interface data_interface; + int cur_cs; + int read_retries; flstate_t state; @@ -1082,7 +1095,6 @@ struct nand_chip { struct nand_ecc_ctrl ecc; unsigned long buf_align; - struct nand_controller dummy_controller; uint8_t *bbt; struct nand_bbt_descr *bbt_td; @@ -1098,15 +1110,6 @@ struct nand_chip { } manufacturer; }; -static inline int nand_exec_op(struct nand_chip *chip, - const struct nand_operation *op) -{ - if (!chip->exec_op) - return -ENOTSUPP; - - return chip->exec_op(chip, op, false); -} - extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; @@ -1345,5 +1348,12 @@ void nand_release(struct nand_chip *chip); * instruction and have no physical pin to check it. 
*/ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); +struct gpio_desc; +int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, + unsigned long timeout_ms); + +/* Select/deselect a NAND target. */ +void nand_select_target(struct nand_chip *chip, unsigned int cs); +void nand_deselect_target(struct nand_chip *chip); #endif /* __LINUX_MTD_RAWNAND_H */ diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index c759d403cbc0..78fc2d4218c8 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h @@ -1,20 +1,8 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * SuperH FLCTL nand controller * * Copyright © 2008 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; version 2 of the License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef __SH_FLCTL_H__ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 7f0c7303575e..fa2d89e38e40 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -1,10 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2014 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
*/ #ifndef __LINUX_MTD_SPI_NOR_H @@ -23,7 +19,8 @@ #define SNOR_MFR_ATMEL CFI_MFR_ATMEL #define SNOR_MFR_GIGADEVICE 0xc8 #define SNOR_MFR_INTEL CFI_MFR_INTEL -#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */ +#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */ #define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX #define SNOR_MFR_SPANSION CFI_MFR_AMD #define SNOR_MFR_SST CFI_MFR_SST @@ -236,6 +233,8 @@ enum spi_nor_option_flags { SNOR_F_READY_XSR_RDY = BIT(4), SNOR_F_USE_CLSR = BIT(5), SNOR_F_BROKEN_RESET = BIT(6), + SNOR_F_4B_OPCODES = BIT(7), + SNOR_F_HAS_4BAIT = BIT(8), }; /** diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 088ff96c3eb6..b92e2aa955b6 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -194,8 +194,10 @@ struct spinand_manufacturer { }; /* SPI NAND manufacturers */ +extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; extern const struct spinand_manufacturer macronix_spinand_manufacturer; extern const struct spinand_manufacturer micron_spinand_manufacturer; +extern const struct spinand_manufacturer toshiba_spinand_manufacturer; extern const struct spinand_manufacturer winbond_spinand_manufacturer; /** diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index c79e859408e6..fd458389f7d1 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h @@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim, } /* fall through */ case NET_DIM_START_MEASURE: + net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, + &dim->start_sample); dim->state = NET_DIM_MEASURE_IN_PROGRESS; break; case NET_DIM_APPLY_NEW_PROFILE: diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index dc1d9ed33b31..1377d085ef99 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -845,6 +845,8 @@ enum tc_setup_type { TC_SETUP_QDISC_PRIO, TC_SETUP_QDISC_MQ, TC_SETUP_QDISC_ETF, + TC_SETUP_ROOT_QDISC, + TC_SETUP_QDISC_GRED, }; /* These structures hold the attributes of bpf state that are being passed @@ -863,9 +865,6 @@ enum bpf_netdev_command { XDP_QUERY_PROG, XDP_QUERY_PROG_HW, /* BPF program for offload callbacks, invoked at program load time. */ - BPF_OFFLOAD_VERIFIER_PREP, - BPF_OFFLOAD_TRANSLATE, - BPF_OFFLOAD_DESTROY, BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE, XDP_QUERY_XSK_UMEM, @@ -891,15 +890,6 @@ struct netdev_bpf { /* flags with which program was installed */ u32 prog_flags; }; - /* BPF_OFFLOAD_VERIFIER_PREP */ - struct { - struct bpf_prog *prog; - const struct bpf_prog_offload_ops *ops; /* callee set */ - } verifier; - /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ - struct { - struct bpf_prog *prog; - } offload; /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ struct { struct bpf_offloaded_map *offmap; @@ -1175,7 +1165,7 @@ struct dev_ifalias { * entries to skb and update idx with the number of entries. 
* * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, - * u16 flags) + * u16 flags, struct netlink_ext_ack *extack) * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, * struct net_device *dev, u32 filter_mask, * int nlflags) @@ -1397,10 +1387,16 @@ struct net_device_ops { struct net_device *dev, struct net_device *filter_dev, int *idx); - + int (*ndo_fdb_get)(struct sk_buff *skb, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, u32 portid, u32 seq, + struct netlink_ext_ack *extack); int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags); + u16 flags, + struct netlink_ext_ack *extack); int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, @@ -2388,13 +2384,13 @@ struct pcpu_sw_netstats { u64 tx_packets; u64 tx_bytes; struct u64_stats_sync syncp; -}; +} __aligned(4 * sizeof(u64)); struct pcpu_lstats { u64 packets; u64 bytes; struct u64_stats_sync syncp; -}; +} __aligned(2 * sizeof(u64)); #define __netdev_alloc_pcpu_stats(type, gfp) \ ({ \ @@ -2459,7 +2455,8 @@ enum netdev_cmd { NETDEV_REGISTER, NETDEV_UNREGISTER, NETDEV_CHANGEMTU, /* notify after mtu change happened */ - NETDEV_CHANGEADDR, + NETDEV_CHANGEADDR, /* notify after the address change */ + NETDEV_PRE_CHANGEADDR, /* notify before the address change */ NETDEV_GOING_DOWN, NETDEV_CHANGENAME, NETDEV_FEAT_CHANGE, @@ -2521,6 +2518,11 @@ struct netdev_notifier_changelowerstate_info { void *lower_state_info; /* is lower dev state */ }; +struct netdev_notifier_pre_changeaddr_info { + struct netdev_notifier_info info; /* must be first */ + const unsigned char *dev_addr; +}; + static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, struct net_device *dev) { @@ -2615,7 +2617,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name); struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); -int dev_open(struct net_device *dev); +int dev_open(struct net_device *dev, struct netlink_ext_ack *extack); void dev_close(struct net_device *dev); void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); @@ -3190,6 +3192,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, #endif } +/* Variant of netdev_tx_sent_queue() for drivers that are aware + * that they should not test BQL status themselves. + * We do want to change __QUEUE_STATE_STACK_XOFF only for the last + * skb of a batch. + * Returns true if the doorbell must be used to kick the NIC. 
+ */ +static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue, + unsigned int bytes, + bool xmit_more) +{ + if (xmit_more) { +#ifdef CONFIG_BQL + dql_queued(&dev_queue->dql, bytes); +#endif + return netif_tx_queue_stopped(dev_queue); + } + netdev_tx_sent_queue(dev_queue, bytes); + return true; +} + /** * netdev_sent_queue - report the number of bytes queued to hardware * @dev: network device @@ -3204,6 +3226,14 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); } +static inline bool __netdev_sent_queue(struct net_device *dev, + unsigned int bytes, + bool xmit_more) +{ + return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes, + xmit_more); +} + static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, unsigned int pkts, unsigned int bytes) { @@ -3593,8 +3623,10 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, int dev_ifconf(struct net *net, struct ifconf *, int); int dev_ethtool(struct net *net, struct ifreq *); unsigned int dev_get_flags(const struct net_device *); -int __dev_change_flags(struct net_device *, unsigned int flags); -int dev_change_flags(struct net_device *, unsigned int); +int __dev_change_flags(struct net_device *dev, unsigned int flags, + struct netlink_ext_ack *extack); +int dev_change_flags(struct net_device *dev, unsigned int flags, + struct netlink_ext_ack *extack); void __dev_notify_flags(struct net_device *, unsigned int old_flags, unsigned int gchanges); int dev_change_name(struct net_device *, const char *); @@ -3607,7 +3639,10 @@ int dev_set_mtu_ext(struct net_device *dev, int mtu, int dev_set_mtu(struct net_device *, int); int dev_change_tx_queue_len(struct net_device *, unsigned long); void dev_set_group(struct net_device *, int); -int dev_set_mac_address(struct net_device *, struct sockaddr *); +int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr, + struct netlink_ext_ack *extack); +int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa, + struct netlink_ext_ack *extack); int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid); @@ -4048,6 +4083,16 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, int (*sync)(struct net_device *, const unsigned char *), int (*unsync)(struct net_device *, const unsigned char *)); +int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*sync)(struct net_device *, + const unsigned char *, int), + int (*unsync)(struct net_device *, + const unsigned char *, int)); +void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list, + struct net_device *dev, + int (*unsync)(struct net_device *, + const unsigned char *, int)); void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, struct net_device *dev, int (*unsync)(struct net_device *, @@ -4312,9 +4357,10 @@ static inline bool can_checksum_protocol(netdev_features_t features, } #ifdef CONFIG_BUG -void netdev_rx_csum_fault(struct net_device *dev); +void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb); #else -static inline void netdev_rx_csum_fault(struct net_device *dev) +static inline void netdev_rx_csum_fault(struct net_device *dev, + struct sk_buff *skb) { } #endif @@ -4340,7 +4386,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi struct netdev_queue *txq, bool more) { const struct net_device_ops *ops = 
dev->netdev_ops; - int rc; + netdev_tx_t rc; rc = __netdev_start_xmit(ops, skb, dev, more); if (rc == NETDEV_TX_OK) diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 34fc80f3eb90..f2e1e6b13ca4 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -303,18 +303,18 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set) /* Netlink CB args */ enum { IPSET_CB_NET = 0, /* net namespace */ + IPSET_CB_PROTO, /* ipset protocol */ IPSET_CB_DUMP, /* dump single set/all sets */ IPSET_CB_INDEX, /* set index */ IPSET_CB_PRIVATE, /* set private data */ IPSET_CB_ARG0, /* type specific */ - IPSET_CB_ARG1, }; /* register and unregister set references */ extern ip_set_id_t ip_set_get_byname(struct net *net, const char *name, struct ip_set **set); extern void ip_set_put_byindex(struct net *net, ip_set_id_t index); -extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index); +extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name); extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index); extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index); diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h index 8e2bab1e8e90..70877f8de7e9 100644 --- a/include/linux/netfilter/ipset/ip_set_comment.h +++ b/include/linux/netfilter/ipset/ip_set_comment.h @@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, rcu_assign_pointer(comment->c, c); } -/* Used only when dumping a set, protected by rcu_read_lock_bh() */ +/* Used only when dumping a set, protected by rcu_read_lock() */ static inline int ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment) { - struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c); + struct ip_set_comment_rcu *c = rcu_dereference(comment->c); if (!c) return 0; diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index b8d95564bd53..6989e2e4eabf 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h @@ -21,6 +21,19 @@ struct nf_ct_gre_keymap { struct nf_conntrack_tuple tuple; }; +enum grep_conntrack { + GRE_CT_UNREPLIED, + GRE_CT_REPLIED, + GRE_CT_MAX +}; + +struct netns_proto_gre { + struct nf_proto_net nf; + rwlock_t keymap_lock; + struct list_head keymap_list; + unsigned int gre_timeouts[GRE_CT_MAX]; +}; + /* add new tuple->key_reply pair to keymap */ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, struct nf_conntrack_tuple *t); @@ -28,7 +41,5 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, /* delete keymap entries */ void nf_ct_gre_keymap_destroy(struct nf_conn *ct); -void nf_nat_need_gre(void); - #endif /* __KERNEL__ */ #endif /* _CONNTRACK_PROTO_GRE_H */ diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 4a520d3304a2..cf09ab37b45b 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id) } #endif /* CONFIG_PROVE_LOCKING */ -/* - * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex - * - * @p: The pointer to read, prior to dereferencing - * @ss: The nfnetlink subsystem ID - * - * Return the value of the specified RCU-protected pointer, but omit - * the 
READ_ONCE(), because caller holds the NFNL subsystem mutex. - */ -#define nfnl_dereference(p, ss) \ - rcu_dereference_protected(p, lockdep_nfnl_is_held(ss)) - #define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index fa0686500970..5f2614d02e03 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -17,43 +17,58 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb) skb_dst_drop(skb); } +static inline struct nf_bridge_info * +nf_bridge_info_get(const struct sk_buff *skb) +{ + return skb_ext_find(skb, SKB_EXT_BRIDGE_NF); +} + +static inline bool nf_bridge_info_exists(const struct sk_buff *skb) +{ + return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF); +} + static inline int nf_bridge_get_physinif(const struct sk_buff *skb) { - struct nf_bridge_info *nf_bridge; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - if (skb->nf_bridge == NULL) + if (!nf_bridge) return 0; - nf_bridge = skb->nf_bridge; return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; } static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) { - struct nf_bridge_info *nf_bridge; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); - if (skb->nf_bridge == NULL) + if (!nf_bridge) return 0; - nf_bridge = skb->nf_bridge; return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0; } static inline struct net_device * nf_bridge_get_physindev(const struct sk_buff *skb) { - return skb->nf_bridge ? skb->nf_bridge->physindev : NULL; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + + return nf_bridge ? nf_bridge->physindev : NULL; } static inline struct net_device * nf_bridge_get_physoutdev(const struct sk_buff *skb) { - return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + + return nf_bridge ? 
nf_bridge->physoutdev : NULL; } static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb) { - return skb->nf_bridge && skb->nf_bridge->in_prerouting; + const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + + return nf_bridge && nf_bridge->in_prerouting; } #else #define br_drop_fake_rtable(skb) do { } while (0) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 4da90a6ab536..4e8add270200 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -34,8 +34,8 @@ struct netlink_skb_parms { #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) -extern void netlink_table_grab(void); -extern void netlink_table_ungrab(void); +void netlink_table_grab(void); +void netlink_table_ungrab(void); #define NL_CFG_F_NONROOT_RECV (1 << 0) #define NL_CFG_F_NONROOT_SEND (1 << 1) @@ -51,7 +51,7 @@ struct netlink_kernel_cfg { bool (*compare)(struct net *net, struct sock *sk); }; -extern struct sock *__netlink_kernel_create(struct net *net, int unit, +struct sock *__netlink_kernel_create(struct net *net, int unit, struct module *module, struct netlink_kernel_cfg *cfg); static inline struct sock * @@ -110,24 +110,33 @@ struct netlink_ext_ack { } \ } while (0) -extern void netlink_kernel_release(struct sock *sk); -extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups); -extern int netlink_change_ngroups(struct sock *sk, unsigned int groups); -extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); -extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, - const struct netlink_ext_ack *extack); -extern int netlink_has_listeners(struct sock *sk, unsigned int group); - -extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); -extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, - __u32 group, gfp_t allocation); -extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, - __u32 portid, __u32 group, gfp_t allocation, - int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), - void *filter_data); -extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); -extern int netlink_register_notifier(struct notifier_block *nb); -extern int netlink_unregister_notifier(struct notifier_block *nb); +static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack, + u64 cookie) +{ + u64 __cookie = cookie; + + memcpy(extack->cookie, &__cookie, sizeof(__cookie)); + extack->cookie_len = sizeof(__cookie); +} + +void netlink_kernel_release(struct sock *sk); +int __netlink_change_ngroups(struct sock *sk, unsigned int groups); +int netlink_change_ngroups(struct sock *sk, unsigned int groups); +void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); +void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, + const struct netlink_ext_ack *extack); +int netlink_has_listeners(struct sock *sk, unsigned int group); + +int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); +int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, + __u32 group, gfp_t allocation); +int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, + __u32 portid, __u32 group, gfp_t allocation, + int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), + void *filter_data); +int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code); +int netlink_register_notifier(struct notifier_block *nb); +int 
netlink_unregister_notifier(struct notifier_block *nb); /* finegrained unicast helpers: */ struct sock *netlink_getsockbyfilp(struct file *filp); @@ -203,7 +212,7 @@ struct netlink_dump_control { u16 min_dump_alloc; }; -extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, +int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, struct netlink_dump_control *control); static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, @@ -222,8 +231,8 @@ struct netlink_tap { struct list_head list; }; -extern int netlink_add_tap(struct netlink_tap *nt); -extern int netlink_remove_tap(struct netlink_tap *nt); +int netlink_add_tap(struct netlink_tap *nt); +int netlink_remove_tap(struct netlink_tap *nt); bool __netlink_ns_capable(const struct netlink_skb_parms *nsp, struct user_namespace *ns, int cap); diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 08f9247e9827..9003e29cde46 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; } void watchdog_nmi_stop(void); void watchdog_nmi_start(void); int watchdog_nmi_probe(void); +int watchdog_nmi_enable(unsigned int cpu); +void watchdog_nmi_disable(unsigned int cpu); /** * touch_nmi_watchdog - restart NMI watchdog timeout. diff --git a/include/linux/objagg.h b/include/linux/objagg.h new file mode 100644 index 000000000000..34f38c186ea0 --- /dev/null +++ b/include/linux/objagg.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */ + +#ifndef _OBJAGG_H +#define _OBJAGG_H + +struct objagg_ops { + size_t obj_size; + void * (*delta_create)(void *priv, void *parent_obj, void *obj); + void (*delta_destroy)(void *priv, void *delta_priv); + void * (*root_create)(void *priv, void *obj); + void (*root_destroy)(void *priv, void *root_priv); +}; + +struct objagg; +struct objagg_obj; + +const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj); +const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj); +const void *objagg_obj_raw(const struct objagg_obj *objagg_obj); + +struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj); +void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj); +struct objagg *objagg_create(const struct objagg_ops *ops, void *priv); +void objagg_destroy(struct objagg *objagg); + +struct objagg_obj_stats { + unsigned int user_count; + unsigned int delta_user_count; /* includes delta object users */ +}; + +struct objagg_obj_stats_info { + struct objagg_obj_stats stats; + struct objagg_obj *objagg_obj; /* associated object */ + bool is_root; +}; + +struct objagg_stats { + unsigned int stats_info_count; + struct objagg_obj_stats_info stats_info[]; +}; + +const struct objagg_stats *objagg_stats_get(struct objagg *objagg); +void objagg_stats_put(const struct objagg_stats *objagg_stats); + +#endif diff --git a/include/linux/of.h b/include/linux/of.h index a5aee3c438ad..0fe5bef81a7e 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -66,7 +66,6 @@ struct device_node { unsigned long _flags; void *data; #if defined(CONFIG_SPARC) - const char *path_component_name; unsigned int unique_id; struct of_irq_controller *irq_trans; #endif diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index b9cd9ebdf9b9..a713e5d156d8 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -76,6 +76,7 @@ extern int 
early_init_dt_scan_memory(unsigned long node, const char *uname, extern int early_init_dt_scan_chosen_stdout(void); extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_reserve_self(void); +extern void __init early_init_dt_scan_chosen_arch(unsigned long node); extern void early_init_dt_add_memory_arch(u64 base, u64 size); extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, diff --git a/include/linux/of_net.h b/include/linux/of_net.h index 90d81ee9e6a0..9cd72aab76fe 100644 --- a/include/linux/of_net.h +++ b/include/linux/of_net.h @@ -13,7 +13,6 @@ struct net_device; extern int of_get_phy_mode(struct device_node *np); extern const void *of_get_mac_address(struct device_node *np); -extern int of_get_nvmem_mac_address(struct device_node *np, void *addr); extern struct net_device *of_find_net_device_by_node(struct device_node *np); #else static inline int of_get_phy_mode(struct device_node *np) @@ -26,11 +25,6 @@ static inline const void *of_get_mac_address(struct device_node *np) return NULL; } -static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr) -{ - return -ENODEV; -} - static inline struct net_device *of_find_net_device_by_node(struct device_node *np) { return NULL; diff --git a/include/linux/pci.h b/include/linux/pci.h index 11c71c4ecf75..51a5a5217667 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1960,7 +1960,11 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); int pcibios_add_device(struct pci_dev *dev); void pcibios_release_device(struct pci_dev *dev); +#ifdef CONFIG_PCI void pcibios_penalize_isa_irq(int irq, int active); +#else +static inline void pcibios_penalize_isa_irq(int irq, int active) {} +#endif int pcibios_alloc_irq(struct pci_dev *dev); void pcibios_free_irq(struct pci_dev *dev); resource_size_t pcibios_default_alignment(void); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 69f0abe1ba1a..d86d5a2477fc 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -545,6 +545,9 @@ #define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583 #define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584 +#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 +#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb +#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493 #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 @@ -2359,6 +2362,8 @@ #define PCI_VENDOR_ID_SYNOPSYS 0x16c3 +#define PCI_VENDOR_ID_USR 0x16ec + #define PCI_VENDOR_ID_VITESSE 0x1725 #define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174 diff --git a/include/linux/pe.h b/include/linux/pe.h index 143ce75be5f0..3482b18a48b5 100644 --- a/include/linux/pe.h +++ b/include/linux/pe.h @@ -166,7 +166,7 @@ struct mz_hdr { uint16_t oem_info; /* oem specific */ uint16_t reserved1[10]; /* reserved */ uint32_t peaddr; /* address of pe header */ - char message[64]; /* message to print */ + char message[]; /* message to print */ }; struct mz_reloc { diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 79b99d653e03..71b75643c432 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -41,7 +41,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * * cannot both change sem->state from readers_fast and start checking * counters while we are here. 
So if we see !sem->state, we know that * the writer won't be checking until we're past the preempt_enable() - * and that one the synchronize_sched() is done, the writer will see + * and that once the synchronize_rcu() is done, the writer will see * anything we did within this RCU-sched read-size critical section. */ __this_cpu_inc(*sem->read_count); diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index bf309ff6f244..4641e850b204 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -102,8 +102,10 @@ struct arm_pmu { int (*filter_match)(struct perf_event *event); int num_events; bool secure_access; /* 32-bit ARM only */ -#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 +#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); +#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000 + DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; struct hlist_node node; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 53c500f0ca79..1d5c551a5add 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -262,8 +262,8 @@ struct pmu { */ int capabilities; - int * __percpu pmu_disable_count; - struct perf_cpu_context * __percpu pmu_cpu_context; + int __percpu *pmu_disable_count; + struct perf_cpu_context __percpu *pmu_cpu_context; atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ int task_ctx_nr; int hrtimer_interval_ms; diff --git a/include/linux/phy.h b/include/linux/phy.h index 3ea87f774a76..da039f211c22 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -58,6 +58,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) +extern const int phy_10_100_features_array[4]; +extern const int phy_basic_t1_features_array[2]; +extern const int phy_gbit_features_array[2]; +extern const int phy_10gbit_features_array[1]; + /* * Set phydev->irq to PHY_POLL if interrupts are not supported, * or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if @@ -66,9 +71,8 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini #define PHY_POLL -1 #define PHY_IGNORE_INTERRUPT -2 -#define PHY_HAS_INTERRUPT 0x00000001 -#define PHY_IS_INTERNAL 0x00000002 -#define PHY_RST_AFTER_CLK_EN 0x00000004 +#define PHY_IS_INTERNAL 0x00000001 +#define PHY_RST_AFTER_CLK_EN 0x00000002 #define MDIO_DEVICE_IS_PHY 0x80000000 /* Interface Mode definitions */ @@ -178,7 +182,6 @@ static inline const char *phy_modes(phy_interface_t interface) #define PHY_INIT_TIMEOUT 100000 #define PHY_STATE_TIME 1 #define PHY_FORCE_TIMEOUT 10 -#define PHY_AN_TIMEOUT 10 #define PHY_MAX_ADDR 32 @@ -264,57 +267,27 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev) void devm_mdiobus_free(struct device *dev, struct mii_bus *bus); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); -#define PHY_INTERRUPT_DISABLED 0x0 -#define PHY_INTERRUPT_ENABLED 0x80000000 +#define PHY_INTERRUPT_DISABLED false +#define PHY_INTERRUPT_ENABLED true /* PHY state machine states: * * DOWN: PHY device and driver are not ready for anything. probe * should be called if and only if the PHY is in this state, * given that the PHY device exists. 
- * - PHY driver probe function will, depending on the PHY, set - * the state to STARTING or READY - * - * STARTING: PHY device is coming up, and the ethernet driver is - * not ready. PHY drivers may set this in the probe function. - * If they do, they are responsible for making sure the state is - * eventually set to indicate whether the PHY is UP or READY, - * depending on the state when the PHY is done starting up. - * - PHY driver will set the state to READY - * - start will set the state to PENDING + * - PHY driver probe function will set the state to READY * * READY: PHY is ready to send and receive packets, but the * controller is not. By default, PHYs which do not implement - * probe will be set to this state by phy_probe(). If the PHY - * driver knows the PHY is ready, and the PHY state is STARTING, - * then it sets this STATE. + * probe will be set to this state by phy_probe(). * - start will set the state to UP * - * PENDING: PHY device is coming up, but the ethernet driver is - * ready. phy_start will set this state if the PHY state is - * STARTING. - * - PHY driver will set the state to UP when the PHY is ready - * * UP: The PHY and attached device are ready to do work. * Interrupts should be started here. - * - timer moves to AN - * - * AN: The PHY is currently negotiating the link state. Link is - * therefore down for now. phy_timer will set this state when it - * detects the state is UP. config_aneg will set this state - * whenever called with phydev->autoneg set to AUTONEG_ENABLE. - * - If autonegotiation finishes, but there's no link, it sets - * the state to NOLINK. - * - If aneg finishes with link, it sets the state to RUNNING, - * and calls adjust_link - * - If autonegotiation did not finish after an arbitrary amount - * of time, autonegotiation should be tried again if the PHY - * supports "magic" autonegotiation (back to AN) - * - If it didn't finish, and no magic_aneg, move to FORCING. + * - timer moves to NOLINK or RUNNING * * NOLINK: PHY is up, but not currently plugged in. - * - If the timer notes that the link comes back, we move to RUNNING - * - config_aneg moves to AN + * - irq or timer will set RUNNING if link comes back * - phy_stop moves to HALTED * * FORCING: PHY is being configured with forced settings @@ -325,11 +298,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); * * RUNNING: PHY is currently up, running, and possibly sending * and/or receiving packets - * - timer will set CHANGELINK if we're polling (this ensures the - * link state is polled every other cycle of this state machine, - * which makes it every other second) - * - irq will set CHANGELINK - * - config_aneg will set AN + * - irq or timer will set NOLINK if link goes down * - phy_stop moves to HALTED * * CHANGELINK: PHY experienced a change in link state @@ -349,16 +318,13 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); */ enum phy_state { PHY_DOWN = 0, - PHY_STARTING, PHY_READY, - PHY_PENDING, + PHY_HALTED, PHY_UP, - PHY_AN, PHY_RUNNING, PHY_NOLINK, PHY_FORCING, PHY_CHANGELINK, - PHY_HALTED, PHY_RESUMING }; @@ -390,7 +356,6 @@ struct phy_c45_device_ids { * giving up on the current attempt at acquiring a link * irq: IRQ number of the PHY's interrupt (-1 if none) * phy_timer: The timer for handling the state machine - * phy_queue: A work_queue for the phy_mac_interrupt * attached_dev: The attached enet driver's device instance ptr * adjust_link: Callback for the enet controller to respond to * changes in the link state. 
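The reworked state machine documented above drops the old STARTING, PENDING and AN states, so a network driver only ever moves a PHY through DOWN -> READY -> UP -> RUNNING/NOLINK -> HALTED via the usual phylib entry points. The following is a minimal, illustrative sketch of that flow; the bus id, device names and callbacks are assumptions made up for the example, not part of this change.

	/* Illustrative only: hypothetical MAC driver walking the simplified
	 * PHY states. phy_connect() leaves the PHY in READY, phy_start()
	 * takes it to UP (then RUNNING or NOLINK via irq/timer), and
	 * phy_stop() takes it to HALTED.
	 */
	#include <linux/err.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static void myeth_adjust_link(struct net_device *ndev)
	{
		/* Called by phylib on RUNNING/NOLINK transitions */
		phy_print_status(ndev->phydev);
	}

	static int myeth_open(struct net_device *ndev)
	{
		struct phy_device *phydev;

		/* "mdio-bus:01" is a made-up MDIO bus id */
		phydev = phy_connect(ndev, "mdio-bus:01", myeth_adjust_link,
				     PHY_INTERFACE_MODE_RGMII);
		if (IS_ERR(phydev))
			return PTR_ERR(phydev);

		phy_start(phydev);		/* READY -> UP */
		return 0;
	}

	static int myeth_stop(struct net_device *ndev)
	{
		phy_stop(ndev->phydev);		/* -> HALTED */
		phy_disconnect(ndev->phydev);
		return 0;
	}
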
@@ -427,6 +392,9 @@ struct phy_device { /* The most recently read link state */ unsigned link:1; + /* Interrupts are enabled */ + unsigned interrupts:1; + enum phy_state state; u32 dev_flags; @@ -442,14 +410,11 @@ struct phy_device { int pause; int asym_pause; - /* Enabled Interrupts */ - u32 interrupts; - - /* Union of PHY and Attached devices' supported modes */ - /* See mii.h for more info */ - u32 supported; - u32 advertising; - u32 lp_advertising; + /* Union of PHY and Attached devices' supported link modes */ + /* See ethtool.h for more info */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); /* Energy efficient ethernet modes which should be prohibited */ u32 eee_broken_modes; @@ -475,7 +440,6 @@ struct phy_device { void *priv; /* Interrupt and Polling infrastructure */ - struct work_struct phy_queue; struct delayed_work state_queue; struct mutex lock; @@ -674,6 +638,10 @@ struct phy_driver { #define PHY_ANY_ID "MATCH ANY PHY" #define PHY_ANY_UID 0xffffffff +#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0) +#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4) +#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10) + /* A Structure for boards to register fixups with the PHY Lib */ struct phy_fixup { struct list_head list; @@ -697,9 +665,31 @@ struct phy_setting { const struct phy_setting * phy_lookup_setting(int speed, int duplex, const unsigned long *mask, - size_t maxbit, bool exact); + bool exact); size_t phy_speeds(unsigned int *speeds, size_t size, - unsigned long *mask, size_t maxbit); + unsigned long *mask); + +static inline bool __phy_is_started(struct phy_device *phydev) +{ + WARN_ON(!mutex_is_locked(&phydev->lock)); + + return phydev->state >= PHY_UP; +} + +/** + * phy_is_started - Convenience function to check whether PHY is started + * @phydev: The phy_device struct + */ +static inline bool phy_is_started(struct phy_device *phydev) +{ + bool started; + + mutex_lock(&phydev->lock); + started = __phy_is_started(phydev); + mutex_unlock(&phydev->lock); + + return started; +} void phy_resolve_aneg_linkmode(struct phy_device *phydev); @@ -1050,11 +1040,9 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner); int phy_drivers_register(struct phy_driver *new_driver, int n, struct module *owner); void phy_state_machine(struct work_struct *work); -void phy_change_work(struct work_struct *work); void phy_mac_interrupt(struct phy_device *phydev); void phy_start_machine(struct phy_device *phydev); void phy_stop_machine(struct phy_device *phydev); -void phy_trigger_machine(struct phy_device *phydev); int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd); diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index ee54453a40a0..9525567b1951 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -13,6 +13,7 @@ struct fixed_phy_status { struct device_node; #if IS_ENABLED(CONFIG_FIXED_PHY) +extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); extern int fixed_phy_add(unsigned int irq, int phy_id, struct fixed_phy_status *status, int link_gpio); @@ -47,6 +48,10 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, { return -ENODEV; } +static inline int fixed_phy_change_carrier(struct net_device *dev, bool 
new_carrier) +{ + return -EINVAL; +} #endif /* CONFIG_FIXED_PHY */ #endif /* __PHY_FIXED_H */ diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h index b37b05bfd1a6..4587ce362535 100644 --- a/include/linux/phy_led_triggers.h +++ b/include/linux/phy_led_triggers.h @@ -20,7 +20,7 @@ struct phy_device; #include <linux/leds.h> #include <linux/phy.h> -#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10 +#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11 #define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \ FIELD_SIZEOF(struct mdio_device, addr)+\ diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 85ad68f9206a..7fe80f1c7e08 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -79,6 +79,7 @@ struct davinci_mcasp_pdata { /* McASP specific fields */ int tdm_slots; u8 op_mode; + u8 dismod; u8 num_serializer; u8 *serial_dir; u8 version; diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h index f92a47e18034..a93841bfb9f7 100644 --- a/include/linux/platform_data/gpio-davinci.h +++ b/include/linux/platform_data/gpio-davinci.h @@ -17,6 +17,8 @@ #define __DAVINCI_GPIO_PLATFORM_H struct davinci_gpio_platform_data { + bool no_auto_base; + u32 base; u32 ngpio; u32 gpio_unbanked; }; diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h new file mode 100644 index 000000000000..13874fa6e767 --- /dev/null +++ b/include/linux/platform_data/mdio-gpio.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * MDIO-GPIO bus platform data structure + */ + +#ifndef __LINUX_MDIO_GPIO_PDATA_H +#define __LINUX_MDIO_GPIO_PDATA_H + +struct mdio_gpio_platform_data { + u32 phy_mask; + u32 phy_ignore_ta_mask; +}; + +#endif /* __LINUX_MDIO_GPIO_PDATA_H */ diff --git a/include/linux/pm.h b/include/linux/pm.h index e723b78d8357..0bd9de116826 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -26,6 +26,7 @@ #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/timer.h> +#include <linux/hrtimer.h> #include <linux/completion.h> /* @@ -608,7 +609,7 @@ struct dev_pm_info { unsigned int should_wakeup:1; #endif #ifdef CONFIG_PM - struct timer_list suspend_timer; + struct hrtimer suspend_timer; unsigned long timer_expires; struct work_struct work; wait_queue_head_t wait_queue; @@ -631,7 +632,7 @@ struct dev_pm_info { enum rpm_status runtime_status; int runtime_error; int autosuspend_delay; - unsigned long last_busy; + u64 last_busy; unsigned long active_jiffies; unsigned long suspended_jiffies; unsigned long accounting_timestamp; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 3b5d7280e52e..dd364abb649a 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -73,6 +73,7 @@ struct genpd_power_state { struct genpd_lock_ops; struct dev_pm_opp; +struct opp_table; struct generic_pm_domain { struct device dev; @@ -94,6 +95,7 @@ struct generic_pm_domain { unsigned int performance_state; /* Aggregated max performance state */ int (*power_off)(struct generic_pm_domain *domain); int (*power_on)(struct generic_pm_domain *domain); + struct opp_table *opp_table; /* OPP table of the genpd */ unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd, struct dev_pm_opp *opp); int (*set_performance_state)(struct generic_pm_domain *genpd, @@ -134,6 +136,10 @@ struct gpd_link { struct list_head master_node; struct generic_pm_domain *slave; struct 
list_head slave_node; + + /* Sub-domain's per-master domain performance state */ + unsigned int performance_state; + unsigned int prev_performance_state; }; struct gpd_timing_data { @@ -258,8 +264,8 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent, struct generic_pm_domain *of_genpd_remove_last(struct device_node *np); int of_genpd_parse_idle_states(struct device_node *dn, struct genpd_power_state **states, int *n); -unsigned int of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np); +unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp); int genpd_dev_pm_attach(struct device *dev); struct device *genpd_dev_pm_attach_by_id(struct device *dev, @@ -300,8 +306,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn, } static inline unsigned int -of_genpd_opp_to_performance_state(struct device *dev, - struct device_node *np) +pm_genpd_opp_to_performance_state(struct device *genpd_dev, + struct dev_pm_opp *opp) { return 0; } diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 5d399eeef172..0a2a88e5a383 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -126,6 +126,9 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name); void dev_pm_opp_put_clkname(struct opp_table *opp_table); struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data)); void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table); +struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index); +void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev); +int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); @@ -272,6 +275,18 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {} +static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index) +{ + return ERR_PTR(-ENOTSUPP); +} + +static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {} + +static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate) +{ + return -ENOTSUPP; +} + static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { return -ENOTSUPP; @@ -305,8 +320,8 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); -struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np); struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); +int of_get_required_opp_performance_state(struct device_node *np, int index); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { @@ -341,13 +356,13 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device return 
NULL; } -static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np) +static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) { return NULL; } -static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) +static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { - return NULL; + return -ENOTSUPP; } #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index f0fc4700b6ff..54af4eef169f 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -51,7 +51,7 @@ extern void pm_runtime_no_callbacks(struct device *dev); extern void pm_runtime_irq_safe(struct device *dev); extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); -extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); +extern u64 pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); @@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) static inline void pm_runtime_mark_last_busy(struct device *dev) { - WRITE_ONCE(dev->power.last_busy, jiffies); + WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); } static inline bool pm_runtime_is_irq_safe(struct device *dev) @@ -168,7 +168,7 @@ static inline void __pm_runtime_use_autosuspend(struct device *dev, bool use) {} static inline void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) {} -static inline unsigned long pm_runtime_autosuspend_expiration( +static inline u64 pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h index 7b81dad712de..d0b37e937037 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * OMAP Smartreflex Defines and Routines * @@ -11,10 +12,6 @@ * * Copyright (C) 2007 Texas Instruments, Inc. * Lesly A M <x0080970@ti.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __POWER_SMARTREFLEX_H @@ -303,9 +300,6 @@ void omap_sr_enable(struct voltagedomain *voltdm); void omap_sr_disable(struct voltagedomain *voltdm); void omap_sr_disable_reset_volt(struct voltagedomain *voltdm); -/* API to register the pmic specific data with the smartreflex driver. 
*/ -void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data); - /* Smartreflex driver hooks to be called from Smartreflex class driver */ int sr_enable(struct omap_sr *sr, unsigned long volt); void sr_disable(struct omap_sr *sr); @@ -320,7 +314,5 @@ static inline void omap_sr_enable(struct voltagedomain *voltdm) {} static inline void omap_sr_disable(struct voltagedomain *voltdm) {} static inline void omap_sr_disable_reset_volt( struct voltagedomain *voltdm) {} -static inline void omap_sr_register_pmic( - struct omap_sr_pmic_data *pmic_data) {} #endif #endif diff --git a/include/linux/preempt.h b/include/linux/preempt.h index c01813c3fbe9..dd92b1a93919 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -53,9 +53,6 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -/* We use the MSB mostly because its available */ -#define PREEMPT_NEED_RESCHED 0x80000000 - #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /* diff --git a/include/linux/printk.h b/include/linux/printk.h index cf3eccfe1543..55aa96975fa2 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -166,11 +166,6 @@ int vprintk_emit(int facility, int level, asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); -asmlinkage __printf(5, 6) __cold -int printk_emit(int facility, int level, - const char *dict, size_t dictlen, - const char *fmt, ...); - asmlinkage __printf(1, 2) __cold int printk(const char *fmt, ...); diff --git a/include/linux/property.h b/include/linux/property.h index ac8a1ebc4c1b..3789ec755fb6 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -311,4 +311,16 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port, int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint); +/* -------------------------------------------------------------------------- */ +/* Software fwnode support - when HW description is incomplete or missing */ + +bool is_software_node(const struct fwnode_handle *fwnode); + +int software_node_notify(struct device *dev, unsigned long action); + +struct fwnode_handle * +fwnode_create_software_node(const struct property_entry *properties, + const struct fwnode_handle *parent); +void fwnode_remove_software_node(struct fwnode_handle *fwnode); + #endif /* _LINUX_PROPERTY_H_ */ diff --git a/include/linux/psi.h b/include/linux/psi.h index 8e0725aac0aa..7006008d5b72 100644 --- a/include/linux/psi.h +++ b/include/linux/psi.h @@ -1,6 +1,7 @@ #ifndef _LINUX_PSI_H #define _LINUX_PSI_H +#include <linux/jump_label.h> #include <linux/psi_types.h> #include <linux/sched.h> @@ -9,7 +10,7 @@ struct css_set; #ifdef CONFIG_PSI -extern bool psi_disabled; +extern struct static_key_false psi_disabled; void psi_init(void); diff --git a/include/linux/pstore.h b/include/linux/pstore.h index a15bc4d48752..b146181e8709 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -26,27 +26,38 @@ #include <linux/errno.h> #include <linux/kmsg_dump.h> #include <linux/mutex.h> -#include <linux/spinlock.h> +#include <linux/semaphore.h> #include <linux/time.h> #include <linux/types.h> struct module; -/* pstore record types (see fs/pstore/inode.c for filename templates) */ +/* + * pstore record types (see fs/pstore/platform.c for pstore_type_names[]) + * These values may be written to storage (see EFI vars backend), so + * they are kind of an ABI. Be careful changing the mappings. 
+ */ enum pstore_type_id { + /* Frontend storage types */ PSTORE_TYPE_DMESG = 0, PSTORE_TYPE_MCE = 1, PSTORE_TYPE_CONSOLE = 2, PSTORE_TYPE_FTRACE = 3, - /* PPC64 partition types */ + + /* PPC64-specific partition types */ PSTORE_TYPE_PPC_RTAS = 4, PSTORE_TYPE_PPC_OF = 5, PSTORE_TYPE_PPC_COMMON = 6, PSTORE_TYPE_PMSG = 7, PSTORE_TYPE_PPC_OPAL = 8, - PSTORE_TYPE_UNKNOWN = 255 + + /* End of the list */ + PSTORE_TYPE_MAX }; +const char *pstore_type_to_name(enum pstore_type_id type); +enum pstore_type_id pstore_name_to_type(const char *name); + struct pstore_info; /** * struct pstore_record - details of a pstore record entry @@ -85,12 +96,15 @@ struct pstore_record { /** * struct pstore_info - backend pstore driver structure * - * @owner: module which is repsonsible for this backend driver + * @owner: module which is responsible for this backend driver * @name: name of the backend driver * - * @buf_lock: spinlock to serialize access to @buf + * @buf_lock: semaphore to serialize access to @buf * @buf: preallocated crash dump buffer - * @bufsize: size of @buf available for crash dump writes + * @bufsize: size of @buf available for crash dump bytes (must match + * smallest number of bytes available for writing to a + * backend entry, since compressed bytes don't take kindly + * to being truncated) * * @read_mutex: serializes @open, @read, @close, and @erase callbacks * @flags: bitfield of frontends the backend can accept writes for @@ -170,7 +184,7 @@ struct pstore_info { struct module *owner; char *name; - spinlock_t buf_lock; + struct semaphore buf_lock; char *buf; size_t bufsize; @@ -189,14 +203,13 @@ struct pstore_info { }; /* Supported frontends */ -#define PSTORE_FLAGS_DMESG (1 << 0) -#define PSTORE_FLAGS_CONSOLE (1 << 1) -#define PSTORE_FLAGS_FTRACE (1 << 2) -#define PSTORE_FLAGS_PMSG (1 << 3) +#define PSTORE_FLAGS_DMESG BIT(0) +#define PSTORE_FLAGS_CONSOLE BIT(1) +#define PSTORE_FLAGS_FTRACE BIT(2) +#define PSTORE_FLAGS_PMSG BIT(3) extern int pstore_register(struct pstore_info *); extern void pstore_unregister(struct pstore_info *); -extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason); struct pstore_ftrace_record { unsigned long ip; diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h index 602d64725222..337971c41980 100644 --- a/include/linux/pstore_ram.h +++ b/include/linux/pstore_ram.h @@ -22,6 +22,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> +#include <linux/pstore.h> #include <linux/types.h> /* @@ -30,6 +31,11 @@ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required. */ #define PRZ_FLAG_NO_LOCK BIT(0) +/* + * If a PRZ should only have a single-boot lifetime, this marks it as + * getting wiped after its contents get copied out after boot. 
+ */ +#define PRZ_FLAG_ZAP_OLD BIT(1) struct persistent_ram_buffer; struct rs_control; @@ -42,17 +48,55 @@ struct persistent_ram_ecc_info { uint16_t *par; }; +/** + * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ) + * used as a pstore backend + * + * @paddr: physical address of the mapped RAM area + * @size: size of mapping + * @label: unique name of this PRZ + * @type: frontend type for this PRZ + * @flags: holds PRZ_FLAGS_* bits + * + * @buffer_lock: + * locks access to @buffer "size" bytes and "start" offset + * @buffer: + * pointer to actual RAM area managed by this PRZ + * @buffer_size: + * bytes in @buffer->data (not including any trailing ECC bytes) + * + * @par_buffer: + * pointer into @buffer->data containing ECC bytes for @buffer->data + * @par_header: + * pointer into @buffer->data containing ECC bytes for @buffer header + * (i.e. all fields up to @data) + * @rs_decoder: + * RSLIB instance for doing ECC calculations + * @corrected_bytes: + * ECC corrected bytes accounting since boot + * @bad_blocks: + * ECC uncorrectable bytes accounting since boot + * @ecc_info: + * ECC configuration details + * + * @old_log: + * saved copy of @buffer->data prior to most recent wipe + * @old_log_size: + * bytes contained in @old_log + * + */ struct persistent_ram_zone { phys_addr_t paddr; size_t size; void *vaddr; char *label; - struct persistent_ram_buffer *buffer; - size_t buffer_size; + enum pstore_type_id type; u32 flags; + raw_spinlock_t buffer_lock; + struct persistent_ram_buffer *buffer; + size_t buffer_size; - /* ECC correction */ char *par_buffer; char *par_header; struct rs_control *rs_decoder; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 51349d124ee5..7121bbe76979 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -39,6 +39,15 @@ struct ptp_clock_request { }; struct system_device_crosststamp; + +/** + * struct ptp_system_timestamp - system time corresponding to a PHC timestamp + */ +struct ptp_system_timestamp { + struct timespec64 pre_ts; + struct timespec64 post_ts; +}; + /** * struct ptp_clock_info - decribes a PTP hardware clock * @@ -73,8 +82,18 @@ struct system_device_crosststamp; * parameter delta: Desired change in nanoseconds. * * @gettime64: Reads the current time from the hardware clock. + * This method is deprecated. New drivers should implement + * the @gettimex64 method instead. * parameter ts: Holds the result. * + * @gettimex64: Reads the current time from the hardware clock and optionally + * also the system clock. + * parameter ts: Holds the PHC timestamp. + * parameter sts: If not NULL, it holds a pair of timestamps from + * the system clock. The first reading is made right before + * reading the lowest bits of the PHC timestamp and the second + * reading immediately follows that. + * * @getcrosststamp: Reads the current time from the hardware clock and * system clock simultaneously. 
* parameter cts: Contains timestamp (device,system) pair, @@ -124,6 +143,8 @@ struct ptp_clock_info { int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); + int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts); int (*getcrosststamp)(struct ptp_clock_info *ptp, struct system_device_crosststamp *cts); int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); @@ -247,4 +268,16 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, #endif +static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts) +{ + if (sts) + ktime_get_real_ts64(&sts->pre_ts); +} + +static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts) +{ + if (sts) + ktime_get_real_ts64(&sts->post_ts); +} + #endif diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 6c2ffed907f5..edb9b040c94c 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); #define PTRACE_MODE_NOAUDIT 0x04 #define PTRACE_MODE_FSCREDS 0x08 #define PTRACE_MODE_REALCREDS 0x10 -#define PTRACE_MODE_SCHED 0x20 -#define PTRACE_MODE_IBPB 0x40 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */ #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS) #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS) #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS) -#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB) /** * ptrace_may_access - check whether the caller is permitted to access @@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead); */ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); -/** - * ptrace_may_access - check whether the caller is permitted to access - * a target task. - * @task: target task - * @mode: selects type of access and caller credentials - * - * Returns true on success, false on denial. - * - * Similar to ptrace_may_access(). Only to be called from context switch - * code. Does not call into audit and the regular LSM hooks due to locking - * constraints. - */ -extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode); - static inline int ptrace_reparented(struct task_struct *child) { return !same_thread_group(child->real_parent, child->parent); @@ -428,4 +411,5 @@ extern int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc); +extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 56518adc31dd..d5199b507d79 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -349,42 +349,6 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns, } /** - * pwm_set_polarity() - configure the polarity of a PWM signal - * @pwm: PWM device - * @polarity: new polarity of the PWM signal - * - * Note that the polarity cannot be configured while the PWM device is - * enabled. - * - * Returns: 0 on success or a negative error code on failure. 
- */ -static inline int pwm_set_polarity(struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - struct pwm_state state; - - if (!pwm) - return -EINVAL; - - pwm_get_state(pwm, &state); - if (state.polarity == polarity) - return 0; - - /* - * Changing the polarity of a running PWM without adjusting the - * dutycycle/period value is a bit risky (can introduce glitches). - * Return -EBUSY in this case. - * Note that this is allowed when using pwm_apply_state() because - * the user specifies all the parameters. - */ - if (state.enabled) - return -EBUSY; - - state.polarity = polarity; - return pwm_apply_state(pwm, &state); -} - -/** * pwm_enable() - start a PWM output toggling * @pwm: PWM device * @@ -483,12 +447,6 @@ static inline int pwm_capture(struct pwm_device *pwm, return -EINVAL; } -static inline int pwm_set_polarity(struct pwm_device *pwm, - enum pwm_polarity polarity) -{ - return -ENOTSUPP; -} - static inline int pwm_enable(struct pwm_device *pwm) { return -EINVAL; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index a47321a0d572..91c536a01b56 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -47,6 +47,7 @@ #include <linux/slab.h> #include <linux/qed/common_hsi.h> #include <linux/qed/qed_chain.h> +#include <linux/io-64-nonatomic-lo-hi.h> enum dcbx_protocol_type { DCBX_PROTOCOL_ISCSI, @@ -448,11 +449,24 @@ struct qed_mfw_tlv_iscsi { bool tx_bytes_set; }; +enum qed_db_rec_width { + DB_REC_WIDTH_32B, + DB_REC_WIDTH_64B, +}; + +enum qed_db_rec_space { + DB_REC_KERNEL, + DB_REC_USER, +}; + #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \ (void __iomem *)(reg_addr)) #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) +#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \ + (void __iomem *)(reg_addr)) + #define QED_COALESCE_MAX 0x1FF #define QED_DEFAULT_RX_USECS 12 #define QED_DEFAULT_TX_USECS 48 @@ -1015,6 +1029,33 @@ struct qed_common_ops { */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); +/** + * @brief db_recovery_add - add doorbell information to the doorbell + * recovery mechanism. + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address of where db_data is stored + * @param db_is_32b - doorbell is 32b pr 64b + * @param db_is_user - doorbell recovery addresses are user or kernel space + */ + int (*db_recovery_add)(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space); + +/** + * @brief db_recovery_del - remove doorbell information from the doorbell + * recovery mechanism. db_data serves as key (db_addr is not unique). + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address where db_data is stored. Serves as key for the + * entry to delete. + */ + int (*db_recovery_del)(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data); /** * @brief update_drv_state - API to inform the change in the driver state. diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index 8a16c3eb3dd0..c0578ba23c1a 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -31,21 +31,4 @@ do { \ #define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) -/** - * synchronize_rcu_mult - Wait concurrently for multiple grace periods - * @...: List of call_rcu() functions for different grace periods to wait on - * - * This macro waits concurrently for multiple types of RCU grace periods. 
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait - * on concurrent RCU and RCU-tasks grace periods. Waiting on a give SRCU - * domain requires you to write a wrapper function for that SRCU domain's - * call_srcu() function, supplying the corresponding srcu_struct. - * - * If Tiny RCU, tell _wait_rcu_gp() does not bother waiting for RCU, - * given that anywhere synchronize_rcu_mult() can be called is automatically - * a grace period. - */ -#define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) - #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ diff --git a/include/linux/regmap.h b/include/linux/regmap.h index a367d59c301d..1781b6cb793c 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1089,27 +1089,48 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id, int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id, unsigned int mask, unsigned int val, bool *change, bool async, bool force); +/** + * struct regmap_irq_type - IRQ type definitions. + * + * @type_reg_offset: Offset register for the irq type setting. + * @type_rising_val: Register value to configure RISING type irq. + * @type_falling_val: Register value to configure FALLING type irq. + * @type_level_low_val: Register value to configure LEVEL_LOW type irq. + * @type_level_high_val: Register value to configure LEVEL_HIGH type irq. + * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types. + */ +struct regmap_irq_type { + unsigned int type_reg_offset; + unsigned int type_reg_mask; + unsigned int type_rising_val; + unsigned int type_falling_val; + unsigned int type_level_low_val; + unsigned int type_level_high_val; + unsigned int types_supported; +}; /** * struct regmap_irq - Description of an IRQ for the generic regmap irq_chip. * * @reg_offset: Offset of the status/mask register within the bank * @mask: Mask used to flag/control the register. - * @type_reg_offset: Offset register for the irq type setting. - * @type_rising_mask: Mask bit to configure RISING type irq. - * @type_falling_mask: Mask bit to configure FALLING type irq. + * @type: IRQ trigger type setting details if supported. */ struct regmap_irq { unsigned int reg_offset; unsigned int mask; - unsigned int type_reg_offset; - unsigned int type_rising_mask; - unsigned int type_falling_mask; + struct regmap_irq_type type; }; #define REGMAP_IRQ_REG(_irq, _off, _mask) \ [_irq] = { .reg_offset = (_off), .mask = (_mask) } +#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \ + [_id] = { \ + .mask = BIT((_id) % (_reg_bits)), \ + .reg_offset = (_id) / (_reg_bits), \ + } + /** * struct regmap_irq_chip - Description of a generic regmap irq_chip. * @@ -1131,6 +1152,12 @@ struct regmap_irq { * @ack_invert: Inverted ack register: cleared bits for ack. * @wake_invert: Inverted wake register: cleared bits are wake enabled. * @type_invert: Invert the type flags. + * @type_in_mask: Use the mask registers for controlling irq type. For + * interrupts defining type_rising/falling_mask use mask_base + * for edge configuration and never update bits in type_base. + * @clear_on_unmask: For chips with interrupts cleared on read: read the status + * registers before unmasking interrupts to clear any bits + * set when they were masked. * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. 
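As a quick illustration of the per-IRQ trigger-type handling introduced above, a driver can now describe most lines with REGMAP_IRQ_REG_LINE() and attach a struct regmap_irq_type only where the trigger really is programmable. The following is a minimal sketch under assumptions of my own: the "foo" chip, its register addresses and its IRQ names are hypothetical and not taken from this patch.

#include <linux/bits.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

enum foo_irq {
	FOO_IRQ_ALARM,		/* fixed-trigger line */
	FOO_IRQ_PWR_FAIL,	/* fixed-trigger line */
	FOO_IRQ_GPIO0,		/* trigger type is programmable */
	FOO_NUM_IRQS,
};

static const struct regmap_irq foo_irqs[FOO_NUM_IRQS] = {
	/* mask and reg_offset derived from the line number and an 8-bit register */
	REGMAP_IRQ_REG_LINE(FOO_IRQ_ALARM, 8),
	REGMAP_IRQ_REG_LINE(FOO_IRQ_PWR_FAIL, 8),
	/* open-coded entry carrying the new per-IRQ type description */
	[FOO_IRQ_GPIO0] = {
		.reg_offset = 0,
		.mask = BIT(FOO_IRQ_GPIO0),
		.type = {
			.type_reg_offset     = 0,
			.type_rising_val     = 0x1,
			.type_falling_val    = 0x2,
			.type_level_high_val = 0x4,
			.type_level_low_val  = 0x8,
			.types_supported     = IRQ_TYPE_EDGE_BOTH |
					       IRQ_TYPE_LEVEL_MASK,
		},
	},
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name		 = "foo",
	.irqs		 = foo_irqs,
	.num_irqs	 = ARRAY_SIZE(foo_irqs),
	.num_regs	 = 1,
	.num_type_reg	 = 1,
	.status_base	 = 0x10,
	.mask_base	 = 0x11,
	.type_base	 = 0x12,
	.clear_on_unmask = true,	/* read status before unmasking, per the new flag */
};

Chips whose edge configuration lives in the mask registers themselves would instead set the new type_in_mask flag and leave type_base alone, as described in the kernel-doc that follows.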
@@ -1169,6 +1196,8 @@ struct regmap_irq_chip { bool wake_invert:1; bool runtime_pm:1; bool type_invert:1; + bool type_in_mask:1; + bool clear_on_unmask:1; int num_regs; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 25602afd4844..f3f76051e8b0 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -508,7 +508,7 @@ static inline int regulator_get_error_flags(struct regulator *regulator, static inline int regulator_set_load(struct regulator *regulator, int load_uA) { - return REGULATOR_MODE_NORMAL; + return 0; } static inline int regulator_allow_bypass(struct regulator *regulator, diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index a9c030192147..389bcaf7900f 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -15,11 +15,12 @@ #ifndef __LINUX_REGULATOR_DRIVER_H_ #define __LINUX_REGULATOR_DRIVER_H_ -#define MAX_COUPLED 4 +#define MAX_COUPLED 2 #include <linux/device.h> #include <linux/notifier.h> #include <linux/regulator/consumer.h> +#include <linux/ww_mutex.h> struct gpio_desc; struct regmap; @@ -462,7 +463,7 @@ struct regulator_dev { struct coupling_desc coupling_desc; struct blocking_notifier_head notifier; - struct mutex mutex; /* consumer lock */ + struct ww_mutex mutex; /* consumer lock */ struct task_struct *mutex_owner; int ref_cnt; struct module *owner; @@ -473,7 +474,6 @@ struct regulator_dev { struct regmap *regmap; struct delayed_work disable_work; - int deferred_disables; void *reg_data; /* regulator_dev data */ @@ -545,4 +545,7 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, bool enable); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); +void regulator_lock(struct regulator_dev *rdev); +void regulator_unlock(struct regulator_dev *rdev); + #endif diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index a459a5e973a7..1d34a70ffda2 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -158,6 +158,9 @@ struct regulation_constraints { /* used for coupled regulators */ int max_spread; + /* used for changing voltage in steps */ + int max_uV_step; + /* valid regulator operating modes for this machine */ unsigned int valid_modes_mask; diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h index cb5aecd40f07..331d7d940c7a 100644 --- a/include/linux/regulator/pfuze100.h +++ b/include/linux/regulator/pfuze100.h @@ -33,7 +33,8 @@ #define PFUZE100_VGEN4 12 #define PFUZE100_VGEN5 13 #define PFUZE100_VGEN6 14 -#define PFUZE100_MAX_REGULATOR 15 +#define PFUZE100_COIN 15 +#define PFUZE100_MAX_REGULATOR 16 #define PFUZE200_SW1AB 0 #define PFUZE200_SW2 1 diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 02166e815afb..2f0ffca35780 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -68,7 +68,6 @@ struct reservation_object_list { * @seq: sequence count for managing RCU read-side synchronization * @fence_excl: the exclusive fence, if there is one currently * @fence: list of current shared fences - * @staged: staged copy of shared fences for RCU updates */ struct reservation_object { struct ww_mutex lock; @@ -76,7 +75,6 @@ struct reservation_object { struct dma_fence __rcu *fence_excl; struct reservation_object_list __rcu *fence; - struct reservation_object_list *staged; }; #define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) @@ -95,7 
+93,6 @@ reservation_object_init(struct reservation_object *obj) __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); - obj->staged = NULL; } /** @@ -124,7 +121,6 @@ reservation_object_fini(struct reservation_object *obj) kfree(fobj); } - kfree(obj->staged); ww_mutex_destroy(&obj->lock); } @@ -218,6 +214,11 @@ reservation_object_trylock(struct reservation_object *obj) static inline void reservation_object_unlock(struct reservation_object *obj) { +#ifdef CONFIG_DEBUG_MUTEXES + /* Test shared fence slot reservation */ + if (obj->fence) + obj->fence->shared_max = obj->fence->shared_count; +#endif ww_mutex_unlock(&obj->lock); } @@ -265,7 +266,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj) return fence; } -int reservation_object_reserve_shared(struct reservation_object *obj); +int reservation_object_reserve_shared(struct reservation_object *obj, + unsigned int num_fences); void reservation_object_add_shared_fence(struct reservation_object *obj, struct dma_fence *fence); diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index eb7111039247..20f9c6af7473 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -75,8 +75,19 @@ struct bucket_table { struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; +/* + * NULLS_MARKER() expects a hash value with the low + * bits mostly likely to be significant, and it discards + * the msb. + * We git it an address, in which the bottom 2 bits are + * always 0, and the msb might be significant. + * So we shift the address down one bit to align with + * expectations and avoid losing a significant bit. + */ +#define RHT_NULLS_MARKER(ptr) \ + ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1)) #define INIT_RHT_NULLS_HEAD(ptr) \ - ((ptr) = (typeof(ptr)) NULLS_MARKER(0)) + ((ptr) = RHT_NULLS_MARKER(&(ptr))) static inline bool rht_is_a_nulls(const struct rhash_head *ptr) { @@ -471,6 +482,7 @@ static inline struct rhash_head *__rhashtable_lookup( .ht = ht, .key = key, }; + struct rhash_head __rcu * const *head; struct bucket_table *tbl; struct rhash_head *he; unsigned int hash; @@ -478,13 +490,19 @@ static inline struct rhash_head *__rhashtable_lookup( tbl = rht_dereference_rcu(ht->tbl, ht); restart: hash = rht_key_hashfn(ht, tbl, key, params); - rht_for_each_rcu(he, tbl, hash) { - if (params.obj_cmpfn ? - params.obj_cmpfn(&arg, rht_obj(ht, he)) : - rhashtable_compare(&arg, rht_obj(ht, he))) - continue; - return he; - } + head = rht_bucket(tbl, hash); + do { + rht_for_each_rcu_continue(he, *head, tbl, hash) { + if (params.obj_cmpfn ? + params.obj_cmpfn(&arg, rht_obj(ht, he)) : + rhashtable_compare(&arg, rht_obj(ht, he))) + continue; + return he; + } + /* An object might have been moved to a different hash chain, + * while we walk along it - better check and retry. + */ + } while (he != RHT_NULLS_MARKER(head)); /* Ensure we see any new tables. */ smp_rmb(); diff --git a/include/linux/sched.h b/include/linux/sched.h index a51c13c2b1a0..89541d248893 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -176,7 +176,7 @@ struct task_group; * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING). * * However, with slightly different timing the wakeup TASK_RUNNING store can - * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not + * also collide with the TASK_UNINTERRUPTIBLE store. 
Losing that store is not * a problem either because that will result in one extra go around the loop * and our @cond test will save the day. * @@ -515,7 +515,7 @@ struct sched_dl_entity { /* * Actual scheduling parameters. Initialized with the values above, - * they are continously updated during task execution. Note that + * they are continuously updated during task execution. Note that * the remaining runtime could be < 0 in case we are in overrun. */ s64 runtime; /* Remaining runtime for this instance */ @@ -572,8 +572,10 @@ union rcu_special { struct { u8 blocked; u8 need_qs; + u8 exp_hint; /* Hint for performance. */ + u8 pad; /* No garbage from compiler! */ } b; /* Bits. */ - u16 s; /* Set of bits. */ + u32 s; /* Set of bits. */ }; enum perf_event_task_context { @@ -993,7 +995,7 @@ struct task_struct { /* cg_list protected by css_set_lock and tsk->alloc_lock: */ struct list_head cg_list; #endif -#ifdef CONFIG_INTEL_RDT +#ifdef CONFIG_RESCTRL u32 closid; u32 rmid; #endif @@ -1116,6 +1118,7 @@ struct task_struct { #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack: */ int curr_ret_stack; + int curr_ret_depth; /* Stack of return addresses for return function tracing: */ struct ftrace_ret_stack *ret_stack; @@ -1453,6 +1456,8 @@ static inline bool is_percpu_thread(void) #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ +#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ +#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ #define TASK_PFA_TEST(name, func) \ static inline bool task_##func(struct task_struct *p) \ @@ -1484,6 +1489,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) +TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) +TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) +TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) + +TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) +TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) + static inline void current_restore_flags(unsigned long orig_flags, unsigned long flags) { diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index 59667444669f..afa940cd50dc 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h @@ -20,6 +20,12 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, void (*func)(struct update_util_data *data, u64 time, unsigned int flags)); void cpufreq_remove_update_util_hook(int cpu); + +static inline unsigned long map_util_freq(unsigned long util, + unsigned long freq, unsigned long cap) +{ + return (freq + (freq >> 2)) * util / cap; +} #endif /* CONFIG_CPU_FREQ */ #endif /* _LINUX_SCHED_CPUFREQ_H */ diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index 4a6582c27dea..b0fb1446fe04 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -16,7 +16,7 @@ enum hk_flags { }; #ifdef CONFIG_CPU_ISOLATION -DECLARE_STATIC_KEY_FALSE(housekeeping_overriden); +DECLARE_STATIC_KEY_FALSE(housekeeping_overridden); extern int housekeeping_any_cpu(enum hk_flags flags); extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags); extern void housekeeping_affine(struct task_struct *t, enum hk_flags 
flags); @@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { } static inline bool housekeeping_cpu(int cpu, enum hk_flags flags) { #ifdef CONFIG_CPU_ISOLATION - if (static_branch_unlikely(&housekeeping_overriden)) + if (static_branch_unlikely(&housekeeping_overridden)) return housekeeping_test_cpu(cpu, flags); #endif return true; diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index aebb370a0006..3bfa6a0cbba4 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags) { /* * NOIO implies both NOIO and NOFS and it is a weaker context - * so always make sure it makes precendence + * so always make sure it makes precedence */ if (unlikely(current->flags & PF_MEMALLOC_NOIO)) flags &= ~(__GFP_IO | __GFP_FS); diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h new file mode 100644 index 000000000000..59d3736c454c --- /dev/null +++ b/include/linux/sched/smt.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SCHED_SMT_H +#define _LINUX_SCHED_SMT_H + +#include <linux/static_key.h> + +#ifdef CONFIG_SCHED_SMT +extern struct static_key_false sched_smt_present; + +static __always_inline bool sched_smt_active(void) +{ + return static_branch_likely(&sched_smt_present); +} +#else +static inline bool sched_smt_active(void) { return false; } +#endif + +void arch_smt_update(void); + +#endif diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h index f30954cc059d..568286411b43 100644 --- a/include/linux/sched/stat.h +++ b/include/linux/sched/stat.h @@ -8,7 +8,7 @@ * Various counters maintained by the scheduler and fork(), * exposed via /proc, sys.c or used by drivers via these APIs. * - * ( Note that all these values are aquired without locking, + * ( Note that all these values are acquired without locking, * so they can only be relied on in narrow circumstances. ) */ diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 6b9976180c1e..c31d3a47a47c 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -89,7 +89,6 @@ struct sched_domain { unsigned int newidle_idx; unsigned int wake_idx; unsigned int forkexec_idx; - unsigned int smt_gain; int nohz_idle; /* NOHZ IDLE status */ int flags; /* See SD_* */ @@ -202,6 +201,14 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl); # define SD_INIT_NAME(type) #endif +#ifndef arch_scale_cpu_capacity +static __always_inline +unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu) +{ + return SCHED_CAPACITY_SCALE; +} +#endif + #else /* CONFIG_SMP */ struct sched_domain_attr; @@ -217,6 +224,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu) return true; } +#ifndef arch_scale_cpu_capacity +static __always_inline +unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu) +{ + return SCHED_CAPACITY_SCALE; +} +#endif + #endif /* !CONFIG_SMP */ static inline int task_node(const struct task_struct *p) diff --git a/include/linux/sfp.h b/include/linux/sfp.h index d37518e89db2..d9d9de3fcf8e 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -224,7 +224,7 @@ struct sfp_eeprom_ext { * * See the SFF-8472 specification and related documents for the definition * of these structure members. 
This can be obtained from - * ftp://ftp.seagate.com/sff + * https://www.snia.org/technology-communities/sff/specifications */ struct sfp_eeprom_id { struct sfp_eeprom_base base; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0ba687454267..2a57a365c711 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -245,6 +245,7 @@ struct iov_iter; struct napi_struct; struct bpf_prog; union bpf_attr; +struct skb_ext; #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) struct nf_conntrack { @@ -254,7 +255,6 @@ struct nf_conntrack { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) struct nf_bridge_info { - refcount_t use; enum { BRNF_PROTO_UNCHANGED, BRNF_PROTO_8021Q, @@ -481,10 +481,11 @@ static inline void sock_zerocopy_get(struct ubuf_info *uarg) } void sock_zerocopy_put(struct ubuf_info *uarg); -void sock_zerocopy_put_abort(struct ubuf_info *uarg); +void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); +int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len); int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg); @@ -615,6 +616,8 @@ typedef unsigned char *sk_buff_data_t; * @pkt_type: Packet class * @fclone: skbuff clone status * @ipvs_property: skbuff is owned by ipvs + * @offload_fwd_mark: Packet was L2-forwarded in hardware + * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware * @tc_skip_classify: do not classify packet. set by IFB device * @tc_at_ingress: used within tc_classify to distinguish in/egress * @tc_redirected: packet was redirected by a tc action @@ -633,6 +636,7 @@ typedef unsigned char *sk_buff_data_t; * @queue_mapping: Queue mapping for multiqueue devices * @xmit_more: More SKBs are pending for this queue * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves + * @active_extensions: active extensions (skb_ext_id types) * @ndisc_nodetype: router type (from link layer) * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport @@ -662,6 +666,7 @@ typedef unsigned char *sk_buff_data_t; * @data: Data head pointer * @truesize: Buffer size * @users: User count - see {datagram,tcp}.c + * @extensions: allocated extensions, valid if active_extensions is nonzero */ struct sk_buff { @@ -709,15 +714,9 @@ struct sk_buff { struct list_head tcp_tsorted_anchor; }; -#ifdef CONFIG_XFRM - struct sec_path *sp; -#endif #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) unsigned long _nfct; #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - struct nf_bridge_info *nf_bridge; -#endif unsigned int len, data_len; __u16 mac_len, @@ -744,7 +743,9 @@ struct sk_buff { head_frag:1, xmit_more:1, pfmemalloc:1; - +#ifdef CONFIG_SKB_EXTENSIONS + __u8 active_extensions; +#endif /* fields enclosed in headers_start/headers_end are copied * using a single memcpy() in __copy_skb_header() */ @@ -777,6 +778,14 @@ struct sk_buff { __u8 encap_hdr_csum:1; __u8 csum_valid:1; +#ifdef __BIG_ENDIAN_BITFIELD +#define PKT_VLAN_PRESENT_BIT 7 +#else +#define PKT_VLAN_PRESENT_BIT 0 +#endif +#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset) + __u8 __pkt_vlan_present_offset[0]; + __u8 vlan_present:1; __u8 csum_complete_sw:1; __u8 csum_level:2; __u8 csum_not_inet:1; @@ -784,13 +793,13 @@ struct sk_buff { #ifdef CONFIG_IPV6_NDISC_NODETYPE __u8 ndisc_nodetype:2; #endif - __u8 
ipvs_property:1; + __u8 ipvs_property:1; __u8 inner_protocol_type:1; __u8 remcsum_offload:1; #ifdef CONFIG_NET_SWITCHDEV __u8 offload_fwd_mark:1; - __u8 offload_mr_fwd_mark:1; + __u8 offload_l3_fwd_mark:1; #endif #ifdef CONFIG_NET_CLS_ACT __u8 tc_skip_classify:1; @@ -858,6 +867,11 @@ struct sk_buff { *data; unsigned int truesize; refcount_t users; + +#ifdef CONFIG_SKB_EXTENSIONS + /* only useable after checking ->active_extensions != 0 */ + struct skb_ext *extensions; +#endif }; #ifdef __KERNEL__ @@ -1317,15 +1331,35 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) return is_zcopy ? skb_uarg(skb) : NULL; } -static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) +static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg, + bool *have_ref) { if (skb && uarg && !skb_zcopy(skb)) { - sock_zerocopy_get(uarg); + if (unlikely(have_ref && *have_ref)) + *have_ref = false; + else + sock_zerocopy_get(uarg); skb_shinfo(skb)->destructor_arg = uarg; skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; } } +static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) +{ + skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); + skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; +} + +static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) +{ + return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; +} + +static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) +{ + return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); +} + /* Release a reference on a zerocopy structure */ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) { @@ -1335,7 +1369,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) if (uarg->callback == sock_zerocopy_callback) { uarg->zerocopy = uarg->zerocopy && zerocopy; sock_zerocopy_put(uarg); - } else { + } else if (!skb_zcopy_is_nouarg(skb)) { uarg->callback(uarg, zerocopy); } @@ -1349,7 +1383,7 @@ static inline void skb_zcopy_abort(struct sk_buff *skb) struct ubuf_info *uarg = skb_zcopy(skb); if (uarg) { - sock_zerocopy_put_abort(uarg); + sock_zerocopy_put_abort(uarg, false); skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; } } @@ -1725,8 +1759,6 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list, * The "__skb_xxxx()" functions are the non-atomic ones that * can only be called with interrupts disabled. 
*/ -void skb_insert(struct sk_buff *old, struct sk_buff *newsk, - struct sk_buff_head *list); static inline void __skb_insert(struct sk_buff *newsk, struct sk_buff *prev, struct sk_buff *next, struct sk_buff_head *list) @@ -2508,10 +2540,8 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len); static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) { - if (unlikely(skb_is_nonlinear(skb))) { - WARN_ON(1); + if (WARN_ON(skb_is_nonlinear(skb))) return; - } skb->len = len; skb_set_tail_pointer(skb, len); } @@ -3329,7 +3359,6 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, unsigned int flags); int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, int len); -int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, @@ -3870,18 +3899,97 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct) atomic_inc(&nfct->use); } #endif + +#ifdef CONFIG_SKB_EXTENSIONS +enum skb_ext_id { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) -static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) + SKB_EXT_BRIDGE_NF, +#endif +#ifdef CONFIG_XFRM + SKB_EXT_SEC_PATH, +#endif + SKB_EXT_NUM, /* must be last */ +}; + +/** + * struct skb_ext - sk_buff extensions + * @refcnt: 1 on allocation, deallocated on 0 + * @offset: offset to add to @data to obtain extension address + * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units + * @data: start of extension data, variable sized + * + * Note: offsets/lengths are stored in chunks of 8 bytes, this allows + * to use 'u8' types while allowing up to 2kb worth of extension data. 
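Since this extension machinery replaces the old refcounted skb->nf_bridge and skb->sp pointers, a short sketch of how the helpers declared just below are meant to be used may help. It assumes CONFIG_SKB_EXTENSIONS and CONFIG_BRIDGE_NETFILTER (SKB_EXT_BRIDGE_NF only exists then), and the function names are illustrative only.

#include <linux/errno.h>
#include <linux/skbuff.h>

static int note_bridge_forward(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge;

	/* First use allocates skb->extensions and marks the id in
	 * active_extensions; later calls return the existing slot. */
	nf_bridge = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
	if (!nf_bridge)
		return -ENOMEM;

	return 0;
}

static bool saw_bridge_forward(const struct sk_buff *skb)
{
	/* skb_ext_find() is cheap: a bit test plus an offset lookup. */
	return skb_ext_find(skb, SKB_EXT_BRIDGE_NF) != NULL;
}

static void forget_bridge_forward(struct sk_buff *skb)
{
	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
}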
+ */ +struct skb_ext { + refcount_t refcnt; + u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */ + u8 chunks; /* same */ + char data[0] __aligned(8); +}; + +void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id); +void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id); +void __skb_ext_put(struct skb_ext *ext); + +static inline void skb_ext_put(struct sk_buff *skb) { - if (nf_bridge && refcount_dec_and_test(&nf_bridge->use)) - kfree(nf_bridge); + if (skb->active_extensions) + __skb_ext_put(skb->extensions); } -static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge) + +static inline void __skb_ext_copy(struct sk_buff *dst, + const struct sk_buff *src) +{ + dst->active_extensions = src->active_extensions; + + if (src->active_extensions) { + struct skb_ext *ext = src->extensions; + + refcount_inc(&ext->refcnt); + dst->extensions = ext; + } +} + +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src) +{ + skb_ext_put(dst); + __skb_ext_copy(dst, src); +} + +static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i) { - if (nf_bridge) - refcount_inc(&nf_bridge->use); + return !!ext->offset[i]; } -#endif /* CONFIG_BRIDGE_NETFILTER */ + +static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id) +{ + return skb->active_extensions & (1 << id); +} + +static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) +{ + if (skb_ext_exist(skb, id)) + __skb_ext_del(skb, id); +} + +static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id) +{ + if (skb_ext_exist(skb, id)) { + struct skb_ext *ext = skb->extensions; + + return (void *)ext + (ext->offset[id] << 3); + } + + return NULL; +} +#else +static inline void skb_ext_put(struct sk_buff *skb) {} +static inline void skb_ext_del(struct sk_buff *skb, int unused) {} +static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {} +static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {} +#endif /* CONFIG_SKB_EXTENSIONS */ + static inline void nf_reset(struct sk_buff *skb) { #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) @@ -3889,8 +3997,7 @@ static inline void nf_reset(struct sk_buff *skb) skb->_nfct = 0; #endif #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - nf_bridge_put(skb->nf_bridge); - skb->nf_bridge = NULL; + skb_ext_del(skb, SKB_EXT_BRIDGE_NF); #endif } @@ -3908,7 +4015,7 @@ static inline void ipvs_reset(struct sk_buff *skb) #endif } -/* Note: This doesn't put any conntrack and bridge info in dst. */ +/* Note: This doesn't put any conntrack info in dst. 
*/ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, bool copy) { @@ -3916,10 +4023,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src, dst->_nfct = src->_nfct; nf_conntrack_get(skb_nfct(src)); #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - dst->nf_bridge = src->nf_bridge; - nf_bridge_get(src->nf_bridge); -#endif #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) if (copy) dst->nf_trace = src->nf_trace; @@ -3931,9 +4034,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(dst)); #endif -#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) - nf_bridge_put(dst->nf_bridge); -#endif __nf_copy(dst, src, true); } @@ -3955,12 +4055,19 @@ static inline void skb_init_secmark(struct sk_buff *skb) { } #endif +static inline int secpath_exists(const struct sk_buff *skb) +{ +#ifdef CONFIG_XFRM + return skb_ext_exist(skb, SKB_EXT_SEC_PATH); +#else + return 0; +#endif +} + static inline bool skb_irq_freeable(const struct sk_buff *skb) { return !skb->destructor && -#if IS_ENABLED(CONFIG_XFRM) - !skb->sp && -#endif + !secpath_exists(skb) && !skb_nfct(skb) && !skb->_skb_refdst && !skb_has_frag_list(skb); @@ -4006,10 +4113,10 @@ static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb) return skb->dst_pending_confirm != 0; } -static inline struct sec_path *skb_sec_path(struct sk_buff *skb) +static inline struct sec_path *skb_sec_path(const struct sk_buff *skb) { #ifdef CONFIG_XFRM - return skb->sp; + return skb_ext_find(skb, SKB_EXT_SEC_PATH); #else return NULL; #endif diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 2a11e9d91dfa..178a3933a71b 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -36,6 +36,7 @@ struct sk_msg_sg { struct scatterlist data[MAX_MSG_FRAGS + 1]; }; +/* UAPI in filter.c depends on struct sk_msg_sg being first element. 
*/ struct sk_msg { struct sk_msg_sg sg; void *data; @@ -416,6 +417,14 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) sk_psock_drop(sk, psock); } +static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) +{ + if (psock->parser.enabled) + psock->parser.saved_data_ready(sk); + else + sk->sk_data_ready(sk); +} + static inline void psock_set_prog(struct bpf_prog **pprog, struct bpf_prog *prog) { diff --git a/include/linux/socket.h b/include/linux/socket.h index 333b5df8a1b2..ab2041a00e01 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -286,6 +286,7 @@ struct ucred { #define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */ #define MSG_MORE 0x8000 /* Sender will send more */ #define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */ +#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do no apply policy */ #define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */ #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 9ec4c147abbc..b0674e330ef6 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -25,6 +25,7 @@ struct dma_chan; struct pxa2xx_spi_master { u16 num_chipselect; u8 enable_dma; + bool is_slave; /* DMA engine specific config */ bool (*dma_filter)(struct dma_chan *chan, void *param); diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 69ee30456864..3fe24500c5ee 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -57,10 +57,12 @@ /** * enum spi_mem_data_dir - describes the direction of a SPI memory data * transfer from the controller perspective + * @SPI_MEM_NO_DATA: no data transferred * @SPI_MEM_DATA_IN: data coming from the SPI memory - * @SPI_MEM_DATA_OUT: data sent the SPI memory + * @SPI_MEM_DATA_OUT: data sent to the SPI memory */ enum spi_mem_data_dir { + SPI_MEM_NO_DATA, SPI_MEM_DATA_IN, SPI_MEM_DATA_OUT, }; @@ -123,6 +125,49 @@ struct spi_mem_op { } /** + * struct spi_mem_dirmap_info - Direct mapping information + * @op_tmpl: operation template that should be used by the direct mapping when + * the memory device is accessed + * @offset: absolute offset this direct mapping is pointing to + * @length: length in byte of this direct mapping + * + * These information are used by the controller specific implementation to know + * the portion of memory that is directly mapped and the spi_mem_op that should + * be used to access the device. + * A direct mapping is only valid for one direction (read or write) and this + * direction is directly encoded in the ->op_tmpl.data.dir field. + */ +struct spi_mem_dirmap_info { + struct spi_mem_op op_tmpl; + u64 offset; + u64 length; +}; + +/** + * struct spi_mem_dirmap_desc - Direct mapping descriptor + * @mem: the SPI memory device this direct mapping is attached to + * @info: information passed at direct mapping creation time + * @nodirmap: set to 1 if the SPI controller does not implement + * ->mem_ops->dirmap_create() or when this function returned an + * error. If @nodirmap is true, all spi_mem_dirmap_{read,write}() + * calls will use spi_mem_exec_op() to access the memory. This is a + * degraded mode that allows spi_mem drivers to use the same code + * no matter whether the controller supports direct mapping or not + * @priv: field pointing to controller specific data + * + * Common part of a direct mapping descriptor. 
This object is created by + * spi_mem_dirmap_create() and controller implementation of ->create_dirmap() + * can create/attach direct mapping resources to the descriptor in the ->priv + * field. + */ +struct spi_mem_dirmap_desc { + struct spi_mem *mem; + struct spi_mem_dirmap_info info; + unsigned int nodirmap; + void *priv; +}; + +/** * struct spi_mem - describes a SPI memory device * @spi: the underlying SPI device * @drvpriv: spi_mem_driver private data @@ -177,10 +222,32 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem) * Note that if the implementation of this function allocates memory * dynamically, then it should do so with devm_xxx(), as we don't * have a ->free_name() function. + * @dirmap_create: create a direct mapping descriptor that can later be used to + * access the memory device. This method is optional + * @dirmap_destroy: destroy a memory descriptor previous created by + * ->dirmap_create() + * @dirmap_read: read data from the memory device using the direct mapping + * created by ->dirmap_create(). The function can return less + * data than requested (for example when the request is crossing + * the currently mapped area), and the caller of + * spi_mem_dirmap_read() is responsible for calling it again in + * this case. + * @dirmap_write: write data to the memory device using the direct mapping + * created by ->dirmap_create(). The function can return less + * data than requested (for example when the request is crossing + * the currently mapped area), and the caller of + * spi_mem_dirmap_write() is responsible for calling it again in + * this case. * * This interface should be implemented by SPI controllers providing an * high-level interface to execute SPI memory operation, which is usually the * case for QSPI controllers. + * + * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct + * mapping from the CPU because doing that can stall the CPU waiting for the + * SPI mem transaction to finish, and this will make real-time maintainers + * unhappy and might make your system less reactive. Instead, drivers should + * use DMA to access this direct mapping. 
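A brief, hedged sketch of the consumer side of this direct-mapping API may be useful here; the 0x03 read opcode, the 3-byte address width and the helper names are assumptions made for illustration, not part of the patch.

#include <linux/spi/spi-mem.h>

static struct spi_mem_dirmap_desc *
foo_create_read_dirmap(struct spi_mem *mem, u64 flash_size)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = flash_size,
	};

	return spi_mem_dirmap_create(mem, &info);
}

static ssize_t foo_read(struct spi_mem_dirmap_desc *desc, u64 offs,
			size_t len, void *buf)
{
	/* May return less than requested; callers loop as documented. */
	return spi_mem_dirmap_read(desc, offs, len, buf);
}

When the controller does not implement ->dirmap_create(), the descriptor remains usable: with ->nodirmap set, reads and writes fall back to spi_mem_exec_op(), as the documentation above explains.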
*/ struct spi_controller_mem_ops { int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op); @@ -189,6 +256,12 @@ struct spi_controller_mem_ops { int (*exec_op)(struct spi_mem *mem, const struct spi_mem_op *op); const char *(*get_name)(struct spi_mem *mem); + int (*dirmap_create)(struct spi_mem_dirmap_desc *desc); + void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc); + ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, void *buf); + ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, const void *buf); }; /** @@ -249,6 +322,15 @@ int spi_mem_exec_op(struct spi_mem *mem, const char *spi_mem_get_name(struct spi_mem *mem); +struct spi_mem_dirmap_desc * +spi_mem_dirmap_create(struct spi_mem *mem, + const struct spi_mem_dirmap_info *info); +void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc); +ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, void *buf); +ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, + u64 offs, size_t len, const void *buf); + int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, struct module *owner); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 6be77fa5ab90..314d922ca607 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -154,7 +154,10 @@ struct spi_device { #define SPI_TX_QUAD 0x200 /* transmit with 4 wires */ #define SPI_RX_DUAL 0x400 /* receive with 2 wires */ #define SPI_RX_QUAD 0x800 /* receive with 4 wires */ -#define SPI_CS_WORD 0x1000 /* toggle cs after each word */ +#define SPI_CS_WORD 0x1000 /* toggle cs after each word */ +#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */ +#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */ +#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */ int irq; void *controller_state; void *controller_data; diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 67135d4a8a30..c614375cd264 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -38,20 +38,20 @@ struct srcu_struct; #ifdef CONFIG_DEBUG_LOCK_ALLOC -int __init_srcu_struct(struct srcu_struct *sp, const char *name, +int __init_srcu_struct(struct srcu_struct *ssp, const char *name, struct lock_class_key *key); -#define init_srcu_struct(sp) \ +#define init_srcu_struct(ssp) \ ({ \ static struct lock_class_key __srcu_key; \ \ - __init_srcu_struct((sp), #sp, &__srcu_key); \ + __init_srcu_struct((ssp), #ssp, &__srcu_key); \ }) #define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name }, #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -int init_srcu_struct(struct srcu_struct *sp); +int init_srcu_struct(struct srcu_struct *ssp); #define __SRCU_DEP_MAP_INIT(srcu_name) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ @@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp); struct srcu_struct { }; #endif -void call_srcu(struct srcu_struct *sp, struct rcu_head *head, +void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void (*func)(struct rcu_head *head)); -void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced); -int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp); -void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp); -void synchronize_srcu(struct srcu_struct *sp); +void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced); +int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); +void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); +void synchronize_srcu(struct 
srcu_struct *ssp); /** * cleanup_srcu_struct - deconstruct a sleep-RCU structure - * @sp: structure to clean up. + * @ssp: structure to clean up. * * Must invoke this after you are finished using a given srcu_struct that * was initialized via init_srcu_struct(), else you leak memory. */ -static inline void cleanup_srcu_struct(struct srcu_struct *sp) +static inline void cleanup_srcu_struct(struct srcu_struct *ssp) { - _cleanup_srcu_struct(sp, false); + _cleanup_srcu_struct(ssp, false); } /** * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure - * @sp: structure to clean up. + * @ssp: structure to clean up. * * Must invoke this after you are finished using a given srcu_struct that * was initialized via init_srcu_struct(), else you leak memory. Also, @@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp) * (with high probability, anyway), and will also cause the srcu_struct * to be leaked. */ -static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) +static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp) { - _cleanup_srcu_struct(sp, true); + _cleanup_srcu_struct(ssp, true); } #ifdef CONFIG_DEBUG_LOCK_ALLOC /** * srcu_read_lock_held - might we be in SRCU read-side critical section? - * @sp: The srcu_struct structure to check + * @ssp: The srcu_struct structure to check * * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, @@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp) * relies on normal RCU, it can be called from the CPU which * is in the idle loop from an RCU point of view or offline. */ -static inline int srcu_read_lock_held(const struct srcu_struct *sp) +static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { if (!debug_lockdep_rcu_enabled()) return 1; - return lock_is_held(&sp->dep_map); + return lock_is_held(&ssp->dep_map); } #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -static inline int srcu_read_lock_held(const struct srcu_struct *sp) +static inline int srcu_read_lock_held(const struct srcu_struct *ssp) { return 1; } @@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) /** * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing - * @sp: pointer to the srcu_struct, which is used to check that we + * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * @c: condition to check for update-side use * @@ -154,29 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) * to 1. The @c argument will normally be a logical expression containing * lockdep_is_held() calls. */ -#define srcu_dereference_check(p, sp, c) \ - __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu) +#define srcu_dereference_check(p, ssp, c) \ + __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu) /** * srcu_dereference - fetch SRCU-protected pointer for later dereferencing * @p: the pointer to fetch and protect for later dereferencing - * @sp: pointer to the srcu_struct, which is used to check that we + * @ssp: pointer to the srcu_struct, which is used to check that we * really are in an SRCU read-side critical section. * * Makes rcu_dereference_check() do the dirty work. 
If PROVE_RCU * is enabled, invoking this outside of an RCU read-side critical * section will result in an RCU-lockdep splat. */ -#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) +#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0) /** * srcu_dereference_notrace - no tracing and no lockdep calls from here + * @p: the pointer to fetch and protect for later dereferencing + * @ssp: pointer to the srcu_struct, which is used to check that we + * really are in an SRCU read-side critical section. */ -#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1) +#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1) /** * srcu_read_lock - register a new reader for an SRCU-protected structure. - * @sp: srcu_struct in which to register the new reader. + * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section. Note that SRCU read-side * critical sections may be nested. However, it is illegal to @@ -191,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp) * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() * was invoked in process context. */ -static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) +static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) { int retval; - retval = __srcu_read_lock(sp); - rcu_lock_acquire(&(sp)->dep_map); + retval = __srcu_read_lock(ssp); + rcu_lock_acquire(&(ssp)->dep_map); return retval; } /* Used by tracing, cannot be traced and cannot invoke lockdep. */ static inline notrace int -srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp) +srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) { int retval; - retval = __srcu_read_lock(sp); + retval = __srcu_read_lock(ssp); return retval; } /** * srcu_read_unlock - unregister a old reader from an SRCU-protected structure. - * @sp: srcu_struct in which to unregister the old reader. + * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock(). * * Exit an SRCU read-side critical section. */ -static inline void srcu_read_unlock(struct srcu_struct *sp, int idx) - __releases(sp) +static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) + __releases(ssp) { - rcu_lock_release(&(sp)->dep_map); - __srcu_read_unlock(sp, idx); + rcu_lock_release(&(ssp)->dep_map); + __srcu_read_unlock(ssp, idx); } /* Used by tracing, cannot be traced and cannot call lockdep. */ static inline notrace void -srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp) +srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) { - __srcu_read_unlock(sp, idx); + __srcu_read_unlock(ssp, idx); } /** diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index f41d2fb09f87..b19216aaaef2 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp); #define DEFINE_STATIC_SRCU(name) \ static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name) -void synchronize_srcu(struct srcu_struct *sp); +void synchronize_srcu(struct srcu_struct *ssp); /* * Counts the new reader in the appropriate per-CPU element of the @@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp); * __srcu_read_unlock() must be in the same handler instance. Returns an * index that must be passed to the matching srcu_read_unlock(). 
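To make the read-side rules above concrete, here is a minimal sketch of the srcu_read_lock()/srcu_dereference()/srcu_read_unlock() pattern; my_srcu, my_config and cur_config are made-up names used only for illustration.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>

struct my_config {
	int value;
};

DEFINE_STATIC_SRCU(my_srcu);
static struct my_config __rcu *cur_config;

static int read_config_value(void)
{
	struct my_config *cfg;
	int idx, val = -ENOENT;

	/* Enter the SRCU read-side critical section and keep the
	 * returned index for the matching unlock. */
	idx = srcu_read_lock(&my_srcu);
	cfg = srcu_dereference(cur_config, &my_srcu);
	if (cfg)
		val = cfg->value;
	srcu_read_unlock(&my_srcu, idx);

	return val;
}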
*/ -static inline int __srcu_read_lock(struct srcu_struct *sp) +static inline int __srcu_read_lock(struct srcu_struct *ssp) { int idx; - idx = READ_ONCE(sp->srcu_idx); - WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1); + idx = READ_ONCE(ssp->srcu_idx); + WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); return idx; } -static inline void synchronize_srcu_expedited(struct srcu_struct *sp) +static inline void synchronize_srcu_expedited(struct srcu_struct *ssp) { - synchronize_srcu(sp); + synchronize_srcu(ssp); } -static inline void srcu_barrier(struct srcu_struct *sp) +static inline void srcu_barrier(struct srcu_struct *ssp) { - synchronize_srcu(sp); + synchronize_srcu(ssp); } /* Defined here to avoid size increase for non-torture kernels. */ -static inline void srcu_torture_stats_print(struct srcu_struct *sp, +static inline void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) { int idx; - idx = READ_ONCE(sp->srcu_idx) & 0x1; + idx = READ_ONCE(ssp->srcu_idx) & 0x1; pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", tt, tf, idx, - READ_ONCE(sp->srcu_lock_nesting[!idx]), - READ_ONCE(sp->srcu_lock_nesting[idx])); + READ_ONCE(ssp->srcu_lock_nesting[!idx]), + READ_ONCE(ssp->srcu_lock_nesting[idx])); } #endif diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 0ae91b3a7406..6f292bd3e7db 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -51,7 +51,7 @@ struct srcu_data { unsigned long grpmask; /* Mask for leaf srcu_node */ /* ->srcu_data_have_cbs[]. */ int cpu; - struct srcu_struct *sp; + struct srcu_struct *ssp; }; /* @@ -138,8 +138,8 @@ struct srcu_struct { #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) -void synchronize_srcu_expedited(struct srcu_struct *sp); -void srcu_barrier(struct srcu_struct *sp); -void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf); +void synchronize_srcu_expedited(struct srcu_struct *ssp); +void srcu_barrier(struct srcu_struct *ssp); +void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf); #endif diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 43106ffa6788..2ec128060239 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) buf->head[0].iov_base = start; buf->head[0].iov_len = len; buf->tail[0].iov_len = 0; - buf->bvec = NULL; buf->pages = NULL; buf->page_len = 0; buf->flags = 0; diff --git a/include/linux/swap.h b/include/linux/swap.h index d8a07a4f171d..a8f6d5d89524 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -18,6 +18,8 @@ struct notifier_block; struct bio; +struct pagevec; + #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ #define SWAP_FLAG_PRIO_MASK 0x7fff #define SWAP_FLAG_PRIO_SHIFT 0 @@ -369,7 +371,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, #endif extern int page_evictable(struct page *page); -extern void check_move_unevictable_pages(struct page **, int nr_pages); +extern void check_move_unevictable_pages(struct pagevec *pvec); extern int kswapd_run(int nid); extern void kswapd_stop(int nid); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 987cefa337de..786816cf4aa5 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj, const struct attribute 
*attr, const void *ns); int __must_check sysfs_create_files(struct kobject *kobj, - const struct attribute **attr); + const struct attribute * const *attr); int __must_check sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, umode_t mode); struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, @@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn); void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, const void *ns); bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); -void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); +void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr); int __must_check sysfs_create_bin_file(struct kobject *kobj, const struct bin_attribute *attr); @@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj, } static inline int sysfs_create_files(struct kobject *kobj, - const struct attribute **attr) + const struct attribute * const *attr) { return 0; } @@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj, } static inline void sysfs_remove_files(struct kobject *kobj, - const struct attribute **attr) + const struct attribute * const *attr) { } diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h index b9626aa7e90c..3e2a80cc7b56 100644 --- a/include/linux/t10-pi.h +++ b/include/linux/t10-pi.h @@ -39,12 +39,13 @@ struct t10_pi_tuple { static inline u32 t10_pi_ref_tag(struct request *rq) { + unsigned int shift = ilog2(queue_logical_block_size(rq->q)); + #ifdef CONFIG_BLK_DEV_INTEGRITY - return blk_rq_pos(rq) >> - (rq->q->integrity.interval_exp - 9) & 0xffffffff; -#else - return -1U; + if (rq->q->integrity.interval_exp) + shift = rq->q->integrity.interval_exp; #endif + return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff; } extern const struct blk_integrity_profile t10_pi_type1_crc; diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 8ed77bb4ed86..a9b0280687d5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -196,6 +196,7 @@ struct tcp_sock { u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ + u32 compressed_ack_rcv_nxt; u32 tsoffset; /* timestamp offset */ diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h deleted file mode 100644 index 9fb317970c01..000000000000 --- a/include/linux/thinkpad_acpi.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __THINKPAD_ACPI_H__ -#define __THINKPAD_ACPI_H__ - -/* These two functions return 0 if success, or negative error code - (e g -ENODEV if no led present) */ - -enum { - TPACPI_LED_MUTE, - TPACPI_LED_MICMUTE, - TPACPI_LED_MAX, -}; - -int tpacpi_led_set(int whichled, bool on); - -#endif diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 4130a5497d40..8a62731673f7 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); -struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name); +struct 
bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); +void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, u64 *probe_addr); @@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf { return -EOPNOTSUPP; } -static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name) +static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name) { return NULL; } +static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) +{ +} static inline int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, u32 *fd_type, const char **buf, u64 *probe_offset, diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 40b0b4c1bf7b..df20f8bdbfa3 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h @@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs) * tracehook_report_syscall_entry - task is about to attempt a system call * @regs: user register state of current task * - * This will be called if %TIF_SYSCALL_TRACE has been set, when the - * current task has just entered the kernel for a system call. + * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set, + * when the current task has just entered the kernel for a system call. * Full user register state is available here. Changing the values * in @regs can affect the system call number and arguments to be tried. * It is safe to block here, preventing the system call from beginning. diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 538ba1a58f5b..9c3186578ce0 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb) static inline void tracepoint_synchronize_unregister(void) { synchronize_srcu(&tracepoint_srcu); - synchronize_sched(); + synchronize_rcu(); } #else static inline void tracepoint_synchronize_unregister(void) @@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) struct tracepoint_func *it_func_ptr; \ void *it_func; \ void *__data; \ - int __maybe_unused idx = 0; \ + int __maybe_unused __idx = 0; \ \ if (!(cond)) \ return; \ @@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * doesn't work from the idle path. 
\ */ \ if (rcuidle) { \ - idx = srcu_read_lock_notrace(&tracepoint_srcu); \ + __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ rcu_irq_enter_irqson(); \ } \ \ @@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) \ if (rcuidle) { \ rcu_irq_exit_irqson(); \ - srcu_read_unlock_notrace(&tracepoint_srcu, idx);\ + srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ } \ \ preempt_enable_notrace(); \ diff --git a/include/linux/tty.h b/include/linux/tty.h index 414db2bce715..392138fe59b6 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); extern void tty_release_struct(struct tty_struct *tty, int idx); extern int tty_release(struct inode *inode, struct file *filp); extern void tty_init_termios(struct tty_struct *tty); +extern void tty_save_termios(struct tty_struct *tty); extern int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty); diff --git a/include/linux/types.h b/include/linux/types.h index 9834e90aa010..c2615d6a019e 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -212,8 +212,8 @@ struct ustat { * weird ABI and we need to ask it explicitly. * * The alignment is required to guarantee that bit 0 of @next will be - * clear under normal conditions -- as long as we use call_rcu(), - * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback. + * clear under normal conditions -- as long as we use call_rcu() or + * call_srcu() to queue the callback. * * This guarantee is important for few reasons: * - future call_rcu_lazy() will make use of lower bits in the pointer; diff --git a/include/linux/udp.h b/include/linux/udp.h index 320d49d85484..2725c83395bf 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -49,7 +49,13 @@ struct udp_sock { unsigned int corkflag; /* Cork is required */ __u8 encap_type; /* Is this an Encapsulation socket? */ unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ - no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ + no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */ + encap_enabled:1, /* This socket enabled encap + * processing; UDP tunnels and + * different encapsulation layer set + * this + */ + gro_enabled:1; /* Can accept GRO packets */ /* * Following member retains the information to create a UDP header * when the socket is uncorked. @@ -71,6 +77,7 @@ struct udp_sock { * For encapsulation sockets. 
*/ int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); + int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb); void (*encap_destroy)(struct sock *sk); /* GRO functions for UDP socket */ @@ -115,6 +122,23 @@ static inline bool udp_get_no_check6_rx(struct sock *sk) return udp_sk(sk)->no_check6_rx; } +static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb) +{ + int gso_size; + + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + gso_size = skb_shinfo(skb)->gso_size; + put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size); + } +} + +static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb) +{ + return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) && + skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4; +} + #define udp_portaddr_for_each_entry(__sk, list) \ hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node) diff --git a/include/linux/usb.h b/include/linux/usb.h index 4cdd515a4385..5e49e82c4368 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -407,11 +407,11 @@ struct usb_host_bos { }; int __usb_get_extra_descriptor(char *buffer, unsigned size, - unsigned char type, void **ptr); + unsigned char type, void **ptr, size_t min); #define usb_get_extra_descriptor(ifpoint, type, ptr) \ __usb_get_extra_descriptor((ifpoint)->extra, \ (ifpoint)->extralen, \ - type, (void **)ptr) + type, (void **)ptr, sizeof(**(ptr))) /* ----------------------------------------------------------------------- */ diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index b7a99ce56bc9..a1be64c9940f 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -66,4 +66,7 @@ /* Device needs a pause after every control message. */ #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) +/* Hub needs extra delay after resetting its port. */ +#define USB_QUIRK_HUB_SLOW_RESET BIT(14) + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/xarray.h b/include/linux/xarray.h index d9514928ddac..f492e21c4aa2 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -289,9 +289,7 @@ struct xarray { void xa_init_flags(struct xarray *, gfp_t flags); void *xa_load(struct xarray *, unsigned long index); void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); -void *xa_cmpxchg(struct xarray *, unsigned long index, - void *old, void *entry, gfp_t); -int xa_reserve(struct xarray *, unsigned long index, gfp_t); +void *xa_erase(struct xarray *, unsigned long index); void *xa_store_range(struct xarray *, unsigned long first, unsigned long last, void *entry, gfp_t); bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) } /** - * xa_erase() - Erase this entry from the XArray. - * @xa: XArray. - * @index: Index of entry. - * - * This function is the equivalent of calling xa_store() with %NULL as - * the third argument. The XArray does not need to allocate memory, so - * the user does not need to provide GFP flags. - * - * Context: Process context. Takes and releases the xa_lock. - * Return: The entry which used to be at this index. - */ -static inline void *xa_erase(struct xarray *xa, unsigned long index) -{ - return xa_store(xa, index, NULL, 0); -} - -/** - * xa_insert() - Store this entry in the XArray unless another entry is - * already present. - * @xa: XArray. - * @index: Index into array. - * @entry: New entry. - * @gfp: Memory allocation flags. 
- * - * If you would rather see the existing entry in the array, use xa_cmpxchg(). - * This function is for users who don't care what the entry is, only that - * one is present. - * - * Context: Process context. Takes and releases the xa_lock. - * May sleep if the @gfp flags permit. - * Return: 0 if the store succeeded. -EEXIST if another entry was present. - * -ENOMEM if memory could not be allocated. - */ -static inline int xa_insert(struct xarray *xa, unsigned long index, - void *entry, gfp_t gfp) -{ - void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); - if (!curr) - return 0; - if (xa_is_err(curr)) - return xa_err(curr); - return -EEXIST; -} - -/** - * xa_release() - Release a reserved entry. - * @xa: XArray. - * @index: Index of entry. - * - * After calling xa_reserve(), you can call this function to release the - * reservation. If the entry at @index has been stored to, this function - * will do nothing. - */ -static inline void xa_release(struct xarray *xa, unsigned long index) -{ - xa_cmpxchg(xa, index, NULL, NULL, 0); -} - -/** * xa_for_each() - Iterate over a portion of an XArray. * @xa: XArray. * @entry: Entry retrieved from array. @@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, void *entry, gfp_t); int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); +int __xa_reserve(struct xarray *, unsigned long index, gfp_t); void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); @@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index, } /** + * xa_store_bh() - Store this entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_store() except it disables softirqs + * while holding the array lock. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: The entry which used to be at this index. + */ +static inline void *xa_store_bh(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_bh(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock_bh(xa); + + return curr; +} + +/** + * xa_store_irq() - Store this entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_store() except it disables interrupts + * while holding the array lock. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: The entry which used to be at this index. + */ +static inline void *xa_store_irq(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_irq(xa); + curr = __xa_store(xa, index, entry, gfp); + xa_unlock_irq(xa); + + return curr; +} + +/** * xa_erase_bh() - Erase this entry from the XArray. * @xa: XArray. * @index: Index of entry. @@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index, * the third argument. The XArray does not need to allocate memory, so * the user does not need to provide GFP flags. * - * Context: Process context. Takes and releases the xa_lock while + * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. * Return: The entry which used to be at this index.
*/ @@ -535,6 +527,115 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index) } /** + * xa_cmpxchg() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * If the entry at @index is the same as @old, replace it with @entry. + * If the return value is equal to @old, then the exchange was successful. + * + * Context: Any context. Takes and releases the xa_lock. May sleep + * if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock(xa); + + return curr; +} + +/** + * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables softirqs + * while holding the array lock. + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_bh(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_bh(xa); + + return curr; +} + +/** + * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray. + * @xa: XArray. + * @index: Index into array. + * @old: Old value to test against. + * @entry: New value to place in array. + * @gfp: Memory allocation flags. + * + * This function is like calling xa_cmpxchg() except it disables interrupts + * while holding the array lock. + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. May sleep if the @gfp flags permit. + * Return: The old value at this index or xa_err() if an error happened. + */ +static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) +{ + void *curr; + + xa_lock_irq(xa); + curr = __xa_cmpxchg(xa, index, old, entry, gfp); + xa_unlock_irq(xa); + + return curr; +} + +/** + * xa_insert() - Store this entry in the XArray unless another entry is + * already present. + * @xa: XArray. + * @index: Index into array. + * @entry: New entry. + * @gfp: Memory allocation flags. + * + * If you would rather see the existing entry in the array, use xa_cmpxchg(). + * This function is for users who don't care what the entry is, only that + * one is present. + * + * Context: Process context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: 0 if the store succeeded. -EEXIST if another entry was present. + * -ENOMEM if memory could not be allocated. + */ +static inline int xa_insert(struct xarray *xa, unsigned long index, + void *entry, gfp_t gfp) +{ + void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); + if (!curr) + return 0; + if (xa_is_err(curr)) + return xa_err(curr); + return -EEXIST; +} + +/** * xa_alloc() - Find somewhere to store this entry in the XArray. * @xa: XArray. * @id: Pointer to ID. 
@@ -575,7 +676,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, * Updates the @id pointer with the index, then stores the entry at that * index. A concurrent lookup will not see an uninitialised @id. * - * Context: Process context. Takes and releases the xa_lock while + * Context: Any context. Takes and releases the xa_lock while * disabling softirqs. May sleep if the @gfp flags permit. * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if * there is no more space in the XArray. @@ -621,6 +722,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, return err; } +/** + * xa_reserve() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * Ensures there is somewhere to store an entry at @index in the array. + * If there is already something stored at @index, this function does + * nothing. If there was nothing there, the entry is marked as reserved. + * Loading from a reserved entry returns a %NULL pointer. + * + * If you do not use the entry that you have reserved, call xa_release() + * or xa_erase() to free any unnecessary memory. + * + * Context: Any context. Takes and releases the xa_lock. + * May sleep if the @gfp flags permit. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline +int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + int ret; + + xa_lock(xa); + ret = __xa_reserve(xa, index, gfp); + xa_unlock(xa); + + return ret; +} + +/** + * xa_reserve_bh() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * A softirq-disabling version of xa_reserve(). + * + * Context: Any context. Takes and releases the xa_lock while + * disabling softirqs. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline +int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + int ret; + + xa_lock_bh(xa); + ret = __xa_reserve(xa, index, gfp); + xa_unlock_bh(xa); + + return ret; +} + +/** + * xa_reserve_irq() - Reserve this index in the XArray. + * @xa: XArray. + * @index: Index into array. + * @gfp: Memory allocation flags. + * + * An interrupt-disabling version of xa_reserve(). + * + * Context: Process context. Takes and releases the xa_lock while + * disabling interrupts. + * Return: 0 if the reservation succeeded or -ENOMEM if it failed. + */ +static inline +int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) +{ + int ret; + + xa_lock_irq(xa); + ret = __xa_reserve(xa, index, gfp); + xa_unlock_irq(xa); + + return ret; +} + +/** + * xa_release() - Release a reserved entry. + * @xa: XArray. + * @index: Index of entry. + * + * After calling xa_reserve(), you can call this function to release the + * reservation. If the entry at @index has been stored to, this function + * will do nothing. + */ +static inline void xa_release(struct xarray *xa, unsigned long index) +{ + xa_cmpxchg(xa, index, NULL, NULL, 0); +} + /* Everything below here is the Advanced API. Proceed with caution. */ /* |
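The include/linux/xarray.h hunks above replace the out-of-line xa_cmpxchg() and xa_reserve() declarations with static inline wrappers around __xa_cmpxchg() and the new __xa_reserve(), and regroup xa_insert() and xa_release() alongside them. As a rough editorial illustration (not taken from this patch), the sketch below strings those documented helpers together for a process-context caller; example_array, example_publish(), index, payload and update are invented names.

/*
 * Illustrative sketch only -- not part of the diff.  It combines the
 * helpers documented above (xa_reserve(), xa_insert(), xa_cmpxchg(),
 * xa_release(), xa_erase()) the way their kernel-doc describes.  The
 * array, function and parameter names are invented; the caller is
 * assumed to run in process context, so GFP_KERNEL is acceptable.
 */
#include <linux/xarray.h>

static struct xarray example_array;	/* initialised once elsewhere with
					 * xa_init_flags(&example_array, 0) */

static int example_publish(unsigned long index, void *payload, void *update)
{
	void *old;
	int err;

	/* Reserve the slot up front; a reserved slot still loads as NULL. */
	err = xa_reserve(&example_array, index, GFP_KERNEL);
	if (err)
		return err;				/* -ENOMEM */

	/* Publish the entry unless another thread stored one meanwhile. */
	err = xa_insert(&example_array, index, payload, GFP_KERNEL);
	if (err) {
		xa_release(&example_array, index);	/* drop the reservation */
		return err;				/* -EEXIST or -ENOMEM */
	}

	/* Replace the entry only if it still holds payload. */
	old = xa_cmpxchg(&example_array, index, payload, update, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);

	/* xa_erase() never allocates, so no GFP flags are needed. */
	xa_erase(&example_array, index);
	return 0;
}

The reserve-then-insert pattern is the point of xa_reserve(): the allocation happens up front, where -ENOMEM is easy to handle, so a later store at the same index should not fail for lack of memory.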
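Further up, the include/linux/udp.h hunks add encap_enabled/gro_enabled bits to struct udp_sock plus two inline helpers, udp_unexpected_gso() and udp_cmsg_recv(). The fragment below is likewise an invented illustration, not the kernel's actual receive path: only the two helpers, the SKB_GSO_UDP_L4 condition they wrap, and the UDP_GRO cmsg come from the diff; the function name and error handling are made up to keep the sketch short.

/*
 * Invented example -- not the kernel's real receive path.  It only shows
 * where the two new inline helpers from include/linux/udp.h would sit:
 * udp_unexpected_gso() gates sockets that did not opt in to UDP GRO, and
 * udp_cmsg_recv() reports the segment size via a UDP_GRO cmsg.
 */
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/udp.h>
#include <net/sock.h>

static int example_udp_deliver(struct sock *sk, struct sk_buff *skb,
			       struct msghdr *msg)
{
	/*
	 * A socket that has not enabled GRO must not be handed an
	 * unsegmented SKB_GSO_UDP_L4 skb; real code would segment it
	 * here rather than bail out.
	 */
	if (udp_unexpected_gso(sk, skb))
		return -EINVAL;

	/* For GRO-enabled receivers, expose gso_size as a UDP_GRO cmsg. */
	udp_cmsg_recv(msg, sk, skb);
	return 0;
}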