Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/4level-fixup.h | 2
-rw-r--r--  include/asm-generic/5level-fixup.h | 2
-rw-r--r--  include/asm-generic/pgtable-nop4d-hack.h | 2
-rw-r--r--  include/asm-generic/pgtable-nop4d.h | 2
-rw-r--r--  include/asm-generic/pgtable-nopmd.h | 2
-rw-r--r--  include/asm-generic/pgtable-nopud.h | 2
-rw-r--r--  include/asm-generic/pgtable.h | 16
-rw-r--r--  include/linux/can/dev.h | 1
-rw-r--r--  include/linux/can/rx-offload.h | 7
-rw-r--r--  include/linux/ceph/ceph_features.h | 8
-rw-r--r--  include/linux/compiler-gcc.h | 12
-rw-r--r--  include/linux/compiler.h | 2
-rw-r--r--  include/linux/compiler_attributes.h | 14
-rw-r--r--  include/linux/compiler_types.h | 4
-rw-r--r--  include/linux/dax.h | 14
-rw-r--r--  include/linux/dma-direct.h | 2
-rw-r--r--  include/linux/efi.h | 7
-rw-r--r--  include/linux/filter.h | 11
-rw-r--r--  include/linux/fscache-cache.h | 3
-rw-r--r--  include/linux/ftrace.h | 4
-rw-r--r--  include/linux/gfp.h | 12
-rw-r--r--  include/linux/hid-sensor-hub.h | 4
-rw-r--r--  include/linux/hid.h | 32
-rw-r--r--  include/linux/hyperv.h | 7
-rw-r--r--  include/linux/i8253.h | 1
-rw-r--r--  include/linux/mempolicy.h | 2
-rw-r--r--  include/linux/mm.h | 8
-rw-r--r--  include/linux/mtd/nand.h | 7
-rw-r--r--  include/linux/net_dim.h | 2
-rw-r--r--  include/linux/netdevice.h | 20
-rw-r--r--  include/linux/netfilter/ipset/ip_set.h | 2
-rw-r--r--  include/linux/netfilter/ipset/ip_set_comment.h | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_proto_gre.h | 13
-rw-r--r--  include/linux/nmi.h | 2
-rw-r--r--  include/linux/platform_data/gpio-davinci.h | 2
-rw-r--r--  include/linux/psi.h | 3
-rw-r--r--  include/linux/pstore.h | 5
-rw-r--r--  include/linux/ptrace.h | 17
-rw-r--r--  include/linux/sched.h | 10
-rw-r--r--  include/linux/sched/smt.h | 20
-rw-r--r--  include/linux/sfp.h | 2
-rw-r--r--  include/linux/skbuff.h | 18
-rw-r--r--  include/linux/sunrpc/xdr.h | 1
-rw-r--r--  include/linux/tcp.h | 1
-rw-r--r--  include/linux/tracehook.h | 4
-rw-r--r--  include/linux/tracepoint.h | 6
-rw-r--r--  include/linux/tty.h | 1
-rw-r--r--  include/linux/usb.h | 4
-rw-r--r--  include/linux/usb/quirks.h | 3
-rw-r--r--  include/linux/xarray.h | 267
-rw-r--r--  include/media/media-request.h | 2
-rw-r--r--  include/media/v4l2-mem2mem.h | 2
-rw-r--r--  include/net/addrconf.h | 2
-rw-r--r--  include/net/af_rxrpc.h | 3
-rw-r--r--  include/net/if_inet6.h | 2
-rw-r--r--  include/net/neighbour.h | 28
-rw-r--r--  include/net/netfilter/ipv4/nf_nat_masquerade.h | 2
-rw-r--r--  include/net/netfilter/ipv6/nf_nat_masquerade.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 39
-rw-r--r--  include/net/sctp/sctp.h | 17
-rw-r--r--  include/net/sctp/structs.h | 2
-rw-r--r--  include/sound/pcm_params.h | 4
-rw-r--r--  include/sound/soc.h | 2
-rw-r--r--  include/trace/events/kyber.h | 8
-rw-r--r--  include/trace/events/rxrpc.h | 2
-rw-r--r--  include/trace/events/sched.h | 12
-rw-r--r--  include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  include/uapi/linux/bpf.h | 56
-rw-r--r--  include/uapi/linux/input-event-codes.h | 10
-rw-r--r--  include/uapi/linux/kfd_ioctl.h | 18
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 4
-rw-r--r--  include/uapi/linux/netfilter_bridge.h | 4
-rw-r--r--  include/uapi/linux/prctl.h | 1
-rw-r--r--  include/uapi/linux/sctp.h | 3
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 5
-rw-r--r--  include/xen/balloon.h | 5
-rw-r--r--  include/xen/xen-ops.h | 12
77 files changed, 601 insertions, 247 deletions
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
index 89f3b03b1445..e3667c9a33a5 100644
--- a/include/asm-generic/4level-fixup.h
+++ b/include/asm-generic/4level-fixup.h
@@ -3,7 +3,7 @@
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 9c2e0708eb82..73474bb52344 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -3,7 +3,7 @@
#define _5LEVEL_FIXUP_H
#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 0c34215263b8..1d6dd38c0e5e 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -5,7 +5,7 @@
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a pgd gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 1a29b2a0282b..04cb913797bc 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -4,7 +4,7 @@
#ifndef __ASSEMBLY__
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
typedef struct { pgd_t pgd; } p4d_t;
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index f35f6e8149e4..b85b8271a73d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -8,7 +8,7 @@
struct mm_struct;
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Having the pmd type consist of a pud gets the size right, and allows
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index e950b9c50f34..9bef475db6fe 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -9,7 +9,7 @@
#else
#include <asm-generic/pgtable-nop4d.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a p4d gets the size right, and allows
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 5657a20e0c59..359fb935ded6 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
#endif
#endif
+/*
+ * On some architectures it depends on the mm if the p4d/pud or pmd
+ * layer of the page table hierarchy is folded or not.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
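For context, the reason the folding macros above are changed to expand to 1 rather than to nothing is the __is_defined() helper used by the new mm_*_folded() defaults. A minimal userspace sketch, assuming it mirrors the __is_defined() machinery from include/linux/kconfig.h (the demo program itself is illustrative, not part of this patch):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1			0,
#define __take_second_arg(__ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x)			___is_defined(x)

#define __PAGETABLE_PUD_FOLDED 1	/* new style: expands to 1 */
/* __PAGETABLE_PMD_FOLDED intentionally left undefined here */

int main(void)
{
	/* Prints 1: the placeholder trick only triggers on a literal 1. */
	printf("pud folded: %d\n", __is_defined(__PAGETABLE_PUD_FOLDED));
	/* Prints 0: an undefined (or defined-to-nothing) macro does not. */
	printf("pmd folded: %d\n", __is_defined(__PAGETABLE_PMD_FOLDED));
	return 0;
}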
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index a83e1f632eb7..f01623aef2f7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index cb31683bbe15..8268811a697e 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+ struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+ unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+ struct sk_buff *skb);
void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 6b92b3395fa9..65a38c4a02a1 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_CEPHX_V2)
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index c0f5db3a9621..2010493e1040 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -143,18 +143,6 @@
#define KASAN_ABI_VERSION 3
#endif
-/*
- * Because __no_sanitize_address conflicts with inlining:
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * we do one or the other.
- */
-#ifdef CONFIG_KASAN
-#define __no_sanitize_address_or_inline \
- __no_sanitize_address __maybe_unused notrace
-#else
-#define __no_sanitize_address_or_inline inline
-#endif
-
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 18c80cfa4fc4..06396c1cf127 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -189,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6b28c1b7310c..f8c400ba1929 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -4,22 +4,26 @@
/*
* The attributes in this file are unconditionally defined and they directly
- * map to compiler attribute(s) -- except those that are optional.
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
*
* Any other "attributes" (i.e. those that depend on a configuration option,
* on a compiler, on an architecture, on plugins, on other attributes...)
* should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
*
* This file is meant to be sorted (by actual attribute name,
* not by #define identifier). Use the __attribute__((__name__)) syntax
* (i.e. with underscores) to avoid future collisions with other macros.
- * If an attribute is optional, state the reason in the comment.
+ * Provide links to the documentation of each supported compiler, if it exists.
*/
/*
- * To check for optional attributes, we use __has_attribute, which is supported
- * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
- * 4.6 <= gcc < 5, we implement __has_attribute by hand.
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
*
* sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
* depending on the compiler used to build it; however, these attributes have
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3439d7d0249a..4a3f9c09c92d 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -130,6 +130,10 @@ struct ftrace_likely_data {
# define randomized_struct_fields_end
#endif
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 450b28db9533..0dd316a74a29 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,8 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+typedef unsigned long dax_entry_t;
+
struct iomap_ops;
struct dax_device;
struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
-bool dax_lock_mapping_entry(struct page *page);
-void dax_unlock_mapping_entry(struct page *page);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline bool dax_lock_mapping_entry(struct page *page)
+static inline dax_entry_t dax_lock_page(struct page *page)
{
if (IS_DAX(page->mapping->host))
- return true;
- return false;
+ return ~0UL;
+ return 0;
}
-static inline void dax_unlock_mapping_entry(struct page *page)
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
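A hypothetical caller sketch for the reworked locking interface (example_handle_dax_page() is illustrative and not part of this patch): the opaque cookie returned by dax_lock_page() must now be handed back to dax_unlock_page().

#include <linux/dax.h>
#include <linux/errno.h>
#include <linux/mm_types.h>

static int example_handle_dax_page(struct page *page)
{
	dax_entry_t cookie;

	cookie = dax_lock_page(page);
	if (!cookie)
		return -EBUSY;	/* mapping entry could not be locked */

	/* ... inspect or poison the page while the entry is held ... */

	dax_unlock_page(page, cookie);
	return 0;
}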
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index bd73e7a91410..9e66bfe369aa 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,7 +5,7 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
-#define DIRECT_MAPPING_ERROR 0
+#define DIRECT_MAPPING_ERROR (~(dma_addr_t)0)
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 845174e113ce..100ce4a4aff6 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1167,6 +1167,8 @@ static inline bool efi_enabled(int feature)
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
extern bool efi_is_table_address(unsigned long phys_addr);
+
+extern int efi_apply_persistent_mem_reservations(void);
#else
static inline bool efi_enabled(int feature)
{
@@ -1185,6 +1187,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
{
return false;
}
+
+static inline int efi_apply_persistent_mem_reservations(void)
+{
+ return 0;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index de629b706d1d..795ff0b869bb 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -449,6 +449,13 @@ struct sock_reuseport;
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
@@ -866,6 +873,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_free(struct bpf_prog *fp);
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed);
+
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 34cf0fdd7dc7..610815e3f1aa 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
int n_pages)
{
- atomic_sub(n_pages, &op->n_pages);
- if (atomic_read(&op->n_pages) <= 0)
+ if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
fscache_op_complete(&op->op, false);
}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a397907e8d72..dd16e8218db3 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -777,8 +777,8 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 76f8db0b0e71..0705164f928c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node);
+ int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
+ alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
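A hypothetical THP-fault sketch for the reworked allocator entry points (example_alloc_thp() and the specific GFP_TRANSHUGE/HPAGE_PMD_ORDER choices are illustrative, not taken from this patch):

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

static struct page *example_alloc_thp(struct vm_area_struct *vma,
				      unsigned long addr)
{
	/* On NUMA builds this expands to alloc_pages_vma(GFP_TRANSHUGE,
	 * HPAGE_PMD_ORDER, vma, addr, numa_node_id(), true).
	 */
	return alloc_hugepage_vma(GFP_TRANSHUGE, vma, addr, HPAGE_PMD_ORDER);
}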
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 331dc377c275..dc12f5c4b076 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
* @flag: Synchronous or asynchronous read
+* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
* Returns data upto 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
- enum sensor_hub_read_flags flag
+ enum sensor_hub_read_flags flag,
+ bool is_signed
);
/**
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2827b87590d8..a355d61940f2 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -722,8 +722,8 @@ struct hid_usage_id {
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return negative on error, any other value will
+ * pass the event on to .event() typically return 0 for success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
@@ -1139,34 +1139,6 @@ static inline u32 hid_report_len(struct hid_report *report)
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
-
-/**
- * struct hid_scroll_counter - Utility class for processing high-resolution
- * scroll events.
- * @dev: the input device for which events should be reported.
- * @microns_per_hi_res_unit: the amount moved by the user's finger for each
- * high-resolution unit reported by the mouse, in
- * microns.
- * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
- * multiple of its lower resolution. For example, if
- * moving the wheel by one "notch" would result in a
- * value of 1 in low-resolution mode but 8 in
- * high-resolution, the multiplier is 8.
- * @remainder: counts the number of high-resolution units moved since the last
- * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
- * only be used by class methods.
- */
-struct hid_scroll_counter {
- struct input_dev *dev;
- int microns_per_hi_res_unit;
- int resolution_multiplier;
-
- int remainder;
-};
-
-void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
- int hi_res_value);
-
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b3e24368930a..14131b6fae68 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -905,6 +905,13 @@ struct vmbus_channel {
bool probe_done;
+ /*
+ * We must offload the handling of the primary/sub channels
+ * from the single-threaded vmbus_connection.work_queue to
+ * two different workqueue, otherwise we can block
+ * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+ */
+ struct work_struct add_channel_work;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519..8336b2f6f834 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index bac395f1d00a..5228c62af416 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fcf9cc9d535f..5411de93a363 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index abe975c87b90..7f53ece2c039 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
- return (u64)nand->memorg.luns_per_target *
- nand->memorg.eraseblocks_per_lun *
- nand->memorg.pages_per_eraseblock;
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
}
/**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
}
/**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index c79e859408e6..fd458389f7d1 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
}
/* fall through */
case NET_DIM_START_MEASURE:
+ net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
+ &dim->start_sample);
dim->state = NET_DIM_MEASURE_IN_PROGRESS;
break;
case NET_DIM_APPLY_NEW_PROFILE:
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index dc1d9ed33b31..857f8abf7b91 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
#endif
}
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
+
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
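A hypothetical driver transmit-path sketch for __netdev_tx_sent_queue() (example_xmit() and the ring/doorbell comments are illustrative; only the helper itself comes from this patch): BQL accounting is deferred while skb->xmit_more is set, and the doorbell is rung only when the helper returns true.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;
	bool kick;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... post the skb to the hardware TX ring here ... */

	kick = __netdev_tx_sent_queue(txq, skb->len, skb->xmit_more);
	if (kick) {
		/* ring the NIC doorbell once for the whole batch */
	}

	return NETDEV_TX_OK;
}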
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 34fc80f3eb90..1d100efe74ec 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -314,7 +314,7 @@ enum {
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8e2bab1e8e90..70877f8de7e9 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index b8d95564bd53..14edb795ab43 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
struct nf_conntrack_tuple tuple;
};
+enum grep_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+struct netns_proto_gre {
+ struct nf_proto_net nf;
+ rwlock_t keymap_lock;
+ struct list_head keymap_list;
+ unsigned int gre_timeouts[GRE_CT_MAX];
+};
+
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 08f9247e9827..9003e29cde46 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index f92a47e18034..a93841bfb9f7 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -17,6 +17,8 @@
#define __DAVINCI_GPIO_PLATFORM_H
struct davinci_gpio_platform_data {
+ bool no_auto_base;
+ u32 base;
u32 ngpio;
u32 gpio_unbanked;
};
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 8e0725aac0aa..7006008d5b72 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
+#include <linux/jump_label.h>
#include <linux/psi_types.h>
#include <linux/sched.h>
@@ -9,7 +10,7 @@ struct css_set;
#ifdef CONFIG_PSI
-extern bool psi_disabled;
+extern struct static_key_false psi_disabled;
void psi_init(void);
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index a15bc4d48752..30fcec375a3a 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -90,7 +90,10 @@ struct pstore_record {
*
* @buf_lock: spinlock to serialize access to @buf
* @buf: preallocated crash dump buffer
- * @bufsize: size of @buf available for crash dump writes
+ * @bufsize: size of @buf available for crash dump bytes (must match
+ * smallest number of bytes available for writing to a
+ * backend entry, since compressed bytes don't take kindly
+ * to being truncated)
*
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
* @flags: bitfield of frontends the backend can accept writes for
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 6c2ffed907f5..de20ede2c5c8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_NOAUDIT 0x04
#define PTRACE_MODE_FSCREDS 0x08
#define PTRACE_MODE_REALCREDS 0x10
-#define PTRACE_MODE_SCHED 0x20
-#define PTRACE_MODE_IBPB 0x40
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
-#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
/**
* ptrace_may_access - check whether the caller is permitted to access
@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
*/
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
-/**
- * ptrace_may_access - check whether the caller is permitted to access
- * a target task.
- * @task: target task
- * @mode: selects type of access and caller credentials
- *
- * Returns true on success, false on denial.
- *
- * Similar to ptrace_may_access(). Only to be called from context switch
- * code. Does not call into audit and the regular LSM hooks due to locking
- * constraints.
- */
-extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
-
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a51c13c2b1a0..291a9bd5b97f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1116,6 +1116,7 @@ struct task_struct {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
+ int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
struct ftrace_ret_stack *ret_stack;
@@ -1453,6 +1454,8 @@ static inline bool is_percpu_thread(void)
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1484,6 +1487,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..59d3736c454c
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern struct static_key_false sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+ return static_branch_likely(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
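A hypothetical consumer sketch for the new header (the function name and messages are illustrative): callers branch on sched_smt_active(), and architecture code implements arch_smt_update() to react when SMT is toggled at runtime.

#include <linux/printk.h>
#include <linux/sched/smt.h>

static void example_update_mitigation(void)
{
	if (sched_smt_active())
		pr_info("SMT active: keeping cross-thread mitigations on\n");
	else
		pr_info("SMT inactive: cross-thread mitigations can be relaxed\n");
}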
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index d37518e89db2..d9d9de3fcf8e 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -224,7 +224,7 @@ struct sfp_eeprom_ext {
*
* See the SFF-8472 specification and related documents for the definition
* of these structure members. This can be obtained from
- * ftp://ftp.seagate.com/sff
+ * https://www.snia.org/technology-communities/sff/specifications
*/
struct sfp_eeprom_id {
struct sfp_eeprom_base base;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0ba687454267..0d1b2c3f127b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
}
}
+static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
+{
+ skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
+ skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+}
+
+static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
+{
+ return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
+{
+ return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
/* Release a reference on a zerocopy structure */
static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
{
@@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
if (uarg->callback == sock_zerocopy_callback) {
uarg->zerocopy = uarg->zerocopy && zerocopy;
sock_zerocopy_put(uarg);
- } else {
+ } else if (!skb_zcopy_is_nouarg(skb)) {
uarg->callback(uarg, zerocopy);
}
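A hypothetical pair of callers for the new tagged-pointer helpers (function names are illustrative; a tun/tap-style transmit path is the kind of user intended): a value with its low bit set is stashed in destructor_arg instead of a real ubuf_info, then recovered on completion.

#include <linux/skbuff.h>

static void example_stash_ctx(struct sk_buff *skb, void *msg_ctx)
{
	/* Marks the skb as zerocopy but records msg_ctx, not a ubuf_info. */
	skb_zcopy_set_nouarg(skb, msg_ctx);
}

static void *example_fetch_ctx(struct sk_buff *skb)
{
	if (skb_zcopy_is_nouarg(skb))
		return skb_zcopy_get_nouarg(skb);	/* strips the tag bit */
	return NULL;
}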
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 43106ffa6788..2ec128060239 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
- buf->bvec = NULL;
buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 8ed77bb4ed86..a9b0280687d5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -196,6 +196,7 @@ struct tcp_sock {
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */
+ u32 compressed_ack_rcv_nxt;
u32 tsoffset; /* timestamp offset */
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 40b0b4c1bf7b..df20f8bdbfa3 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
* tracehook_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just entered the kernel for a system call.
+ * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU have been set,
+ * when the current task has just entered the kernel for a system call.
* Full user register state is available here. Changing the values
* in @regs can affect the system call number and arguments to be tried.
* It is safe to block here, preventing the system call from beginning.
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 538ba1a58f5b..e9de8ad0bad7 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
- int __maybe_unused idx = 0; \
+ int __maybe_unused __idx = 0; \
\
if (!(cond)) \
return; \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* doesn't work from the idle path. \
*/ \
if (rcuidle) { \
- idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+ __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
rcu_irq_enter_irqson(); \
} \
\
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
\
if (rcuidle) { \
rcu_irq_exit_irqson(); \
- srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+ srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
} \
\
preempt_enable_notrace(); \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 414db2bce715..392138fe59b6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
extern void tty_release_struct(struct tty_struct *tty, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern void tty_init_termios(struct tty_struct *tty);
+extern void tty_save_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 4cdd515a4385..5e49e82c4368 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -407,11 +407,11 @@ struct usb_host_bos {
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
- unsigned char type, void **ptr);
+ unsigned char type, void **ptr, size_t min);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \
(ifpoint)->extralen, \
- type, (void **)ptr)
+ type, (void **)ptr, sizeof(**(ptr)))
/* ----------------------------------------------------------------------- */
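A hypothetical caller sketch for the hardened descriptor lookup (the endpoint/companion example is illustrative): the added minimum-size argument, filled in automatically by the usb_get_extra_descriptor() wrapper, rejects descriptors shorter than the structure the caller will dereference.

#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>

static int example_max_burst(struct usb_host_endpoint *ep)
{
	struct usb_ss_ep_comp_descriptor *comp;

	if (usb_get_extra_descriptor(ep, USB_DT_SS_ENDPOINT_COMP, &comp) < 0)
		return -ENODEV;

	/* comp is now guaranteed to span at least sizeof(*comp) bytes */
	return comp->bMaxBurst;
}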
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index b7a99ce56bc9..a1be64c9940f 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -66,4 +66,7 @@
/* Device needs a pause after every control message. */
#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
+/* Hub needs extra delay after resetting its port. */
+#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index d9514928ddac..564892e19f8c 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -289,9 +289,7 @@ struct xarray {
void xa_init_flags(struct xarray *, gfp_t flags);
void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
-void *xa_cmpxchg(struct xarray *, unsigned long index,
- void *old, void *entry, gfp_t);
-int xa_reserve(struct xarray *, unsigned long index, gfp_t);
+void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -344,65 +342,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
}
/**
- * xa_erase() - Erase this entry from the XArray.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument. The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
- *
- * Context: Process context. Takes and releases the xa_lock.
- * Return: The entry which used to be at this index.
- */
-static inline void *xa_erase(struct xarray *xa, unsigned long index)
-{
- return xa_store(xa, index, NULL, 0);
-}
-
-/**
- * xa_insert() - Store this entry in the XArray unless another entry is
- * already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Process context. Takes and releases the xa_lock.
- * May sleep if the @gfp flags permit.
- * Return: 0 if the store succeeded. -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int xa_insert(struct xarray *xa, unsigned long index,
- void *entry, gfp_t gfp)
-{
- void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
- if (!curr)
- return 0;
- if (xa_is_err(curr))
- return xa_err(curr);
- return -EEXIST;
-}
-
-/**
- * xa_release() - Release a reserved entry.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * After calling xa_reserve(), you can call this function to release the
- * reservation. If the entry at @index has been stored to, this function
- * will do nothing.
- */
-static inline void xa_release(struct xarray *xa, unsigned long index)
-{
- xa_cmpxchg(xa, index, NULL, NULL, 0);
-}
-
-/**
* xa_for_each() - Iterate over a portion of an XArray.
* @xa: XArray.
* @entry: Entry retrieved from array.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
void *entry, gfp_t);
int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
+int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -487,6 +427,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
}
/**
+ * xa_store_bh() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_bh(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return curr;
+}
+
+/**
+ * xa_store_irq() - Erase this entry from the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_irq(xa);
+ curr = __xa_store(xa, index, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return curr;
+}
+
+/**
* xa_erase_bh() - Erase this entry from the XArray.
* @xa: XArray.
* @index: Index of entry.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
* the third argument. The XArray does not need to allocate memory, so
* the user does not need to provide GFP flags.
*
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
* disabling softirqs.
* Return: The entry which used to be at this index.
*/
@@ -535,6 +527,61 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
}
/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock(xa);
+
+ return curr;
+}
+
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ * already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded. -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+ void *entry, gfp_t gfp)
+{
+ void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+ if (!curr)
+ return 0;
+ if (xa_is_err(curr))
+ return xa_err(curr);
+ return -EEXIST;
+}
+
+/**
* xa_alloc() - Find somewhere to store this entry in the XArray.
* @xa: XArray.
* @id: Pointer to ID.
@@ -575,7 +622,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
* Updates the @id pointer with the index, then stores the entry at that
* index. A concurrent lookup will not see an uninitialised @id.
*
- * Context: Process context. Takes and releases the xa_lock while
+ * Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
* there is no more space in the XArray.
@@ -621,6 +668,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
return err;
}
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing. If there was nothing there, the entry is marked as reserved.
+ * Loading from a reserved entry returns a %NULL pointer.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Any context. Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_bh() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * A softirq-disabling version of xa_reserve().
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_bh(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_bh(xa);
+
+ return ret;
+}
+
+/**
+ * xa_reserve_irq() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * An interrupt-disabling version of xa_reserve().
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+ int ret;
+
+ xa_lock_irq(xa);
+ ret = __xa_reserve(xa, index, gfp);
+ xa_unlock_irq(xa);
+
+ return ret;
+}
+
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation. If the entry at @index has been stored to, this function
+ * will do nothing.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+ xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
+
/* Everything below here is the Advanced API. Proceed with caution. */
/*
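A hypothetical usage sketch tying the relocated and new entry points together (DEFINE_XARRAY and example_publish() are illustrative): reserve a slot up front, publish into it with xa_store(), and drop the reservation with xa_release() if nothing is ever stored.

#include <linux/xarray.h>

static DEFINE_XARRAY(example_array);

static int example_publish(unsigned long index, void *object)
{
	int err;

	err = xa_reserve(&example_array, index, GFP_KERNEL);
	if (err)
		return err;			/* typically -ENOMEM */

	if (!object) {
		xa_release(&example_array, index);	/* reservation unused */
		return 0;
	}

	/* Overwrites the reserved slot; xa_err() maps error entries to int. */
	return xa_err(xa_store(&example_array, index, object, GFP_KERNEL));
}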
diff --git a/include/media/media-request.h b/include/media/media-request.h
index 0ce75c35131f..bd36d7431698 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -68,7 +68,7 @@ struct media_request {
unsigned int access_count;
struct list_head objects;
unsigned int num_incomplete_objects;
- struct wait_queue_head poll_wait;
+ wait_queue_head_t poll_wait;
spinlock_t lock;
};
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 58c1ecf3d648..5467264771ec 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
/* v4l2 request helper */
-void vb2_m2m_request_queue(struct media_request *req);
+void v4l2_m2m_request_queue(struct media_request *req);
/* v4l2 ioctl helpers */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 14b789a123e7..1656c5978498 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
/* Device notifier */
int register_inet6addr_notifier(struct notifier_block *nb);
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index de587948042a..1adefe42c0a6 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *, struct key *);
int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
enum rxrpc_call_completion *, u32 *);
-u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
ktime_t *);
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index d7578cf49c3a..c9c78c15bce0 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -146,10 +146,12 @@ struct ifacaddr6 {
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
+ struct rcu_head rcu;
};
#define IFA_HOST IPV6_ADDR_LOOPBACK
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f58b384aa6c9..665990c7dec8 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -454,6 +454,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
+ unsigned int hh_alen = 0;
unsigned int seq;
unsigned int hh_len;
@@ -461,16 +462,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
if (likely(hh_len <= HH_DATA_MOD)) {
- /* this is inlined by gcc */
- memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+ hh_alen = HH_DATA_MOD;
+
+ /* skb_push() would proceed silently if we have room for
+ * the unaligned size but not for the aligned size:
+ * check headroom explicitly.
+ */
+ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+ /* this is inlined by gcc */
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+ HH_DATA_MOD);
+ }
} else {
- unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+ hh_alen = HH_DATA_ALIGN(hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ if (likely(skb_headroom(skb) >= hh_alen)) {
+ memcpy(skb->data - hh_alen, hh->hh_data,
+ hh_alen);
+ }
}
} while (read_seqretry(&hh->hh_lock, seq));
- skb_push(skb, hh_len);
+ if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ __skb_push(skb, hh_len);
return dev_queue_xmit(skb);
}
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index cd24be4c4a99..13d55206bb9f 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv4_register_notifier(void);
+int nf_nat_masquerade_ipv4_register_notifier(void);
void nf_nat_masquerade_ipv4_unregister_notifier(void);
#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 0c3b5ebf0bb8..2917bf95c437 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -5,7 +5,7 @@
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv6_register_notifier(void);
+int nf_nat_masquerade_ipv6_register_notifier(void);
void nf_nat_masquerade_ipv6_unregister_notifier(void);
#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index eed04af9b75e..ae7b86f587f2 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 8c2caa370e0f..2abbc15824af 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -608,4 +608,21 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
SCTP_DEFAULT_MINSEGMENT));
}
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+ __u32 pmtu = sctp_dst_mtu(t->dst);
+
+ if (t->pathmtu == pmtu)
+ return true;
+
+ t->pathmtu = pmtu;
+
+ return false;
+}
+
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
#endif /* __net_sctp_h__ */
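sctp_transport_pmtu_check() both tests and refreshes t->pathmtu, so a false return means the dst MTU changed since it was cached. A hedged sketch of the intended call pattern; the wrapper is hypothetical, while sctp_assoc_sync_pmtu() is the existing resync helper:

#include <net/sctp/sctp.h>

static void example_sync_if_pmtu_changed(struct sctp_association *asoc,
					 struct sctp_transport *t)
{
	/* helper updates t->pathmtu and reports whether it changed */
	if (!sctp_transport_pmtu_check(t))
		sctp_assoc_sync_pmtu(asoc);
}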
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a11f93790476..feada358d872 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -2075,6 +2075,8 @@ struct sctp_association {
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+ struct rcu_head rcu;
};
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 2dd37cada7c0..888a833d3b00 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
static inline int snd_interval_single(const struct snd_interval *i)
{
return (i->min == i->max ||
- (i->min + 1 == i->max && i->openmax));
+ (i->min + 1 == i->max && (i->openmin || i->openmax)));
}
static inline int snd_interval_value(const struct snd_interval *i)
{
+ if (i->openmin && !i->openmax)
+ return i->max;
return i->min;
}
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1dab1f4b194..70c10a8f3e90 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1192,7 +1192,7 @@ struct snd_soc_pcm_runtime {
((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
(i)++)
#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
- for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);)
+ for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
/* mixer control */
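The pre-decrement matters because the rollback macro enters with i equal to the number of entries initialised so far; a post-decrement would still index the array after i reaches 0, reading codec_dais[-1] on the final test. A small userspace sketch of the fixed pattern, with hypothetical array contents:

#include <stdio.h>

int main(void)
{
	const char *dais[] = { "codec0", "codec1", "codec2" };
	const char *dai;
	int i = 3;	/* entries initialised before the failure */

	/* pre-decrement: visits indices 2, 1, 0 and stops at -1
	 * without ever dereferencing dais[-1] */
	for (; ((--i) >= 0) && ((dai = dais[i]) != NULL);)
		printf("rolling back %s\n", dai);
	return 0;
}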
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index a9834c37ac40..c0e7d24ca256 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
- strlcpy(__entry->type, type, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
__entry->numerator = numerator;
__entry->denominator = denominator;
@@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled,
TP_fast_assign(
__entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
- strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->domain, domain, sizeof(__entry->domain));
),
TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 573d5b901fb1..5b50fe4906d2 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -181,6 +181,7 @@ enum rxrpc_timer_trace {
enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_client_tx_end,
rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_check_life,
rxrpc_propose_ack_ping_for_keepalive,
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
@@ -380,6 +381,7 @@ enum rxrpc_tx_point {
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
+ EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f07b270d4fc4..9a4bdfadab07 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
+ unsigned int state;
+
#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
if (preempt)
return TASK_REPORT_MAX;
- return 1 << task_state_index(p);
+ /*
+ * task_state_index() uses fls() and returns a value in the 0-8 range.
+ * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using
+ * it as a left-shift count to get the correct task->state
+ * mapping.
+ */
+ state = task_state_index(p);
+
+ return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
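Assuming task_state_index() is essentially fls() over the TASK_REPORT bits, TASK_INTERRUPTIBLE (0x1) yields index 1, so the old "1 << index" reported 0x2 (TASK_UNINTERRUPTIBLE) instead of 0x1. A userspace sketch of the corrected mapping:

#include <stdio.h>

static int fls_(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* mirrors kernel fls() */
}

static long report(unsigned int index)
{
	/* fixed mapping from the patch */
	return index ? (1 << (index - 1)) : index;
}

int main(void)
{
	unsigned int idx = fls_(0x1);	/* TASK_INTERRUPTIBLE */

	/* prints old=0x2 new=0x1 */
	printf("old=%#x new=%#lx\n", 1 << idx, report(idx));
	return 0;
}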
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 538546edbfbd..c7f3321fbe43 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -760,8 +760,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek
#define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_newfstatat __NR3264_fstatat
#define __NR_fstat __NR3264_fstat
+#endif
#define __NR_mmap __NR3264_mmap
#define __NR_fadvise64 __NR3264_fadvise64
#ifdef __NR3264_stat
@@ -776,8 +778,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek
#define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_fstatat64 __NR3264_fstatat
#define __NR_fstat64 __NR3264_fstat
+#endif
#define __NR_mmap2 __NR3264_mmap
#define __NR_fadvise64_64 __NR3264_fadvise64
#ifdef __NR3264_stat
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 852dc17ab47a..72c453a8bf50 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -2170,7 +2170,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
@@ -2187,12 +2187,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx* will
+ * be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2201,8 +2203,10 @@ union bpf_attr {
* **CONFIG_NET** configuration option.
* Return
* Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * For sockets with reuseport option, the *struct bpf_sock*
+ * result is from reuse->socks[] using the hash of the tuple.
*
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for UDP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
@@ -2219,12 +2223,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx* will
+ * be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2233,6 +2239,8 @@ union bpf_attr {
* **CONFIG_NET** configuration option.
* Return
* Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * For sockets with reuseport option, the *struct bpf_sock*
+ * result is from reuse->socks[] using the hash of the tuple.
*
* int bpf_sk_release(struct bpf_sock *sk)
* Description
@@ -2405,6 +2413,9 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS (-1L)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6_INLINE
};
+#define __bpf_md_ptr(type, name) \
+union { \
+ type name; \
+ __u64 :64; \
+} __attribute__((aligned(8)))
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -2456,7 +2473,7 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
- struct bpf_flow_keys *flow_keys;
+ __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
};
struct bpf_tunnel_key {
@@ -2572,8 +2589,8 @@ enum sk_action {
* be added to the end of this structure
*/
struct sk_msg_md {
- void *data;
- void *data_end;
+ __bpf_md_ptr(void *, data);
+ __bpf_md_ptr(void *, data_end);
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -2589,8 +2606,9 @@ struct sk_reuseport_md {
* Start of directly accessible data. It begins from
* the tcp/udp header.
*/
- void *data;
- void *data_end; /* End of directly accessible data */
+ __bpf_md_ptr(void *, data);
+ /* End of directly accessible data */
+ __bpf_md_ptr(void *, data_end);
/*
* Total length of packet (starting from the tcp/udp header).
* Note that the directly accessible bytes (data_end - data)
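The widened u64 netns argument together with BPF_F_CURRENT_NETNS lets a program select the ctx's own namespace explicitly. A hedged sketch of a classifier using it; the includes, section name and libbpf-style build are assumptions, and the tuple would be filled from the packet headers in real code:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int sk_lookup_example(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};	/* fill from the packet */
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);	/* acquired references must be released */

	return 0;
}

char _license[] SEC("license") = "GPL";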
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 6d180cc60a5d..3eb5a4c3d60a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -716,7 +716,6 @@
* the situation described above.
*/
#define REL_RESERVED 0x0a
-#define REL_WHEEL_HI_RES 0x0b
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
@@ -753,15 +752,6 @@
#define ABS_MISC 0x28
-/*
- * 0x2e is reserved and should not be used in input drivers.
- * It was used by HID as ABS_MISC+6 and userspace needs to detect if
- * the next ABS_* event is correct or is just ABS_MISC + n.
- * We define here ABS_RESERVED so userspace can rely on it and detect
- * the situation described above.
- */
-#define ABS_RESERVED 0x2e
-
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index f5ff8a76e208..b01eb502d49c 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
};
struct kfd_ioctl_get_queue_wave_state_args {
- uint64_t ctl_stack_address; /* to KFD */
- uint32_t ctl_stack_used_size; /* from KFD */
- uint32_t save_area_used_size; /* from KFD */
- uint32_t queue_id; /* to KFD */
- uint32_t pad;
+ __u64 ctl_stack_address; /* to KFD */
+ __u32 ctl_stack_used_size; /* from KFD */
+ __u32 save_area_used_size; /* from KFD */
+ __u32 queue_id; /* to KFD */
+ __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
/* hw exception data */
struct kfd_hsa_hw_exception_data {
- uint32_t reset_type;
- uint32_t reset_cause;
- uint32_t memory_lost;
- uint32_t gpu_id;
+ __u32 reset_type;
+ __u32 reset_cause;
+ __u32 memory_lost;
+ __u32 gpu_id;
};
/* Event data */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 579974b0bf0d..7de4f1bdaf06 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
- NFTA_NG_SET_NAME,
- NFTA_NG_SET_ID,
+ NFTA_NG_SET_NAME, /* deprecated */
+ NFTA_NG_SET_ID, /* deprecated */
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 156ccd089df1..1610fdbab98d 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -11,6 +11,10 @@
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
#define NF_BR_PRE_ROUTING 0
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b17201edfa09 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
#define PR_SET_SPECULATION_CTRL 53
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
+# define PR_SPEC_INDIRECT_BRANCH 1
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
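PR_SPEC_INDIRECT_BRANCH extends the speculation-control prctl to per-task indirect branch speculation. A hedged userspace sketch of disabling it for the calling task; the constants come from <linux/prctl.h>, and kernels built without this control return an error:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long state;

	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");

	/* query the resulting state bitmask */
	state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
	printf("indirect branch speculation state: %#lx\n", state);
	return 0;
}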
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 34dd3d497f2c..c81feb373d3e 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
+ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 51b095898f4b..998983a6e6b7 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -50,6 +50,8 @@
#ifndef __LINUX_V4L2_CONTROLS_H
#define __LINUX_V4L2_CONTROLS_H
+#include <linux/types.h>
+
/* Control classes */
#define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */
#define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */
@@ -1110,6 +1112,7 @@ struct v4l2_mpeg2_sequence {
__u8 profile_and_level_indication;
__u8 progressive_sequence;
__u8 chroma_format;
+ __u8 pad;
};
struct v4l2_mpeg2_picture {
@@ -1128,6 +1131,7 @@ struct v4l2_mpeg2_picture {
__u8 alternate_scan;
__u8 repeat_first_field;
__u8 progressive_frame;
+ __u8 pad;
};
struct v4l2_ctrl_mpeg2_slice_params {
@@ -1142,6 +1146,7 @@ struct v4l2_ctrl_mpeg2_slice_params {
__u8 backward_ref_index;
__u8 forward_ref_index;
+ __u8 pad;
};
struct v4l2_ctrl_mpeg2_quantization {
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 61f410fd74e4..4914b93a23f2 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
{
}
#endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 18803ff76e27..4969817124a8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
- xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
+#endif
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
+#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,