Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h               1
-rw-r--r--  include/drm/ttm/ttm_bo_api.h          6
-rw-r--r--  include/linux/acpi.h                  9
-rw-r--r--  include/linux/amba/bus.h             11
-rw-r--r--  include/linux/bpf.h                  33
-rw-r--r--  include/linux/device-mapper.h        15
-rw-r--r--  include/linux/efi.h                   6
-rw-r--r--  include/linux/host1x.h                9
-rw-r--r--  include/linux/hugetlb_cgroup.h       15
-rw-r--r--  include/linux/if_macvlan.h            3
-rw-r--r--  include/linux/io_uring.h             25
-rw-r--r--  include/linux/memblock.h              4
-rw-r--r--  include/linux/mlx5/qp.h               7
-rw-r--r--  include/linux/mm.h                   18
-rw-r--r--  include/linux/mmu_notifier.h         10
-rw-r--r--  include/linux/module.h                3
-rw-r--r--  include/linux/mutex.h                 2
-rw-r--r--  include/linux/netdevice.h             2
-rw-r--r--  include/linux/netfilter/x_tables.h    7
-rw-r--r--  include/linux/pagemap.h               2
-rw-r--r--  include/linux/restart_block.h         1
-rw-r--r--  include/linux/skbuff.h                1
-rw-r--r--  include/linux/sunrpc/svc_rdma.h       1
-rw-r--r--  include/linux/thread_info.h          13
-rw-r--r--  include/linux/usb_usual.h             2
-rw-r--r--  include/linux/usermode_driver.h       1
-rw-r--r--  include/linux/vdpa.h                 10
-rw-r--r--  include/linux/virtio.h                2
-rw-r--r--  include/linux/ww_mutex.h              5
-rw-r--r--  include/linux/xarray.h                4
-rw-r--r--  include/net/dst.h                    11
-rw-r--r--  include/net/inet_connection_sock.h    2
-rw-r--r--  include/net/netfilter/nf_tables.h     3
-rw-r--r--  include/net/nexthop.h                24
-rw-r--r--  include/net/red.h                    12
-rw-r--r--  include/net/rtnetlink.h               2
-rw-r--r--  include/net/sock.h                    2
-rw-r--r--  include/trace/events/workqueue.h      6
-rw-r--r--  include/uapi/linux/bpf.h             16
-rw-r--r--  include/uapi/linux/fuse.h             3
-rw-r--r--  include/uapi/linux/psample.h          5
41 files changed, 223 insertions, 91 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 02a716a0af5d..f28b097c658f 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -233,6 +233,7 @@ struct acpi_pnp_type {
struct acpi_device_pnp {
acpi_bus_id bus_id; /* Object name */
+ int instance_no; /* Instance number of this object */
struct acpi_pnp_type type; /* ID type */
acpi_bus_address bus_address; /* _ADR */
char *unique_id; /* _UID */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e17be324d95f..b8ca13664fa2 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -612,9 +612,11 @@ static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
dma_resv_assert_held(bo->base.resv);
- WARN_ON_ONCE(!bo->pin_count);
WARN_ON_ONCE(!kref_read(&bo->kref));
- --bo->pin_count;
+ if (bo->pin_count)
+ --bo->pin_count;
+ else
+ WARN_ON_ONCE(true);
}
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
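The unpin path above now warns on imbalance instead of letting pin_count
underflow. A minimal usage sketch of the expected pairing (illustrative,
assuming bo is a valid buffer object whose reservation the caller holds):

	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_pin(bo);		/* pin_count: 0 -> 1 */
	/* ... bo cannot be evicted while pinned ... */
	ttm_bo_unpin(bo);	/* pin_count: 1 -> 0 */
	ttm_bo_unpin(bo);	/* imbalance: now WARNs instead of wrapping */
	dma_resv_unlock(bo->base.resv);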
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index fcdaab723916..3bdcfc4401b7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
void acpi_boot_table_init (void);
int acpi_mps_check (void);
int acpi_numa_init (void);
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
@@ -814,9 +818,12 @@ static inline int acpi_boot_init(void)
return 0;
}
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
static inline void acpi_boot_table_init(void)
{
- return;
}
static inline int acpi_mps_check(void)
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 6cc93ab5b809..c68d87b87283 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -105,8 +105,19 @@ extern struct bus_type amba_bustype;
#define amba_get_drvdata(d) dev_get_drvdata(&d->dev)
#define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p)
+#ifdef CONFIG_ARM_AMBA
int amba_driver_register(struct amba_driver *);
void amba_driver_unregister(struct amba_driver *);
+#else
+static inline int amba_driver_register(struct amba_driver *drv)
+{
+ return -EINVAL;
+}
+static inline void amba_driver_unregister(struct amba_driver *drv)
+{
+}
+#endif
+
struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t);
void amba_device_put(struct amba_device *);
int amba_device_add(struct amba_device *, struct resource *);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cccaef1088ea..3625f019767d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -21,6 +21,7 @@
#include <linux/capability.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
+#include <linux/percpu-refcount.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -556,7 +557,8 @@ struct bpf_tramp_progs {
 * fentry = a set of programs to run before calling original function
 * fexit = a set of programs to run after original function
*/
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+struct bpf_tramp_image;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_progs *tprogs,
void *orig_call);
@@ -565,6 +567,8 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
struct bpf_ksym {
unsigned long start;
@@ -583,6 +587,18 @@ enum bpf_tramp_prog_type {
BPF_TRAMP_REPLACE, /* more than MAX */
};
+struct bpf_tramp_image {
+ void *image;
+ struct bpf_ksym ksym;
+ struct percpu_ref pcref;
+ void *ip_after_call;
+ void *ip_epilogue;
+ union {
+ struct rcu_head rcu;
+ struct work_struct work;
+ };
+};
+
struct bpf_trampoline {
/* hlist for trampoline_table */
struct hlist_node hlist;
@@ -605,9 +621,8 @@ struct bpf_trampoline {
/* Number of attached programs. A counter per kind. */
int progs_cnt[BPF_TRAMP_MAX];
/* Executable image of trampoline */
- void *image;
+ struct bpf_tramp_image *cur_image;
u64 selector;
- struct bpf_ksym ksym;
};
struct bpf_attach_target_info {
@@ -691,6 +706,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
+int bpf_jit_charge_modmem(u32 pages);
+void bpf_jit_uncharge_modmem(u32 pages);
#else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
struct bpf_trampoline *tr)
@@ -787,7 +804,6 @@ struct bpf_prog_aux {
bool func_proto_unreliable;
bool sleepable;
bool tail_call_reachable;
- enum bpf_tramp_prog_type trampoline_prog_type;
struct hlist_node tramp_hlist;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
@@ -1093,7 +1109,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
_ret; \
})
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
({ \
struct bpf_prog_array_item *_item; \
struct bpf_prog *_prog; \
@@ -1106,7 +1122,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
goto _out; \
_item = &_array->items[0]; \
while ((_prog = READ_ONCE(_item->prog))) { \
- bpf_cgroup_storage_set(_item->cgroup_storage); \
+ if (set_cg_storage) \
+ bpf_cgroup_storage_set(_item->cgroup_storage); \
_ret &= func(_prog, ctx); \
_item++; \
} \
@@ -1153,10 +1170,10 @@ _out: \
})
#define BPF_PROG_RUN_ARRAY(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \
- __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+ __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 7f4ac87c0b32..5c641f930caf 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -253,7 +253,11 @@ struct target_type {
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
/*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ * block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ * devices with different zoned models.
*/
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM 0x00000040
@@ -275,6 +279,15 @@ struct target_type {
#define DM_TARGET_PASSES_CRYPTO 0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+ ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL 0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 8710f5710c1d..6b5d36babfcc 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -72,8 +72,10 @@ typedef void *efi_handle_t;
*/
typedef guid_t efi_guid_t __aligned(__alignof__(u32));
-#define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
- GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)
+#define EFI_GUID(a, b, c, d...) (efi_guid_t){ { \
+ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, d } }
/*
* Generic EFI table header
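The rewritten EFI_GUID() expands to a byte-wise initializer that stores the
first three fields little-endian, matching the EFI ABI representation. A
sketch with a made-up GUID value:

	/* Hypothetical GUID 12345678-1234-5678-9abc-def012345678: */
	static const efi_guid_t example_guid =
		EFI_GUID(0x12345678, 0x1234, 0x5678,
			 0x9a, 0xbc, 0xde, 0xf0, 0x12, 0x34, 0x56, 0x78);
	/* example_guid.b[0] is 0x78: fields a, b and c are laid out
	 * little-endian, while the trailing d bytes keep their order. */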
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index ce59a6a6a008..9eb77c87a83b 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);
-int host1x_client_register(struct host1x_client *client);
+int __host1x_client_register(struct host1x_client *client,
+ struct lock_class_key *key);
+#define host1x_client_register(class) \
+ ({ \
+ static struct lock_class_key __key; \
+ __host1x_client_register(class, &__key); \
+ })
+
int host1x_client_unregister(struct host1x_client *client);
int host1x_client_suspend(struct host1x_client *client);
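host1x_client_register() is now a wrapper that gives every registration call
site its own static lock_class_key, so lockdep can distinguish the client
locks. A hedged sketch of a caller (ops and device wiring elided, names
hypothetical):

	static int my_probe(struct platform_device *pdev)
	{
		struct host1x_client *client;

		client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
		if (!client)
			return -ENOMEM;
		/* ... set client->ops, client->dev, ... */

		/* Expands to __host1x_client_register(client, &__key),
		 * where __key is static and unique to this call site. */
		return host1x_client_register(client);
	}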
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 2ad6e92f124a..0bff345c4bc6 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+ css_put(&h_cg->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg,
- unsigned long nr_pages);
+ unsigned long nr_pages,
+ bool region_del);
extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
#else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg,
- unsigned long nr_pages)
+ unsigned long nr_pages,
+ bool region_del)
{
}
@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 96556c64c95d..10c94a3936ca 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -43,13 +43,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
if (likely(success)) {
struct vlan_pcpu_stats *pcpu_stats;
- pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+ pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
u64_stats_update_begin(&pcpu_stats->syncp);
pcpu_stats->rx_packets++;
pcpu_stats->rx_bytes += len;
if (multicast)
pcpu_stats->rx_multicast++;
u64_stats_update_end(&pcpu_stats->syncp);
+ put_cpu_ptr(vlan->pcpu_stats);
} else {
this_cpu_inc(vlan->pcpu_stats->rx_errors);
}
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 9761a0ec9f95..79cde9906be0 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -5,31 +5,6 @@
#include <linux/sched.h>
#include <linux/xarray.h>
-struct io_wq_work_node {
- struct io_wq_work_node *next;
-};
-
-struct io_wq_work_list {
- struct io_wq_work_node *first;
- struct io_wq_work_node *last;
-};
-
-struct io_uring_task {
- /* submission side */
- struct xarray xa;
- struct wait_queue_head wait;
- void *last;
- void *io_wq;
- struct percpu_counter inflight;
- atomic_t in_idle;
- bool sqpoll;
-
- spinlock_t task_lock;
- struct io_wq_work_list task_list;
- unsigned long task_state;
- struct callback_head task_work;
-};
-
#if defined(CONFIG_IO_URING)
struct sock *io_uring_get_socket(struct file *file);
void __io_uring_task_cancel(void);
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index d13e3cd938b4..5984fff3f175 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
/*
* Set the allocation direction to bottom-up or top-down.
*/
-static inline __init void memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
memblock.bottom_up = enable;
}
@@ -470,7 +470,7 @@ static inline __init void memblock_set_bottom_up(bool enable)
 * if this is true, memblock will allocate memory
 * in the bottom-up direction.
*/
-static inline __init bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index d75ef8aa8fac..b7deb790f257 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -547,4 +547,11 @@ static inline const char *mlx5_qp_state_str(int state)
}
}
+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+ return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+ MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
#endif /* MLX5_QP_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 64a71bf20536..8ba434287387 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1461,16 +1461,28 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to
+ * initialize the tags of all pages to the native kernel tag value 0xff, as
+ * the default value 0x00 then maps to 0xff.
+ */
+
static inline u8 page_kasan_tag(const struct page *page)
{
- if (kasan_enabled())
- return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
- return 0xff;
+ u8 tag = 0xff;
+
+ if (kasan_enabled()) {
+ tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag ^= 0xff;
+ }
+
+ return tag;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
if (kasan_enabled()) {
+ tag ^= 0xff;
page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}
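With the xor applied symmetrically in both helpers, stored tags round-trip
and a zeroed tag field reads back as the native tag. A sketch of the
invariant (illustrative, KASAN assumed enabled):

	page_kasan_tag_set(page, 0xab);		/* stores 0xab ^ 0xff = 0x54 */
	WARN_ON(page_kasan_tag(page) != 0xab);	/* reads 0x54 ^ 0xff = 0xab */
	/* A freshly zeroed tag field (0x00) reads back as 0xff, the native
	 * kernel tag, so pages need no explicit tag initialization. */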
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b8200782dede..1a6a9eb6d3fa 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
* the last refcount is dropped.
*
* If blockable argument is set to false then the callback cannot
- * sleep and has to return with -EAGAIN. 0 should be returned
- * otherwise. Please note that if invalidate_range_start approves
- * a non-blocking behavior then the same applies to
- * invalidate_range_end.
- *
+ * sleep and has to return with -EAGAIN if sleeping would be required.
+ * 0 should be returned otherwise. Please note that notifiers that can
+ * fail invalidate_range_start are not allowed to implement
+ * invalidate_range_end, as there is no mechanism for informing the
+ * notifier that its start failed.
*/
int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
diff --git a/include/linux/module.h b/include/linux/module.h
index 59f094fa6f74..da4b6fbe8ebe 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -30,9 +30,6 @@
#include <linux/percpu.h>
#include <asm/module.h>
-/* Not Yet Implemented */
-#define MODULE_SUPPORTED_DEVICE(name)
-
#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
struct modversion_info {
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 0cd631a19727..515cff77a4f4 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -185,7 +185,7 @@ extern void mutex_lock_io(struct mutex *lock);
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
#endif
/*
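The one-line fix above matters because the non-lockdep fallback silently
dropped the IO-wait accounting. A sketch of the intended behavior
(hypothetical lock, standard subclass constant):

	/* Now still routes through mutex_lock_io(), so the task is marked
	 * as waiting on IO (io_schedule_prepare/finish) even without
	 * CONFIG_DEBUG_LOCK_ALLOC: */
	mutex_lock_io_nested(&my_dev->lock, SINGLE_DEPTH_NESTING);
	/* ... block on IO ... */
	mutex_unlock(&my_dev->lock);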
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5b67ea89d5f2..87a5d186faff 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -360,6 +360,7 @@ enum {
NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
+ NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
};
enum {
@@ -372,6 +373,7 @@ enum {
NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
+ NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
};
enum gro_result {
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 8ebb64193757..8ec48466410a 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -227,7 +227,7 @@ struct xt_table {
unsigned int valid_hooks;
/* Man behind the curtain... */
- struct xt_table_info __rcu *private;
+ struct xt_table_info *private;
/* Set this to THIS_MODULE if you are a module, otherwise NULL */
struct module *me;
@@ -376,7 +376,7 @@ static inline unsigned int xt_write_recseq_begin(void)
* since addend is most likely 1
*/
__this_cpu_add(xt_recseq.sequence, addend);
- smp_wmb();
+ smp_mb();
return addend;
}
@@ -448,9 +448,6 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
-struct xt_table_info
-*xt_table_get_private_protected(const struct xt_table *table);
-
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 20225b067583..8c9947fd62f3 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -559,7 +559,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
return pgoff;
}
-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
struct page *page;
int bit_nr;
@@ -683,6 +682,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
+int wait_on_page_writeback_killable(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index bba2920e9c05..980a65594412 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -23,6 +23,7 @@ enum timespec_type {
* System call restart block.
*/
struct restart_block {
+ unsigned long arch_data;
long (*fn)(struct restart_block *);
union {
/* For futex_wait and futex_wait_requeue_pi */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6d0a33d1c0db..f2c9ee71cb2c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -285,6 +285,7 @@ struct nf_bridge_info {
struct tc_skb_ext {
__u32 chain;
__u16 mru;
+ bool post_ct;
};
#endif
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 7c693b31965e..1e76ed688044 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -104,7 +104,6 @@ struct svcxprt_rdma {
wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */
unsigned long sc_flags;
- u32 sc_pending_recvs;
struct list_head sc_read_complete_q;
struct work_struct sc_work;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 9b2158c69275..157762db9d4b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
+#include <linux/errno.h>
#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
@@ -59,6 +60,18 @@ enum syscall_work_bit {
#ifdef __KERNEL__
+#ifndef arch_set_restart_data
+#define arch_set_restart_data(restart) do { } while (0)
+#endif
+
+static inline long set_restart_fn(struct restart_block *restart,
+ long (*fn)(struct restart_block *))
+{
+ restart->fn = fn;
+ arch_set_restart_data(restart);
+ return -ERESTART_RESTARTBLOCK;
+}
+
#ifndef THREAD_ALIGN
#define THREAD_ALIGN THREAD_SIZE
#endif
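Callers are expected to use set_restart_fn() instead of assigning
restart->fn directly, so arch_set_restart_data() is never skipped on
architectures that stamp extra state. A hedged sketch of the calling
pattern (function and payload hypothetical):

	static long my_restart(struct restart_block *restart);	/* hypothetical */

	long do_my_wait(u64 expires)
	{
		struct restart_block *restart = &current->restart_block;

		/* interrupted by a signal; arrange for restart */
		restart->nanosleep.expires = expires;	/* example payload */
		/* sets ->fn, stamps arch data, returns -ERESTART_RESTARTBLOCK */
		return set_restart_fn(restart, my_restart);
	}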
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 6b03fdd69d27..712363c7a2e8 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -86,6 +86,8 @@
/* lies about caching, so always sync */ \
US_FLAG(NO_SAME, 0x40000000) \
/* Cannot handle WRITE_SAME */ \
+ US_FLAG(SENSE_AFTER_SYNC, 0x80000000) \
+ /* Do REQUEST_SENSE after SYNCHRONIZE_CACHE */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
index 073a9e0ec07d..ad970416260d 100644
--- a/include/linux/usermode_driver.h
+++ b/include/linux/usermode_driver.h
@@ -14,5 +14,6 @@ struct umd_info {
int umd_load_blob(struct umd_info *info, const void *data, size_t len);
int umd_unload_blob(struct umd_info *info);
int fork_usermode_driver(struct umd_info *info);
+void umd_cleanup_helper(struct umd_info *info);
#endif /* __LINUX_USERMODE_DRIVER_H__ */
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 4ab5494503a8..15fa085fab05 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -250,20 +250,20 @@ struct vdpa_config_ops {
struct vdpa_device *__vdpa_alloc_device(struct device *parent,
const struct vdpa_config_ops *config,
- int nvqs, size_t size, const char *name);
+ size_t size, const char *name);
-#define vdpa_alloc_device(dev_struct, member, parent, config, nvqs, name) \
+#define vdpa_alloc_device(dev_struct, member, parent, config, name) \
container_of(__vdpa_alloc_device( \
- parent, config, nvqs, \
+ parent, config, \
sizeof(dev_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
dev_struct, member)), name), \
dev_struct, member)
-int vdpa_register_device(struct vdpa_device *vdev);
+int vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void vdpa_unregister_device(struct vdpa_device *vdev);
-int _vdpa_register_device(struct vdpa_device *vdev);
+int _vdpa_register_device(struct vdpa_device *vdev, int nvqs);
void _vdpa_unregister_device(struct vdpa_device *vdev);
/**
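The number of virtqueues moves from allocation time to registration time. A
sketch of the adjusted calling sequence in a parent driver (struct, ops and
name are hypothetical):

	struct my_vdpa {
		struct vdpa_device vdpa;  /* offset 0, enforced by the macro */
		/* ... */
	};

	struct my_vdpa *dev = vdpa_alloc_device(struct my_vdpa, vdpa,
						parent, &my_config_ops,
						"my_vdpa");
	/* ... set up virtqueues ... */
	err = vdpa_register_device(&dev->vdpa, 2);	/* nvqs passed here now */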
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 55ea329fe72a..b1894e0323fa 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -132,8 +132,6 @@ bool is_virtio_device(struct device *dev);
void virtio_break_device(struct virtio_device *dev);
void virtio_config_changed(struct virtio_device *dev);
-void virtio_config_disable(struct virtio_device *dev);
-void virtio_config_enable(struct virtio_device *dev);
int virtio_finalize_features(struct virtio_device *dev);
#ifdef CONFIG_PM_SLEEP
int virtio_device_freeze(struct virtio_device *dev);
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 850424e5d030..6ecf2a0220db 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
mutex_release(&ctx->dep_map, _THIS_IP_);
-
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
/*
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 92c0160b3352..a91e3d90df8a 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -229,9 +229,10 @@ static inline int xa_err(void *entry)
*
* This structure is used either directly or via the XA_LIMIT() macro
* to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
* * xa_limit_32b - [0 - UINT_MAX]
* * xa_limit_31b - [0 - INT_MAX]
+ * * xa_limit_16b - [0 - USHRT_MAX]
*/
struct xa_limit {
u32 max;
@@ -242,6 +243,7 @@ struct xa_limit {
#define xa_limit_32b XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b XA_LIMIT(0, USHRT_MAX)
typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0 ((__force xa_mark_t)0U)
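The new limit plugs into the allocating API like the existing ones. A brief
sketch (hypothetical array, defined with allocation support):

	static DEFINE_XARRAY_ALLOC(my_ids);

	int assign_id(void *entry)
	{
		u32 id;
		int err;

		/* allocated IDs are constrained to [0, USHRT_MAX] */
		err = xa_alloc(&my_ids, &id, entry, xa_limit_16b, GFP_KERNEL);
		return err ? err : (int)id;
	}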
diff --git a/include/net/dst.h b/include/net/dst.h
index 26f134ad3a25..75b1e734e9c2 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -550,4 +550,15 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu, bool confirm_neigh);
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
+
#endif /* _NET_DST_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 10a625760de9..3c8c59471bc1 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
}
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index fdec57d862b7..5aaced6bf13e 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1536,6 +1536,7 @@ struct nft_trans_flowtable {
struct nft_flowtable *flowtable;
bool update;
struct list_head hook_list;
+ u32 flags;
};
#define nft_trans_flowtable(trans) \
@@ -1544,6 +1545,8 @@ struct nft_trans_flowtable {
(((struct nft_trans_flowtable *)trans->data)->update)
#define nft_trans_flowtable_hooks(trans) \
(((struct nft_trans_flowtable *)trans->data)->hook_list)
+#define nft_trans_flowtable_flags(trans) \
+ (((struct nft_trans_flowtable *)trans->data)->flags)
int __init nft_chain_filter_init(void);
void nft_chain_filter_fini(void);
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 7bc057aee40b..a10a319d7eb2 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -410,6 +410,7 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
struct netlink_ext_ack *extack);
+/* Caller should either hold rcu_read_lock(), or RTNL. */
static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
{
struct nh_info *nhi;
@@ -430,6 +431,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
return NULL;
}
+/* Variant of nexthop_fib6_nh().
+ * Caller should either hold rcu_read_lock_bh(), or RTNL.
+ */
+static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
+{
+ struct nh_info *nhi;
+
+ if (nh->is_group) {
+ struct nh_group *nh_grp;
+
+ nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
+ nh = nexthop_mpath_select(nh_grp, 0);
+ if (!nh)
+ return NULL;
+ }
+
+ nhi = rcu_dereference_bh_rtnl(nh->nh_info);
+ if (nhi->family == AF_INET6)
+ return &nhi->fib6_nh;
+
+ return NULL;
+}
+
static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
{
struct fib6_nh *fib6_nh;
diff --git a/include/net/red.h b/include/net/red.h
index 932f0d79d60c..0b39eff1d50a 100644
--- a/include/net/red.h
+++ b/include/net/red.h
@@ -168,7 +168,8 @@ static inline void red_set_vars(struct red_vars *v)
v->qcount = -1;
}
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+ u8 Scell_log, u8 *stab)
{
if (fls(qth_min) + Wlog > 32)
return false;
@@ -178,6 +179,13 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_
return false;
if (qth_max < qth_min)
return false;
+ if (stab) {
+ int i;
+
+ for (i = 0; i < RED_STAB_SIZE; i++)
+ if (stab[i] >= 32)
+ return false;
+ }
return true;
}
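Callers now hand the user-supplied Stab table to red_check_params() so
out-of-range shift values are rejected before the qdisc is configured. A
hedged sketch of a qdisc change path (attribute parsing elided, names as in
the RED uapi):

	struct tc_red_qopt *ctl = nla_data(tb[TCA_RED_PARMS]);
	u8 *stab = nla_data(tb[TCA_RED_STAB]);	/* RED_STAB_SIZE bytes */

	if (!red_check_params(ctl->qth_min, ctl->qth_max,
			      ctl->Wlog, ctl->Scell_log, stab))
		return -EINVAL;	/* any stab[i] >= 32 would shift past 32 bits */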
@@ -287,7 +295,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
int shift;
/*
- * The problem: ideally, average length queue recalcultion should
+ * The problem: ideally, average length queue recalculation should
* be done over constant clock intervals. This is too expensive, so
* that the calculation is driven by outgoing packets.
* When the queue is idle we have to model this clock by hand.
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e2091bb2b3a8..4da61c950e93 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -33,6 +33,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
*
* @list: Used internally
* @kind: Identifier
+ * @netns_refund: Physical device, move to init_net on netns exit
* @maxtype: Highest device specific netlink attribute number
* @policy: Netlink policy for device specific attribute validation
* @validate: Optional validation function for netlink/changelink parameters
@@ -64,6 +65,7 @@ struct rtnl_link_ops {
size_t priv_size;
void (*setup)(struct net_device *dev);
+ bool netns_refund;
unsigned int maxtype;
const struct nla_policy *policy;
int (*validate)(struct nlattr *tb[],
diff --git a/include/net/sock.h b/include/net/sock.h
index 636810ddcd9b..0b6266fd6bf6 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -936,7 +936,7 @@ static inline void sk_acceptq_added(struct sock *sk)
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
- return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
+ return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
}
/*
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 970cc2ea2850..6154a2e72bce 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -30,7 +30,7 @@ TRACE_EVENT(workqueue_queue_work,
TP_STRUCT__entry(
__field( void *, work )
__field( void *, function)
- __field( const char *, workqueue)
+ __string( workqueue, pwq->wq->name)
__field( unsigned int, req_cpu )
__field( unsigned int, cpu )
),
@@ -38,13 +38,13 @@ TRACE_EVENT(workqueue_queue_work,
TP_fast_assign(
__entry->work = work;
__entry->function = work->func;
- __entry->workqueue = pwq->wq->name;
+ __assign_str(workqueue, pwq->wq->name);
__entry->req_cpu = req_cpu;
__entry->cpu = pwq->pool->cpu;
),
TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u",
- __entry->work, __entry->function, __entry->workqueue,
+ __entry->work, __entry->function, __get_str(workqueue),
__entry->req_cpu, __entry->cpu)
);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 79c893310492..4ba4ef0ff63a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3850,7 +3850,7 @@ union bpf_attr {
*
* long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
* Description
- * Check ctx packet size against exceeding MTU of net device (based
+ * Check packet size against the MTU of a net device (based
* on *ifindex*). This helper will likely be used in combination
* with helpers that adjust/change the packet size.
*
@@ -3867,6 +3867,14 @@ union bpf_attr {
* against the current net device. This is practical if this isn't
* used prior to redirect.
*
+ * On input *mtu_len* must be a valid pointer, else the verifier
+ * will reject the BPF program. If the value *mtu_len* is
+ * initialized to zero then the ctx packet size is used. When the
+ * value *mtu_len* is provided as input, it specifies the L3 length
+ * that the MTU check is done against. Remember XDP and TC length
+ * operate at L2, but this value is L3, as it correlates to MTU and
+ * IP-header tot_len values, which are L3 (similar behavior to
+ * bpf_fib_lookup).
+ *
* The Linux kernel route table can configure MTUs on a more
* specific per route level, which is not provided by this helper.
* For route level MTU checks use the **bpf_fib_lookup**\ ()
@@ -3891,11 +3899,9 @@ union bpf_attr {
*
* On return *mtu_len* pointer contains the MTU value of the net
* device. Remember the net device configured MTU is the L3 size,
- * which is returned here and XDP and TX length operate at L2.
+ * which is returned here and XDP and TC length operate at L2.
 * The helper takes this into account for you, but remember when using
- * MTU value in your BPF-code. On input *mtu_len* must be a valid
- * pointer and be initialized (to zero), else verifier will reject
- * BPF program.
+ * the MTU value in your BPF code.
*
* Return
* * 0 on success, and populate MTU value in *mtu_len* pointer.
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 98ca64d1beb6..54442612c48b 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -903,7 +903,8 @@ struct fuse_notify_retrieve_in {
};
/* Device ioctls: */
-#define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t)
+#define FUSE_DEV_IOC_MAGIC 229
+#define FUSE_DEV_IOC_CLONE _IOR(FUSE_DEV_IOC_MAGIC, 0, uint32_t)
struct fuse_lseek_in {
uint64_t fh;
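The ioctl value is unchanged; the magic number is just given a name so
further FUSE device ioctls can share it. Userspace usage stays as before
(sketch, assuming an already-initialized FUSE session fd):

	/* Clone an existing FUSE session onto a freshly opened fd: */
	int clone_fd = open("/dev/fuse", O_RDWR);
	uint32_t session_fd = fuse_fd;		/* assumed set up elsewhere */

	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &session_fd) == -1)
		perror("FUSE_DEV_IOC_CLONE");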
diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
index aea26ab1431c..bff5032c98df 100644
--- a/include/uapi/linux/psample.h
+++ b/include/uapi/linux/psample.h
@@ -3,7 +3,6 @@
#define __UAPI_PSAMPLE_H
enum {
- /* sampled packet metadata */
PSAMPLE_ATTR_IIFINDEX,
PSAMPLE_ATTR_OIFINDEX,
PSAMPLE_ATTR_ORIGSIZE,
@@ -11,10 +10,8 @@ enum {
PSAMPLE_ATTR_GROUP_SEQ,
PSAMPLE_ATTR_SAMPLE_RATE,
PSAMPLE_ATTR_DATA,
- PSAMPLE_ATTR_TUNNEL,
-
- /* commands attributes */
PSAMPLE_ATTR_GROUP_REFCOUNT,
+ PSAMPLE_ATTR_TUNNEL,
__PSAMPLE_ATTR_MAX
};