author    Leon Romanovsky <leon@kernel.org>  2026-03-30 12:02:58 +0300
committer Leon Romanovsky <leon@kernel.org>  2026-03-30 20:47:44 +0300
commit    e6fd2491789745ed8c3df86a660dfa7207129d22 (patch)
tree      62d79e06a87f78a9949c1e695386dec2ad0fcf22 /include
parent    adc09d7fbbb9d286057c98675994c445d81c27fa (diff)
parent    7aaa8047eafd0bd628065b15757d9b48c5f9c07d (diff)
Merge branch 'master' into rdma-next
Let's bring v7.0-rc6 to the -next branch, so we can merge the DMA
attributes fix [1] without merge conflicts.

[1] https://lore.kernel.org/all/20260323-umem-dma-attrs-v1-1-d6890f2e6a1e@nvidia.com

Signed-off-by: Leon Romanovsky <leon@kernel.org>

* master: (1688 commits)
  Linux 7.0-rc6
  ...
Diffstat (limited to 'include')
-rw-r--r--include/asm-generic/vmlinux.lds.h4
-rw-r--r--include/drm/display/drm_dp.h2
-rw-r--r--include/hyperv/hvgdk_mini.h3
-rw-r--r--include/kunit/run-in-irq-context.h44
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/build_bug.h4
-rw-r--r--include/linux/console_struct.h1
-rw-r--r--include/linux/damon.h6
-rw-r--r--include/linux/device.h54
-rw-r--r--include/linux/device/bus.h6
-rw-r--r--include/linux/dma-mapping.h19
-rw-r--r--include/linux/etherdevice.h3
-rw-r--r--include/linux/eventpoll.h11
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h8
-rw-r--r--include/linux/fs/super_types.h1
-rw-r--r--include/linux/hid.h7
-rw-r--r--include/linux/if_ether.h3
-rw-r--r--include/linux/indirect_call_wrapper.h18
-rw-r--r--include/linux/io-pgtable.h10
-rw-r--r--include/linux/io_uring_types.h4
-rw-r--r--include/linux/ipv6.h7
-rw-r--r--include/linux/kthread.h21
-rw-r--r--include/linux/kvm_host.h83
-rw-r--r--include/linux/leafops.h32
-rw-r--r--include/linux/local_lock_internal.h2
-rw-r--r--include/linux/mempolicy.h1
-rw-r--r--include/linux/migrate.h10
-rw-r--r--include/linux/mm.h17
-rw-r--r--include/linux/mmu_notifier.h31
-rw-r--r--include/linux/netdevice.h68
-rw-r--r--include/linux/netfs.h1
-rw-r--r--include/linux/ns_common.h2
-rw-r--r--include/linux/nvme-auth.h2
-rw-r--r--include/linux/pagemap.h11
-rw-r--r--include/linux/platform_data/mlxreg.h14
-rw-r--r--include/linux/platform_data/x86/int3472.h5
-rw-r--r--include/linux/platform_device.h5
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rseq_types.h6
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/security.h1
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/spi/spi.h5
-rw-r--r--include/linux/srcutiny.h4
-rw-r--r--include/linux/srcutree.h9
-rw-r--r--include/linux/uaccess.h58
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/usb/r8152.h1
-rw-r--r--include/linux/usb/usbnet.h1
-rw-r--r--include/linux/virtio_net.h53
-rw-r--r--include/net/act_api.h1
-rw-r--r--include/net/bluetooth/l2cap.h1
-rw-r--r--include/net/bonding.h1
-rw-r--r--include/net/codel_impl.h1
-rw-r--r--include/net/inet6_hashtables.h2
-rw-r--r--include/net/inet_hashtables.h16
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip6_fib.h21
-rw-r--r--include/net/ip6_tunnel.h14
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/ip_tunnels.h37
-rw-r--r--include/net/libeth/xsk.h3
-rw-r--r--include/net/mac80211.h4
-rw-r--r--include/net/netfilter/nf_conntrack_core.h5
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h20
-rw-r--r--include/net/netfilter/nf_tables.h13
-rw-r--r--include/net/netns/xfrm.h2
-rw-r--r--include/net/page_pool/types.h2
-rw-r--r--include/net/sch_generic.h43
-rw-r--r--include/net/secure_seq.h45
-rw-r--r--include/net/tc_act/tc_gate.h33
-rw-r--r--include/net/tc_act/tc_ife.h4
-rw-r--r--include/net/tcp.h6
-rw-r--r--include/net/udp_tunnel.h2
-rw-r--r--include/net/xdp_sock_drv.h16
-rw-r--r--include/sound/cs35l56.h1
-rw-r--r--include/sound/sdca_function.h5
-rw-r--r--include/sound/tas2781.h1
-rw-r--r--include/trace/events/btrfs.h11
-rw-r--r--include/trace/events/dma.h4
-rw-r--r--include/trace/events/netfs.h12
-rw-r--r--include/trace/events/task.h7
-rw-r--r--include/uapi/linux/dma-buf.h1
-rw-r--r--include/uapi/linux/io_uring.h3
-rw-r--r--include/uapi/linux/kvm.h8
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h4
-rw-r--r--include/xen/xenbus.h4
88 files changed, 745 insertions, 287 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index eeb070f330bd..1e1580febe4b 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -848,12 +848,14 @@
/* Required sections not related to debugging. */
#define ELF_DETAILS \
- .modinfo : { *(.modinfo) . = ALIGN(8); } \
.comment 0 : { *(.comment) } \
.symtab 0 : { *(.symtab) } \
.strtab 0 : { *(.strtab) } \
.shstrtab 0 : { *(.shstrtab) }
+#define MODINFO \
+ .modinfo : { *(.modinfo) . = ALIGN(8); }
+
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
. = ALIGN(8); \
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index e4eebabab975..8b15d3eeb716 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -571,6 +571,8 @@
# define DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP (1 << 7)
#define DP_PANEL_REPLAY_CAP_X_GRANULARITY 0xb2
+# define DP_PANEL_REPLAY_FULL_LINE_GRANULARITY 0xffff
+
#define DP_PANEL_REPLAY_CAP_Y_GRANULARITY 0xb4
/* Link Configuration */
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index 056ef7b6b360..1823a290a7b7 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -477,7 +477,6 @@ union hv_vp_assist_msr_contents { /* HV_REGISTER_VP_ASSIST_PAGE */
#define HVCALL_NOTIFY_PARTITION_EVENT 0x0087
#define HVCALL_ENTER_SLEEP_STATE 0x0084
#define HVCALL_NOTIFY_PORT_RING_EMPTY 0x008b
-#define HVCALL_SCRUB_PARTITION 0x008d
#define HVCALL_REGISTER_INTERCEPT_RESULT 0x0091
#define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094
#define HVCALL_CREATE_PORT 0x0095
@@ -1121,6 +1120,8 @@ enum hv_register_name {
HV_X64_REGISTER_MSR_MTRR_FIX4KF8000 = 0x0008007A,
HV_X64_REGISTER_REG_PAGE = 0x0009001C,
+#elif defined(CONFIG_ARM64)
+ HV_ARM64_REGISTER_SINT_RESERVED_INTERRUPT_ID = 0x00070001,
#endif
};
diff --git a/include/kunit/run-in-irq-context.h b/include/kunit/run-in-irq-context.h
index c89b1b1b12dd..bfe60d6cf28d 100644
--- a/include/kunit/run-in-irq-context.h
+++ b/include/kunit/run-in-irq-context.h
@@ -12,16 +12,16 @@
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
-#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
-
struct kunit_irq_test_state {
bool (*func)(void *test_specific_state);
void *test_specific_state;
bool task_func_reported_failure;
bool hardirq_func_reported_failure;
bool softirq_func_reported_failure;
+ atomic_t task_func_calls;
atomic_t hardirq_func_calls;
atomic_t softirq_func_calls;
+ ktime_t interval;
struct hrtimer timer;
struct work_struct bh_work;
};
@@ -30,14 +30,25 @@ static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
{
struct kunit_irq_test_state *state =
container_of(timer, typeof(*state), timer);
+ int task_calls, hardirq_calls, softirq_calls;
WARN_ON_ONCE(!in_hardirq());
- atomic_inc(&state->hardirq_func_calls);
+ task_calls = atomic_read(&state->task_func_calls);
+ hardirq_calls = atomic_inc_return(&state->hardirq_func_calls);
+ softirq_calls = atomic_read(&state->softirq_func_calls);
+
+ /*
+ * If the timer is firing too often for the softirq or task to ever have
+ * a chance to run, increase the timer interval. This is needed on very
+ * slow systems.
+ */
+ if (hardirq_calls >= 20 && (softirq_calls == 0 || task_calls == 0))
+ state->interval = ktime_add_ns(state->interval, 250);
if (!state->func(state->test_specific_state))
state->hardirq_func_reported_failure = true;
- hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
+ hrtimer_forward_now(&state->timer, state->interval);
queue_work(system_bh_wq, &state->bh_work);
return HRTIMER_RESTART;
}
@@ -86,10 +97,14 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
struct kunit_irq_test_state state = {
.func = func,
.test_specific_state = test_specific_state,
+ /*
+ * Start with a 5us timer interval. If the system can't keep
+ * up, kunit_irq_test_timer_func() will increase it.
+ */
+ .interval = us_to_ktime(5),
};
unsigned long end_jiffies;
- int hardirq_calls, softirq_calls;
- bool allctx = false;
+ int task_calls, hardirq_calls, softirq_calls;
/*
* Set up a hrtimer (the way we access hardirq context) and a work
@@ -104,21 +119,18 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
* and hardirq), or 1 second, whichever comes first.
*/
end_jiffies = jiffies + HZ;
- hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
- HRTIMER_MODE_REL_HARD);
- for (int task_calls = 0, calls = 0;
- ((calls < max_iterations) || !allctx) &&
- !time_after(jiffies, end_jiffies);
- task_calls++) {
+ hrtimer_start(&state.timer, state.interval, HRTIMER_MODE_REL_HARD);
+ do {
if (!func(test_specific_state))
state.task_func_reported_failure = true;
+ task_calls = atomic_inc_return(&state.task_func_calls);
hardirq_calls = atomic_read(&state.hardirq_func_calls);
softirq_calls = atomic_read(&state.softirq_func_calls);
- calls = task_calls + hardirq_calls + softirq_calls;
- allctx = (task_calls > 0) && (hardirq_calls > 0) &&
- (softirq_calls > 0);
- }
+ } while ((task_calls + hardirq_calls + softirq_calls < max_iterations ||
+ (task_calls == 0 || hardirq_calls == 0 ||
+ softirq_calls == 0)) &&
+ !time_after(jiffies, end_jiffies));
/* Cancel the timer and work. */
hrtimer_cancel(&state.timer);
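For illustration, a minimal caller sketch under the new adaptive-interval behavior; everything except kunit_run_irq_test() here is hypothetical:

static bool my_op_once(void *state)
{
	/* Must be safe to run from task, softirq and hardirq context. */
	return my_op() == 0;
}

static void my_op_irq_test(struct kunit *test)
{
	/* Run ~10000 total iterations spread across all three contexts. */
	kunit_run_irq_test(test, my_op_once, 10000, NULL);
}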
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 407f7005e6d6..8bcb9b726262 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -4,6 +4,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 22 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 24 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 2cfbb4c65c78..d3dc5dc5f916 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -32,7 +32,8 @@
/**
* BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied
* error message.
- * @condition: the condition which the compiler should know is false.
+ * @cond: the condition which the compiler should know is false.
+ * @msg: build-time error message
*
* See BUILD_BUG_ON for description.
*/
@@ -60,6 +61,7 @@
/**
* static_assert - check integer constant expression at build time
+ * @expr: expression to be checked
*
* static_assert() is a wrapper for the C11 _Static_assert, with a
* little macro magic to make the message optional (defaulting to the
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index 13b35637bd5a..d5ca855116df 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -160,6 +160,7 @@ struct vc_data {
struct uni_pagedict **uni_pagedict_loc; /* [!] Location of uni_pagedict variable for this console */
u32 **vc_uni_lines; /* unicode screen content */
u16 *vc_saved_screen;
+ u32 **vc_saved_uni_lines;
unsigned int vc_saved_cols;
unsigned int vc_saved_rows;
/* additional information is in vt_kern.h */
diff --git a/include/linux/damon.h b/include/linux/damon.h
index a4fea23da857..be3d198043ff 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -810,6 +810,12 @@ struct damon_ctx {
struct damos_walk_control *walk_control;
struct mutex walk_control_lock;
+ /*
+ * Indicate if this context may be corrupted. Currently this is set only for
+ * damon_commit_ctx() failure.
+ */
+ bool maybe_corrupted;
+
/* Working thread of the given DAMON context */
struct task_struct *kdamond;
/* Protects @kdamond field access */
diff --git a/include/linux/device.h b/include/linux/device.h
index 0be95294b6e6..e65d564f01cd 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -483,6 +483,8 @@ struct device_physical_location {
* on. This shrinks the "Board Support Packages" (BSPs) and
* minimizes board-specific #ifdefs in drivers.
* @driver_data: Private pointer for driver specific info.
+ * @driver_override: Driver name to force a match. Do not touch directly; use
+ * device_set_driver_override() instead.
* @links: Links to suppliers and consumers of this device.
* @power: For device power management.
* See Documentation/driver-api/pm/devices.rst for details.
@@ -576,6 +578,10 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
+ struct {
+ const char *name;
+ spinlock_t lock;
+ } driver_override;
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
@@ -701,6 +707,54 @@ struct device_link {
#define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj)
+int __device_set_driver_override(struct device *dev, const char *s, size_t len);
+
+/**
+ * device_set_driver_override() - Helper to set or clear driver override.
+ * @dev: Device to change
+ * @s: NUL-terminated string, new driver name to force a match, pass empty
+ * string to clear it ("" or "\n", where the latter is only for sysfs
+ * interface).
+ *
+ * Helper to set or clear driver override of a device.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int device_set_driver_override(struct device *dev, const char *s)
+{
+ return __device_set_driver_override(dev, s, s ? strlen(s) : 0);
+}
+
+/**
+ * device_has_driver_override() - Check if a driver override has been set.
+ * @dev: device to check
+ *
+ * Returns true if a driver override has been set for this device.
+ */
+static inline bool device_has_driver_override(struct device *dev)
+{
+ guard(spinlock)(&dev->driver_override.lock);
+ return !!dev->driver_override.name;
+}
+
+/**
+ * device_match_driver_override() - Match a driver against the device's driver_override.
+ * @dev: device to check
+ * @drv: driver to match against
+ *
+ * Returns > 0 if a driver override is set and matches the given driver, 0 if a
+ * driver override is set but does not match, or < 0 if a driver override is not
+ * set at all.
+ */
+static inline int device_match_driver_override(struct device *dev,
+ const struct device_driver *drv)
+{
+ guard(spinlock)(&dev->driver_override.lock);
+ if (dev->driver_override.name)
+ return !strcmp(dev->driver_override.name, drv->name);
+ return -1;
+}
+
/**
* device_iommu_mapped - Returns true when the device DMA is translated
* by an IOMMU
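As a usage sketch for the new helpers, a hypothetical bus match() that lets an override decide before falling back to name matching (foo_bus_match() and the fallback rule are illustrative):

static int foo_bus_match(struct device *dev, const struct device_driver *drv)
{
	int ret;

	/* >= 0 means an override is set and is authoritative. */
	ret = device_match_driver_override(dev, drv);
	if (ret >= 0)
		return ret;

	/* No override set: fall back to plain name matching. */
	return !strcmp(dev_name(dev), drv->name);
}

A caller would set the override with device_set_driver_override(dev, "foo-driver") and clear it again by passing an empty string.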
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 99c3c83ea520..c1b463cd6464 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -35,6 +35,8 @@ struct fwnode_handle;
* otherwise. It may also return error code if determining that
* the driver supports the device is not possible. In case of
* -EPROBE_DEFER it will queue the device for deferred probing.
+ * Note: This callback may be invoked with or without the device
+ * lock held.
* @uevent: Called when a device is added, removed, or a few other things
* that generate uevents to add the environment variables.
* @probe: Called when a new device or driver add to this bus, and callback
@@ -63,6 +65,9 @@ struct fwnode_handle;
* this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
+ * @driver_override: Set to true if this bus supports the driver_override
+ * mechanism, which allows userspace to force a specific
+ * driver to bind to a device via a sysfs attribute.
* @need_parent_lock: When probing or removing a device on this bus, the
* device core should lock the device's parent.
*
@@ -104,6 +109,7 @@ struct bus_type {
const struct dev_pm_ops *pm;
+ bool driver_override;
bool need_parent_lock;
};
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 29973baa0581..99ef042ecdb4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -80,11 +80,18 @@
#define DMA_ATTR_MMIO (1UL << 10)
/*
- * DMA_ATTR_CPU_CACHE_CLEAN: Indicates the CPU will not dirty any cacheline
- * overlapping this buffer while it is mapped for DMA. All mappings sharing
- * a cacheline must have this attribute for this to be considered safe.
+ * DMA_ATTR_DEBUGGING_IGNORE_CACHELINES: Indicates that this mapping may share
+ * a CPU cacheline with other mappings. All mappings sharing a cacheline must
+ * have this attribute for the sharing to be considered safe.
*/
-#define DMA_ATTR_CPU_CACHE_CLEAN (1UL << 11)
+#define DMA_ATTR_DEBUGGING_IGNORE_CACHELINES (1UL << 11)
+
+/*
+ * DMA_ATTR_REQUIRE_COHERENT: Indicates that DMA coherency is required.
+ * Mappings that carry this attribute cannot be bounced through SWIOTLB or
+ * kept coherent by CPU cache flushing.
+ */
+#define DMA_ATTR_REQUIRE_COHERENT (1UL << 12)
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform. It can
@@ -248,8 +255,8 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
{
return NULL;
}
-static void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, unsigned long attrs)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
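A hedged sketch of how a driver might use the new attribute; the semantics follow the comment above, and dev/buf/len are placeholders:

	dma_addr_t dma;

	/* Fail outright rather than fall back to SWIOTLB bouncing or
	 * CPU cache maintenance. */
	dma = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
				   DMA_ATTR_REQUIRE_COHERENT);
	if (dma_mapping_error(dev, dma))
		return -EIO;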
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9a1eacf35d37..df8f88f63a70 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -42,7 +42,8 @@ extern const struct header_ops eth_header_ops;
int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
const void *daddr, const void *saddr, unsigned len);
-int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev,
+ unsigned char *haddr);
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
__be16 type);
void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index ccb478eb174b..ea9ca0e4172a 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -82,11 +82,14 @@ static inline struct epoll_event __user *
epoll_put_uevent(__poll_t revents, __u64 data,
struct epoll_event __user *uevent)
{
- if (__put_user(revents, &uevent->events) ||
- __put_user(data, &uevent->data))
- return NULL;
-
+ scoped_user_write_access_size(uevent, sizeof(*uevent), efault) {
+ unsafe_put_user(revents, &uevent->events, efault);
+ unsafe_put_user(data, &uevent->data, efault);
+ }
return uevent+1;
+
+efault:
+ return NULL;
}
#endif
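The calling convention is unchanged; a condensed sketch of the event-copy loop, with variable names borrowed from the usual epoll send path:

	/* Copy one ready event; NULL means the user write faulted. */
	uevent = epoll_put_uevent(revents, epi->event.data, uevent);
	if (!uevent)
		return -EFAULT;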
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index d290060f4c73..91013161e9db 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -68,12 +68,12 @@
* timeout value used in Stratix10 FPGA manager driver.
* timeout value used in RSU driver
*/
-#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
-#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
-#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 5000
+#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 5000
+#define SVC_RSU_REQUEST_TIMEOUT_MS 2000
#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
#define SVC_COMPLETED_TIMEOUT_MS 30000
-#define SVC_HWMON_REQUEST_TIMEOUT_MS 300
+#define SVC_HWMON_REQUEST_TIMEOUT_MS 2000
struct stratix10_svc_chan;
diff --git a/include/linux/fs/super_types.h b/include/linux/fs/super_types.h
index fa7638b81246..383050e7fdf5 100644
--- a/include/linux/fs/super_types.h
+++ b/include/linux/fs/super_types.h
@@ -338,5 +338,6 @@ struct super_block {
#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */
#define SB_I_ALLOW_HSM 0x00004000 /* Allow HSM events on this superblock */
+#define SB_I_NO_DATA_INTEGRITY 0x00008000 /* fs cannot guarantee data persistence on sync */
#endif /* _LINUX_FS_SUPER_TYPES_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index dce862cafbbd..31324609af4d 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -682,6 +682,7 @@ struct hid_device {
__s32 battery_charge_status;
enum hid_battery_status battery_status;
bool battery_avoid_query;
+ bool battery_present;
ktime_t battery_ratelimit_time;
#endif
@@ -836,6 +837,12 @@ struct hid_usage_id {
* raw_event and event should return negative on error, any other value will
* pass the event on to .event() typically return 0 for success.
*
+ * report_fixup must return a report descriptor pointer whose lifetime is at
+ * least that of the input rdesc. This is usually done by mutating the input
+ * rdesc and returning it or a sub-portion of it. In case a new buffer is
+ * allocated and returned, the implementation of report_fixup is responsible for
+ * freeing it later.
+ *
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
* usage by generic code (no special handling needed) or positive to skip
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 61b7335aa037..ca9afa824aa4 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -40,7 +40,8 @@ static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_inner_mac_header(skb);
}
-int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev,
+ unsigned char *haddr);
extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
index 35227d47cfc9..dc272b514a01 100644
--- a/include/linux/indirect_call_wrapper.h
+++ b/include/linux/indirect_call_wrapper.h
@@ -16,22 +16,26 @@
*/
#define INDIRECT_CALL_1(f, f1, ...) \
({ \
- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+ typeof(f) __f1 = (f); \
+ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
})
#define INDIRECT_CALL_2(f, f2, f1, ...) \
({ \
- likely(f == f2) ? f2(__VA_ARGS__) : \
- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+ typeof(f) __f2 = (f); \
+ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
+ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
})
#define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
({ \
- likely(f == f3) ? f3(__VA_ARGS__) : \
- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
+ typeof(f) __f3 = (f); \
+ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
+ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
})
#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
({ \
- likely(f == f4) ? f4(__VA_ARGS__) : \
- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
+ typeof(f) __f4 = (f); \
+ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
+ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
})
#define INDIRECT_CALLABLE_DECLARE(f) f
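A sketch of typical use, and of what the rework buys: the first argument is now expanded into a local exactly once, so an expression with side effects is no longer re-evaluated on the fallback path (the handler names are hypothetical):

INDIRECT_CALLABLE_DECLARE(int tcp_deliver(struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int udp_deliver(struct sk_buff *skb));

static int deliver(int (*handler)(struct sk_buff *), struct sk_buff *skb)
{
	/* Compare against the likely targets, indirect call otherwise. */
	return INDIRECT_CALL_2(handler, tcp_deliver, udp_deliver, skb);
}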
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 7a1516011ccf..e19872e37e06 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -53,7 +53,7 @@ struct iommu_flush_ops {
* tables.
* @ias: Input address (iova) size, in bits.
* @oas: Output address (paddr) size, in bits.
- * @coherent_walk A flag to indicate whether or not page table walks made
+ * @coherent_walk: A flag to indicate whether or not page table walks made
* by the IOMMU are coherent with the CPU caches.
* @tlb: TLB management callbacks for this set of tables.
* @iommu_dev: The device representing the DMA configuration for the
@@ -136,6 +136,7 @@ struct io_pgtable_cfg {
void (*free)(void *cookie, void *pages, size_t size);
/* Low-level data specific to the table format */
+ /* private: */
union {
struct {
u64 ttbr;
@@ -203,6 +204,9 @@ struct arm_lpae_io_pgtable_walk_data {
* @unmap_pages: Unmap a range of virtually contiguous pages of the same size.
* @iova_to_phys: Translate iova to physical address.
* @pgtable_walk: (optional) Perform a page table walk for a given iova.
+ * @read_and_clear_dirty: Record dirty info per IOVA. If an IOVA is dirty,
+ * clear its dirty state from the PTE unless the
+ * IOMMU_DIRTY_NO_CLEAR flag is passed in.
*
* These functions map directly onto the iommu_ops member functions with
* the same names.
@@ -231,7 +235,9 @@ struct io_pgtable_ops {
* the configuration actually provided by the allocator (e.g. the
* pgsize_bitmap may be restricted).
* @cookie: An opaque token provided by the IOMMU driver and passed back to
- * the callback routines in cfg->tlb.
+ * the callback routines.
+ *
+ * Returns: Pointer to the &struct io_pgtable_ops for this set of page tables.
*/
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
struct io_pgtable_cfg *cfg,
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 3e4a82a6f817..214fdbd49052 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -388,6 +388,7 @@ struct io_ring_ctx {
* regularly bounce b/w CPUs.
*/
struct {
+ struct io_rings __rcu *rings_rcu;
struct llist_head work_llist;
struct llist_head retry_llist;
unsigned long check_cq;
@@ -540,6 +541,7 @@ enum {
REQ_F_BL_NO_RECYCLE_BIT,
REQ_F_BUFFERS_COMMIT_BIT,
REQ_F_BUF_NODE_BIT,
+ REQ_F_BUF_MORE_BIT,
REQ_F_HAS_METADATA_BIT,
REQ_F_IMPORT_BUFFER_BIT,
REQ_F_SQE_COPIED_BIT,
@@ -625,6 +627,8 @@ enum {
REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
/* buf node is valid */
REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
+ /* incremental buffer consumption, more space available */
+ REQ_F_BUF_MORE = IO_REQ_FLAG(REQ_F_BUF_MORE_BIT),
/* request has read/write metadata assigned */
REQ_F_HAS_METADATA = IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 443053a76dcf..a7421382a916 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -333,7 +333,12 @@ struct tcp6_timewait_sock {
};
#if IS_ENABLED(CONFIG_IPV6)
-bool ipv6_mod_enabled(void);
+extern int disable_ipv6_mod;
+
+static inline bool ipv6_mod_enabled(void)
+{
+ return disable_ipv6_mod == 0;
+}
static inline struct ipv6_pinfo *inet6_sk(const struct sock *__sk)
{
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c92c1149ee6e..a01a474719a7 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -7,6 +7,24 @@
struct mm_struct;
+/* opaque kthread data */
+struct kthread;
+
+/*
+ * When "(p->flags & PF_KTHREAD)" is set, the task is a kthread and will
+ * always remain a kthread. For kthreads, p->worker_private always
+ * points to a struct kthread. For tasks that are not kthreads,
+ * p->worker_private is used to point to other things.
+ *
+ * Returns NULL for any task that is not a kthread.
+ */
+static inline struct kthread *tsk_is_kthread(struct task_struct *p)
+{
+ if (p->flags & PF_KTHREAD)
+ return p->worker_private;
+ return NULL;
+}
+
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
void *data,
@@ -98,9 +116,10 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
-void kthread_exit(long result) __noreturn;
+#define kthread_exit(result) do_exit(result)
void kthread_complete_and_exit(struct completion *, long) __noreturn;
int kthreads_update_housekeeping(void);
+void kthread_do_exit(struct kthread *, long);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
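A sketch of the intended idiom: a single call replaces checking PF_KTHREAD and then dereferencing worker_private by hand:

static bool task_has_kthread_struct(struct task_struct *p)
{
	/* Non-NULL only for tasks that are (and will remain) kthreads. */
	return tsk_is_kthread(p) != NULL;
}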
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 34759a262b28..6b76e7a6f4c2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1940,56 +1940,43 @@ enum kvm_stat_kind {
struct kvm_stat_data {
struct kvm *kvm;
- const struct _kvm_stats_desc *desc;
+ const struct kvm_stats_desc *desc;
enum kvm_stat_kind kind;
};
-struct _kvm_stats_desc {
- struct kvm_stats_desc desc;
- char name[KVM_STATS_NAME_SIZE];
-};
-
-#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
- .flags = type | unit | base | \
- BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
- BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
- BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
- .exponent = exp, \
- .size = sz, \
+#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = sz, \
.bucket_size = bsz
-#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vm_stat, generic.stat) \
- }, \
- .name = #stat, \
- }
-#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
- }, \
- .name = #stat, \
- }
-#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vm_stat, stat) \
- }, \
- .name = #stat, \
- }
-#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vcpu_stat, stat) \
- }, \
- .name = #stat, \
- }
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat), \
+ .name = #stat, \
+}
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat), \
+ .name = #stat, \
+}
+#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, stat), \
+ .name = #stat, \
+}
+#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat), \
+ .name = #stat, \
+}
/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
@@ -2066,7 +2053,7 @@ struct _kvm_stats_desc {
STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
- const struct _kvm_stats_desc *desc,
+ const struct kvm_stats_desc *desc,
void *stats, size_t size_stats,
char __user *user_buffer, size_t size, loff_t *offset);
@@ -2111,9 +2098,9 @@ static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
extern const struct kvm_stats_header kvm_vm_stats_header;
-extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
-extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
+extern const struct kvm_stats_desc kvm_vcpu_stats_desc[];
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
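With the _kvm_stats_desc wrapper gone, stats tables become plain arrays of kvm_stats_desc; a condensed sketch of one entry built via the flattened macros (the stat chosen here is illustrative):

const struct kvm_stats_desc kvm_vm_stats_desc[] = {
	STATS_DESC(VM_GENERIC, remote_tlb_flush, KVM_STATS_TYPE_CUMULATIVE,
		   KVM_STATS_UNIT_NONE, KVM_STATS_BASE_POW10, 0,
		   sizeof(u64), 0),
};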
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index a9ff94b744f2..05673d3529e7 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -363,6 +363,23 @@ static inline unsigned long softleaf_to_pfn(softleaf_t entry)
return swp_offset(entry) & SWP_PFN_MASK;
}
+static inline void softleaf_migration_sync(softleaf_t entry,
+ struct folio *folio)
+{
+ /*
+ * Ensure we do not race with a split, which might turn tail pages into
+ * new folios and thus result in observing an unlocked folio.
+ * This matches the write barrier in __split_folio_to_order().
+ */
+ smp_rmb();
+
+ /*
+ * Any use of migration entries may only occur while the
+ * corresponding folio is locked.
+ */
+ VM_WARN_ON_ONCE(!folio_test_locked(folio));
+}
+
/**
* softleaf_to_page() - Obtains struct page for PFN encoded within leaf entry.
* @entry: Leaf entry, softleaf_has_pfn(@entry) must return true.
@@ -374,11 +391,8 @@ static inline struct page *softleaf_to_page(softleaf_t entry)
struct page *page = pfn_to_page(softleaf_to_pfn(entry));
VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
- /*
- * Any use of migration entries may only occur while the
- * corresponding page is locked
- */
- VM_WARN_ON_ONCE(softleaf_is_migration(entry) && !PageLocked(page));
+ if (softleaf_is_migration(entry))
+ softleaf_migration_sync(entry, page_folio(page));
return page;
}
@@ -394,12 +408,8 @@ static inline struct folio *softleaf_to_folio(softleaf_t entry)
struct folio *folio = pfn_folio(softleaf_to_pfn(entry));
VM_WARN_ON_ONCE(!softleaf_has_pfn(entry));
- /*
- * Any use of migration entries may only occur while the
- * corresponding folio is locked.
- */
- VM_WARN_ON_ONCE(softleaf_is_migration(entry) &&
- !folio_test_locked(folio));
+ if (softleaf_is_migration(entry))
+ softleaf_migration_sync(entry, folio);
return folio;
}
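A hedged caller sketch; softleaf_from_pte() is assumed to be how a page-table walker obtains the entry from a non-present PTE:

	softleaf_t entry = softleaf_from_pte(pte);	/* assumed helper */

	if (softleaf_has_pfn(entry)) {
		/* For migration entries this now pairs the smp_rmb() against
		 * folio splits with the folio-lock sanity check. */
		struct folio *folio = softleaf_to_folio(entry);
		/* ... use folio under the folio lock ... */
	}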
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index eff711bf973f..234be7f12c15 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -315,7 +315,7 @@ do { \
#endif /* CONFIG_PREEMPT_RT */
-#if defined(WARN_CONTEXT_ANALYSIS)
+#if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__)
/*
* Because the compiler only knows about the base per-CPU variable, use this
* helper function to make the compiler think we lock/unlock the @base variable,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0fe96f3ab3ef..65c732d440d2 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -55,6 +55,7 @@ struct mempolicy {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
nodemask_t user_nodemask; /* nodemask passed by user */
} w;
+ struct rcu_head rcu;
};
/*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 26ca00c325d9..d5af2b7f577b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -65,7 +65,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src);
-void migration_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
+void softleaf_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
__releases(ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
@@ -97,6 +97,14 @@ static inline int set_movable_ops(const struct movable_operations *ops, enum pag
return -ENOSYS;
}
+static inline void softleaf_entry_wait_on_locked(softleaf_t entry, spinlock_t *ptl)
+ __releases(ptl)
+{
+ WARN_ON_ONCE(1);
+
+ spin_unlock(ptl);
+}
+
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5be3d8a8f806..abb4963c1f06 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3514,26 +3514,21 @@ static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
-static inline unsigned long ptdesc_nr_pages(const struct ptdesc *ptdesc)
-{
- return compound_nr(ptdesc_page(ptdesc));
-}
-
static inline void __pagetable_ctor(struct ptdesc *ptdesc)
{
- pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
+ struct folio *folio = ptdesc_folio(ptdesc);
- __SetPageTable(ptdesc_page(ptdesc));
- mod_node_page_state(pgdat, NR_PAGETABLE, ptdesc_nr_pages(ptdesc));
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
}
static inline void pagetable_dtor(struct ptdesc *ptdesc)
{
- pg_data_t *pgdat = NODE_DATA(memdesc_nid(ptdesc->pt_flags));
+ struct folio *folio = ptdesc_folio(ptdesc);
ptlock_free(ptdesc);
- __ClearPageTable(ptdesc_page(ptdesc));
- mod_node_page_state(pgdat, NR_PAGETABLE, -ptdesc_nr_pages(ptdesc));
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 07a2bbaf86e9..8450e18a87c2 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -234,7 +234,7 @@ struct mmu_notifier {
};
/**
- * struct mmu_interval_notifier_ops
+ * struct mmu_interval_notifier_ops - callback for range notification
* @invalidate: Upon return the caller must stop using any SPTEs within this
* range. This function can sleep. Return false only if sleeping
* was required but mmu_notifier_range_blockable(range) is false.
@@ -309,8 +309,8 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
/**
* mmu_interval_set_seq - Save the invalidation sequence
- * @interval_sub - The subscription passed to invalidate
- * @cur_seq - The cur_seq passed to the invalidate() callback
+ * @interval_sub: The subscription passed to invalidate
+ * @cur_seq: The cur_seq passed to the invalidate() callback
*
* This must be called unconditionally from the invalidate callback of a
* struct mmu_interval_notifier_ops under the same lock that is used to call
@@ -329,8 +329,8 @@ mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
/**
* mmu_interval_read_retry - End a read side critical section against a VA range
- * interval_sub: The subscription
- * seq: The return of the paired mmu_interval_read_begin()
+ * @interval_sub: The subscription
+ * @seq: The return of the paired mmu_interval_read_begin()
*
* This MUST be called under a user provided lock that is also held
* unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
@@ -338,7 +338,7 @@ mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
* Each call should be paired with a single mmu_interval_read_begin() and
* should be used to conclude the read side.
*
- * Returns true if an invalidation collided with this critical section, and
+ * Returns: true if an invalidation collided with this critical section, and
* the caller should retry.
*/
static inline bool
@@ -350,20 +350,21 @@ mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
/**
* mmu_interval_check_retry - Test if a collision has occurred
- * interval_sub: The subscription
- * seq: The return of the matching mmu_interval_read_begin()
+ * @interval_sub: The subscription
+ * @seq: The return of the matching mmu_interval_read_begin()
*
* This can be used in the critical section between mmu_interval_read_begin()
- * and mmu_interval_read_retry(). A return of true indicates an invalidation
- * has collided with this critical region and a future
- * mmu_interval_read_retry() will return true.
- *
- * False is not reliable and only suggests a collision may not have
- * occurred. It can be called many times and does not have to hold the user
- * provided lock.
+ * and mmu_interval_read_retry().
*
* This call can be used as part of loops and other expensive operations to
* expedite a retry.
+ * It can be called many times and does not have to hold the user
+ * provided lock.
+ *
+ * Returns: true indicates an invalidation has collided with this critical
+ * region and a future mmu_interval_read_retry() will return true.
+ * False is not reliable and only suggests a collision may not have
+ * occurred.
*/
static inline bool
mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
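The kernel-doc above describes the usual begin/retry pattern; a condensed sketch, where driver_lock and the mapping installation are placeholders:

	unsigned long seq;

again:
	seq = mmu_interval_read_begin(interval_sub);
	/* ... build the new mappings outside the lock ... */

	spin_lock(&driver_lock);
	if (mmu_interval_read_retry(interval_sub, seq)) {
		/* An invalidation collided; discard the work and retry. */
		spin_unlock(&driver_lock);
		goto again;
	}
	/* ... install the mappings ... */
	spin_unlock(&driver_lock);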
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d4e6e00bb90a..7ca01eb3f7d2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -311,7 +311,9 @@ struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned int len);
- int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
+ int (*parse)(const struct sk_buff *skb,
+ const struct net_device *dev,
+ unsigned char *haddr);
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
@@ -2155,6 +2157,7 @@ struct net_device {
unsigned long state;
unsigned int flags;
unsigned short hard_header_len;
+ enum netdev_stat_type pcpu_stat_type:8;
netdev_features_t features;
struct inet6_dev __rcu *ip6_ptr;
__cacheline_group_end(net_device_read_txrx);
@@ -2404,8 +2407,6 @@ struct net_device {
void *ml_priv;
enum netdev_ml_priv_type ml_priv_type;
- enum netdev_stat_type pcpu_stat_type:8;
-
#if IS_ENABLED(CONFIG_GARP)
struct garp_port __rcu *garp_port;
#endif
@@ -3446,7 +3447,7 @@ static inline int dev_parse_header(const struct sk_buff *skb,
if (!dev->header_ops || !dev->header_ops->parse)
return 0;
- return dev->header_ops->parse(skb, haddr);
+ return dev->header_ops->parse(skb, dev, haddr);
}
static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb)
@@ -3576,17 +3577,49 @@ struct page_pool_bh {
};
DECLARE_PER_CPU(struct page_pool_bh, system_page_pool);
+#define XMIT_RECURSION_LIMIT 8
+
#ifndef CONFIG_PREEMPT_RT
static inline int dev_recursion_level(void)
{
return this_cpu_read(softnet_data.xmit.recursion);
}
+
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.recursion);
+}
#else
static inline int dev_recursion_level(void)
{
return current->net_xmit.recursion;
}
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ current->net_xmit.recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ current->net_xmit.recursion--;
+}
#endif
void __netif_schedule(struct Qdisc *q);
@@ -4711,7 +4744,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
@@ -4729,7 +4762,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
@@ -4738,7 +4771,7 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
bool ok = spin_trylock(&txq->_xmit_lock);
if (likely(ok)) {
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
return ok;
@@ -4746,14 +4779,14 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ /* Pairs with READ_ONCE() in netif_tx_owned() */
WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
@@ -4846,6 +4879,23 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_enable();
}
+#ifndef CONFIG_PREEMPT_RT
+static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
+{
+ /* Other cpus might concurrently change txq->xmit_lock_owner
+ * to -1 or to their cpu id, but not to our id.
+ */
+ return READ_ONCE(txq->xmit_lock_owner) == cpu;
+}
+
+#else
+static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
+{
+ return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
+}
+
+#endif
+
static inline void netif_addr_lock(struct net_device *dev)
{
unsigned char nest_level = 0;
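The recursion helpers made public above follow the pattern long used in the core transmit path; a hypothetical tunnel-style xmit sketch:

static netdev_tx_t my_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Refuse to nest transmits beyond XMIT_RECURSION_LIMIT. */
	if (dev_xmit_recursion()) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	dev_xmit_recursion_inc();
	dev_queue_xmit(skb);
	dev_xmit_recursion_dec();
	return NETDEV_TX_OK;
}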
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index 72ee7d210a74..ba17ac5bf356 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -140,7 +140,6 @@ struct netfs_io_stream {
void (*issue_write)(struct netfs_io_subrequest *subreq);
/* Collection tracking */
struct list_head subrequests; /* Contributory I/O operations */
- struct netfs_io_subrequest *front; /* Op being collected */
unsigned long long collected_to; /* Position we've collected results to */
size_t transferred; /* The amount transferred from this stream */
unsigned short error; /* Aggregate error for the stream */
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
index 825f5865bfc5..c8e227a3f9e2 100644
--- a/include/linux/ns_common.h
+++ b/include/linux/ns_common.h
@@ -55,6 +55,8 @@ static __always_inline bool is_ns_init_id(const struct ns_common *ns)
#define ns_common_free(__ns) __ns_common_free(to_ns_common((__ns)))
+bool may_see_all_namespaces(void);
+
static __always_inline __must_check int __ns_ref_active_read(const struct ns_common *ns)
{
return atomic_read(&ns->__ns_ref_active);
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
index 60e069a6757f..e75c29c51464 100644
--- a/include/linux/nvme-auth.h
+++ b/include/linux/nvme-auth.h
@@ -11,7 +11,7 @@
struct nvme_dhchap_key {
size_t len;
u8 hash;
- u8 key[];
+ u8 key[] __counted_by(len);
};
u32 nvme_auth_get_seqnum(void);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ec442af3f886..31a848485ad9 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -210,7 +210,6 @@ enum mapping_flags {
AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM = 9,
AS_KERNEL_FILE = 10, /* mapping for a fake kernel file that shouldn't
account usage to user cgroups */
- AS_NO_DATA_INTEGRITY = 11, /* no data integrity guarantees */
/* Bits 16-25 are used for FOLIO_ORDER */
AS_FOLIO_ORDER_BITS = 5,
AS_FOLIO_ORDER_MIN = 16,
@@ -346,16 +345,6 @@ static inline bool mapping_writeback_may_deadlock_on_reclaim(const struct addres
return test_bit(AS_WRITEBACK_MAY_DEADLOCK_ON_RECLAIM, &mapping->flags);
}
-static inline void mapping_set_no_data_integrity(struct address_space *mapping)
-{
- set_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
-}
-
-static inline bool mapping_no_data_integrity(const struct address_space *mapping)
-{
- return test_bit(AS_NO_DATA_INTEGRITY, &mapping->flags);
-}
-
static inline gfp_t mapping_gfp_mask(const struct address_space *mapping)
{
return mapping->gfp_mask;
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index f6cca7a035c7..50b6be57da66 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -13,10 +13,10 @@
/**
* enum mlxreg_wdt_type - type of HW watchdog
*
- * TYPE1 HW watchdog implementation exist in old systems.
- * All new systems have TYPE2 HW watchdog.
- * TYPE3 HW watchdog can exist on all systems with new CPLD.
- * TYPE3 is selected by WD capability bit.
+ * @MLX_WDT_TYPE1: HW watchdog implementation in old systems.
+ * @MLX_WDT_TYPE2: All new systems have TYPE2 HW watchdog.
+ * @MLX_WDT_TYPE3: HW watchdog that can exist on all systems with new CPLD.
+ * TYPE3 is selected by WD capability bit.
*/
enum mlxreg_wdt_type {
MLX_WDT_TYPE1,
@@ -35,7 +35,7 @@ enum mlxreg_wdt_type {
* @MLXREG_HOTPLUG_LC_SYNCED: entry for line card synchronization events, coming
* after hardware-firmware synchronization handshake;
* @MLXREG_HOTPLUG_LC_READY: entry for line card ready events, indicating line card
- PHYs ready / unready state;
+ * PHYs ready / unready state;
* @MLXREG_HOTPLUG_LC_ACTIVE: entry for line card active events, indicating firmware
* availability / unavailability for the ports on line card;
* @MLXREG_HOTPLUG_LC_THERMAL: entry for line card thermal shutdown events, positive
@@ -123,8 +123,8 @@ struct mlxreg_hotplug_device {
* @reg_pwr: attribute power register;
* @reg_ena: attribute enable register;
* @mode: access mode;
- * @np - pointer to node platform associated with attribute;
- * @hpdev - hotplug device data;
+ * @np: pointer to node platform associated with attribute;
+ * @hpdev: hotplug device data;
* @notifier: pointer to event notifier block;
* @health_cntr: dynamic device health indication counter;
* @attached: true if device has been attached after good health indication;
diff --git a/include/linux/platform_data/x86/int3472.h b/include/linux/platform_data/x86/int3472.h
index b1b837583d54..dbe745dc88d5 100644
--- a/include/linux/platform_data/x86/int3472.h
+++ b/include/linux/platform_data/x86/int3472.h
@@ -26,6 +26,7 @@
#define INT3472_GPIO_TYPE_POWER_ENABLE 0x0b
#define INT3472_GPIO_TYPE_CLK_ENABLE 0x0c
#define INT3472_GPIO_TYPE_PRIVACY_LED 0x0d
+#define INT3472_GPIO_TYPE_DOVDD 0x10
#define INT3472_GPIO_TYPE_HANDSHAKE 0x12
#define INT3472_GPIO_TYPE_HOTPLUG_DETECT 0x13
@@ -33,8 +34,8 @@
#define INT3472_MAX_SENSOR_GPIOS 3
#define INT3472_MAX_REGULATORS 3
-/* E.g. "avdd\0" */
-#define GPIO_SUPPLY_NAME_LENGTH 5
+/* E.g. "dovdd\0" */
+#define GPIO_SUPPLY_NAME_LENGTH 6
/* 12 chars for acpi_dev_name() + "-", e.g. "ABCD1234:00-" */
#define GPIO_REGULATOR_NAME_LENGTH (12 + GPIO_SUPPLY_NAME_LENGTH)
/* lower- and upper-case mapping */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 813da101b5bf..ed1d50d1c3c1 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -31,11 +31,6 @@ struct platform_device {
struct resource *resource;
const struct platform_device_id *id_entry;
- /*
- * Driver name to force a match. Do not set directly, because core
- * frees it. Use driver_set_override() to set or clear it.
- */
- const char *driver_override;
/* MFD cell pointer */
struct mfd_cell *mfd_cell;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 876358cfe1b1..d862fa610270 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -248,6 +248,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
int ring_buffer_map(struct trace_buffer *buffer, int cpu,
struct vm_area_struct *vma);
+void ring_buffer_map_dup(struct trace_buffer *buffer, int cpu);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index da5fa6f40294..0b42045988db 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -133,10 +133,12 @@ struct rseq_data { };
* @active: MM CID is active for the task
* @cid: The CID associated to the task either permanently or
* borrowed from the CPU
+ * @node: Queued in the per MM MMCID list
*/
struct sched_mm_cid {
unsigned int active;
unsigned int cid;
+ struct hlist_node node;
};
/**
@@ -157,6 +159,7 @@ struct mm_cid_pcpu {
* @work: Regular work to handle the affinity mode change case
* @lock: Spinlock to protect against affinity setting which can't take @mutex
* @mutex: Mutex to serialize forks and exits related to this mm
+ * @user_list: List of the MM CID users of a MM
* @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map
* is growth only.
* @users: The number of tasks sharing this MM. Separate from mm::mm_users
@@ -177,13 +180,14 @@ struct mm_mm_cid {
raw_spinlock_t lock;
struct mutex mutex;
+ struct hlist_head user_list;
/* Low frequency modified */
unsigned int nr_cpus_allowed;
unsigned int users;
unsigned int pcpu_thrs;
unsigned int update_deferred;
-}____cacheline_aligned_in_smp;
+} ____cacheline_aligned;
#else /* CONFIG_SCHED_MM_CID */
struct mm_mm_cid { };
struct sched_mm_cid { };
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a7b4a980eb2f..5a5d3dbc9cdf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2354,7 +2354,6 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
#ifdef CONFIG_SCHED_MM_CID
void sched_mm_cid_before_execve(struct task_struct *t);
void sched_mm_cid_after_execve(struct task_struct *t);
-void sched_mm_cid_fork(struct task_struct *t);
void sched_mm_cid_exit(struct task_struct *t);
static __always_inline int task_mm_cid(struct task_struct *t)
{
@@ -2363,7 +2362,6 @@ static __always_inline int task_mm_cid(struct task_struct *t)
#else
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_fork(struct task_struct *t) { }
static inline void sched_mm_cid_exit(struct task_struct *t) { }
static __always_inline int task_mm_cid(struct task_struct *t)
{
diff --git a/include/linux/security.h b/include/linux/security.h
index 83a646d72f6f..ee88dd2d2d1f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -145,6 +145,7 @@ enum lockdown_reason {
LOCKDOWN_BPF_WRITE_USER,
LOCKDOWN_DBG_WRITE_KERNEL,
LOCKDOWN_RTAS_ERROR_INJECTION,
+ LOCKDOWN_XEN_USER_ACTIONS,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 01efdce0fda0..a95b2d143d24 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -195,6 +195,7 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
unsigned int quot);
int fsl8250_handle_irq(struct uart_port *port);
+void serial8250_handle_irq_locked(struct uart_port *port, unsigned int iir);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
u16 serial8250_rx_chars(struct uart_8250_port *up, u16 lsr);
void serial8250_read_char(struct uart_8250_port *up, u16 lsr);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index af7cfee7b8f6..0dc671c07d3a 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -159,10 +159,6 @@ extern void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
* @modalias: Name of the driver to use with this device, or an alias
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
- * @driver_override: If the name of a driver is written to this attribute, then
- * the device will bind to the named driver and only the named driver.
- * Do not set directly, because core frees it; use driver_set_override() to
- * set or clear it.
* @pcpu_statistics: statistics for the spi_device
* @word_delay: delay to be inserted between consecutive
* words of a transfer
@@ -224,7 +220,6 @@ struct spi_device {
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
- const char *driver_override;
/* The statistics */
struct spi_statistics __percpu *pcpu_statistics;
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index dec7cbe015aa..905b629e8fa3 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -11,6 +11,7 @@
#ifndef _LINUX_SRCU_TINY_H
#define _LINUX_SRCU_TINY_H
+#include <linux/irq_work_types.h>
#include <linux/swait.h>
struct srcu_struct {
@@ -24,18 +25,21 @@ struct srcu_struct {
struct rcu_head *srcu_cb_head; /* Pending callbacks: Head. */
struct rcu_head **srcu_cb_tail; /* Pending callbacks: Tail. */
struct work_struct srcu_work; /* For driving grace periods. */
+ struct irq_work srcu_irq_work; /* Defer schedule_work() to irq work. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
void srcu_drive_gp(struct work_struct *wp);
+void srcu_tiny_irq_work(struct irq_work *irq_work);
#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored, ____ignored) \
{ \
.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
.srcu_cb_tail = &name.srcu_cb_head, \
.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \
+ .srcu_irq_work = { .func = srcu_tiny_irq_work }, \
__SRCU_DEP_MAP_INIT(name) \
}
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 958cb7ef41cb..be76fa4fc170 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -34,7 +34,7 @@ struct srcu_data {
/* Values: SRCU_READ_FLAVOR_.* */
/* Update-side state. */
- spinlock_t __private lock ____cacheline_internodealigned_in_smp;
+ raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp;
struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
@@ -55,7 +55,7 @@ struct srcu_data {
* Node in SRCU combining tree, similar in function to rcu_data.
*/
struct srcu_node {
- spinlock_t __private lock;
+ raw_spinlock_t __private lock;
unsigned long srcu_have_cbs[4]; /* GP seq for children having CBs, but only */
/* if greater than ->srcu_gp_seq. */
unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs have CBs for given GP? */
@@ -74,7 +74,7 @@ struct srcu_usage {
/* First node at each level. */
int srcu_size_state; /* Small-to-big transition state. */
struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
- spinlock_t __private lock; /* Protect counters and size state. */
+ raw_spinlock_t __private lock; /* Protect counters and size state. */
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
@@ -95,6 +95,7 @@ struct srcu_usage {
unsigned long reschedule_jiffies;
unsigned long reschedule_count;
struct delayed_work work;
+ struct irq_work irq_work;
struct srcu_struct *srcu_ssp;
};
@@ -156,7 +157,7 @@ struct srcu_struct {
#define __SRCU_USAGE_INIT(name) \
{ \
- .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \
.srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \
.srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 1f3804245c06..4fe63169d5a2 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -647,36 +647,22 @@ static inline void user_access_restore(unsigned long flags) { }
/* Define RW variant so the below _mode macro expansion works */
#define masked_user_rw_access_begin(u) masked_user_access_begin(u)
#define user_rw_access_begin(u, s) user_access_begin(u, s)
-#define user_rw_access_end() user_access_end()
/* Scoped user access */
-#define USER_ACCESS_GUARD(_mode) \
-static __always_inline void __user * \
-class_user_##_mode##_begin(void __user *ptr) \
-{ \
- return ptr; \
-} \
- \
-static __always_inline void \
-class_user_##_mode##_end(void __user *ptr) \
-{ \
- user_##_mode##_access_end(); \
-} \
- \
-DEFINE_CLASS(user_ ##_mode## _access, void __user *, \
- class_user_##_mode##_end(_T), \
- class_user_##_mode##_begin(ptr), void __user *ptr) \
- \
-static __always_inline class_user_##_mode##_access_t \
-class_user_##_mode##_access_ptr(void __user *scope) \
-{ \
- return scope; \
-}
-USER_ACCESS_GUARD(read)
-USER_ACCESS_GUARD(write)
-USER_ACCESS_GUARD(rw)
-#undef USER_ACCESS_GUARD
+/* Cleanup wrapper functions */
+static __always_inline void __scoped_user_read_access_end(const void *p)
+{
+	user_read_access_end();
+}
+static __always_inline void __scoped_user_write_access_end(const void *p)
+{
+	user_write_access_end();
+}
+static __always_inline void __scoped_user_rw_access_end(const void *p)
+{
+	user_access_end();
+}
/**
* __scoped_user_access_begin - Start a scoped user access
@@ -750,13 +736,13 @@ USER_ACCESS_GUARD(rw)
*
* Don't use directly. Use scoped_masked_user_$MODE_access() instead.
*/
-#define __scoped_user_access(mode, uptr, size, elbl) \
-for (bool done = false; !done; done = true) \
- for (void __user *_tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
- !done; done = true) \
- for (CLASS(user_##mode##_access, scope)(_tmpptr); !done; done = true) \
- /* Force modified pointer usage within the scope */ \
- for (const typeof(uptr) uptr = _tmpptr; !done; done = true)
+#define __scoped_user_access(mode, uptr, size, elbl) \
+for (bool done = false; !done; done = true) \
+ for (auto _tmpptr = __scoped_user_access_begin(mode, uptr, size, elbl); \
+ !done; done = true) \
+ /* Force modified pointer usage within the scope */ \
+ for (const auto uptr __cleanup(__scoped_user_##mode##_access_end) = \
+ _tmpptr; !done; done = true)
/**
* scoped_user_read_access_size - Start a scoped user read access with given size
@@ -806,7 +792,7 @@ for (bool done = false; !done; done = true) \
/**
* scoped_user_rw_access_size - Start a scoped user read/write access with given size
- * @uptr Pointer to the user space address to read from and write to
+ * @uptr: Pointer to the user space address to read from and write to
* @size: Size of the access starting from @uptr
* @elbl: Error label to goto when the access region is rejected
*
@@ -817,7 +803,7 @@ for (bool done = false; !done; done = true) \
/**
* scoped_user_rw_access - Start a scoped user read/write access
- * @uptr Pointer to the user space address to read from and write to
+ * @uptr: Pointer to the user space address to read from and write to
* @elbl: Error label to goto when the access region is rejected
*
 * The size of the access starting from @uptr is determined via sizeof(*@uptr).
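A minimal usage sketch of the reworked scope, assuming scoped_user_rw_access()
behaves as documented above (the function name and error label are
illustrative only):

	/* Sketch: bump a user-space counter inside one rw scope */
	static int bump_user_counter(u32 __user *uptr)
	{
		u32 val;

		scoped_user_rw_access(uptr, efault) {
			unsafe_get_user(val, uptr, efault);
			unsafe_put_user(val + 1, uptr, efault);
		}
		return 0;
	efault:
		return -EFAULT;
	}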
diff --git a/include/linux/usb.h b/include/linux/usb.h
index fbfcc70b07fb..04277af4bb9d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1862,14 +1862,18 @@ void usb_free_noncoherent(struct usb_device *dev, size_t size,
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
+/* Maximum value allowed for timeout in synchronous routines below */
+#define USB_MAX_SYNCHRONOUS_TIMEOUT 60000 /* ms */
+
extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
void *data, __u16 size, int timeout);
extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout);
extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
- void *data, int len, int *actual_length,
- int timeout);
+ void *data, int len, int *actual_length, int timeout);
+extern int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout);
/* wrappers around usb_control_msg() for the most common standard requests */
int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
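A hedged sketch of the new killable variant, bounded by the new timeout cap
(device, endpoint, and buffer variables are assumed from context):

	/* Sketch: killable bulk read capped at USB_MAX_SYNCHRONOUS_TIMEOUT */
	ret = usb_bulk_msg_killable(udev, usb_rcvbulkpipe(udev, ep),
				    buf, len, &actual,
				    USB_MAX_SYNCHRONOUS_TIMEOUT);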
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 2f7bd2fdc616..b3cc7beab4a3 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -78,4 +78,7 @@
/* skip BOS descriptor request */
#define USB_QUIRK_NO_BOS BIT(17)
+/* Device claims zero configurations; force it to 1 */
+#define USB_QUIRK_FORCE_ONE_CONFIG BIT(18)
+
#endif /* __LINUX_USB_QUIRKS_H */
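A device would opt in via the core quirks table; a hypothetical entry with
placeholder IDs:

	/* Hypothetical drivers/usb/core/quirks.c entry; IDs are invented */
	{ USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_FORCE_ONE_CONFIG },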
diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h
index 2ca60828f28b..1502b2a355f9 100644
--- a/include/linux/usb/r8152.h
+++ b/include/linux/usb/r8152.h
@@ -32,6 +32,7 @@
#define VENDOR_ID_DLINK 0x2001
#define VENDOR_ID_DELL 0x413c
#define VENDOR_ID_ASUS 0x0b05
+#define VENDOR_ID_TRENDNET 0x20f4
#if IS_REACHABLE(CONFIG_USB_RTL8152)
extern u8 rtl8152_get_version(struct usb_interface *intf);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index b0e84896e6ac..bbf799ccf3b3 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -132,6 +132,7 @@ struct driver_info {
#define FLAG_MULTI_PACKET 0x2000
#define FLAG_RX_ASSEMBLE 0x4000 /* rx packets may span >1 frames */
#define FLAG_NOARP 0x8000 /* device can't do ARP */
+#define FLAG_NOMAXMTU 0x10000 /* allow max_mtu above hard_mtu */
/* init device ... can sleep, or cause probe() failure */
int (*bind)(struct usbnet *, struct usb_interface *);
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 75dabb763c65..f36d21b5bc19 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -207,6 +207,39 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
return __virtio_net_hdr_to_skb(skb, hdr, little_endian, hdr->gso_type);
}
+/* This function must be called after virtio_net_hdr_from_skb(). */
+static inline void __virtio_net_set_hdrlen(const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr,
+ bool little_endian)
+{
+ u16 hdr_len;
+
+ hdr_len = skb_transport_offset(skb);
+
+ if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP_L4)
+ hdr_len += sizeof(struct udphdr);
+ else
+ hdr_len += tcp_hdrlen(skb);
+
+ hdr->hdr_len = __cpu_to_virtio16(little_endian, hdr_len);
+}
+
+/* This function must be called after virtio_net_hdr_from_skb(). */
+static inline void __virtio_net_set_tnl_hdrlen(const struct sk_buff *skb,
+ struct virtio_net_hdr *hdr)
+{
+ u16 hdr_len;
+
+ hdr_len = skb_inner_transport_offset(skb);
+
+ if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP_L4)
+ hdr_len += sizeof(struct udphdr);
+ else
+ hdr_len += inner_tcp_hdrlen(skb);
+
+ hdr->hdr_len = __cpu_to_virtio16(true, hdr_len);
+}
+
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian,
@@ -385,7 +418,8 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
bool tnl_hdr_negotiated,
bool little_endian,
int vlan_hlen,
- bool has_data_valid)
+ bool has_data_valid,
+ bool feature_hdrlen)
{
struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)vhdr;
unsigned int inner_nh, outer_th;
@@ -394,9 +428,17 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
tnl_gso_type = skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM);
- if (!tnl_gso_type)
- return virtio_net_hdr_from_skb(skb, hdr, little_endian,
- has_data_valid, vlan_hlen);
+ if (!tnl_gso_type) {
+ ret = virtio_net_hdr_from_skb(skb, hdr, little_endian,
+ has_data_valid, vlan_hlen);
+ if (ret)
+ return ret;
+
+ if (feature_hdrlen && hdr->hdr_len)
+ __virtio_net_set_hdrlen(skb, hdr, little_endian);
+
+ return ret;
+ }
	/* Tunnel support not negotiated but the skb asks for it. */
if (!tnl_hdr_negotiated)
@@ -414,6 +456,9 @@ virtio_net_hdr_tnl_from_skb(const struct sk_buff *skb,
if (ret)
return ret;
+ if (feature_hdrlen && hdr->hdr_len)
+ __virtio_net_set_tnl_hdrlen(skb, hdr);
+
if (skb->protocol == htons(ETH_P_IPV6))
hdr->gso_type |= VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6;
else
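Callers of virtio_net_hdr_tnl_from_skb() gain one boolean. A sketch of an
updated transmit-path call site; vhdr's type and the negotiated flags are
assumptions standing in for the driver's setup state:

	/* Sketch: feature_hdrlen mirrors the negotiated hdr_len feature */
	if (virtio_net_hdr_tnl_from_skb(skb, vhdr, tnl_negotiated,
					virtio_is_little_endian(vdev),
					0 /* vlan_hlen */,
					false /* has_data_valid */,
					hdrlen_negotiated))
		goto drop;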
diff --git a/include/net/act_api.h b/include/net/act_api.h
index e1e8f0f7dacb..d11b79107930 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -70,6 +70,7 @@ struct tc_action {
#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
#define TCA_ACT_FLAGS_AT_INGRESS (1U << (TCA_ACT_FLAGS_USER_BITS + 4))
+#define TCA_ACT_FLAGS_AT_INGRESS_OR_CLSACT (1U << (TCA_ACT_FLAGS_USER_BITS + 5))
/* Update lastuse only if needed, to avoid dirtying a cache line.
* We use a temp variable to avoid fetching jiffies twice.
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 010f1a8fd15f..5172afee5494 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -658,6 +658,7 @@ struct l2cap_conn {
struct sk_buff *rx_skb;
__u32 rx_len;
struct ida tx_ida;
+ __u8 tx_ident;
struct sk_buff_head pending_rx;
struct work_struct pending_rx_work;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 4ad5521e7731..395c6e281c5f 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -699,6 +699,7 @@ void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
const char *bond_mode_name(int mode);
+bool __bond_xdp_check(int mode, int xmit_policy);
bool bond_xdp_check(struct bonding *bond, int mode);
void bond_setup(struct net_device *bond_dev);
unsigned int bond_get_num_tx_queues(void);
diff --git a/include/net/codel_impl.h b/include/net/codel_impl.h
index 78a27ac73070..b2c359c6dd1b 100644
--- a/include/net/codel_impl.h
+++ b/include/net/codel_impl.h
@@ -158,6 +158,7 @@ static struct sk_buff *codel_dequeue(void *ctx,
bool drop;
if (!skb) {
+ vars->first_above_time = 0;
vars->dropping = false;
return skb;
}
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 282e29237d93..c16de5b7963f 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -175,7 +175,7 @@ static inline bool inet6_match(const struct net *net, const struct sock *sk,
{
if (!net_eq(sock_net(sk), net) ||
sk->sk_family != AF_INET6 ||
- sk->sk_portpair != ports ||
+ READ_ONCE(sk->sk_portpair) != ports ||
!ipv6_addr_equal(&sk->sk_v6_daddr, saddr) ||
!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return false;
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index ac05a52d9e13..6d936e9f2fd3 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -264,6 +264,20 @@ inet_bhashfn_portaddr(const struct inet_hashinfo *hinfo, const struct sock *sk,
return &hinfo->bhash2[hash & (hinfo->bhash_size - 1)];
}
+static inline bool inet_use_hash2_on_bind(const struct sock *sk)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6) {
+ if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
+ return false;
+
+ if (!ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ return true;
+ }
+#endif
+ return sk->sk_rcv_saddr != htonl(INADDR_ANY);
+}
+
struct inet_bind_hashbucket *
inet_bhash2_addr_any_hashbucket(const struct sock *sk, const struct net *net, int port);
@@ -345,7 +359,7 @@ static inline bool inet_match(const struct net *net, const struct sock *sk,
int dif, int sdif)
{
if (!net_eq(sock_net(sk), net) ||
- sk->sk_portpair != ports ||
+ READ_ONCE(sk->sk_portpair) != ports ||
sk->sk_addrpair != cookie)
return false;
diff --git a/include/net/ip.h b/include/net/ip.h
index 69d5cef46004..7f9abd457e01 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -101,7 +101,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
- ipcm->protocol = inet->inet_num;
+ ipcm->protocol = READ_ONCE(inet->inet_num);
}
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 88b0dd4d8e09..9f8b6814a96a 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -507,12 +507,14 @@ void fib6_rt_update(struct net *net, struct fib6_info *rt,
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
unsigned int flags);
+void fib6_age_exceptions(struct fib6_info *rt, struct fib6_gc_args *gc_args,
+ unsigned long now);
void fib6_run_gc(unsigned long expires, struct net *net, bool force);
-
void fib6_gc_cleanup(void);
int fib6_init(void);
+#if IS_ENABLED(CONFIG_IPV6)
/* Add the route to the gc list if it is not already there
*
* The callers should hold f6i->fib6_table->tb6_lock.
@@ -545,6 +547,23 @@ static inline void fib6_remove_gc_list(struct fib6_info *f6i)
hlist_del_init(&f6i->gc_link);
}
+static inline void fib6_may_remove_gc_list(struct net *net,
+ struct fib6_info *f6i)
+{
+ struct fib6_gc_args gc_args;
+
+ if (hlist_unhashed(&f6i->gc_link))
+ return;
+
+ gc_args.timeout = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_interval);
+ gc_args.more = 0;
+
+ rcu_read_lock();
+ fib6_age_exceptions(f6i, &gc_args, jiffies);
+ rcu_read_unlock();
+}
+#endif
+
struct ipv6_route_iter {
struct seq_net_private p;
struct fib6_walker w;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 120db2865811..359b595f1df9 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -156,6 +156,18 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
{
int pkt_len, err;
+ if (unlikely(dev_recursion_level() > IP_TUNNEL_RECURSION_LIMIT)) {
+ if (dev) {
+ net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
+ dev->name);
+ DEV_STATS_INC(dev, tx_errors);
+ }
+ kfree_skb(skb);
+ return;
+ }
+
+ dev_xmit_recursion_inc();
+
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
IP6CB(skb)->flags = ip6cb_flags;
pkt_len = skb->len - skb_inner_network_offset(skb);
@@ -166,6 +178,8 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
pkt_len = -1;
iptunnel_xmit_stats(dev, pkt_len);
}
+
+ dev_xmit_recursion_dec();
}
#endif
#endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index b4495c38e0a0..318593743b6e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -559,7 +559,7 @@ static inline u32 fib_multipath_hash_from_keys(const struct net *net,
siphash_aligned_key_t hash_key;
u32 mp_seed;
- mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
+ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.mp_seed);
fib_multipath_hash_construct_key(&hash_key, mp_seed);
return flow_hash_from_keys_seed(keys, &hash_key);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 4021e6a73e32..1f577a4f8ce9 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -27,6 +27,13 @@
#include <net/ip6_route.h>
#endif
+/* Recursion limit for tunnel xmit to detect routing loops.
+ * Unlike XMIT_RECURSION_LIMIT (8) used in the no-qdisc path, tunnel
+ * recursion involves route lookups and full IP output, consuming much
+ * more stack per level, so a lower limit is needed.
+ */
+#define IP_TUNNEL_RECURSION_LIMIT 4
+
/* Keep error state on tunnel for 30 sec */
#define IPTUNNEL_ERR_TIMEO (30*HZ)
@@ -658,13 +665,29 @@ static inline int iptunnel_pull_offloads(struct sk_buff *skb)
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
if (pkt_len > 0) {
- struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- u64_stats_add(&tstats->tx_bytes, pkt_len);
- u64_stats_inc(&tstats->tx_packets);
- u64_stats_update_end(&tstats->syncp);
- put_cpu_ptr(tstats);
+ if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) {
+ struct pcpu_dstats *dstats = get_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_add(&dstats->tx_bytes, pkt_len);
+ u64_stats_inc(&dstats->tx_packets);
+ u64_stats_update_end(&dstats->syncp);
+ put_cpu_ptr(dstats);
+ return;
+ }
+ if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) {
+ struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_bytes, pkt_len);
+ u64_stats_inc(&tstats->tx_packets);
+ u64_stats_update_end(&tstats->syncp);
+ put_cpu_ptr(tstats);
+ return;
+ }
+		pr_err_once("iptunnel_xmit_stats: unexpected pcpu_stat_type %d\n",
+			    dev->pcpu_stat_type);
+ WARN_ON_ONCE(1);
return;
}
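With the branch above, tunnel drivers that declare a per-CPU stats type get
the matching accounting. A sketch of a driver opting into dstats, assuming the
core's existing behaviour of allocating the per-CPU stats for a declared
pcpu_stat_type (the setup function name is invented):

	/* Sketch: declare dstats so iptunnel_xmit_stats() picks them up */
	static void my_tunnel_setup(struct net_device *dev)
	{
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
	}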
diff --git a/include/net/libeth/xsk.h b/include/net/libeth/xsk.h
index 481a7b28e6f2..82b5d21aae87 100644
--- a/include/net/libeth/xsk.h
+++ b/include/net/libeth/xsk.h
@@ -597,6 +597,7 @@ __libeth_xsk_run_pass(struct libeth_xdp_buff *xdp,
* @pending: current number of XSkFQEs to refill
* @thresh: threshold below which the queue is refilled
* @buf_len: HW-writeable length per each buffer
+ * @truesize: step between consecutive buffers, 0 if none exists
* @nid: ID of the closest NUMA node with memory
*/
struct libeth_xskfq {
@@ -614,6 +615,8 @@ struct libeth_xskfq {
u32 thresh;
u32 buf_len;
+ u32 truesize;
+
int nid;
};
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 7f9d96939a4e..adce2144a678 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -7407,7 +7407,9 @@ void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif,
* @band: the band to transmit on
* @sta: optional pointer to get the station to send the frame to
*
- * Return: %true if the skb was prepared, %false otherwise
+ * Return: %true if the skb was prepared, %false otherwise.
+ * On failure, the skb is freed by this function; callers must not
+ * free it again.
*
* Note: must be called under RCU lock
*/
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 3384859a8921..8883575adcc1 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -83,6 +83,11 @@ void nf_conntrack_lock(spinlock_t *lock);
extern spinlock_t nf_conntrack_expect_lock;
+static inline void lockdep_nfct_expect_lock_held(void)
+{
+ lockdep_assert_held(&nf_conntrack_expect_lock);
+}
+
/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */
static inline void __nf_ct_set_timeout(struct nf_conn *ct, u64 timeout)
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 165e7a03b8e9..e9a8350e7ccf 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -22,10 +22,16 @@ struct nf_conntrack_expect {
/* Hash member */
struct hlist_node hnode;
+ /* Network namespace */
+ possible_net_t net;
+
/* We expect this tuple, with the following mask */
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_mask mask;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ struct nf_conntrack_zone zone;
+#endif
/* Usage count. */
refcount_t use;
@@ -40,7 +46,7 @@ struct nf_conntrack_expect {
struct nf_conntrack_expect *this);
/* Helper to assign to new connection */
- struct nf_conntrack_helper *helper;
+ struct nf_conntrack_helper __rcu *helper;
/* The conntrack of the master connection */
struct nf_conn *master;
@@ -62,7 +68,17 @@ struct nf_conntrack_expect {
static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
{
- return nf_ct_net(exp->master);
+ return read_pnet(&exp->net);
+}
+
+static inline bool nf_ct_exp_zone_equal_any(const struct nf_conntrack_expect *a,
+ const struct nf_conntrack_zone *b)
+{
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+ return a->zone.id == b->id;
+#else
+ return true;
+#endif
}
#define NF_CT_EXP_POLICY_NAME_LEN 16
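With the per-expectation net and zone, lookups can compare both without
touching the master conntrack; a hedged sketch of a hash-walk filter:

	/* Sketch: netns- and zone-aware match while walking the expect hash */
	if (!net_eq(nf_ct_exp_net(exp), net) ||
	    !nf_ct_exp_zone_equal_any(exp, zone))
		continue;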
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 426534a711b0..ec8a8ec9c0aa 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -277,8 +277,6 @@ struct nft_userdata {
unsigned char data[];
};
-#define NFT_SET_ELEM_INTERNAL_LAST 0x1
-
/* placeholder structure for opaque set element backend representation. */
struct nft_elem_priv { };
@@ -288,7 +286,6 @@ struct nft_elem_priv { };
* @key: element key
* @key_end: closing element key
* @data: element data
- * @flags: flags
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -304,7 +301,6 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} data;
- u32 flags;
struct nft_elem_priv *priv;
};
@@ -320,11 +316,13 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
* @NFT_ITER_UNSPEC: unspecified, to catch errors
* @NFT_ITER_READ: read-only iteration over set elements
* @NFT_ITER_UPDATE: iteration under mutex to update set element state
+ * @NFT_ITER_UPDATE_CLONE: clone set before iteration under mutex to update element
*/
enum nft_iter_type {
NFT_ITER_UNSPEC,
NFT_ITER_READ,
NFT_ITER_UPDATE,
+ NFT_ITER_UPDATE_CLONE,
};
struct nft_set;
@@ -876,6 +874,8 @@ struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set,
u64 timeout, u64 expiration, gfp_t gfp);
int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_expr *expr_array[]);
+void nft_set_elem_expr_destroy(const struct nft_ctx *ctx,
+ struct nft_set_elem_expr *elem_expr);
void nft_set_elem_destroy(const struct nft_set *set,
const struct nft_elem_priv *elem_priv,
bool destroy_expr);
@@ -1861,6 +1861,11 @@ struct nft_trans_gc {
struct rcu_head rcu;
};
+static inline int nft_trans_gc_space(const struct nft_trans_gc *trans)
+{
+ return NFT_TRANS_GC_BATCHCOUNT - trans->count;
+}
+
static inline void nft_ctx_update(struct nft_ctx *ctx,
const struct nft_trans *trans)
{
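nft_trans_gc_space() lets set backends check the remaining batch headroom
before queueing another expired element; a sketch using the existing batching
helpers, with the surrounding control flow simplified:

	/* Sketch: flush the batch when no space is left, then keep adding */
	if (!nft_trans_gc_space(gc)) {
		gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
		if (!gc)
			return NULL;
	}
	nft_trans_gc_elem_add(gc, elem_priv);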
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 23dd647fe024..b73983a17e08 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -59,7 +59,7 @@ struct netns_xfrm {
struct list_head inexact_bins;
- struct sock *nlsk;
+ struct sock __rcu *nlsk;
struct sock *nlsk_stash;
u32 sysctl_aevent_etime;
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
index 0d453484a585..cdd95477af7a 100644
--- a/include/net/page_pool/types.h
+++ b/include/net/page_pool/types.h
@@ -247,7 +247,7 @@ struct page_pool {
/* User-facing fields, protected by page_pools_lock */
struct {
struct hlist_node list;
- u64 detach_time;
+ ktime_t detach_time;
u32 id;
} user;
};
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c3a7268b567e..c3d657359a3d 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -716,6 +716,34 @@ void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+
+static inline void dev_reset_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_unused)
+{
+ struct Qdisc *qdisc;
+ bool nolock;
+
+ qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+ if (!qdisc)
+ return;
+
+ nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+ if (nolock)
+ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+
+ qdisc_reset(qdisc);
+
+ spin_unlock_bh(qdisc_lock(qdisc));
+ if (nolock) {
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
+ spin_unlock_bh(&qdisc->seqlock);
+ }
+}
+
#ifdef CONFIG_NET_SCHED
int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
void *type_data);
@@ -778,13 +806,23 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
struct Qdisc *qdisc;
+ bool nolock;
for (; i < dev->num_tx_queues; i++) {
qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
if (qdisc) {
+ nolock = qdisc->flags & TCQ_F_NOLOCK;
+
+ if (nolock)
+ spin_lock_bh(&qdisc->seqlock);
spin_lock_bh(qdisc_lock(qdisc));
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
+ if (nolock) {
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
+ spin_unlock_bh(&qdisc->seqlock);
+ }
}
}
}
@@ -1419,6 +1457,11 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
struct tcf_block *block);
+static inline bool mini_qdisc_pair_inited(struct mini_Qdisc_pair *miniqp)
+{
+ return !!miniqp->p_miniq;
+}
+
void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
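dev_reset_queue()'s signature matches the netdev_for_each_tx_queue() callback,
so resetting all TX queues of a device reduces to a single call, presumably
for callers such as dev_deactivate_many():

	/* Sketch: reset every TX queue with the new helper */
	netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);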
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index cddebafb9f77..6f996229167b 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -5,16 +5,47 @@
#include <linux/types.h>
struct net;
+extern struct net init_net;
+
+union tcp_seq_and_ts_off {
+ struct {
+ u32 seq;
+ u32 ts_off;
+ };
+ u64 hash64;
+};
u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
-u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport);
-u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr);
-u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport);
-u32 secure_tcpv6_ts_off(const struct net *net,
- const __be32 *saddr, const __be32 *daddr);
+union tcp_seq_and_ts_off
+secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+
+static inline u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ union tcp_seq_and_ts_off ts;
+
+ ts = secure_tcp_seq_and_ts_off(&init_net, saddr, daddr,
+ sport, dport);
+
+ return ts.seq;
+}
+
+union tcp_seq_and_ts_off
+secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr,
+ const __be32 *daddr,
+ __be16 sport, __be16 dport);
+
+static inline u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport)
+{
+ union tcp_seq_and_ts_off ts;
+
+ ts = secure_tcpv6_seq_and_ts_off(&init_net, saddr, daddr,
+ sport, dport);
+ return ts.seq;
+}
#endif /* _NET_SECURE_SEQ */
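The union lets one siphash invocation yield both the initial sequence number
and the timestamp offset; a sketch of a consumer, with skb parsing assumed:

	/* Sketch: derive ISN and ts_off from a single hash computation */
	union tcp_seq_and_ts_off v;

	v = secure_tcp_seq_and_ts_off(net, ip_hdr(skb)->saddr,
				      ip_hdr(skb)->daddr,
				      tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
	isn = v.seq;
	ts_off = v.ts_off;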
diff --git a/include/net/tc_act/tc_gate.h b/include/net/tc_act/tc_gate.h
index b147a3bb1a46..e0fded18e18c 100644
--- a/include/net/tc_act/tc_gate.h
+++ b/include/net/tc_act/tc_gate.h
@@ -32,6 +32,7 @@ struct tcf_gate_params {
s32 tcfg_clockid;
size_t num_entries;
struct list_head entries;
+ struct rcu_head rcu;
};
#define GATE_ACT_GATE_OPEN BIT(0)
@@ -39,7 +40,7 @@ struct tcf_gate_params {
struct tcf_gate {
struct tc_action common;
- struct tcf_gate_params param;
+ struct tcf_gate_params __rcu *param;
u8 current_gate_status;
ktime_t current_close_time;
u32 current_entry_octets;
@@ -51,47 +52,65 @@ struct tcf_gate {
#define to_gate(a) ((struct tcf_gate *)a)
+static inline struct tcf_gate_params *tcf_gate_params_locked(const struct tc_action *a)
+{
+ struct tcf_gate *gact = to_gate(a);
+
+ return rcu_dereference_protected(gact->param,
+ lockdep_is_held(&gact->tcf_lock));
+}
+
static inline s32 tcf_gate_prio(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
s32 tcfg_prio;
- tcfg_prio = to_gate(a)->param.tcfg_priority;
+ p = tcf_gate_params_locked(a);
+ tcfg_prio = p->tcfg_priority;
return tcfg_prio;
}
static inline u64 tcf_gate_basetime(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u64 tcfg_basetime;
- tcfg_basetime = to_gate(a)->param.tcfg_basetime;
+ p = tcf_gate_params_locked(a);
+ tcfg_basetime = p->tcfg_basetime;
return tcfg_basetime;
}
static inline u64 tcf_gate_cycletime(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u64 tcfg_cycletime;
- tcfg_cycletime = to_gate(a)->param.tcfg_cycletime;
+ p = tcf_gate_params_locked(a);
+ tcfg_cycletime = p->tcfg_cycletime;
return tcfg_cycletime;
}
static inline u64 tcf_gate_cycletimeext(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u64 tcfg_cycletimeext;
- tcfg_cycletimeext = to_gate(a)->param.tcfg_cycletime_ext;
+ p = tcf_gate_params_locked(a);
+ tcfg_cycletimeext = p->tcfg_cycletime_ext;
return tcfg_cycletimeext;
}
static inline u32 tcf_gate_num_entries(const struct tc_action *a)
{
+ struct tcf_gate_params *p;
u32 num_entries;
- num_entries = to_gate(a)->param.num_entries;
+ p = tcf_gate_params_locked(a);
+ num_entries = p->num_entries;
return num_entries;
}
@@ -105,7 +124,7 @@ static inline struct action_gate_entry
u32 num_entries;
int i = 0;
- p = &to_gate(a)->param;
+ p = tcf_gate_params_locked(a);
num_entries = p->num_entries;
list_for_each_entry(entry, &p->entries, list)
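The __rcu conversion implies datapath readers take the RCU side rather than
the tcf_lock accessor above; a hedged sketch of such a reader:

	/* Sketch: lockless read of one parameter under RCU */
	rcu_read_lock();
	p = rcu_dereference(to_gate(a)->param);
	cycletime = p->tcfg_cycletime;
	rcu_read_unlock();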
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index c7f24a2da1ca..24d4d5a62b3c 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -13,15 +13,13 @@ struct tcf_ife_params {
u8 eth_src[ETH_ALEN];
u16 eth_type;
u16 flags;
-
+ struct list_head metalist;
struct rcu_head rcu;
};
struct tcf_ife_info {
struct tc_action common;
struct tcf_ife_params __rcu *params;
- /* list of metaids allowed */
- struct list_head metalist;
};
#define to_ife(a) ((struct tcf_ife_info *)a)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eb8bf63fdafc..978eea2d5df0 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -43,6 +43,7 @@
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
+#include <net/secure_seq.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
@@ -2464,8 +2465,9 @@ struct tcp_request_sock_ops {
struct flowi *fl,
struct request_sock *req,
u32 tw_isn);
- u32 (*init_seq)(const struct sk_buff *skb);
- u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
+ union tcp_seq_and_ts_off (*init_seq_and_ts_off)(
+ const struct net *net,
+ const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index d9c6d04bb3b5..fc1fc43345b5 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -52,7 +52,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
{
- return 0;
+ return -EPFNOSUPPORT;
}
#endif
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index 242e34f771cc..6b9ebae2dc95 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -51,6 +51,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
+static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
+{
+ return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
+}
+
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
@@ -122,7 +127,7 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
goto out;
list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
- list_del(&pos->list_node);
+ list_del_init(&pos->list_node);
xp_free(pos);
}
@@ -157,7 +162,7 @@ static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
frag = list_first_entry_or_null(&xskb->pool->xskb_list,
struct xdp_buff_xsk, list_node);
if (frag) {
- list_del(&frag->list_node);
+ list_del_init(&frag->list_node);
ret = &frag->xdp;
}
@@ -168,7 +173,7 @@ static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
- list_del(&xskb->list_node);
+ list_del_init(&xskb->list_node);
}
static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
@@ -337,6 +342,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
return 0;
}
+static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
+{
+ return 0;
+}
+
static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index ae1e1489b671..28f9f5940ab6 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -406,6 +406,7 @@ extern const char * const cs35l56_cal_set_status_text[3];
extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
+int cs35l56_set_asp_patch(struct cs35l56_base *cs35l56_base);
int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h
index 79bd5a7a0f88..0e871c786513 100644
--- a/include/sound/sdca_function.h
+++ b/include/sound/sdca_function.h
@@ -27,11 +27,6 @@ struct sdca_function_desc;
#define SDCA_MAX_ENTITY_COUNT 128
/*
- * Sanity check on number of initialization writes, can be expanded if needed.
- */
-#define SDCA_MAX_INIT_COUNT 2048
-
-/*
* The Cluster IDs are 16-bit, so a maximum of 65535 Clusters per
* function can be represented, however limit this to a slightly
* more reasonable value. Can be expanded if needed.
diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h
index 7c03bdc951bb..e847cf51878c 100644
--- a/include/sound/tas2781.h
+++ b/include/sound/tas2781.h
@@ -151,6 +151,7 @@ struct tasdevice {
struct bulk_reg_val *cali_data_backup;
struct bulk_reg_val alp_cali_bckp;
struct tasdevice_fw *cali_data_fmw;
+ void *cali_specific;
unsigned int dev_addr;
unsigned int err_code;
unsigned char cur_book;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 125bdc166bfe..0864700f76e0 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -769,12 +769,15 @@ TRACE_EVENT(btrfs_sync_file,
),
TP_fast_assign(
- const struct dentry *dentry = file->f_path.dentry;
- const struct inode *inode = d_inode(dentry);
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = file_inode(file);
+ struct dentry *parent = dget_parent(dentry);
+ struct inode *parent_inode = d_inode(parent);
-	TP_fast_assign_fsid(btrfs_sb(file->f_path.dentry->d_sb));
+	TP_fast_assign_fsid(btrfs_sb(inode->i_sb));
 	__entry->ino = btrfs_ino(BTRFS_I(inode));
-	__entry->parent = btrfs_ino(BTRFS_I(d_inode(dentry->d_parent)));
+	__entry->parent = btrfs_ino(BTRFS_I(parent_inode));
+	dput(parent);
__entry->datasync = datasync;
__entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
),
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index 33e99e792f1a..63597b004424 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -32,7 +32,9 @@ TRACE_DEFINE_ENUM(DMA_NONE);
{ DMA_ATTR_ALLOC_SINGLE_PAGES, "ALLOC_SINGLE_PAGES" }, \
{ DMA_ATTR_NO_WARN, "NO_WARN" }, \
{ DMA_ATTR_PRIVILEGED, "PRIVILEGED" }, \
- { DMA_ATTR_MMIO, "MMIO" })
+ { DMA_ATTR_MMIO, "MMIO" }, \
+	{ DMA_ATTR_DEBUGGING_IGNORE_CACHELINES, "DEBUGGING_IGNORE_CACHELINES" }, \
+ { DMA_ATTR_REQUIRE_COHERENT, "REQUIRE_COHERENT" })
DECLARE_EVENT_CLASS(dma_map,
TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr,
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 64a382fbc31a..cbe28211106c 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -57,6 +57,7 @@
EM(netfs_rreq_trace_done, "DONE ") \
EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
EM(netfs_rreq_trace_free, "FREE ") \
+ EM(netfs_rreq_trace_intr, "INTR ") \
EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
EM(netfs_rreq_trace_recollect, "RECLLCT") \
EM(netfs_rreq_trace_redirty, "REDIRTY") \
@@ -169,7 +170,8 @@
EM(netfs_sreq_trace_put_oom, "PUT OOM ") \
EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
EM(netfs_sreq_trace_put_work, "PUT WORK ") \
- E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
+ EM(netfs_sreq_trace_put_terminated, "PUT TERM ") \
+ E_(netfs_sreq_trace_see_failed, "SEE FAILED ")
#define netfs_folio_traces \
EM(netfs_folio_is_uptodate, "mod-uptodate") \
@@ -738,19 +740,19 @@ TRACE_EVENT(netfs_collect_stream,
__field(unsigned int, wreq)
__field(unsigned char, stream)
__field(unsigned long long, collected_to)
- __field(unsigned long long, front)
+ __field(unsigned long long, issued_to)
),
TP_fast_assign(
__entry->wreq = wreq->debug_id;
__entry->stream = stream->stream_nr;
__entry->collected_to = stream->collected_to;
- __entry->front = stream->front ? stream->front->start : UINT_MAX;
+ __entry->issued_to = atomic64_read(&wreq->issued_to);
),
- TP_printk("R=%08x[%x:] cto=%llx frn=%llx",
+ TP_printk("R=%08x[%x:] cto=%llx ito=%llx",
__entry->wreq, __entry->stream,
- __entry->collected_to, __entry->front)
+ __entry->collected_to, __entry->issued_to)
);
TRACE_EVENT(netfs_folioq,
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 4f0759634306..b9a129eb54d9 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -38,19 +38,22 @@ TRACE_EVENT(task_rename,
TP_ARGS(task, comm),
TP_STRUCT__entry(
+ __field( pid_t, pid)
__array( char, oldcomm, TASK_COMM_LEN)
__array( char, newcomm, TASK_COMM_LEN)
__field( short, oom_score_adj)
),
TP_fast_assign(
+ __entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
- TP_printk("oldcomm=%s newcomm=%s oom_score_adj=%hd",
- __entry->oldcomm, __entry->newcomm, __entry->oom_score_adj)
+ TP_printk("pid=%d oldcomm=%s newcomm=%s oom_score_adj=%hd",
+ __entry->pid, __entry->oldcomm,
+ __entry->newcomm, __entry->oom_score_adj)
);
/**
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
index 5a6fda66d9ad..e827c9d20c5d 100644
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -20,6 +20,7 @@
#ifndef _DMA_BUF_UAPI_H_
#define _DMA_BUF_UAPI_H_
+#include <linux/ioctl.h>
#include <linux/types.h>
/**
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 6750c383a2ab..1ff16141c8a5 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -188,7 +188,8 @@ enum io_uring_sqe_flags_bit {
/*
* If COOP_TASKRUN is set, get notified if task work is available for
* running and a kernel transition would be needed to run it. This sets
- * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN.
+ * IORING_SQ_TASKRUN in the sq ring flags. Not valid without COOP_TASKRUN
+ * or DEFER_TASKRUN.
*/
#define IORING_SETUP_TASKRUN_FLAG (1U << 9)
#define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 65500f5db379..80364d4dbebb 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -14,6 +14,10 @@
#include <linux/ioctl.h>
#include <asm/kvm.h>
+#ifdef __KERNEL__
+#include <linux/kvm_types.h>
+#endif
+
#define KVM_API_VERSION 12
/*
@@ -1601,7 +1605,11 @@ struct kvm_stats_desc {
__u16 size;
__u32 offset;
__u32 bucket_size;
+#ifdef __KERNEL__
+ char name[KVM_STATS_NAME_SIZE];
+#else
char name[];
+#endif
};
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 26071021e986..56b6b60a814f 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -159,5 +159,9 @@ enum ip_conntrack_expect_events {
#define NF_CT_EXPECT_INACTIVE 0x2
#define NF_CT_EXPECT_USERSPACE 0x4
+#ifdef __KERNEL__
+#define NF_CT_EXPECT_MASK (NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_INACTIVE | \
+ NF_CT_EXPECT_USERSPACE)
+#endif
#endif /* _UAPI_NF_CONNTRACK_COMMON_H */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index c94caf852aea..8ca15743af7f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -80,6 +80,7 @@ struct xenbus_device {
const char *devicetype;
const char *nodename;
const char *otherend;
+ bool vanished;
int otherend_id;
struct xenbus_watch otherend_watch;
struct device dev;
@@ -228,7 +229,8 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port);
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port);
-enum xenbus_state xenbus_read_driver_state(const char *path);
+enum xenbus_state xenbus_read_driver_state(const struct xenbus_device *dev,
+ const char *path);
__printf(3, 4)
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);