author     Jason Gunthorpe <jgg@nvidia.com>  2022-10-07 01:47:03 +0300
committer  Jason Gunthorpe <jgg@nvidia.com>  2022-10-07 01:48:45 +0300
commit     33331a728c83f380e53a3dbf2be0c1893da1d739 (patch)
tree       236d3bde9840b641723467018b839c7f37a824ee /include/linux
parent     8ad891ed435ba24465e0650942267e90a060675f (diff)
parent     4fe89d07dcc2804c8b562f6c7896a45643d34b2f (diff)
Merge tag 'v6.0' into rdma.git for-next
Trivial merge conflicts against rdma.git for-rc resolved matching linux-next:
  drivers/infiniband/hw/hns/hns_roce_hw_v2.c
  drivers/infiniband/hw/hns/hns_roce_main.c

https://lore.kernel.org/r/20220929124005.105149-1-broonie@kernel.org

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/amba/bus.h | 1
-rw-r--r--  include/linux/bitops.h | 1
-rw-r--r--  include/linux/blk-mq.h | 1
-rw-r--r--  include/linux/buffer_head.h | 13
-rw-r--r--  include/linux/cgroup.h | 5
-rw-r--r--  include/linux/compiler.h | 6
-rw-r--r--  include/linux/cpumask.h | 31
-rw-r--r--  include/linux/debugfs.h | 6
-rw-r--r--  include/linux/device/driver.h | 1
-rw-r--r--  include/linux/dma-mapping.h | 5
-rw-r--r--  include/linux/fscache.h | 4
-rw-r--r--  include/linux/hp_sdc.h | 2
-rw-r--r--  include/linux/ieee80211.h | 8
-rw-r--r--  include/linux/kvm_host.h | 66
-rw-r--r--  include/linux/libata.h | 7
-rw-r--r--  include/linux/lsm_hook_defs.h | 1
-rw-r--r--  include/linux/lsm_hooks.h | 3
-rw-r--r--  include/linux/memcontrol.h | 60
-rw-r--r--  include/linux/memremap.h | 5
-rw-r--r--  include/linux/mlx5/driver.h | 20
-rw-r--r--  include/linux/mm.h | 14
-rw-r--r--  include/linux/netdevice.h | 20
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h | 4
-rw-r--r--  include/linux/nfs_fs.h | 2
-rw-r--r--  include/linux/of_device.h | 5
-rw-r--r--  include/linux/overflow.h | 1
-rw-r--r--  include/linux/pci_ids.h | 3
-rw-r--r--  include/linux/platform_data/x86/pmc_atom.h | 6
-rw-r--r--  include/linux/psi.h | 2
-rw-r--r--  include/linux/rmap.h | 7
-rw-r--r--  include/linux/scmi_protocol.h | 4
-rw-r--r--  include/linux/security.h | 5
-rw-r--r--  include/linux/serial_core.h | 25
-rw-r--r--  include/linux/shmem_fs.h | 13
-rw-r--r--  include/linux/skbuff.h | 21
-rw-r--r--  include/linux/spi/spi.h | 2
-rw-r--r--  include/linux/trace_events.h | 2
-rw-r--r--  include/linux/udp.h | 1
-rw-r--r--  include/linux/usb.h | 2
-rw-r--r--  include/linux/usb/typec_dp.h | 5
-rw-r--r--  include/linux/userfaultfd_k.h | 2
-rw-r--r--  include/linux/virtio.h | 6
-rw-r--r--  include/linux/virtio_config.h | 32
-rw-r--r--  include/linux/vm_event_item.h | 15
-rw-r--r--  include/linux/wait_bit.h | 8
45 files changed, 271 insertions, 182 deletions
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index e94cdf235f1d..5001e14c5c06 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -67,6 +67,7 @@ struct amba_device {
struct clk *pclk;
struct device_dma_parameters dma_parms;
unsigned int periphid;
+ struct mutex periphid_lock;
unsigned int cid;
struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cf9bf65039f2..3b89c64bcfd8 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -59,6 +59,7 @@ extern unsigned long __sw_hweight64(__u64 w);
#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+#define test_bit_acquire(nr, addr) bitop(_test_bit_acquire, nr, addr)
/*
* Include this here because some architectures need generic_ffs/fls in
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index effee1dc715a..92294a5fb083 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -857,7 +857,6 @@ void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_complete_request(struct request *rq);
bool blk_mq_complete_request_remote(struct request *rq);
-bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index def8b8d30ccc..df518c429667 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -138,6 +138,17 @@ BUFFER_FNS(Defer_Completion, defer_completion)
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
/*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
* make it consistent with folio_mark_uptodate
* pairs with smp_load_acquire in buffer_uptodate
*/
@@ -156,7 +167,7 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
* make it consistent with folio_test_uptodate
* pairs with smp_mb__before_atomic in set_buffer_uptodate
*/
- return (smp_load_acquire(&bh->b_state) & (1UL << BH_Uptodate)) != 0;
+ return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
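
For orientation, a minimal sketch (not part of this merge; every name below is invented) of the publish/observe pattern that set_buffer_uptodate()/buffer_uptodate() implement, using the new test_bit_acquire():

#include <linux/bitops.h>
#include <linux/errno.h>

static unsigned long example_state;
static int example_payload;

static void example_publish(void)
{
	example_payload = 42;
	smp_mb__before_atomic();	/* order the payload store before the flag */
	set_bit(0, &example_state);
}

static int example_consume(void)
{
	/* acquire: if the bit is observed, the payload store is observed too */
	if (test_bit_acquire(0, &example_state))
		return example_payload;
	return -EAGAIN;
}
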
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed53bfe7c46c..ac5d0515680e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -734,11 +734,6 @@ static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
return NULL;
}
-static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
-{
- return NULL;
-}
-
static inline bool cgroup_psi_enabled(void)
{
return false;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 01ce94b58b42..7713d7bcdaea 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -240,6 +240,12 @@ static inline void *offset_to_ptr(const int *off)
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
/*
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
+ */
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+
+/*
* This is needed in functions which generate the stack canary, see
* arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
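
A few illustrative compile-time checks (not part of the patch) of what the relocated helper reports for the type classes mentioned in its comment:

#include <linux/build_bug.h>

static_assert(is_signed_type(int));
static_assert(!is_signed_type(unsigned long));
static_assert(!is_signed_type(bool));		/* (bool)-1 is true, i.e. 1 */
static_assert(!is_signed_type(char *));		/* pointers compare as unsigned here */
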
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 0d435d0edbcb..e8ad12b5b9d2 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -202,12 +202,13 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
return 0;
}
-static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
- const struct cpumask *src2p) {
+static inline unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p)
+{
return cpumask_first_and(src1p, src2p);
}
-static inline int cpumask_any_distribute(const struct cpumask *srcp)
+static inline unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
return cpumask_first(srcp);
}
@@ -261,7 +262,26 @@ unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
(cpu) = cpumask_next_zero((cpu), (mask)), \
(cpu) < nr_cpu_ids;)
+#if NR_CPUS == 1
+static inline
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+ cpumask_check(start);
+ if (n != -1)
+ cpumask_check(n);
+
+ /*
+ * Return the first available CPU when wrapping, or when starting before cpu0,
+ * since there is only one valid option.
+ */
+ if (wrap && n >= 0)
+ return nr_cpumask_bits;
+
+ return cpumask_first(mask);
+}
+#else
unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+#endif
/**
* for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
@@ -1107,9 +1127,10 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
* cover a worst-case of every other cpu being on one of two nodes for a
* very large NR_CPUS.
*
- * Use PAGE_SIZE as a minimum for smaller configurations.
+ * Use PAGE_SIZE as a minimum for smaller configurations while avoiding
+ * unsigned comparison to -1.
*/
-#define CPUMAP_FILE_MAX_BYTES ((((NR_CPUS * 9)/32 - 1) > PAGE_SIZE) \
+#define CPUMAP_FILE_MAX_BYTES (((NR_CPUS * 9)/32 > PAGE_SIZE) \
? (NR_CPUS * 9)/32 - 1 : PAGE_SIZE)
#define CPULIST_FILE_MAX_BYTES (((NR_CPUS * 7)/2 > PAGE_SIZE) ? (NR_CPUS * 7)/2 : PAGE_SIZE)
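
Worked example of why the extra "- 1" was dropped from the comparison (illustrative, assuming the usual unsigned PAGE_SIZE): with NR_CPUS == 1, (NR_CPUS * 9)/32 - 1 evaluates to -1; compared against unsigned PAGE_SIZE it converts to ULONG_MAX, so the old macro took the "large config" branch and sized the buffer as -1. Comparing (NR_CPUS * 9)/32 directly keeps small configurations on the PAGE_SIZE branch.
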
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index c869f1e73d75..f60674692d36 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
#define debugfs_remove_recursive debugfs_remove
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
int debugfs_file_get(struct dentry *dentry);
@@ -225,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
static inline int debugfs_file_get(struct dentry *dentry)
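
An illustrative use of the new helper (not from this merge; names are hypothetical): it lets a driver drop a debugfs node it never kept a dentry for.

#include <linux/debugfs.h>

static u32 example_counter;

static void example_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	debugfs_create_u32("counter", 0444, dir, &example_counter);
}

static void example_debugfs_exit(void)
{
	/* look the directory up by name and remove it, children included */
	debugfs_lookup_and_remove("example", NULL);
}
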
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 7acaabde5396..2114d65b862f 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -242,6 +242,7 @@ driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
extern int driver_deferred_probe_timeout;
void driver_deferred_probe_add(struct device *dev);
+int driver_deferred_probe_check_state(struct device *dev);
void driver_init(void);
/**
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 25a30906289d..0ee20b764000 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -139,7 +139,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -248,10 +247,6 @@ static inline bool dma_can_mmap(struct device *dev)
{
return false;
}
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- return 0;
-}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
return false;
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 720874e6ee94..36e5dd84cf59 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -258,7 +258,7 @@ struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
/**
* fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
+ * @cookie: The cookie representing the cache object
* @will_modify: If cache is expected to be modified locally
*
* Request usage of the cookie attached to an object. The caller should tell
@@ -274,7 +274,7 @@ static inline void fscache_use_cookie(struct fscache_cookie *cookie,
/**
* fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
+ * @cookie: The cookie representing the cache object
* @aux_data: Updated auxiliary data (or NULL)
* @object_size: Revised size of the object (or NULL)
*
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index 6f1dee7e67e0..9be8704e2d38 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -180,7 +180,7 @@ switch (val) { \
#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
-/* The documents provided do not explicitly state that all registers betweem
+/* The documents provided do not explicitly state that all registers between
* 0x01 and 0x1f inclusive can be read by sending their register index as a
* command, but this is implied and appears to be the case.
*/
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 55e6f4ad0ca6..b6e6d5b40774 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -310,9 +310,11 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
+ struct_group(addrs,
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ );
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
} __packed __aligned(2);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1c480b1821e1..f4519d3689e1 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -656,12 +656,12 @@ struct kvm_irq_routing_table {
};
#endif
-#ifndef KVM_PRIVATE_MEM_SLOTS
-#define KVM_PRIVATE_MEM_SLOTS 0
+#ifndef KVM_INTERNAL_MEM_SLOTS
+#define KVM_INTERNAL_MEM_SLOTS 0
#endif
#define KVM_MEM_SLOTS_NUM SHRT_MAX
-#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)
+#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)
#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
@@ -765,10 +765,10 @@ struct kvm {
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
- unsigned long mmu_notifier_seq;
- long mmu_notifier_count;
- unsigned long mmu_notifier_range_start;
- unsigned long mmu_notifier_range_end;
+ unsigned long mmu_invalidate_seq;
+ long mmu_invalidate_in_progress;
+ unsigned long mmu_invalidate_range_start;
+ unsigned long mmu_invalidate_range_end;
#endif
struct list_head devices;
u64 manual_dirty_log_protect;
@@ -1357,10 +1357,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif
-void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end);
-void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start,
- unsigned long end);
+void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
+ unsigned long end);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -1907,42 +1907,44 @@ extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
-static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
+static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
- if (unlikely(kvm->mmu_notifier_count))
+ if (unlikely(kvm->mmu_invalidate_in_progress))
return 1;
/*
- * Ensure the read of mmu_notifier_count happens before the read
- * of mmu_notifier_seq. This interacts with the smp_wmb() in
- * mmu_notifier_invalidate_range_end to make sure that the caller
- * either sees the old (non-zero) value of mmu_notifier_count or
- * the new (incremented) value of mmu_notifier_seq.
- * PowerPC Book3s HV KVM calls this under a per-page lock
- * rather than under kvm->mmu_lock, for scalability, so
- * can't rely on kvm->mmu_lock to keep things ordered.
+ * Ensure the read of mmu_invalidate_in_progress happens before
+ * the read of mmu_invalidate_seq. This interacts with the
+ * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
+ * that the caller either sees the old (non-zero) value of
+ * mmu_invalidate_in_progress or the new (incremented) value of
+ * mmu_invalidate_seq.
+ *
+ * PowerPC Book3s HV KVM calls this under a per-page lock rather
+ * than under kvm->mmu_lock, for scalability, so can't rely on
+ * kvm->mmu_lock to keep things ordered.
*/
smp_rmb();
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
-static inline int mmu_notifier_retry_hva(struct kvm *kvm,
- unsigned long mmu_seq,
- unsigned long hva)
+static inline int mmu_invalidate_retry_hva(struct kvm *kvm,
+ unsigned long mmu_seq,
+ unsigned long hva)
{
lockdep_assert_held(&kvm->mmu_lock);
/*
- * If mmu_notifier_count is non-zero, then the range maintained by
- * kvm_mmu_notifier_invalidate_range_start contains all addresses that
- * might be being invalidated. Note that it may include some false
+ * If mmu_invalidate_in_progress is non-zero, then the range maintained
+ * by kvm_mmu_notifier_invalidate_range_start contains all addresses
+ * that might be being invalidated. Note that it may include some false
* positives, due to shortcuts when handing concurrent invalidations.
*/
- if (unlikely(kvm->mmu_notifier_count) &&
- hva >= kvm->mmu_notifier_range_start &&
- hva < kvm->mmu_notifier_range_end)
+ if (unlikely(kvm->mmu_invalidate_in_progress) &&
+ hva >= kvm->mmu_invalidate_range_start &&
+ hva < kvm->mmu_invalidate_range_end)
return 1;
- if (kvm->mmu_notifier_seq != mmu_seq)
+ if (kvm->mmu_invalidate_seq != mmu_seq)
return 1;
return 0;
}
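
For context, a condensed sketch of the consumer pattern the renamed helpers serve (illustrative only; it assumes an architecture where mmu_lock is an rwlock, and real fault paths carry more state):

static int example_map_gfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq;
	kvm_pfn_t pfn;

	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();			/* sample the sequence before the lookup */

	pfn = gfn_to_pfn(kvm, gfn);	/* resolved outside mmu_lock */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* an invalidation raced with us; drop the pfn and retry */
		write_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;
	}
	/* ... install the translation under mmu_lock ... */
	write_unlock(&kvm->mmu_lock);
	return 0;
}
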
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0269ff114f5a..20765d1c5f80 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1136,8 +1136,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
int queue_depth);
-extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
- int queue_depth);
+extern int ata_change_queue_depth(struct ata_port *ap, struct ata_device *dev,
+ struct scsi_device *sdev, int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1382,7 +1382,8 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.proc_name = drv_name, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity,\
+ .max_sectors = ATA_MAX_SECTORS_LBA48
#define ATA_SUBBASE_SHT(drv_name) \
__ATA_BASE_SHT(drv_name), \
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 806448173033..60fff133c0b1 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -407,4 +407,5 @@ LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#ifdef CONFIG_IO_URING
LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
#endif /* CONFIG_IO_URING */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 84a0d7e02176..3aa6030302f5 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1582,6 +1582,9 @@
* Check whether the current task is allowed to spawn a io_uring polling
* thread (IORING_SETUP_SQPOLL).
*
+ * @uring_cmd:
+ * Check whether the file_operations uring_cmd is allowed to run.
+ *
*/
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 4d31ce55b1c0..567f12323f55 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -987,19 +987,30 @@ static inline void mod_memcg_page_state(struct page *page,
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
- return READ_ONCE(memcg->vmstats.state[idx]);
+ long x = READ_ONCE(memcg->vmstats.state[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
enum node_stat_item idx)
{
struct mem_cgroup_per_node *pn;
+ long x;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return READ_ONCE(pn->lruvec_stats.state[idx]);
+ x = READ_ONCE(pn->lruvec_stats.state[idx]);
+#ifdef CONFIG_SMP
+ if (x < 0)
+ x = 0;
+#endif
+ return x;
}
static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
@@ -1777,42 +1788,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
rcu_read_unlock();
}
-/**
- * get_mem_cgroup_from_obj - get a memcg associated with passed kernel object.
- * @p: pointer to object from which memcg should be extracted. It can be NULL.
- *
- * Retrieves the memory group into which the memory of the pointed kernel
- * object is accounted. If memcg is found, its reference is taken.
- * If a passed kernel object is uncharged, or if proper memcg cannot be found,
- * as well as if mem_cgroup is disabled, NULL is returned.
- *
- * Return: valid memcg pointer with taken reference or NULL.
- */
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- struct mem_cgroup *memcg;
-
- rcu_read_lock();
- do {
- memcg = mem_cgroup_from_obj(p);
- } while (memcg && !css_tryget(&memcg->css));
- rcu_read_unlock();
- return memcg;
-}
-
-/**
- * mem_cgroup_or_root - always returns a pointer to a valid memory cgroup.
- * @memcg: pointer to a valid memory cgroup or NULL.
- *
- * If passed argument is not NULL, returns it without any additional checks
- * and changes. Otherwise, root_mem_cgroup is returned.
- *
- * NOTE: root_mem_cgroup can be NULL during early boot.
- */
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return memcg ? memcg : root_mem_cgroup;
-}
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1869,15 +1844,6 @@ static inline void count_objcg_event(struct obj_cgroup *objcg,
{
}
-static inline struct mem_cgroup *get_mem_cgroup_from_obj(void *p)
-{
- return NULL;
-}
-
-static inline struct mem_cgroup *mem_cgroup_or_root(struct mem_cgroup *memcg)
-{
- return NULL;
-}
#endif /* CONFIG_MEMCG_KMEM */
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 19010491a603..c3b4cc84877b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -139,6 +139,11 @@ struct dev_pagemap {
};
};
+static inline bool pgmap_has_memory_failure(struct dev_pagemap *pgmap)
+{
+ return pgmap->ops && pgmap->ops->memory_failure;
+}
+
static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
{
if (pgmap->flags & PGMAP_ALTMAP_VALID)
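
An illustrative consumer of the new predicate (not from this merge; the ->memory_failure() signature and flags value are assumptions):

#include <linux/errno.h>

static int example_report_poison(struct dev_pagemap *pgmap, unsigned long pfn)
{
	if (!pgmap_has_memory_failure(pgmap))
		return -EOPNOTSUPP;

	/* let the pgmap owner (e.g. fsdax) deal with the poisoned pfn */
	return pgmap->ops->memory_failure(pgmap, pfn, 1, 0);
}
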
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 9ccfd9dd0d0f..a07e3a00db10 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -779,6 +779,7 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
+ struct lock_class_key lock_key;
unsigned long intf_state;
struct mlx5_priv priv;
struct mlx5_profile profile;
@@ -1277,16 +1278,17 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- union devlink_param_value val;
- int err;
-
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ return MLX5_CAP_GEN(dev, roce);
+
+ /* If RoCE cap is read-only in FW, get RoCE state from devlink
+ * in order to support RoCE enable/disable feature
+ */
+ return mlx5_is_roce_on(dev);
}
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3bedc449c14d..21f8b27bd9fd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1544,9 +1544,16 @@ static inline bool is_longterm_pinnable_page(struct page *page)
if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
return false;
#endif
- return !(is_device_coherent_page(page) ||
- is_zone_movable_page(page) ||
- is_zero_pfn(page_to_pfn(page)));
+ /* The zero page may always be pinned */
+ if (is_zero_pfn(page_to_pfn(page)))
+ return true;
+
+ /* Coherent device memory must always allow eviction. */
+ if (is_device_coherent_page(page))
+ return false;
+
+ /* Otherwise, non-movable zone pages can be pinned. */
+ return !is_zone_movable_page(page);
}
#else
static inline bool is_longterm_pinnable_page(struct page *page)
@@ -2885,7 +2892,6 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
-#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1a3cb93c3dcc..05d6f3facd5a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -640,9 +640,23 @@ extern int sysctl_devconf_inherit_init_net;
*/
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return !IS_ENABLED(CONFIG_SYSCTL) ||
- !sysctl_fb_tunnels_only_for_init_net ||
- (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
+#if IS_ENABLED(CONFIG_SYSCTL)
+ int fb_tunnels_only_for_init_net = READ_ONCE(sysctl_fb_tunnels_only_for_init_net);
+
+ return !fb_tunnels_only_for_init_net ||
+ (net_eq(net, &init_net) && fb_tunnels_only_for_init_net == 1);
+#else
+ return true;
+#endif
+}
+
+static inline int net_inherit_devconf(void)
+{
+#if IS_ENABLED(CONFIG_SYSCTL)
+ return READ_ONCE(sysctl_devconf_inherit_init_net);
+#else
+ return 0;
+#endif
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index a13296d6c7ce..fd533552a062 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -94,10 +94,6 @@ struct ebt_table {
struct ebt_replace_kernel *table;
unsigned int valid_hooks;
rwlock_t lock;
- /* e.g. could be the table explicitly only allows certain
- * matches, targets, ... 0 == let it in */
- int (*check)(const struct ebt_table_info *info,
- unsigned int valid_hooks);
/* the data used by the kernel */
struct ebt_table_info *private;
struct nf_hook_ops *ops;
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index b32ed68e7dc4..7931fa472561 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -83,7 +83,6 @@ struct nfs_open_context {
fmode_t mode;
unsigned long flags;
-#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
#define NFS_CONTEXT_FILE_OPEN (4)
@@ -182,6 +181,7 @@ struct nfs_inode {
/* Regular file */
struct {
atomic_long_t nrequests;
+ atomic_long_t redirtied_pages;
struct nfs_mds_commit_info commit_info;
struct mutex commit_mutex;
};
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 1d7992a02e36..1a803e4335d3 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -101,8 +101,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
}
static inline int of_dma_configure_id(struct device *dev,
- struct device_node *np,
- bool force_dma)
+ struct device_node *np,
+ bool force_dma,
+ const u32 *id)
{
return 0;
}
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index f1221d11f8e5..0eb3b192f07a 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -30,7 +30,6 @@
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
-#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6feade66efdb..15b49e655ce3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2079,6 +2079,9 @@
#define PCI_DEVICE_ID_ICE_1712 0x1712
#define PCI_DEVICE_ID_VT1724 0x1724
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index 3edfb6d4e67a..dd81f510e4cf 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -7,6 +7,8 @@
#ifndef PMC_ATOM_H
#define PMC_ATOM_H
+#include <linux/bits.h>
+
/* ValleyView Power Control Unit PCI Device ID */
#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
/* CherryTrail Power Control Unit PCI Device ID */
@@ -139,9 +141,9 @@
#define ACPI_MMIO_REG_LEN 0x100
#define PM1_CNT 0x4
-#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_MASK GENMASK(12, 10)
#define SLEEP_TYPE_S5 0x1C00
-#define SLEEP_ENABLE 0x2000
+#define SLEEP_ENABLE BIT(13)
extern int pmc_atom_read(int offset, u32 *value);
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 89784763d19e..dd74411ac21d 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -27,7 +27,7 @@ void psi_memstall_leave(unsigned long *flags);
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, size_t nbytes, enum psi_res res);
+ char *buf, enum psi_res res);
void psi_trigger_destroy(struct psi_trigger *t);
__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bf80adca980b..b89b4b86951f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -41,12 +41,15 @@ struct anon_vma {
atomic_t refcount;
/*
- * Count of child anon_vmas and VMAs which points to this anon_vma.
+ * Count of child anon_vmas. Equals to the count of all anon_vmas that
+ * have ->parent pointing to this one, including itself.
*
* This counter is used for making decision about reusing anon_vma
* instead of forking new one. See comments in function anon_vma_clone.
*/
- unsigned degree;
+ unsigned long num_children;
+ /* Count of VMAs whose ->anon_vma pointer points to this object. */
+ unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index a193884ecf2b..4f765bc788ff 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -84,7 +84,7 @@ struct scmi_protocol_handle;
struct scmi_clk_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_clock_info *(*info_get)
+ const struct scmi_clock_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 clk_id);
int (*rate_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 *rate);
@@ -466,7 +466,7 @@ enum scmi_sensor_class {
*/
struct scmi_sensor_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
- const struct scmi_sensor_info *(*info_get)
+ const struct scmi_sensor_info __must_check *(*info_get)
(const struct scmi_protocol_handle *ph, u32 sensor_id);
int (*trip_point_config)(const struct scmi_protocol_handle *ph,
u32 sensor_id, u8 trip_id, u64 trip_value);
diff --git a/include/linux/security.h b/include/linux/security.h
index 1bc362cb413f..7bd0c490703d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2060,6 +2060,7 @@ static inline int security_perf_event_write(struct perf_event *event)
#ifdef CONFIG_SECURITY
extern int security_uring_override_creds(const struct cred *new);
extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
#else
static inline int security_uring_override_creds(const struct cred *new)
{
@@ -2069,6 +2070,10 @@ static inline int security_uring_sqpoll(void)
{
return 0;
}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_IO_URING */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index aef3145f2032..1eaea9fe44d8 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -141,6 +141,14 @@ struct gpio_desc;
* Locking: none.
* Interrupts: caller dependent.
*
+ * @start_rx: ``void ()(struct uart_port *port)``
+ *
+ * Start receiving characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep
+ *
* @stop_rx: ``void ()(struct uart_port *port)``
*
* Stop receiving characters; the @port is in the process of being closed.
@@ -616,6 +624,23 @@ struct uart_state {
/* number of characters left in xmit buffer before we ask for more */
#define WAKEUP_CHARS 256
+/**
+ * uart_xmit_advance - Advance xmit buffer and account Tx'ed chars
+ * @up: uart_port structure describing the port
+ * @chars: number of characters sent
+ *
+ * This function advances the tail of circular xmit buffer by the number of
+ * @chars transmitted and handles accounting of transmitted bytes (into
+ * @up's icount.tx).
+ */
+static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars)
+{
+ struct circ_buf *xmit = &up->state->xmit;
+
+ xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1);
+ up->icount.tx += chars;
+}
+
struct module;
struct tty_driver;
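
A minimal sketch of a driver Tx path using the new helper (illustrative; the FIFO test and register offset below are hypothetical):

#define EXAMPLE_THR 0x00				/* hypothetical Tx holding register */
static bool example_tx_fifo_has_room(struct uart_port *port);	/* hypothetical */

static void example_tx_chars(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	while (!uart_circ_empty(xmit) && example_tx_fifo_has_room(port)) {
		writeb(xmit->buf[xmit->tail], port->membase + EXAMPLE_THR);
		uart_xmit_advance(port, 1);	/* moves tail and bumps icount.tx */
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}
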
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 1b6c4013f691..ff0b990de83d 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -29,15 +29,10 @@ struct shmem_inode_info {
struct inode vfs_inode;
};
-#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
-#define SHMEM_FL_USER_MODIFIABLE FS_FL_USER_MODIFIABLE
-#define SHMEM_FL_INHERITED FS_FL_USER_MODIFIABLE
-
-/* Flags that are appropriate for regular files (all but dir-specific ones). */
-#define SHMEM_REG_FLMASK (~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
-
-/* Flags that are appropriate for non-directories/regular files. */
-#define SHMEM_OTHER_FLMASK (FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE
+#define SHMEM_FL_USER_MODIFIABLE \
+ (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL)
+#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL)
struct shmem_sb_info {
unsigned long max_blocks; /* How many blocks are allowed */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ca8afa382bf2..18e163a3460d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2444,6 +2444,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data with @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc, if page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+ struct page *page, int off,
+ int size)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+ shinfo->nr_frags = i + 1;
+}
+
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize);
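
An illustrative caller (not from this merge): attaching a page the stack does not own, e.g. on a zero-copy transmit path, where pfmemalloc propagation would be wrong.

static void example_append_frag(struct sk_buff *skb, struct page *page,
				int off, int len)
{
	int i = skb_shinfo(skb)->nr_frags;

	/* reference counting of @page is the caller's problem in this sketch */
	skb_fill_page_desc_noacc(skb, i, page, off, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}
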
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e6c73d5ff1a8..f089ee1ead58 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -469,6 +469,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* SPI_TRANS_FAIL_NO_START.
* @queue_empty: signal green light for opportunistically skipping the queue
* for spi_sync transfers.
+ * @must_async: disable all fast paths in the core
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -690,6 +691,7 @@ struct spi_controller {
/* Flag for enabling opportunistic skipping of the queue in spi_sync */
bool queue_empty;
+ bool must_async;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index b18759a673c6..8401dec93c15 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -814,8 +814,6 @@ extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
-#define is_signed_type(type) (((type)(-1)) < (type)1)
-
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 254a2654400f..e96da4157d04 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -70,6 +70,7 @@ struct udp_sock {
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index f7a9914fc97f..9ff1ad4dfad1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -575,6 +575,7 @@ struct usb3_lpm_parameters {
* @devaddr: device address, XHCI: assigned by HW, others: same as devnum
* @can_submit: URBs may be submitted
* @persist_enabled: USB_PERSIST enabled for this device
+ * @reset_in_progress: the device is being reset
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
* (user space) policy determines if we authorize this device to be
@@ -662,6 +663,7 @@ struct usb_device {
unsigned can_submit:1;
unsigned persist_enabled:1;
+ unsigned reset_in_progress:1;
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index cfb916cccd31..8d09c2f0a9b8 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -73,6 +73,11 @@ enum {
#define DP_CAP_USB BIT(7)
#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
#define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
+/* Get pin assignment taking plug & receptacle into consideration */
+#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 732b522bacb7..e1b8a915e9e9 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -73,6 +73,8 @@ extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
extern int mwriteprotect_range(struct mm_struct *dst_mm,
unsigned long start, unsigned long len,
bool enable_wp, atomic_t *mmap_changing);
+extern void uffd_wp_range(struct mm_struct *dst_mm, struct vm_area_struct *vma,
+ unsigned long start, unsigned long len, bool enable_wp);
/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index a3f73bb6733e..dcab9c7e8784 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -11,7 +11,7 @@
#include <linux/gfp.h>
/**
- * virtqueue - a queue to register buffers for sending or receiving.
+ * struct virtqueue - a queue to register buffers for sending or receiving.
* @list: the chain of virtqueues for this device
* @callback: the function to call when buffers are consumed (can be NULL).
* @name: the name of this virtqueue (mainly for debugging)
@@ -97,7 +97,7 @@ int virtqueue_resize(struct virtqueue *vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf));
/**
- * virtio_device - representation of a device using virtio
+ * struct virtio_device - representation of a device using virtio
* @index: unique position on the virtio bus
* @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
* @config_enabled: configuration change reporting enabled
@@ -156,7 +156,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev);
list_for_each_entry(vq, &vdev->vqs, list)
/**
- * virtio_driver - operations for a virtio I/O driver
+ * struct virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this driver.
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 6adff09f7170..4b517649cfe8 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -55,7 +55,6 @@ struct virtio_shm_region {
* include a NULL entry for vqs that do not need a callback
* names: array of virtqueue names (mainly for debugging)
* include a NULL entry for vqs unused by driver
- * sizes: array of virtqueue sizes
* Returns 0 on success or error status
* @del_vqs: free virtqueues found by find_vqs().
* @synchronize_cbs: synchronize with the virtqueue callbacks (optional)
@@ -104,9 +103,7 @@ struct virtio_config_ops {
void (*reset)(struct virtio_device *vdev);
int (*find_vqs)(struct virtio_device *, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[],
- u32 sizes[],
- const bool *ctx,
+ const char * const names[], const bool *ctx,
struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
void (*synchronize_cbs)(struct virtio_device *);
@@ -215,7 +212,7 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
const char *names[] = { n };
struct virtqueue *vq;
int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
- NULL, NULL);
+ NULL);
if (err < 0)
return ERR_PTR(err);
return vq;
@@ -227,8 +224,7 @@ int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
const char * const names[],
struct irq_affinity *desc)
{
- return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
- NULL, desc);
+ return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
}
static inline
@@ -237,25 +233,13 @@ int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
const char * const names[], const bool *ctx,
struct irq_affinity *desc)
{
- return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL,
- ctx, desc);
-}
-
-static inline
-int virtio_find_vqs_ctx_size(struct virtio_device *vdev, u32 nvqs,
- struct virtqueue *vqs[],
- vq_callback_t *callbacks[],
- const char * const names[],
- u32 sizes[],
- const bool *ctx, struct irq_affinity *desc)
-{
- return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, sizes,
- ctx, desc);
+ return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
+ desc);
}
/**
* virtio_synchronize_cbs - synchronize with virtqueue callbacks
- * @vdev: the device
+ * @dev: the virtio device
*/
static inline
void virtio_synchronize_cbs(struct virtio_device *dev)
@@ -274,7 +258,7 @@ void virtio_synchronize_cbs(struct virtio_device *dev)
/**
* virtio_device_ready - enable vq use in probe function
- * @vdev: the device
+ * @dev: the virtio device
*
* Driver must call this to use vqs in the probe function.
*
@@ -322,7 +306,7 @@ const char *virtio_bus_name(struct virtio_device *vdev)
/**
* virtqueue_set_affinity - setting affinity for a virtqueue
* @vq: the virtqueue
- * @cpu: the cpu no.
+ * @cpu_mask: the cpu mask
*
* Pay attention the function are best-effort: the affinity hint may not be set
* due to config support, irq type and sharing.
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 404024486fa5..f3fc36cd2276 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -20,12 +20,19 @@
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE
+#ifdef CONFIG_ZONE_DEVICE
+#define DEVICE_ZONE(xx) xx##_DEVICE,
+#else
+#define DEVICE_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, \
+ HIGHMEM_ZONE(xx) xx##_MOVABLE, DEVICE_ZONE(xx)
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
- FOR_ALL_ZONES(PGALLOC),
- FOR_ALL_ZONES(ALLOCSTALL),
- FOR_ALL_ZONES(PGSCAN_SKIP),
+ FOR_ALL_ZONES(PGALLOC)
+ FOR_ALL_ZONES(ALLOCSTALL)
+ FOR_ALL_ZONES(PGSCAN_SKIP)
PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
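
For reference, with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32, CONFIG_HIGHMEM and CONFIG_ZONE_DEVICE all enabled, the reworked macro (which now carries its own commas) expands roughly as follows (illustrative):

/* FOR_ALL_ZONES(PGALLOC) ~>
 *   PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL,
 *   PGALLOC_HIGH, PGALLOC_MOVABLE, PGALLOC_DEVICE,
 * which is why the enum entries above no longer append their own commas.
 */
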
diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
index 7dec36aecbd9..7725b7579b78 100644
--- a/include/linux/wait_bit.h
+++ b/include/linux/wait_bit.h
@@ -71,7 +71,7 @@ static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait,
@@ -96,7 +96,7 @@ static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit,
bit_wait_io,
@@ -123,7 +123,7 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
unsigned long timeout)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit_timeout(word, bit,
bit_wait_timeout,
@@ -151,7 +151,7 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
unsigned mode)
{
might_sleep();
- if (!test_bit(bit, word))
+ if (!test_bit_acquire(bit, word))
return 0;
return out_of_line_wait_on_bit(word, bit, action, mode);
}