Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpixf.h                 |  6
-rw-r--r--  include/asm-generic/io.h              | 98
-rw-r--r--  include/asm-generic/vmlinux.lds.h     |  2
-rw-r--r--  include/linux/cpufreq.h               |  3
-rw-r--r--  include/linux/fs.h                    | 15
-rw-r--r--  include/linux/iio/frequency/adf4350.h |  2
-rw-r--r--  include/linux/ksm.h                   |  8
-rw-r--r--  include/linux/memcontrol.h            | 26
-rw-r--r--  include/linux/mm.h                    | 22
-rw-r--r--  include/linux/pm_runtime.h            | 56
-rw-r--r--  include/linux/rseq.h                  | 11
-rw-r--r--  include/linux/sunrpc/svc_xprt.h       |  3
-rw-r--r--  include/media/v4l2-subdev.h           | 30
-rw-r--r--  include/trace/events/dma.h            |  1
14 files changed, 191 insertions, 92 deletions
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b49396aa4058..97c25ae8a36e 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -214,6 +214,12 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_osi_data, 0);
ACPI_INIT_GLOBAL(u8, acpi_gbl_reduced_hardware, FALSE);
/*
+ * The ACPI Global Lock is mainly used on systems with SMM, so no-SMM
+ * systems (such as loong_arch) may not have or use the Global Lock.
+ */
+ACPI_INIT_GLOBAL(u8, acpi_gbl_use_global_lock, TRUE);
+
+/*
* Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
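
The new flag gives the OS layer a switch to skip Global Lock handling entirely before ACPICA initializes. A minimal sketch of how a no-SMM platform might clear it; the setup hook below is hypothetical, not a function this patch adds:

static void __init no_smm_acpi_setup(void)
{
	/* This platform has no SMM, so the Global Lock is never used. */
	acpi_gbl_use_global_lock = FALSE;
}
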
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 11abad6c87e1..ca5a1ce6f0f8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -75,6 +75,7 @@
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>
+#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
@@ -91,6 +92,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
#else
+#define rwmmio_tracepoint_enabled(tracepoint) false
static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
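
rwmmio_tracepoint_enabled() expands to tracepoint_enabled(), a static-key test: when the tracepoint is off, the branch is patched out and the log_*_mmio() call plus its argument setup never execute. A hedged sketch of the same pattern with a plain static key; the key and helper are illustrative, not part of this patch:

#include <linux/jump_label.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(mmio_trace_key);

static inline void traced_writel(u32 value, volatile void __iomem *addr)
{
	if (static_branch_unlikely(&mmio_trace_key))	/* NOP when disabled */
		pr_debug("mmio write %#x\n", value);
	writel(value, addr);
}
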
@@ -189,11 +191,13 @@ static inline u8 readb(const volatile void __iomem *addr)
{
u8 val;
- log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __raw_readb(addr);
__io_ar(val);
- log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -204,11 +208,13 @@ static inline u16 readw(const volatile void __iomem *addr)
{
u16 val;
- log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
__io_ar(val);
- log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -219,11 +225,13 @@ static inline u32 readl(const volatile void __iomem *addr)
{
u32 val;
- log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
__io_ar(val);
- log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -235,11 +243,13 @@ static inline u64 readq(const volatile void __iomem *addr)
{
u64 val;
- log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
__io_br();
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
__io_ar(val);
- log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -249,11 +259,13 @@ static inline u64 readq(const volatile void __iomem *addr)
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeb(value, addr);
__io_aw();
- log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -261,11 +273,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writew((u16 __force)cpu_to_le16(value), addr);
__io_aw();
- log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -273,11 +287,13 @@ static inline void writew(u16 value, volatile void __iomem *addr)
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
__io_aw();
- log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -286,11 +302,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__io_bw();
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
__io_aw();
- log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
@@ -306,9 +324,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
u8 val;
- log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
val = __raw_readb(addr);
- log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -319,9 +339,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
u16 val;
- log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
- log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -332,9 +354,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
u32 val;
- log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
- log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -345,9 +369,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
u64 val;
- log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_read))
+ log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
- log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_read))
+ log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
return val;
}
#endif
@@ -356,9 +382,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
__raw_writeb(value, addr);
- log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -366,9 +394,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
__raw_writew((u16 __force)cpu_to_le16(value), addr);
- log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -376,9 +406,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
- log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif
@@ -386,9 +418,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
- log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_write))
+ log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
- log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
+ if (rwmmio_tracepoint_enabled(rwmmio_post_write))
+ log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8efbe8c4874e..d61a02fce727 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -832,6 +832,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
/* Required sections not related to debugging. */
#define ELF_DETAILS \
+ .modinfo : { *(.modinfo) } \
.comment 0 : { *(.comment) } \
.symtab 0 : { *(.symtab) } \
.strtab 0 : { *(.strtab) } \
@@ -1045,7 +1046,6 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
*(.discard.*) \
*(.export_symbol) \
*(.no_trim_symbol) \
- *(.modinfo) \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
*(.gnu.version*) \
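
Moving .modinfo out of DISCARD and into ELF_DETAILS keeps it in vmlinux as a non-allocated section, so the "tag=value" strings emitted for built-in code stay readable by tooling. A hedged sketch of what populates the section; the tag is illustrative:

#include <linux/module.h>

/* Emits a "built_with=example-toolchain" tag (possibly prefixed with
 * the object name) into .modinfo. */
MODULE_INFO(built_with, "example-toolchain");
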
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 95f3807c8c55..5ed8781f3905 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -32,6 +32,9 @@
*/
#define CPUFREQ_ETERNAL (-1)
+
+#define CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS NSEC_PER_MSEC
+
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
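
CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS gives drivers a shared one-millisecond fallback instead of each inventing its own constant when firmware reports no latency. A hedged usage sketch; the init callback is hypothetical:

static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	/* No latency data from the platform: use the common 1 ms default. */
	policy->cpuinfo.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
	return 0;
}
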
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 601d036a6c78..ed0271526103 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -238,6 +238,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
+#define ATTR_CTIME_SET (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
@@ -4024,4 +4025,18 @@ static inline bool vfs_empty_path(int dfd, const char __user *path)
int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter);
+static inline bool extensible_ioctl_valid(unsigned int cmd_a,
+ unsigned int cmd_b, size_t min_size)
+{
+ if (_IOC_DIR(cmd_a) != _IOC_DIR(cmd_b))
+ return false;
+ if (_IOC_TYPE(cmd_a) != _IOC_TYPE(cmd_b))
+ return false;
+ if (_IOC_NR(cmd_a) != _IOC_NR(cmd_b))
+ return false;
+ if (_IOC_SIZE(cmd_a) < min_size)
+ return false;
+ return true;
+}
+
#endif /* _LINUX_FS_H */
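
extensible_ioctl_valid() accepts a command that matches a known ioctl's direction, type, and number while carrying at least min_size bytes of argument, which lets the argument struct grow over time without breaking old binaries. A hedged usage sketch; MYFS_IOC_GET and struct myfs_arg are hypothetical:

struct myfs_arg {
	__u64 flags;
	/* newer fields may be appended here over time */
};

#define MYFS_IOC_GET _IOR('M', 1, struct myfs_arg)

static long myfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!extensible_ioctl_valid(cmd, MYFS_IOC_GET, sizeof(struct myfs_arg)))
		return -ENOTTY;
	/* Copy min(_IOC_SIZE(cmd), sizeof(struct myfs_arg)) bytes, then act. */
	return 0;
}
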
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index de45cf2ee1e4..ce2086f97e3f 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -51,7 +51,7 @@
/* REG3 Bit Definitions */
#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3)
-#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16)
+#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 15)
#define ADF4350_REG3_12BIT_CSR_EN (1 << 18)
#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21)
#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22)
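
On these parts the REG3 clock-divider mode field occupies bits DB16:DB15, directly above the 12-bit divider value at DB14:DB3, so shifting the mode by 16 skipped DB15 and collided with the bit above; shifting by 15 matches the register map. A hedged sketch of composing REG3 with the corrected macro; the divider value is arbitrary:

u32 reg3 = ADF4350_REG3_12BIT_CLKDIV(150) |	/* divider value, DB14:DB3 */
	   ADF4350_REG3_12BIT_CLKDIV_MODE(2);	/* divider mode, DB16:DB15 */
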
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index c17b955e7b0b..8d61d0454293 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -56,8 +56,14 @@ static inline long mm_ksm_zero_pages(struct mm_struct *mm)
static inline void ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
/* Adding mm to ksm is best effort on fork. */
- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
+ long nr_ksm_zero_pages = atomic_long_read(&mm->ksm_zero_pages);
+
+ mm->ksm_merging_pages = 0;
+ mm->ksm_rmap_items = 0;
+ atomic_long_add(nr_ksm_zero_pages, &ksm_zero_pages);
__ksm_enter(mm);
+ }
}
static inline int ksm_execve(struct mm_struct *mm)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 25921fbec685..d3ebc9b73c6f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -987,22 +987,28 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
count_memcg_events_mm(mm, idx, 1);
}
-static inline void memcg_memory_event(struct mem_cgroup *memcg,
- enum memcg_memory_event event)
+static inline void __memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event,
+ bool allow_spinning)
{
bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
event == MEMCG_SWAP_FAIL;
+ /* For now only MEMCG_MAX can happen with !allow_spinning context. */
+ VM_WARN_ON_ONCE(!allow_spinning && event != MEMCG_MAX);
+
atomic_long_inc(&memcg->memory_events_local[event]);
- if (!swap_event)
+ if (!swap_event && allow_spinning)
cgroup_file_notify(&memcg->events_local_file);
do {
atomic_long_inc(&memcg->memory_events[event]);
- if (swap_event)
- cgroup_file_notify(&memcg->swap_events_file);
- else
- cgroup_file_notify(&memcg->events_file);
+ if (allow_spinning) {
+ if (swap_event)
+ cgroup_file_notify(&memcg->swap_events_file);
+ else
+ cgroup_file_notify(&memcg->events_file);
+ }
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
break;
@@ -1012,6 +1018,12 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
!mem_cgroup_is_root(memcg));
}
+static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ enum memcg_memory_event event)
+{
+ __memcg_memory_event(memcg, event, true);
+}
+
static inline void memcg_memory_event_mm(struct mm_struct *mm,
enum memcg_memory_event event)
{
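
The split lets contexts that must not spin still account the event: __memcg_memory_event() always bumps the counters but skips cgroup_file_notify(), which takes a spinlock, when allow_spinning is false. A hedged sketch of such a call site; this caller is illustrative, not the upstream consumer:

static void memcg_count_max_nospin(struct mem_cgroup *memcg)
{
	/* Record the MEMCG_MAX event without the lock-taking notify step. */
	__memcg_memory_event(memcg, MEMCG_MAX, false);
}
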
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c6794d0e24eb..f23dd28f193f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4159,14 +4159,13 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
* since this value becomes part of PP_SIGNATURE; meaning we can just use the
* space between the PP_SIGNATURE value (without POISON_POINTER_DELTA), and the
* lowest bits of POISON_POINTER_DELTA. On arches where POISON_POINTER_DELTA is
- * 0, we make sure that we leave the two topmost bits empty, as that guarantees
- * we won't mistake a valid kernel pointer for a value we set, regardless of the
- * VMSPLIT setting.
+ * 0, we use the lowest bit of PAGE_OFFSET as the boundary if that value is
+ * known at compile-time.
*
- * Altogether, this means that the number of bits available is constrained by
- * the size of an unsigned long (at the upper end, subtracting two bits per the
- * above), and the definition of PP_SIGNATURE (with or without
- * POISON_POINTER_DELTA).
+ * If the value of PAGE_OFFSET is not known at compile time, or if it is too
+ * small to leave at least 8 bits available above PP_SIGNATURE, we define the
+ * number of bits to be 0, which turns off the DMA index tracking altogether
+ * (see page_pool_register_dma_index()).
*/
#define PP_DMA_INDEX_SHIFT (1 + __fls(PP_SIGNATURE - POISON_POINTER_DELTA))
#if POISON_POINTER_DELTA > 0
@@ -4175,8 +4174,13 @@ int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
*/
#define PP_DMA_INDEX_BITS MIN(32, __ffs(POISON_POINTER_DELTA) - PP_DMA_INDEX_SHIFT)
#else
-/* Always leave out the topmost two; see above. */
-#define PP_DMA_INDEX_BITS MIN(32, BITS_PER_LONG - PP_DMA_INDEX_SHIFT - 2)
+/* Use the lowest bit of PAGE_OFFSET if there's at least 8 bits available; see above */
+#define PP_DMA_INDEX_MIN_OFFSET (1 << (PP_DMA_INDEX_SHIFT + 8))
+#define PP_DMA_INDEX_BITS ((__builtin_constant_p(PAGE_OFFSET) && \
+ PAGE_OFFSET >= PP_DMA_INDEX_MIN_OFFSET && \
+ !(PAGE_OFFSET & (PP_DMA_INDEX_MIN_OFFSET - 1))) ? \
+ MIN(32, __ffs(PAGE_OFFSET) - PP_DMA_INDEX_SHIFT) : 0)
+
#endif
#define PP_DMA_INDEX_MASK GENMASK(PP_DMA_INDEX_BITS + PP_DMA_INDEX_SHIFT - 1, \
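
Because PP_DMA_INDEX_BITS can now evaluate to 0 (disabling DMA index tracking), derived constants should be sanity-checked rather than assumed. A hedged build-time check, not part of the patch; actual values depend on the architecture's PAGE_OFFSET and PP_SIGNATURE:

#include <linux/build_bug.h>

static_assert(PP_DMA_INDEX_BITS <= 32);
static_assert(PP_DMA_INDEX_BITS == 0 ||
	      PP_DMA_INDEX_SHIFT + PP_DMA_INDEX_BITS <= BITS_PER_LONG);
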
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index d88d6b6ccf5b..d1ff76e0e2d0 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -350,13 +350,12 @@ static inline int pm_runtime_force_resume(struct device *dev) { return -ENXIO; }
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero, Runtime PM status change ongoing
- * or device not in %RPM_ACTIVE state.
+ * * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
+ * ongoing or device not in %RPM_ACTIVE state.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
* Other values and conditions for the above values are possible as returned by
* Runtime PM idle and suspend callbacks.
*/
@@ -370,14 +369,15 @@ static inline int pm_runtime_idle(struct device *dev)
* @dev: Target device.
*
* Return:
+ * * 1: Success; device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
@@ -396,14 +396,15 @@ static inline int pm_runtime_suspend(struct device *dev)
* engaging its "idle check" callback.
*
* Return:
+ * * 1: Success; device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
@@ -433,13 +434,12 @@ static inline int pm_runtime_resume(struct device *dev)
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero, Runtime PM status change ongoing
- * or device not in %RPM_ACTIVE state.
+ * * -EAGAIN: Runtime PM usage counter non-zero, Runtime PM status change
+ * ongoing or device not in %RPM_ACTIVE state.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
*/
static inline int pm_request_idle(struct device *dev)
{
@@ -464,15 +464,16 @@ static inline int pm_request_resume(struct device *dev)
* equivalent pm_runtime_autosuspend() for @dev asynchronously.
*
* Return:
+ * * 1: Success; device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter non-zero or Runtime PM status change
+ * ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
*/
static inline int pm_request_autosuspend(struct device *dev)
{
@@ -540,15 +541,16 @@ static inline int pm_runtime_resume_and_get(struct device *dev)
* equal to 0, queue up a work item for @dev like in pm_request_idle().
*
* Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
*/
static inline int pm_runtime_put(struct device *dev)
{
@@ -565,15 +567,16 @@ DEFINE_FREE(pm_runtime_put, struct device *, if (_T) pm_runtime_put(_T))
* equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
*
* Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
*/
static inline int __pm_runtime_put_autosuspend(struct device *dev)
{
@@ -590,15 +593,16 @@ static inline int __pm_runtime_put_autosuspend(struct device *dev)
* in pm_request_autosuspend().
*
* Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
*/
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
@@ -619,14 +623,15 @@ static inline int pm_runtime_put_autosuspend(struct device *dev)
* if it returns an error code.
*
* Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
@@ -646,15 +651,15 @@ static inline int pm_runtime_put_sync(struct device *dev)
* if it returns an error code.
*
* Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
- * * -EAGAIN: usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
@@ -677,15 +682,16 @@ static inline int pm_runtime_put_sync_suspend(struct device *dev)
* if it returns an error code.
*
* Return:
+ * * 1: Success. Usage counter dropped to zero, but device was already suspended.
* * 0: Success.
* * -EINVAL: Runtime PM error.
* * -EACCES: Runtime PM disabled.
- * * -EAGAIN: Runtime PM usage_count non-zero or Runtime PM status change ongoing.
+ * * -EAGAIN: Runtime PM usage counter became non-zero or Runtime PM status
+ * change ongoing.
* * -EBUSY: Runtime PM child_count non-zero.
* * -EPERM: Device PM QoS resume latency 0.
* * -EINPROGRESS: Suspend already in progress.
* * -ENOSYS: CONFIG_PM not enabled.
- * * 1: Device already suspended.
* Other values and conditions for the above values are possible as returned by
* Runtime PM suspend callbacks.
*/
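
The reordered kerneldoc across these helpers makes the same point each time: a positive return of 1 is a success case (the device was already suspended), not an error. A hedged caller sketch; the function name is illustrative:

static void my_dev_done(struct device *dev)
{
	int ret = pm_runtime_put(dev);

	if (ret < 0)
		dev_warn(dev, "runtime PM put failed: %d\n", ret);
	/* ret == 0 and ret == 1 (already suspended) are both success. */
}
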
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index bc8af3eb5598..1fbeb61babeb 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -7,6 +7,12 @@
#include <linux/preempt.h>
#include <linux/sched.h>
+#ifdef CONFIG_MEMBARRIER
+# define RSEQ_EVENT_GUARD irq
+#else
+# define RSEQ_EVENT_GUARD preempt
+#endif
+
/*
* Map the event mask on the user-space ABI enum rseq_cs_flags
* for direct mask checks.
@@ -41,9 +47,8 @@ static inline void rseq_handle_notify_resume(struct ksignal *ksig,
static inline void rseq_signal_deliver(struct ksignal *ksig,
struct pt_regs *regs)
{
- preempt_disable();
- __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
- preempt_enable();
+ scoped_guard(RSEQ_EVENT_GUARD)
+ __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
rseq_handle_notify_resume(ksig, regs);
}
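
RSEQ_EVENT_GUARD picks the protection for the event-mask update: with CONFIG_MEMBARRIER the mask can also be written from hard-IRQ context (the membarrier IPI), so disabling preemption alone is insufficient there. The scoped_guard() above is roughly equivalent to this hedged expansion:

#ifdef CONFIG_MEMBARRIER
	local_irq_disable();	/* membarrier IPIs may touch the mask */
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	local_irq_enable();
#else
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
#endif
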
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 369a89aea186..2b886f7eb295 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -104,6 +104,9 @@ enum {
* it has access to. It is NOT counted
* in ->sv_tmpcnt.
*/
+ XPT_RPCB_UNREG, /* transport that needs unregistering
+ * with rpcbind (TCP, UDP) on destroy
+ */
};
/*
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 5dcf4065708f..398b57461677 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1962,19 +1962,23 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
*
* Note: only legacy non-MC drivers may need this macro.
*/
-#define v4l2_subdev_call_state_try(sd, o, f, args...) \
- ({ \
- int __result; \
- static struct lock_class_key __key; \
- const char *name = KBUILD_BASENAME \
- ":" __stringify(__LINE__) ":state->lock"; \
- struct v4l2_subdev_state *state = \
- __v4l2_subdev_state_alloc(sd, name, &__key); \
- v4l2_subdev_lock_state(state); \
- __result = v4l2_subdev_call(sd, o, f, state, ##args); \
- v4l2_subdev_unlock_state(state); \
- __v4l2_subdev_state_free(state); \
- __result; \
+#define v4l2_subdev_call_state_try(sd, o, f, args...) \
+ ({ \
+ int __result; \
+ static struct lock_class_key __key; \
+ const char *name = KBUILD_BASENAME \
+ ":" __stringify(__LINE__) ":state->lock"; \
+ struct v4l2_subdev_state *state = \
+ __v4l2_subdev_state_alloc(sd, name, &__key); \
+ if (IS_ERR(state)) { \
+ __result = PTR_ERR(state); \
+ } else { \
+ v4l2_subdev_lock_state(state); \
+ __result = v4l2_subdev_call(sd, o, f, state, ##args); \
+ v4l2_subdev_unlock_state(state); \
+ __v4l2_subdev_state_free(state); \
+ } \
+ __result; \
})
/**
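
With the IS_ERR() check, the macro now propagates the state-allocation failure instead of locking an ERR_PTR, so callers get the error code back. A hedged caller sketch; sd is assumed to be a configured subdev:

struct v4l2_subdev_format fmt = {
	.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
int ret = v4l2_subdev_call_state_try(sd, pad, get_fmt, &fmt);

if (ret < 0)
	return ret;	/* may now include e.g. -ENOMEM from state allocation */
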
diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
index d8ddc27b6a7c..945fcbaae77e 100644
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -134,6 +134,7 @@ DECLARE_EVENT_CLASS(dma_alloc_class,
__entry->dma_addr = dma_addr;
__entry->size = size;
__entry->flags = flags;
+ __entry->dir = dir;
__entry->attrs = attrs;
),