Diffstat (limited to 'include')
170 files changed, 4454 insertions, 1319 deletions
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h index 9e49b37fc869..9778408f8db4 100644 --- a/include/acpi/acpixf.h +++ b/include/acpi/acpixf.h @@ -589,82 +589,92 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status acpi_install_initialization_handler (acpi_init_handler handler, u32 function)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_install_sci_handler(acpi_sci_handler - address, - void *context)) -ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_remove_sci_handler(acpi_sci_handler - address)) + acpi_install_sci_handler(acpi_sci_handler + address, + void *context)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_install_global_event_handler - (acpi_gbl_event_handler handler, - void *context)) + acpi_remove_sci_handler(acpi_sci_handler + address)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_install_fixed_event_handler(u32 - acpi_event, - acpi_event_handler - handler, - void - *context)) + acpi_install_global_event_handler + (acpi_gbl_event_handler handler, + void *context)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_remove_fixed_event_handler(u32 acpi_event, + acpi_install_fixed_event_handler(u32 + acpi_event, acpi_event_handler - handler)) -ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_install_gpe_handler(acpi_handle - gpe_device, - u32 gpe_number, - u32 type, - acpi_gpe_handler - address, - void *context)) + handler, + void + *context)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_install_gpe_raw_handler(acpi_handle - gpe_device, - u32 gpe_number, - u32 type, - acpi_gpe_handler - address, - void *context)) + acpi_remove_fixed_event_handler(u32 acpi_event, + acpi_event_handler + handler)) ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status - acpi_remove_gpe_handler(acpi_handle gpe_device, + acpi_install_gpe_handler(acpi_handle + gpe_device, u32 gpe_number, + u32 type, acpi_gpe_handler - address)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_install_notify_handler(acpi_handle device, - u32 handler_type, - acpi_notify_handler - handler, + address, void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_install_gpe_raw_handler(acpi_handle + gpe_device, + u32 gpe_number, + u32 type, + acpi_gpe_handler + address, + void *context)) +ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status + acpi_remove_gpe_handler(acpi_handle gpe_device, + u32 gpe_number, + acpi_gpe_handler + address)) ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_remove_notify_handler(acpi_handle device, + acpi_install_notify_handler(acpi_handle device, u32 handler_type, acpi_notify_handler - handler)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_install_address_space_handler(acpi_handle - device, - acpi_adr_space_type - space_id, - acpi_adr_space_handler - handler, - acpi_adr_space_setup - setup, - void *context)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_remove_address_space_handler(acpi_handle + handler, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_notify_handler(acpi_handle device, + u32 handler_type, + acpi_notify_handler + handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_address_space_handler(acpi_handle device, acpi_adr_space_type space_id, acpi_adr_space_handler - handler)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_install_exception_handler - (acpi_exception_handler handler)) -ACPI_EXTERNAL_RETURN_STATUS(acpi_status - acpi_install_interface_handler - (acpi_interface_handler handler)) + handler, + acpi_adr_space_setup + setup, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + 
acpi_install_address_space_handler_no_reg + (acpi_handle device, acpi_adr_space_type space_id, + acpi_adr_space_handler handler, + acpi_adr_space_setup setup, + void *context)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_execute_reg_methods(acpi_handle device, + acpi_adr_space_type + space_id)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_remove_address_space_handler(acpi_handle + device, + acpi_adr_space_type + space_id, + acpi_adr_space_handler + handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_exception_handler + (acpi_exception_handler handler)) +ACPI_EXTERNAL_RETURN_STATUS(acpi_status + acpi_install_interface_handler + (acpi_interface_handler handler)) /* * Global Lock interfaces diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h index aea9aee1f3e9..a7752cf152ce 100644 --- a/include/asm-generic/gpio.h +++ b/include/asm-generic/gpio.h @@ -11,40 +11,18 @@ #include <linux/gpio/driver.h> #include <linux/gpio/consumer.h> -/* Platforms may implement their GPIO interface with library code, +/* + * Platforms may implement their GPIO interface with library code, * at a small performance cost for non-inlined operations and some * extra memory (for code and for per-GPIO table entries). - * - * While the GPIO programming interface defines valid GPIO numbers - * to be in the range 0..MAX_INT, this library restricts them to the - * smaller range 0..ARCH_NR_GPIOS-1. - * - * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of - * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is - * actually an estimate of a board-specific value. */ -#ifndef ARCH_NR_GPIOS -#if defined(CONFIG_ARCH_NR_GPIO) && CONFIG_ARCH_NR_GPIO > 0 -#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO -#else -#define ARCH_NR_GPIOS 512 -#endif -#endif - /* - * "valid" GPIO numbers are nonnegative and may be passed to - * setup routines like gpio_request(). only some valid numbers - * can successfully be requested and used. - * - * Invalid GPIO numbers are useful for indicating no-such-GPIO in - * platform data and other tables. + * At the end we want all GPIOs to be dynamically allocated from 0. + * However, some legacy drivers still perform fixed allocation. + * Until they are all fixed, leave 0-512 space for them. */ - -static inline bool gpio_is_valid(int number) -{ - return number >= 0 && number < ARCH_NR_GPIOS; -} +#define GPIO_DYNAMIC_BASE 512 struct device; struct gpio; @@ -140,12 +118,6 @@ static inline void gpio_unexport(unsigned gpio) #include <linux/kernel.h> -static inline bool gpio_is_valid(int number) -{ - /* only non-negative numbers are valid */ - return number >= 0; -} - /* platforms that don't directly support access to GPIOs through I2C, SPI, * or other blocking infrastructure can use these wrappers. */ @@ -169,4 +141,19 @@ static inline void gpio_set_value_cansleep(unsigned gpio, int value) #endif /* !CONFIG_GPIOLIB */ +/* + * "valid" GPIO numbers are nonnegative and may be passed to + * setup routines like gpio_request(). only some valid numbers + * can successfully be requested and used. + * + * Invalid GPIO numbers are useful for indicating no-such-GPIO in + * platform data and other tables. 
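
[Editor's aside on the gpio.h hunk: gpio_is_valid() is now defined once, below the CONFIG_GPIOLIB split, so the "invalid number means no-such-GPIO" convention described in this comment applies identically on both paths. A minimal consumer sketch follows; demo_claim_reset() and its argument are hypothetical, while gpio_request() is the existing legacy gpiolib call.]

#include <linux/gpio.h>

/* Hypothetical board code: -1 in platform data means "no reset line". */
static int demo_claim_reset(int reset_gpio)
{
	if (!gpio_is_valid(reset_gpio))
		return 0;	/* nothing to request on this board */

	return gpio_request(reset_gpio, "demo-reset");
}
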
+ */ + +static inline bool gpio_is_valid(int number) +{ + /* only non-negative numbers are valid */ + return number >= 0; +} + #endif /* _ASM_GENERIC_GPIO_H */ diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h index b17c6eeb9afa..e29ccabf2e09 100644 --- a/include/asm-generic/hyperv-tlfs.h +++ b/include/asm-generic/hyperv-tlfs.h @@ -408,6 +408,11 @@ struct hv_vpset { u64 bank_contents[]; } __packed; +/* The maximum number of sparse vCPU banks which can be encoded by 'struct hv_vpset' */ +#define HV_MAX_SPARSE_VCPU_BANKS (64) +/* The number of vCPUs in one sparse bank */ +#define HV_VCPUS_PER_SPARSE_BANK (64) + /* HvCallSendSyntheticClusterIpi hypercall */ struct hv_send_ipi { u32 vector; diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index a68f8fbf423b..4c44a29b5e8e 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -80,24 +80,24 @@ DECLARE_TRACEPOINT(rwmmio_read); DECLARE_TRACEPOINT(rwmmio_post_read); void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0); void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0); void log_read_mmio(u8 width, const volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0); void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, - unsigned long caller_addr); + unsigned long caller_addr, unsigned long caller_addr0); #else static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_read_mmio(u8 width, const volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {} static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr, - unsigned long caller_addr) {} + unsigned long caller_addr, unsigned long caller_addr0) {} #endif /* CONFIG_TRACE_MMIO_ACCESS */ @@ -188,11 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr) { u8 val; - log_read_mmio(8, addr, _THIS_IP_); + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __raw_readb(addr); __io_ar(val); - log_post_read_mmio(val, 8, addr, _THIS_IP_); + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -203,11 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr) { u16 val; - log_read_mmio(16, addr, _THIS_IP_); + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); __io_ar(val); - log_post_read_mmio(val, 16, addr, _THIS_IP_); + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -218,11 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr) { u32 val; - log_read_mmio(32, addr, _THIS_IP_); + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); __io_ar(val); - log_post_read_mmio(val, 32, addr, _THIS_IP_); + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -234,11 +234,11 @@ static inline u64 readq(const volatile 
void __iomem *addr) { u64 val; - log_read_mmio(64, addr, _THIS_IP_); + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); __io_br(); val = __le64_to_cpu(__raw_readq(addr)); __io_ar(val); - log_post_read_mmio(val, 64, addr, _THIS_IP_); + log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -248,11 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr) #define writeb writeb static inline void writeb(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_); + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeb(value, addr); __io_aw(); - log_post_write_mmio(value, 8, addr, _THIS_IP_); + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif @@ -260,11 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr) #define writew writew static inline void writew(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_); + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writew((u16 __force)cpu_to_le16(value), addr); __io_aw(); - log_post_write_mmio(value, 16, addr, _THIS_IP_); + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif @@ -272,11 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr) #define writel writel static inline void writel(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_); + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writel((u32 __force)__cpu_to_le32(value), addr); __io_aw(); - log_post_write_mmio(value, 32, addr, _THIS_IP_); + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif @@ -285,11 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr) #define writeq writeq static inline void writeq(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_); + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); __io_bw(); __raw_writeq(__cpu_to_le64(value), addr); __io_aw(); - log_post_write_mmio(value, 64, addr, _THIS_IP_); + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif #endif /* CONFIG_64BIT */ @@ -305,9 +305,9 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr) { u8 val; - log_read_mmio(8, addr, _THIS_IP_); + log_read_mmio(8, addr, _THIS_IP_, _RET_IP_); val = __raw_readb(addr); - log_post_read_mmio(val, 8, addr, _THIS_IP_); + log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -318,9 +318,9 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr) { u16 val; - log_read_mmio(16, addr, _THIS_IP_); + log_read_mmio(16, addr, _THIS_IP_, _RET_IP_); val = __le16_to_cpu(__raw_readw(addr)); - log_post_read_mmio(val, 16, addr, _THIS_IP_); + log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -331,9 +331,9 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr) { u32 val; - log_read_mmio(32, addr, _THIS_IP_); + log_read_mmio(32, addr, _THIS_IP_, _RET_IP_); val = __le32_to_cpu(__raw_readl(addr)); - log_post_read_mmio(val, 32, addr, _THIS_IP_); + log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_); return val; } #endif @@ -344,9 +344,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) { u64 val; - log_read_mmio(64, addr, _THIS_IP_); + log_read_mmio(64, addr, _THIS_IP_, _RET_IP_); val = __le64_to_cpu(__raw_readq(addr)); - log_post_read_mmio(val, 64, addr, _THIS_IP_); + log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_); 
return val; } #endif @@ -355,9 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr) #define writeb_relaxed writeb_relaxed static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) { - log_write_mmio(value, 8, addr, _THIS_IP_); + log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); __raw_writeb(value, addr); - log_post_write_mmio(value, 8, addr, _THIS_IP_); + log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_); } #endif @@ -365,9 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) #define writew_relaxed writew_relaxed static inline void writew_relaxed(u16 value, volatile void __iomem *addr) { - log_write_mmio(value, 16, addr, _THIS_IP_); + log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); __raw_writew(cpu_to_le16(value), addr); - log_post_write_mmio(value, 16, addr, _THIS_IP_); + log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_); } #endif @@ -375,9 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr) #define writel_relaxed writel_relaxed static inline void writel_relaxed(u32 value, volatile void __iomem *addr) { - log_write_mmio(value, 32, addr, _THIS_IP_); + log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); __raw_writel(__cpu_to_le32(value), addr); - log_post_write_mmio(value, 32, addr, _THIS_IP_); + log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_); } #endif @@ -385,9 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr) #define writeq_relaxed writeq_relaxed static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) { - log_write_mmio(value, 64, addr, _THIS_IP_); + log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); __raw_writeq(__cpu_to_le64(value), addr); - log_post_write_mmio(value, 64, addr, _THIS_IP_); + log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_); } #endif diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h index bfb9eb9d7215..d55d2833a37b 100644 --- a/include/asm-generic/mshyperv.h +++ b/include/asm-generic/mshyperv.h @@ -211,9 +211,10 @@ static inline int __cpumask_to_vpset(struct hv_vpset *vpset, { int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; int this_cpu = smp_processor_id(); + int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK; - /* valid_bank_mask can represent up to 64 banks */ - if (hv_max_vp_index / 64 >= 64) + /* vpset.valid_bank_mask can represent up to HV_MAX_SPARSE_VCPU_BANKS banks */ + if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS) return 0; /* @@ -221,7 +222,7 @@ static inline int __cpumask_to_vpset(struct hv_vpset *vpset, * structs are not cleared between calls, we risk flushing unneeded * vCPUs otherwise. */ - for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) + for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++) vpset->bank_contents[vcpu_bank] = 0; /* @@ -233,8 +234,8 @@ static inline int __cpumask_to_vpset(struct hv_vpset *vpset, vcpu = hv_cpu_number_to_vp_number(cpu); if (vcpu == VP_INVAL) return -1; - vcpu_bank = vcpu / 64; - vcpu_offset = vcpu % 64; + vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK; + vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK; __set_bit(vcpu_offset, (unsigned long *) &vpset->bank_contents[vcpu_bank]); if (vcpu_bank >= nr_bank) diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index c8ab800652b5..a94219e9916f 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -81,8 +81,8 @@ #define RO_EXCEPTION_TABLE #endif -/* Align . 
to a 8 byte boundary equals to maximum function alignment. */ -#define ALIGN_FUNCTION() . = ALIGN(8) +/* Align . function alignment. */ +#define ALIGN_FUNCTION() . = ALIGN(CONFIG_FUNCTION_ALIGNMENT) /* * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which @@ -199,100 +199,114 @@ # endif #endif +#define BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) \ + _BEGIN_##_label_ = .; \ + KEEP(*(_sec_)) \ + _END_##_label_ = .; + +#define BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) \ + _label_##_BEGIN_ = .; \ + KEEP(*(_sec_)) \ + _label_##_END_ = .; + +#define BOUNDED_SECTION_BY(_sec_, _label_) \ + BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop) + +#define BOUNDED_SECTION(_sec) BOUNDED_SECTION_BY(_sec, _sec) + +#define HEADERED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \ + _HDR_##_label_ = .; \ + KEEP(*(.gnu.linkonce.##_sec_)) \ + BOUNDED_SECTION_PRE_LABEL(_sec_, _label_, _BEGIN_, _END_) + +#define HEADERED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_, _HDR_) \ + _label_##_HDR_ = .; \ + KEEP(*(.gnu.linkonce.##_sec_)) \ + BOUNDED_SECTION_POST_LABEL(_sec_, _label_, _BEGIN_, _END_) + +#define HEADERED_SECTION_BY(_sec_, _label_) \ + HEADERED_SECTION_PRE_LABEL(_sec_, _label_, __start, __stop) + +#define HEADERED_SECTION(_sec) HEADERED_SECTION_BY(_sec, _sec) + #ifdef CONFIG_TRACE_BRANCH_PROFILING -#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \ - KEEP(*(_ftrace_annotated_branch)) \ - __stop_annotated_branch_profile = .; +#define LIKELY_PROFILE() \ + BOUNDED_SECTION_BY(_ftrace_annotated_branch, _annotated_branch_profile) #else #define LIKELY_PROFILE() #endif #ifdef CONFIG_PROFILE_ALL_BRANCHES -#define BRANCH_PROFILE() __start_branch_profile = .; \ - KEEP(*(_ftrace_branch)) \ - __stop_branch_profile = .; +#define BRANCH_PROFILE() \ + BOUNDED_SECTION_BY(_ftrace_branch, _branch_profile) #else #define BRANCH_PROFILE() #endif #ifdef CONFIG_KPROBES -#define KPROBE_BLACKLIST() . = ALIGN(8); \ - __start_kprobe_blacklist = .; \ - KEEP(*(_kprobe_blacklist)) \ - __stop_kprobe_blacklist = .; +#define KPROBE_BLACKLIST() \ + . = ALIGN(8); \ + BOUNDED_SECTION(_kprobe_blacklist) #else #define KPROBE_BLACKLIST() #endif #ifdef CONFIG_FUNCTION_ERROR_INJECTION -#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \ - __start_error_injection_whitelist = .; \ - KEEP(*(_error_injection_whitelist)) \ - __stop_error_injection_whitelist = .; +#define ERROR_INJECT_WHITELIST() \ + STRUCT_ALIGN(); \ + BOUNDED_SECTION(_error_injection_whitelist) #else #define ERROR_INJECT_WHITELIST() #endif #ifdef CONFIG_EVENT_TRACING -#define FTRACE_EVENTS() . = ALIGN(8); \ - __start_ftrace_events = .; \ - KEEP(*(_ftrace_events)) \ - __stop_ftrace_events = .; \ - __start_ftrace_eval_maps = .; \ - KEEP(*(_ftrace_eval_map)) \ - __stop_ftrace_eval_maps = .; +#define FTRACE_EVENTS() \ + . 
= ALIGN(8); \ + BOUNDED_SECTION(_ftrace_events) \ + BOUNDED_SECTION_BY(_ftrace_eval_map, _ftrace_eval_maps) #else #define FTRACE_EVENTS() #endif #ifdef CONFIG_TRACING -#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \ - KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \ - __stop___trace_bprintk_fmt = .; -#define TRACEPOINT_STR() __start___tracepoint_str = .; \ - KEEP(*(__tracepoint_str)) /* Trace_printk fmt' pointer */ \ - __stop___tracepoint_str = .; +#define TRACE_PRINTKS() BOUNDED_SECTION_BY(__trace_printk_fmt, ___trace_bprintk_fmt) +#define TRACEPOINT_STR() BOUNDED_SECTION_BY(__tracepoint_str, ___tracepoint_str) #else #define TRACE_PRINTKS() #define TRACEPOINT_STR() #endif #ifdef CONFIG_FTRACE_SYSCALLS -#define TRACE_SYSCALLS() . = ALIGN(8); \ - __start_syscalls_metadata = .; \ - KEEP(*(__syscalls_metadata)) \ - __stop_syscalls_metadata = .; +#define TRACE_SYSCALLS() \ + . = ALIGN(8); \ + BOUNDED_SECTION_BY(__syscalls_metadata, _syscalls_metadata) #else #define TRACE_SYSCALLS() #endif #ifdef CONFIG_BPF_EVENTS -#define BPF_RAW_TP() STRUCT_ALIGN(); \ - __start__bpf_raw_tp = .; \ - KEEP(*(__bpf_raw_tp_map)) \ - __stop__bpf_raw_tp = .; +#define BPF_RAW_TP() STRUCT_ALIGN(); \ + BOUNDED_SECTION_BY(__bpf_raw_tp_map, __bpf_raw_tp) #else #define BPF_RAW_TP() #endif #ifdef CONFIG_SERIAL_EARLYCON -#define EARLYCON_TABLE() . = ALIGN(8); \ - __earlycon_table = .; \ - KEEP(*(__earlycon_table)) \ - __earlycon_table_end = .; +#define EARLYCON_TABLE() \ + . = ALIGN(8); \ + BOUNDED_SECTION_POST_LABEL(__earlycon_table, __earlycon_table, , _end) #else #define EARLYCON_TABLE() #endif #ifdef CONFIG_SECURITY -#define LSM_TABLE() . = ALIGN(8); \ - __start_lsm_info = .; \ - KEEP(*(.lsm_info.init)) \ - __end_lsm_info = .; -#define EARLY_LSM_TABLE() . = ALIGN(8); \ - __start_early_lsm_info = .; \ - KEEP(*(.early_lsm_info.init)) \ - __end_early_lsm_info = .; +#define LSM_TABLE() \ + . = ALIGN(8); \ + BOUNDED_SECTION_PRE_LABEL(.lsm_info.init, _lsm_info, __start, __end) + +#define EARLY_LSM_TABLE() \ + . = ALIGN(8); \ + BOUNDED_SECTION_PRE_LABEL(.early_lsm_info.init, _early_lsm_info, __start, __end) #else #define LSM_TABLE() #define EARLY_LSM_TABLE() @@ -318,9 +332,8 @@ #ifdef CONFIG_ACPI #define ACPI_PROBE_TABLE(name) \ . = ALIGN(8); \ - __##name##_acpi_probe_table = .; \ - KEEP(*(__##name##_acpi_probe_table)) \ - __##name##_acpi_probe_table_end = .; + BOUNDED_SECTION_POST_LABEL(__##name##_acpi_probe_table, \ + __##name##_acpi_probe_table,, _end) #else #define ACPI_PROBE_TABLE(name) #endif @@ -328,9 +341,8 @@ #ifdef CONFIG_THERMAL #define THERMAL_TABLE(name) \ . = ALIGN(8); \ - __##name##_thermal_table = .; \ - KEEP(*(__##name##_thermal_table)) \ - __##name##_thermal_table_end = .; + BOUNDED_SECTION_POST_LABEL(__##name##_thermal_table, \ + __##name##_thermal_table,, _end) #else #define THERMAL_TABLE(name) #endif @@ -360,12 +372,8 @@ *(__tracepoints) \ /* implement dynamic printk debug */ \ . = ALIGN(8); \ - __start___dyndbg_classes = .; \ - KEEP(*(__dyndbg_classes)) \ - __stop___dyndbg_classes = .; \ - __start___dyndbg = .; \ - KEEP(*(__dyndbg)) \ - __stop___dyndbg = .; \ + BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes) \ + BOUNDED_SECTION_BY(__dyndbg, ___dyndbg) \ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ @@ -408,19 +416,13 @@ #define JUMP_TABLE_DATA \ . = ALIGN(8); \ - __start___jump_table = .; \ - KEEP(*(__jump_table)) \ - __stop___jump_table = .; + BOUNDED_SECTION_BY(__jump_table, ___jump_table) #ifdef CONFIG_HAVE_STATIC_CALL_INLINE #define STATIC_CALL_DATA \ . 
= ALIGN(8); \ - __start_static_call_sites = .; \ - KEEP(*(.static_call_sites)) \ - __stop_static_call_sites = .; \ - __start_static_call_tramp_key = .; \ - KEEP(*(.static_call_tramp_key)) \ - __stop_static_call_tramp_key = .; + BOUNDED_SECTION_BY(.static_call_sites, _static_call_sites) \ + BOUNDED_SECTION_BY(.static_call_tramp_key, _static_call_tramp_key) #else #define STATIC_CALL_DATA #endif @@ -446,9 +448,7 @@ #ifdef CONFIG_ARCH_USES_CFI_TRAPS #define KCFI_TRAPS \ __kcfi_traps : AT(ADDR(__kcfi_traps) - LOAD_OFFSET) { \ - __start___kcfi_traps = .; \ - KEEP(*(.kcfi_traps)) \ - __stop___kcfi_traps = .; \ + BOUNDED_SECTION_BY(.kcfi_traps, ___kcfi_traps) \ } #else #define KCFI_TRAPS @@ -466,9 +466,7 @@ SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ . = ALIGN(8); \ - __start___tracepoints_ptrs = .; \ - KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \ - __stop___tracepoints_ptrs = .; \ + BOUNDED_SECTION_BY(__tracepoints_ptrs, ___tracepoints_ptrs) \ *(__tracepoints_strings)/* Tracepoints: strings */ \ } \ \ @@ -478,30 +476,14 @@ \ /* PCI quirks */ \ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ - __start_pci_fixups_early = .; \ - KEEP(*(.pci_fixup_early)) \ - __end_pci_fixups_early = .; \ - __start_pci_fixups_header = .; \ - KEEP(*(.pci_fixup_header)) \ - __end_pci_fixups_header = .; \ - __start_pci_fixups_final = .; \ - KEEP(*(.pci_fixup_final)) \ - __end_pci_fixups_final = .; \ - __start_pci_fixups_enable = .; \ - KEEP(*(.pci_fixup_enable)) \ - __end_pci_fixups_enable = .; \ - __start_pci_fixups_resume = .; \ - KEEP(*(.pci_fixup_resume)) \ - __end_pci_fixups_resume = .; \ - __start_pci_fixups_resume_early = .; \ - KEEP(*(.pci_fixup_resume_early)) \ - __end_pci_fixups_resume_early = .; \ - __start_pci_fixups_suspend = .; \ - KEEP(*(.pci_fixup_suspend)) \ - __end_pci_fixups_suspend = .; \ - __start_pci_fixups_suspend_late = .; \ - KEEP(*(.pci_fixup_suspend_late)) \ - __end_pci_fixups_suspend_late = .; \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_early, _pci_fixups_early, __start, __end) \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_header, _pci_fixups_header, __start, __end) \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_final, _pci_fixups_final, __start, __end) \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_enable, _pci_fixups_enable, __start, __end) \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume, _pci_fixups_resume, __start, __end) \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend, _pci_fixups_suspend, __start, __end) \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_resume_early, _pci_fixups_resume_early, __start, __end) \ + BOUNDED_SECTION_PRE_LABEL(.pci_fixup_suspend_late, _pci_fixups_suspend_late, __start, __end) \ } \ \ FW_LOADER_BUILT_IN_DATA \ @@ -551,16 +533,12 @@ \ /* Built-in module parameters. */ \ __param : AT(ADDR(__param) - LOAD_OFFSET) { \ - __start___param = .; \ - KEEP(*(__param)) \ - __stop___param = .; \ + BOUNDED_SECTION_BY(__param, ___param) \ } \ \ /* Built-in module versions. */ \ __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \ - __start___modver = .; \ - KEEP(*(__modver)) \ - __stop___modver = .; \ + BOUNDED_SECTION_BY(__modver, ___modver) \ } \ \ KCFI_TRAPS \ @@ -670,9 +648,7 @@ #define EXCEPTION_TABLE(align) \ . 
= ALIGN(align); \ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \ - __start___ex_table = .; \ - KEEP(*(__ex_table)) \ - __stop___ex_table = .; \ + BOUNDED_SECTION_BY(__ex_table, ___ex_table) \ } /* @@ -681,9 +657,7 @@ #ifdef CONFIG_DEBUG_INFO_BTF #define BTF \ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ - __start_BTF = .; \ - KEEP(*(.BTF)) \ - __stop_BTF = .; \ + BOUNDED_SECTION_BY(.BTF, _BTF) \ } \ . = ALIGN(4); \ .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \ @@ -860,9 +834,7 @@ #define BUG_TABLE \ . = ALIGN(8); \ __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ - __start___bug_table = .; \ - KEEP(*(__bug_table)) \ - __stop___bug_table = .; \ + BOUNDED_SECTION_BY(__bug_table, ___bug_table) \ } #else #define BUG_TABLE @@ -872,15 +844,11 @@ #define ORC_UNWIND_TABLE \ . = ALIGN(4); \ .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \ - __start_orc_unwind_ip = .; \ - KEEP(*(.orc_unwind_ip)) \ - __stop_orc_unwind_ip = .; \ + BOUNDED_SECTION_BY(.orc_unwind_ip, _orc_unwind_ip) \ } \ . = ALIGN(2); \ .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \ - __start_orc_unwind = .; \ - KEEP(*(.orc_unwind)) \ - __stop_orc_unwind = .; \ + BOUNDED_SECTION_BY(.orc_unwind, _orc_unwind) \ } \ text_size = _etext - _stext; \ . = ALIGN(4); \ @@ -898,9 +866,7 @@ #ifdef CONFIG_FW_LOADER #define FW_LOADER_BUILT_IN_DATA \ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \ - __start_builtin_fw = .; \ - KEEP(*(.builtin_fw)) \ - __end_builtin_fw = .; \ + BOUNDED_SECTION_PRE_LABEL(.builtin_fw, _builtin_fw, __start, __end) \ } #else #define FW_LOADER_BUILT_IN_DATA @@ -910,9 +876,7 @@ #define TRACEDATA \ . = ALIGN(4); \ .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \ - __tracedata_start = .; \ - KEEP(*(.tracedata)) \ - __tracedata_end = .; \ + BOUNDED_SECTION_POST_LABEL(.tracedata, __tracedata, _start, _end) \ } #else #define TRACEDATA @@ -921,9 +885,7 @@ #ifdef CONFIG_PRINTK_INDEX #define PRINTK_INDEX \ .printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \ - __start_printk_index = .; \ - *(.printk_index) \ - __stop_printk_index = .; \ + BOUNDED_SECTION_BY(.printk_index, _printk_index) \ } #else #define PRINTK_INDEX @@ -931,17 +893,13 @@ #define NOTES \ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \ - __start_notes = .; \ - KEEP(*(.note.*)) \ - __stop_notes = .; \ + BOUNDED_SECTION_BY(.note.*, _notes) \ } NOTES_HEADERS \ NOTES_HEADERS_RESTORE #define INIT_SETUP(initsetup_align) \ . = ALIGN(initsetup_align); \ - __setup_start = .; \ - KEEP(*(.init.setup)) \ - __setup_end = .; + BOUNDED_SECTION_POST_LABEL(.init.setup, __setup, _start, _end) #define INIT_CALLS_LEVEL(level) \ __initcall##level##_start = .; \ @@ -963,16 +921,12 @@ __initcall_end = .; #define CON_INITCALL \ - __con_initcall_start = .; \ - KEEP(*(.con_initcall.init)) \ - __con_initcall_end = .; + BOUNDED_SECTION_POST_LABEL(.con_initcall.init, __con_initcall, _start, _end) /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ #define KUNIT_TABLE() \ . 
= ALIGN(8); \ - __kunit_suites_start = .; \ - KEEP(*(.kunit_test_suites)) \ - __kunit_suites_end = .; + BOUNDED_SECTION_POST_LABEL(.kunit_test_suites, __kunit_suites, _start, _end) #ifdef CONFIG_BLK_DEV_INITRD #define INIT_RAM_FS \ diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index cb3d6b1c655d..e4bc96528902 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -11,6 +11,7 @@ #include <linux/crypto.h> #define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 +#define CRYPTO_ACOMP_DST_MAX 131072 /** * struct acomp_req - asynchronous (de)compression request diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 5764b46bd1ec..734c213918bd 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -43,9 +43,12 @@ struct akcipher_request { * struct crypto_akcipher - user-instantiated objects which encapsulate * algorithms and core processing logic * + * @reqsize: Request context size required by algorithm implementation * @base: Common crypto API algorithm data structure */ struct crypto_akcipher { + unsigned int reqsize; + struct crypto_tfm base; }; @@ -86,7 +89,6 @@ struct crypto_akcipher { * counterpart to @init, used to remove various changes set in * @init. * - * @reqsize: Request context size required by algorithm implementation * @base: Common crypto API algorithm data structure */ struct akcipher_alg { @@ -102,7 +104,6 @@ struct akcipher_alg { int (*init)(struct crypto_akcipher *tfm); void (*exit)(struct crypto_akcipher *tfm); - unsigned int reqsize; struct crypto_alg base; }; @@ -155,7 +156,7 @@ static inline struct akcipher_alg *crypto_akcipher_alg( static inline unsigned int crypto_akcipher_reqsize(struct crypto_akcipher *tfm) { - return crypto_akcipher_alg(tfm)->reqsize; + return tfm->reqsize; } static inline void akcipher_request_set_tfm(struct akcipher_request *req, diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index f50c5d1725da..61b327206b55 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -8,6 +8,7 @@ #define _CRYPTO_ALGAPI_H #include <linux/align.h> +#include <linux/cache.h> #include <linux/crypto.h> #include <linux/kconfig.h> #include <linux/list.h> @@ -21,10 +22,18 @@ * algs and architectures. Ciphers have a lower maximum size. 
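
[Editor's aside on the vmlinux.lds.h conversion above: every BOUNDED_SECTION*() use still emits the same __start_*/__stop_* (or _start/_end) symbol pairs as the open-coded originals, so consuming code is unchanged. A sketch of the consuming side, using the __param bounds as the example; count_builtin_params() is hypothetical, while the extern array declarations and struct kernel_param match what the kernel already uses.]

#include <linux/moduleparam.h>

/* Bounds emitted by BOUNDED_SECTION_BY(__param, ___param). */
extern const struct kernel_param __start___param[], __stop___param[];

/* Hypothetical walker: visit every entry the linker gathered. */
static unsigned int count_builtin_params(void)
{
	const struct kernel_param *p;
	unsigned int n = 0;

	for (p = __start___param; p < __stop___param; p++)
		n++;
	return n;
}
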
*/ #define MAX_ALGAPI_BLOCKSIZE 160 -#define MAX_ALGAPI_ALIGNMASK 63 +#define MAX_ALGAPI_ALIGNMASK 127 #define MAX_CIPHER_BLOCKSIZE 16 #define MAX_CIPHER_ALIGNMASK 15 +#ifdef ARCH_DMA_MINALIGN +#define CRYPTO_DMA_ALIGN ARCH_DMA_MINALIGN +#else +#define CRYPTO_DMA_ALIGN CRYPTO_MINALIGN +#endif + +#define CRYPTO_DMA_PADDING ((CRYPTO_DMA_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1)) + struct crypto_aead; struct crypto_instance; struct module; @@ -189,10 +198,38 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2, } } +static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) +{ + return tfm->__crt_ctx; +} + +static inline void *crypto_tfm_ctx_align(struct crypto_tfm *tfm, + unsigned int align) +{ + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + + return PTR_ALIGN(crypto_tfm_ctx(tfm), align); +} + static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm) { - return PTR_ALIGN(crypto_tfm_ctx(tfm), - crypto_tfm_alg_alignmask(tfm) + 1); + return crypto_tfm_ctx_align(tfm, crypto_tfm_alg_alignmask(tfm) + 1); +} + +static inline unsigned int crypto_dma_align(void) +{ + return CRYPTO_DMA_ALIGN; +} + +static inline unsigned int crypto_dma_padding(void) +{ + return (crypto_dma_align() - 1) & ~(crypto_tfm_ctx_alignment() - 1); +} + +static inline void *crypto_tfm_ctx_dma(struct crypto_tfm *tfm) +{ + return crypto_tfm_ctx_align(tfm, crypto_dma_align()); } static inline struct crypto_instance *crypto_tfm_alg_instance( diff --git a/include/crypto/aria.h b/include/crypto/aria.h index 254da46cc385..73295146be11 100644 --- a/include/crypto/aria.h +++ b/include/crypto/aria.h @@ -18,11 +18,11 @@ #ifndef _CRYPTO_ARIA_H #define _CRYPTO_ARIA_H +#include <crypto/algapi.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/errno.h> -#include <linux/crypto.h> #include <asm/byteorder.h> #define ARIA_MIN_KEY_SIZE 16 diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h index 9d7eff04f224..fd9df607a836 100644 --- a/include/crypto/gcm.h +++ b/include/crypto/gcm.h @@ -3,6 +3,9 @@ #include <linux/errno.h> +#include <crypto/aes.h> +#include <crypto/gf128mul.h> + #define GCM_AES_IV_SIZE 12 #define GCM_RFC4106_IV_SIZE 8 #define GCM_RFC4543_IV_SIZE 8 @@ -60,4 +63,23 @@ static inline int crypto_ipsec_check_assoclen(unsigned int assoclen) return 0; } + +struct aesgcm_ctx { + be128 ghash_key; + struct crypto_aes_ctx aes_ctx; + unsigned int authsize; +}; + +int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key, + unsigned int keysize, unsigned int authsize); + +void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src, + int crypt_len, const u8 *assoc, int assoc_len, + const u8 iv[GCM_AES_IV_SIZE], u8 *authtag); + +bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst, + const u8 *src, int crypt_len, const u8 *assoc, + int assoc_len, const u8 iv[GCM_AES_IV_SIZE], + const u8 *authtag); + #endif diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index cfc47e18820f..49339003bd2c 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -8,7 +8,9 @@ */ #ifndef _CRYPTO_ACOMP_INT_H #define _CRYPTO_ACOMP_INT_H + #include <crypto/acompress.h> +#include <crypto/algapi.h> /* * Transform internal helpers. 
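
[Editor's aside on the gcm.h hunk above: the aesgcm_*() declarations form a small synchronous AES-GCM library API. A minimal round-trip sketch under stated assumptions: the demo key, buffers, and function name are made up, and a full 16-byte tag with no associated data is used for illustration only.]

#include <crypto/gcm.h>
#include <linux/errno.h>
#include <linux/string.h>

static int demo_aesgcm_roundtrip(void)
{
	static const u8 key[16] = { 0x01 };	/* demo key only */
	struct aesgcm_ctx ctx;
	u8 iv[GCM_AES_IV_SIZE] = { 0 };
	u8 pt[32] = "demo plaintext";
	u8 ct[32], out[32], tag[16];
	int err;

	err = aesgcm_expandkey(&ctx, key, sizeof(key), sizeof(tag));
	if (err)
		return err;

	aesgcm_encrypt(&ctx, ct, pt, sizeof(pt), NULL, 0, iv, tag);

	/* aesgcm_decrypt() returns true only if the tag authenticates. */
	if (!aesgcm_decrypt(&ctx, out, ct, sizeof(ct), NULL, 0, iv, tag))
		return -EBADMSG;

	return memcmp(pt, out, sizeof(pt)) ? -EINVAL : 0;
}
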
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h index d482017f3e20..cd8cb1e921b7 100644 --- a/include/crypto/internal/aead.h +++ b/include/crypto/internal/aead.h @@ -39,6 +39,11 @@ static inline void *crypto_aead_ctx(struct crypto_aead *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_aead_ctx_dma(struct crypto_aead *tfm) +{ + return crypto_tfm_ctx_dma(&tfm->base); +} + static inline struct crypto_instance *aead_crypto_instance( struct aead_instance *inst) { @@ -65,6 +70,16 @@ static inline void *aead_request_ctx(struct aead_request *req) return req->__ctx; } +static inline void *aead_request_ctx_dma(struct aead_request *req) +{ + unsigned int align = crypto_dma_align(); + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + + return PTR_ALIGN(aead_request_ctx(req), align); +} + static inline void aead_request_complete(struct aead_request *req, int err) { req->base.complete(&req->base, err); @@ -108,6 +123,13 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead, aead->reqsize = reqsize; } +static inline void crypto_aead_set_reqsize_dma(struct crypto_aead *aead, + unsigned int reqsize) +{ + reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1); + aead->reqsize = reqsize; +} + static inline void aead_init_queue(struct aead_queue *queue, unsigned int max_qlen) { diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index 8d3220c9ab77..aaf1092b93b8 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -33,15 +33,37 @@ static inline void *akcipher_request_ctx(struct akcipher_request *req) return req->__ctx; } +static inline void *akcipher_request_ctx_dma(struct akcipher_request *req) +{ + unsigned int align = crypto_dma_align(); + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + + return PTR_ALIGN(akcipher_request_ctx(req), align); +} + static inline void akcipher_set_reqsize(struct crypto_akcipher *akcipher, unsigned int reqsize) { - crypto_akcipher_alg(akcipher)->reqsize = reqsize; + akcipher->reqsize = reqsize; +} + +static inline void akcipher_set_reqsize_dma(struct crypto_akcipher *akcipher, + unsigned int reqsize) +{ + reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1); + akcipher->reqsize = reqsize; } static inline void *akcipher_tfm_ctx(struct crypto_akcipher *tfm) { - return tfm->base.__crt_ctx; + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *akcipher_tfm_ctx_dma(struct crypto_akcipher *tfm) +{ + return crypto_tfm_ctx_dma(&tfm->base); } static inline void akcipher_request_complete(struct akcipher_request *req, diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 25806141db59..1a2a41b79253 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -75,7 +75,13 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count); int ahash_register_instance(struct crypto_template *tmpl, struct ahash_instance *inst); -bool crypto_shash_alg_has_setkey(struct shash_alg *alg); +int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, + unsigned int keylen); + +static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) +{ + return alg->setkey != shash_no_setkey; +} static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg) { @@ -134,6 +140,11 @@ static inline void *crypto_ahash_ctx(struct crypto_ahash *tfm) return crypto_tfm_ctx(crypto_ahash_tfm(tfm)); } +static inline void *crypto_ahash_ctx_dma(struct crypto_ahash *tfm) +{ 
+ return crypto_tfm_ctx_dma(crypto_ahash_tfm(tfm)); +} + static inline struct ahash_alg *__crypto_ahash_alg(struct crypto_alg *alg) { return container_of(__crypto_hash_alg_common(alg), struct ahash_alg, @@ -146,6 +157,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, tfm->reqsize = reqsize; } +static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash, + unsigned int reqsize) +{ + reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1); + ahash->reqsize = reqsize; +} + static inline struct crypto_instance *ahash_crypto_instance( struct ahash_instance *inst) { @@ -169,6 +187,16 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst) return crypto_instance_ctx(ahash_crypto_instance(inst)); } +static inline void *ahash_request_ctx_dma(struct ahash_request *req) +{ + unsigned int align = crypto_dma_align(); + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + + return PTR_ALIGN(ahash_request_ctx(req), align); +} + static inline void ahash_request_complete(struct ahash_request *req, int err) { req->base.complete(&req->base, err); diff --git a/include/crypto/internal/kpp.h b/include/crypto/internal/kpp.h index 9cb0662ebe87..3c9726e89f53 100644 --- a/include/crypto/internal/kpp.h +++ b/include/crypto/internal/kpp.h @@ -50,9 +50,37 @@ static inline void *kpp_request_ctx(struct kpp_request *req) return req->__ctx; } +static inline void *kpp_request_ctx_dma(struct kpp_request *req) +{ + unsigned int align = crypto_dma_align(); + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + + return PTR_ALIGN(kpp_request_ctx(req), align); +} + +static inline void kpp_set_reqsize(struct crypto_kpp *kpp, + unsigned int reqsize) +{ + kpp->reqsize = reqsize; +} + +static inline void kpp_set_reqsize_dma(struct crypto_kpp *kpp, + unsigned int reqsize) +{ + reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1); + kpp->reqsize = reqsize; +} + static inline void *kpp_tfm_ctx(struct crypto_kpp *tfm) { - return tfm->base.__crt_ctx; + return crypto_tfm_ctx(&tfm->base); +} + +static inline void *kpp_tfm_ctx_dma(struct crypto_kpp *tfm) +{ + return crypto_tfm_ctx_dma(&tfm->base); } static inline void kpp_request_complete(struct kpp_request *req, int err) diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index f834274c2493..252cc949d4ee 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -8,7 +8,8 @@ */ #ifndef _CRYPTO_SCOMP_INT_H #define _CRYPTO_SCOMP_INT_H -#include <linux/crypto.h> + +#include <crypto/algapi.h> #define SCOMP_SCRATCH_SIZE 131072 diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index a2339f80a615..06d0a5491cf3 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -14,6 +14,14 @@ #include <linux/list.h> #include <linux/types.h> +/* + * Set this if your algorithm is sync but needs a reqsize larger + * than MAX_SYNC_SKCIPHER_REQSIZE. + * + * Reuse bit that is specific to hash algorithms. 
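
[Editor's aside: all of the *_set_reqsize_dma() and *_request_ctx_dma() helpers added in these hunks share one arithmetic trick, isolated below as a standalone sketch. dma_ctx_padding() is a made-up name; CRYPTO_DMA_PADDING in algapi.h is the real compile-time form of the same expression.]

/* Over-allocate by the worst case, then align at run time: the extra
 * bytes needed are (align - 1), minus whatever the allocator already
 * guarantees (the base context alignment), hence the mask. */
static inline unsigned int dma_ctx_padding(unsigned int dma_align,
					   unsigned int ctx_align)
{
	return (dma_align - 1) & ~(ctx_align - 1);
}

[The setters add exactly this amount to the request size, and the *_ctx_dma()/*_request_ctx_dma() getters recover the aligned pointer with PTR_ALIGN().]
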
+ */ +#define CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE CRYPTO_ALG_OPTIONAL_KEY + struct aead_request; struct rtattr; @@ -122,6 +130,13 @@ static inline void crypto_skcipher_set_reqsize( skcipher->reqsize = reqsize; } +static inline void crypto_skcipher_set_reqsize_dma( + struct crypto_skcipher *skcipher, unsigned int reqsize) +{ + reqsize += crypto_dma_align() & ~(crypto_tfm_ctx_alignment() - 1); + skcipher->reqsize = reqsize; +} + int crypto_register_skcipher(struct skcipher_alg *alg); void crypto_unregister_skcipher(struct skcipher_alg *alg); int crypto_register_skciphers(struct skcipher_alg *algs, int count); @@ -151,11 +166,26 @@ static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm) return crypto_tfm_ctx(&tfm->base); } +static inline void *crypto_skcipher_ctx_dma(struct crypto_skcipher *tfm) +{ + return crypto_tfm_ctx_dma(&tfm->base); +} + static inline void *skcipher_request_ctx(struct skcipher_request *req) { return req->__ctx; } +static inline void *skcipher_request_ctx_dma(struct skcipher_request *req) +{ + unsigned int align = crypto_dma_align(); + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + + return PTR_ALIGN(skcipher_request_ctx(req), align); +} + static inline u32 skcipher_request_flags(struct skcipher_request *req) { return req->base.flags; diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h index 24d01e9877c1..33ff32878802 100644 --- a/include/crypto/kpp.h +++ b/include/crypto/kpp.h @@ -37,9 +37,13 @@ struct kpp_request { * struct crypto_kpp - user-instantiated object which encapsulate * algorithms and core processing logic * + * @reqsize: Request context size required by algorithm + * implementation * @base: Common crypto API algorithm data structure */ struct crypto_kpp { + unsigned int reqsize; + struct crypto_tfm base; }; @@ -64,8 +68,6 @@ struct crypto_kpp { * put in place here. * @exit: Undo everything @init did. * - * @reqsize: Request context size required by algorithm - * implementation * @base: Common crypto API algorithm data structure */ struct kpp_alg { @@ -79,7 +81,6 @@ struct kpp_alg { int (*init)(struct crypto_kpp *tfm); void (*exit)(struct crypto_kpp *tfm); - unsigned int reqsize; struct crypto_alg base; }; @@ -128,7 +129,7 @@ static inline struct kpp_alg *crypto_kpp_alg(struct crypto_kpp *tfm) static inline unsigned int crypto_kpp_reqsize(struct crypto_kpp *tfm) { - return crypto_kpp_alg(tfm)->reqsize; + return tfm->reqsize; } static inline void kpp_request_set_tfm(struct kpp_request *req, diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index ccdb05f68a75..f2c42b4111b1 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -93,7 +93,6 @@ static inline void scatterwalk_done(struct scatter_walk *walk, int out, void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, size_t nbytes, int out); -void *scatterwalk_map(struct scatter_walk *walk); void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, unsigned int start, unsigned int nbytes, int out); diff --git a/include/dt-bindings/mailbox/mediatek,mt8188-gce.h b/include/dt-bindings/mailbox/mediatek,mt8188-gce.h new file mode 100644 index 000000000000..119865787b47 --- /dev/null +++ b/include/dt-bindings/mailbox/mediatek,mt8188-gce.h @@ -0,0 +1,967 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ +/* + * Copyright (c) 2022 MediaTek Inc. 
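
[Editor's aside on the kpp.h hunk just above: with @reqsize moved into struct crypto_kpp, an implementation sets it per-instance from its ->init() hook rather than through a static field. A hypothetical sketch; demo_req_ctx and demo_kpp_init() are made up, while kpp_set_reqsize() is the helper added in internal/kpp.h above.]

struct demo_req_ctx {
	u8 scratch[64];	/* whatever per-request state the driver needs */
};

static int demo_kpp_init(struct crypto_kpp *tfm)
{
	/* Was: a static .reqsize field in struct kpp_alg. */
	kpp_set_reqsize(tfm, sizeof(struct demo_req_ctx));
	return 0;
}
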
+ * + */ +#ifndef _DT_BINDINGS_GCE_MT8188_H +#define _DT_BINDINGS_GCE_MT8188_H + +#define CMDQ_THR_PRIO_LOWEST 0 +#define CMDQ_THR_PRIO_1 1 +#define CMDQ_THR_PRIO_2 2 +#define CMDQ_THR_PRIO_3 3 +#define CMDQ_THR_PRIO_4 4 +#define CMDQ_THR_PRIO_5 5 +#define CMDQ_THR_PRIO_6 6 +#define CMDQ_THR_PRIO_HIGHEST 7 + +#define SUBSYS_1400XXXX 0 +#define SUBSYS_1401XXXX 1 +#define SUBSYS_1402XXXX 2 +#define SUBSYS_1c00XXXX 3 +#define SUBSYS_1c01XXXX 4 +#define SUBSYS_1c02XXXX 5 +#define SUBSYS_1c10XXXX 6 +#define SUBSYS_1c11XXXX 7 +#define SUBSYS_1c12XXXX 8 +#define SUBSYS_14f0XXXX 9 +#define SUBSYS_14f1XXXX 10 +#define SUBSYS_14f2XXXX 11 +#define SUBSYS_1800XXXX 12 +#define SUBSYS_1801XXXX 13 +#define SUBSYS_1802XXXX 14 +#define SUBSYS_1803XXXX 15 +#define SUBSYS_1032XXXX 16 +#define SUBSYS_1033XXXX 17 +#define SUBSYS_1600XXXX 18 +#define SUBSYS_1601XXXX 19 +#define SUBSYS_14e0XXXX 20 +#define SUBSYS_1c20XXXX 21 +#define SUBSYS_1c30XXXX 22 +#define SUBSYS_1c40XXXX 23 +#define SUBSYS_1c50XXXX 24 +#define SUBSYS_1c60XXXX 25 +#define SUBSYS_NO_SUPPORT 99 + +#define CMDQ_EVENT_IMG_SOF 0 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_0 1 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_1 2 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_2 3 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_3 4 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_4 5 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_5 6 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_6 7 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_7 8 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_8 9 +#define CMDQ_EVENT_IMG_TRAW0_CQ_THR_DONE_9 10 +#define CMDQ_EVENT_IMG_TRAW0_DMA_ERROR_INT 11 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_0 12 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_1 13 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_2 14 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_3 15 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_4 16 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_5 17 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_6 18 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_7 19 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_8 20 +#define CMDQ_EVENT_IMG_TRAW1_CQ_THR_DONE_9 21 +#define CMDQ_EVENT_IMG_TRAW1_DMA_ERROR_INT 22 +#define CMDQ_EVENT_IMG_ADL_RESERVED 23 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_0 24 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_1 25 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_2 26 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_3 27 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_4 28 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_5 29 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_6 30 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_7 31 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_8 32 +#define CMDQ_EVENT_IMG_DIP_CQ_THR_DONE_9 33 +#define CMDQ_EVENT_IMG_DIP_DMA_ERR 34 +#define CMDQ_EVENT_IMG_DIP_NR_DMA_ERR 35 +#define CMDQ_EVENT_DIP_DUMMY_0 36 +#define CMDQ_EVENT_DIP_DUMMY_1 37 +#define CMDQ_EVENT_DIP_DUMMY_2 38 +#define CMDQ_EVENT_IMG_WPE_EIS_GCE_FRAME_DONE 39 +#define CMDQ_EVENT_IMG_WPE_EIS_DONE_SYNC_OUT 40 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_0 41 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_1 42 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_2 43 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_3 44 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_4 45 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_5 46 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_6 47 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_7 48 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_8 49 +#define CMDQ_EVENT_IMG_WPE_EIS_CQ_THR_DONE_9 50 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_0 51 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_1 52 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_2 53 +#define 
CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_3 54 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_4 55 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_5 56 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_6 57 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_7 58 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_8 59 +#define CMDQ_EVENT_IMG_PQDIP_A_CQ_THR_DONE_9 60 +#define CMDQ_EVENT_IMG_PQDIP_A_DMA_ERR 61 +#define CMDQ_EVENT_WPE0_DUMMY_0 62 +#define CMDQ_EVENT_WPE0_DUMMY_1 63 +#define CMDQ_EVENT_WPE0_DUMMY_2 64 +#define CMDQ_EVENT_IMG_WPE_TNR_GCE_FRAME_DONE 65 +#define CMDQ_EVENT_IMG_WPE_TNR_DONE_SYNC_OUT 66 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_0 67 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_1 68 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_2 69 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_3 70 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_4 71 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_5 72 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_6 73 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_7 74 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_8 75 +#define CMDQ_EVENT_IMG_WPE_TNR_CQ_THR_DONE_9 76 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_0 77 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_1 78 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_2 79 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_3 80 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_4 81 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_5 82 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_6 83 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_7 84 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_8 85 +#define CMDQ_EVENT_IMG_PQDIP_B_CQ_THR_DONE_9 86 +#define CMDQ_EVENT_IMG_PQDIP_B_DMA_ERR 87 +#define CMDQ_EVENT_WPE1_DUMMY_0 88 +#define CMDQ_EVENT_WPE1_DUMMY_1 89 +#define CMDQ_EVENT_WPE1_DUMMY_2 90 +#define CMDQ_EVENT_IMG_WPE_LITE_GCE_FRAME_DONE 91 +#define CMDQ_EVENT_IMG_WPE_LITE_DONE_SYNC_OUT 92 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_0 93 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_1 94 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_2 95 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_3 96 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_4 97 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_5 98 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_6 99 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_7 100 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_8 101 +#define CMDQ_EVENT_IMG_WPE_LITE_CQ_THR_DONE_9 102 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_0 103 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_1 104 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_2 105 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_3 106 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_4 107 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_5 108 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_6 109 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_7 110 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_8 111 +#define CMDQ_EVENT_IMG_XTRAW_CQ_THR_DONE_9 112 +#define CMDQ_EVENT_IMG_XTRAW_DMA_ERR_EVENT 113 +#define CMDQ_EVENT_WPE2_DUMMY_0 114 +#define CMDQ_EVENT_WPE2_DUMMY_1 115 +#define CMDQ_EVENT_WPE2_DUMMY_2 116 +#define CMDQ_EVENT_IMG_IMGSYS_IPE_DUMMY 117 +#define CMDQ_EVENT_IMG_IMGSYS_IPE_FDVT_DONE 118 +#define CMDQ_EVENT_IMG_IMGSYS_IPE_ME_DONE 119 +#define CMDQ_EVENT_IMG_IMGSYS_IPE_DVS_DONE 120 +#define CMDQ_EVENT_IMG_IMGSYS_IPE_DVP_DONE 121 +#define CMDQ_EVENT_FDVT1_RESERVED 122 +#define CMDQ_EVENT_IMG_ENG_EVENT 123 +#define CMDQ_EVENT_CAMSUBA_SW_PASS1_DONE 129 +#define CMDQ_EVENT_CAMSUBB_SW_PASS1_DONE 130 +#define CMDQ_EVENT_CAMSUBC_SW_PASS1_DONE 131 +#define CMDQ_EVENT_GCAMSV_A_1_SW_PASS1_DONE 132 +#define CMDQ_EVENT_GCAMSV_A_2_SW_PASS1_DONE 133 +#define CMDQ_EVENT_GCAMSV_B_1_SW_PASS1_DONE 
134 +#define CMDQ_EVENT_GCAMSV_B_2_SW_PASS1_DONE 135 +#define CMDQ_EVENT_GCAMSV_C_1_SW_PASS1_DONE 136 +#define CMDQ_EVENT_GCAMSV_C_2_SW_PASS1_DONE 137 +#define CMDQ_EVENT_GCAMSV_D_1_SW_PASS1_DONE 138 +#define CMDQ_EVENT_GCAMSV_D_2_SW_PASS1_DONE 139 +#define CMDQ_EVENT_GCAMSV_E_1_SW_PASS1_DONE 140 +#define CMDQ_EVENT_GCAMSV_E_2_SW_PASS1_DONE 141 +#define CMDQ_EVENT_GCAMSV_F_1_SW_PASS1_DONE 142 +#define CMDQ_EVENT_GCAMSV_F_2_SW_PASS1_DONE 143 +#define CMDQ_EVENT_GCAMSV_G_1_SW_PASS1_DONE 144 +#define CMDQ_EVENT_GCAMSV_G_2_SW_PASS1_DONE 145 +#define CMDQ_EVENT_GCAMSV_H_1_SW_PASS1_DONE 146 +#define CMDQ_EVENT_GCAMSV_H_2_SW_PASS1_DONE 147 +#define CMDQ_EVENT_GCAMSV_I_1_SW_PASS1_DONE 148 +#define CMDQ_EVENT_GCAMSV_I_2_SW_PASS1_DONE 149 +#define CMDQ_EVENT_GCAMSV_J_1_SW_PASS1_DONE 150 +#define CMDQ_EVENT_GCAMSV_J_2_SW_PASS1_DONE 151 +#define CMDQ_EVENT_MRAW_0_SW_PASS1_DONE 152 +#define CMDQ_EVENT_MRAW_1_SW_PASS1_DONE 153 +#define CMDQ_EVENT_MRAW_2_SW_PASS1_DONE 154 +#define CMDQ_EVENT_MRAW_3_SW_PASS1_DONE 155 +#define CMDQ_EVENT_SENINF_CAM0_FIFO_FULL 156 +#define CMDQ_EVENT_SENINF_CAM1_FIFO_FULL 157 +#define CMDQ_EVENT_SENINF_CAM2_FIFO_FULL 158 +#define CMDQ_EVENT_SENINF_CAM3_FIFO_FULL 159 +#define CMDQ_EVENT_SENINF_CAM4_FIFO_FULL 160 +#define CMDQ_EVENT_SENINF_CAM5_FIFO_FULL 161 +#define CMDQ_EVENT_SENINF_CAM6_FIFO_FULL 162 +#define CMDQ_EVENT_SENINF_CAM7_FIFO_FULL 163 +#define CMDQ_EVENT_SENINF_CAM8_FIFO_FULL 164 +#define CMDQ_EVENT_SENINF_CAM9_FIFO_FULL 165 +#define CMDQ_EVENT_SENINF_CAM10_FIFO_FULL 166 +#define CMDQ_EVENT_SENINF_CAM11_FIFO_FULL 167 +#define CMDQ_EVENT_SENINF_CAM12_FIFO_FULL 168 +#define CMDQ_EVENT_SENINF_CAM13_FIFO_FULL 169 +#define CMDQ_EVENT_SENINF_CAM14_FIFO_FULL 170 +#define CMDQ_EVENT_SENINF_CAM15_FIFO_FULL 171 +#define CMDQ_EVENT_SENINF_CAM16_FIFO_FULL 172 +#define CMDQ_EVENT_SENINF_CAM17_FIFO_FULL 173 +#define CMDQ_EVENT_SENINF_CAM18_FIFO_FULL 174 +#define CMDQ_EVENT_SENINF_CAM19_FIFO_FULL 175 +#define CMDQ_EVENT_SENINF_CAM20_FIFO_FULL 176 +#define CMDQ_EVENT_SENINF_CAM21_FIFO_FULL 177 +#define CMDQ_EVENT_SENINF_CAM22_FIFO_FULL 178 +#define CMDQ_EVENT_SENINF_CAM23_FIFO_FULL 179 +#define CMDQ_EVENT_SENINF_CAM24_FIFO_FULL 180 +#define CMDQ_EVENT_SENINF_CAM25_FIFO_FULL 181 +#define CMDQ_EVENT_SENINF_CAM26_FIFO_FULL 182 +#define CMDQ_EVENT_TG_OVRUN_MRAW0_INT 183 +#define CMDQ_EVENT_TG_OVRUN_MRAW1_INT 184 +#define CMDQ_EVENT_TG_OVRUN_MRAW2_INT 185 +#define CMDQ_EVENT_TG_OVRUN_MRAW3_INT 186 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW0_INT 187 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW1_INT 188 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW2_INT 189 +#define CMDQ_EVENT_DMA_R1_ERROR_MRAW3_INT 190 +#define CMDQ_EVENT_PDA0_IRQO_EVENT_DONE_D1 191 +#define CMDQ_EVENT_PDA1_IRQO_EVENT_DONE_D1 192 +#define CMDQ_EVENT_CAM_SUBA_TG_INT1 193 +#define CMDQ_EVENT_CAM_SUBA_TG_INT2 194 +#define CMDQ_EVENT_CAM_SUBA_TG_INT3 195 +#define CMDQ_EVENT_CAM_SUBA_TG_INT4 196 +#define CMDQ_EVENT_CAM_SUBB_TG_INT1 197 +#define CMDQ_EVENT_CAM_SUBB_TG_INT2 198 +#define CMDQ_EVENT_CAM_SUBB_TG_INT3 199 +#define CMDQ_EVENT_CAM_SUBB_TG_INT4 200 +#define CMDQ_EVENT_CAM_SUBC_TG_INT1 201 +#define CMDQ_EVENT_CAM_SUBC_TG_INT2 202 +#define CMDQ_EVENT_CAM_SUBC_TG_INT3 203 +#define CMDQ_EVENT_CAM_SUBC_TG_INT4 204 +#define CMDQ_EVENT_CAM_SUBA_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 205 +#define CMDQ_EVENT_CAM_SUBA_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 206 +#define CMDQ_EVENT_CAM_SUBA_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 207 +#define CMDQ_EVENT_CAM_SUBA_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 208 +#define CMDQ_EVENT_CAM_SUBB_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 
209 +#define CMDQ_EVENT_CAM_SUBB_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 210 +#define CMDQ_EVENT_CAM_SUBB_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 211 +#define CMDQ_EVENT_CAM_SUBB_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 212 +#define CMDQ_EVENT_CAM_SUBC_IMGO_R1_LOW_LATENCY_LINE_CNT_INT 213 +#define CMDQ_EVENT_CAM_SUBC_YUVO_R1_LOW_LATENCY_LINE_CNT_INT 214 +#define CMDQ_EVENT_CAM_SUBC_YUVO_R3_LOW_LATENCY_LINE_CNT_INT 215 +#define CMDQ_EVENT_CAM_SUBC_DRZS4NO_R1_LOW_LATENCY_LINE_CNT_INT 216 +#define CMDQ_EVENT_RAW_SEL_SOF_SUBA 217 +#define CMDQ_EVENT_RAW_SEL_SOF_SUBB 218 +#define CMDQ_EVENT_RAW_SEL_SOF_SUBC 219 +#define CMDQ_EVENT_CAM_SUBA_RING_BUFFER_OVERFLOW_INT_IN 220 +#define CMDQ_EVENT_CAM_SUBB_RING_BUFFER_OVERFLOW_INT_IN 221 +#define CMDQ_EVENT_CAM_SUBC_RING_BUFFER_OVERFLOW_INT_IN 222 +#define CMDQ_EVENT_VPP0_MDP_RDMA_SOF 256 +#define CMDQ_EVENT_VPP0_MDP_FG_SOF 257 +#define CMDQ_EVENT_VPP0_STITCH_SOF 258 +#define CMDQ_EVENT_VPP0_MDP_HDR_SOF 259 +#define CMDQ_EVENT_VPP0_MDP_AAL_SOF 260 +#define CMDQ_EVENT_VPP0_MDP_RSZ_IN_RSZ_SOF 261 +#define CMDQ_EVENT_VPP0_MDP_TDSHP_SOF 262 +#define CMDQ_EVENT_VPP0_DISP_COLOR_SOF 263 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_SOF 264 +#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_SOF 265 +#define CMDQ_EVENT_VPP0_MDP_TCC_IN_SOF 266 +#define CMDQ_EVENT_VPP0_MDP_WROT_SOF 267 +#define CMDQ_EVENT_VPP0_WARP0_MMSYS_TOP_RELAY_SOF_PRE 269 +#define CMDQ_EVENT_VPP0_WARP1_MMSYS_TOP_RELAY_SOF_PRE 270 +#define CMDQ_EVENT_VPP0_VPP1_MMSYS_TOP_RELAY_SOF 271 +#define CMDQ_EVENT_VPP0_VPP1_IN_MMSYS_TOP_RELAY_SOF_PRE 272 +#define CMDQ_EVENT_VPP0_DISP_RDMA_SOF 273 +#define CMDQ_EVENT_VPP0_DISP_WDMA_SOF 274 +#define CMDQ_EVENT_VPP0_MDP_HMS_SOF 275 +#define CMDQ_EVENT_VPP0_MDP_RDMA_FRAME_DONE 288 +#define CMDQ_EVENT_VPP0_MDP_FG_TILE_DONE 289 +#define CMDQ_EVENT_VPP0_STITCH_FRAME_DONE 290 +#define CMDQ_EVENT_VPP0_MDP_HDR_FRAME_DONE 291 +#define CMDQ_EVENT_VPP0_MDP_AAL_FRAME_DONE 292 +#define CMDQ_EVENT_VPP0_MDP_RSZ_FRAME_DONE 293 +#define CMDQ_EVENT_VPP0_MDP_TDSHP_FRAME_DONE 294 +#define CMDQ_EVENT_VPP0_DISP_COLOR_FRAME_DONE 295 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_DONE 296 +#define CMDQ_EVENT_VPP0_VPP_PADDING_IN_PADDING_FRAME_DONE 297 +#define CMDQ_EVENT_VPP0_MDP_TCC_TCC_FRAME_DONE 298 +#define CMDQ_EVENT_VPP0_MDP_WROT_VIDO_WDONE 299 +#define CMDQ_EVENT_VPP0_DISP_RDMA_FRAME_DONE 305 +#define CMDQ_EVENT_VPP0_DISP_WDMA_FRAME_DONE 306 +#define CMDQ_EVENT_VPP0_MDP_HMS_FRAME_DONE 307 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_0 320 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_1 321 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_2 322 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_3 323 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_4 324 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_5 325 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_6 326 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_7 327 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_8 328 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_9 329 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_10 330 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_11 331 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_12 332 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_13 333 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_14 334 +#define CMDQ_EVENT_VPP0_DISP_MUTEX_STREAM_DONE_15 335 +#define CMDQ_EVENT_VPP0_DISP_RDMA_0_UNDERRUN 336 +#define CMDQ_EVENT_VPP0_DISP_RDMA_1_UNDERRUN 337 +#define CMDQ_EVENT_VPP0_U_MERGE4_UNDERRUN 338 +#define CMDQ_EVENT_VPP0_U_VPP_SPLIT_VIDEO_0_OVERFLOW 339 +#define CMDQ_EVENT_VPP0_U_VPP_SPLIT_VIDEO_1_OVERFLOW 
340 +#define CMDQ_EVENT_VPP0_DSI_0_UNDERRUN 341 +#define CMDQ_EVENT_VPP0_DSI_1_UNDERRUN 342 +#define CMDQ_EVENT_VPP0_DP_INTF_0 343 +#define CMDQ_EVENT_VPP0_DP_INTF_1 344 +#define CMDQ_EVENT_VPP0_DPI_0 345 +#define CMDQ_EVENT_VPP0_DPI_1 346 +#define CMDQ_EVENT_VPP0_MDP_RDMA_SW_RST_DONE 352 +#define CMDQ_EVENT_VPP0_MDP_RDMA_PM_VALID_EVENT 353 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_FRAME_RESET_DONE_PULSE 354 +#define CMDQ_EVENT_VPP0_MDP_WROT_SW_RST_DONE 355 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_0 356 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_1 357 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_2 358 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_3 359 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_4 360 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_5 361 +#define CMDQ_EVENT_VPP0_DISP_OVL_NOAFBC_TARGET_MATCH_6 362 +#define CMDQ_EVENT_VPP0_DISP_RDMA_DISP_RDMA_VALID_EVENT 363 +#define CMDQ_EVENT_VPP0_DISP_RDMA_DISP_RDMA_TARGET_LINE_EVENT 364 +#define CMDQ_EVENT_VPP0_DISP_WDMA_SW_RST_DONE 365 +#define CMDQ_EVENT_VPP0_DISP_WDMA_WDMA_VALID_EVENT 366 +#define CMDQ_EVENT_VPP0_DISP_WDMA_WDMA_TARGET_LINE_EVENT 367 +#define CMDQ_EVENT_VPP1_HDMI_META_SOF 384 +#define CMDQ_EVENT_VPP1_DGI_SOF 385 +#define CMDQ_EVENT_VPP1_VPP_SPLIT_SOF 386 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_SOF 387 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_SOF 388 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_SOF 389 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_SOF 390 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_SOF 391 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_SOF 392 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_SOF 393 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_SOF 394 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_SOF 395 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_SOF 396 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_SOF 397 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_SOF 398 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_SOF 399 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_SOF 400 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_SOF 401 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_SOF 402 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_TDSHP_SOF 403 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_TDSHP_SOF 404 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_TDSHP_SOF 405 +#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_SOF 406 +#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_SOF 407 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_SOF 408 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_SOF 409 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_SOF 410 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_SOF 411 +#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_SOF 412 +#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_SOF 413 +#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_SOF 414 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SOF 415 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SOF 416 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SOF 417 +#define CMDQ_EVENT_VPP1_VPP0_DL_IRLY_SOF 418 +#define CMDQ_EVENT_VPP1_VPP0_DL_ORLY_SOF 419 +#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_0_SOF 420 +#define CMDQ_EVENT_VPP1_VDO0_DL_ORLY_1_SOF 421 +#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_0_SOF 422 +#define CMDQ_EVENT_VPP1_VDO1_DL_ORLY_1_SOF 423 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RDMA_FRAME_DONE 424 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RDMA_FRAME_DONE 425 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_RDMA_FRAME_DONE 426 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_FRAME_DONE 427 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_FRAME_DONE 428 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_FRAME_DONE 429 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_FRAME_DONE 430 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_RSZ_FRAME_DONE 431 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_RSZ_FRAME_DONE 432 
+#define CMDQ_EVENT_VPP1_SVPP3_MDP_RSZ_FRAME_DONE 433 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_FG_TILE_DONE 434 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_FG_TILE_DONE 435 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_FG_TILE_DONE 436 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_HDR_FRAME_DONE 437 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_HDR_FRAME_DONE 438 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_HDR_FRAME_DONE 439 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_AAL_FRAME_DONE 440 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_AAL_FRAME_DONE 441 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_AAL_FRAME_DONE 442 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_TDSHP_FRAME_DONE 443 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_TDSHP_FRAME_DONE 444 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_TDSHP_FRAME_DONE 445 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_COLOR_FRAME_DONE 446 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_COLOR_FRAME_DONE 447 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_COLOR_FRAME_DONE 448 +#define CMDQ_EVENT_VPP1_SVPP1_VPP_PAD_FRAME_DONE 449 +#define CMDQ_EVENT_VPP1_SVPP2_VPP_PAD_FRAME_DONE 450 +#define CMDQ_EVENT_VPP1_SVPP3_VPP_PAD_FRAME_DONE 451 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_TCC_FRAME_DONE 452 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_0 456 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_1 457 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_2 458 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_3 459 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_4 460 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_5 461 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_6 462 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_7 463 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_8 464 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_9 465 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_10 466 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_11 467 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_12 468 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_13 469 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_14 470 +#define CMDQ_EVENT_VPP1_MUTEX_STREAM_DONE_GCE_EVENT_15 471 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_0 472 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_1 473 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_2 474 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_3 475 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_4 476 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_5 477 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_6 478 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_7 479 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_8 480 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_9 481 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_10 482 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_11 483 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_12 484 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_13 485 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_14 486 +#define CMDQ_EVENT_VPP1_MUTEX_BUF_UNDERRUN_GCE_EVENT_15 487 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_0 488 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_1 489 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_2 490 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_3 491 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_4 492 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_5 493 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_6 494 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_7 495 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_8 496 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_9 497 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_10 498 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_11 
499 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_12 500 +#define CMDQ_EVENT_VPP1_DGI_SYNC_EVENT_13 501 +#define CMDQ_EVENT_VPP1_SVPP3_VPP_MERGE_GCE_EVENT 502 +#define CMDQ_EVENT_VPP1_SVPP2_VPP_MERGE_GCE_EVENT 503 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_GCE_EVENT 504 +#define CMDQ_EVENT_VPP1_VPP_SPLIT_DGI_GCE_EVENT 505 +#define CMDQ_EVENT_VPP1_VPP_SPLIT_HDMI_GCE_EVENT 506 +#define CMDQ_EVENT_VPP1_SVPP3_MDP_WROT_SW_RST_DONE_GCE_EVENT 507 +#define CMDQ_EVENT_VPP1_SVPP2_MDP_WROT_SW_RST_DONE_GCE_EVENT 508 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_WROT_SW_RST_DONE_GCE_EVENT 509 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_NEW_EVENT_0 510 +#define CMDQ_EVENT_VPP1_SVPP1_MDP_OVL_NEW_EVENT_1 511 +#define CMDQ_EVENT_VDO0_DISP_OVL0_SOF 512 +#define CMDQ_EVENT_VDO0_DISP_WDMA0_SOF 513 +#define CMDQ_EVENT_VDO0_DISP_RDMA0_SOF 514 +#define CMDQ_EVENT_VDO0_DISP_COLOR0_SOF 515 +#define CMDQ_EVENT_VDO0_DISP_CCORR0_SOF 516 +#define CMDQ_EVENT_VDO0_DISP_AAL0_SOF 517 +#define CMDQ_EVENT_VDO0_DISP_GAMMA0_SOF 518 +#define CMDQ_EVENT_VDO0_DISP_DITHER0_SOF 519 +#define CMDQ_EVENT_VDO0_DSI0_SOF 520 +#define CMDQ_EVENT_VDO0_DSC_WRAP0C0_SOF 521 +#define CMDQ_EVENT_VDO0_DISP_OVL1_SOF 522 +#define CMDQ_EVENT_VDO0_DISP_WDMA1_SOF 523 +#define CMDQ_EVENT_VDO0_DISP_RDMA1_SOF 524 +#define CMDQ_EVENT_VDO0_DISP_COLOR1_SOF 525 +#define CMDQ_EVENT_VDO0_DISP_CCORR1_SOF 526 +#define CMDQ_EVENT_VDO0_DISP_AAL1_SOF 527 +#define CMDQ_EVENT_VDO0_DISP_GAMMA1_SOF 528 +#define CMDQ_EVENT_VDO0_DISP_DITHER1_SOF 529 +#define CMDQ_EVENT_VDO0_DSI1_SOF 530 +#define CMDQ_EVENT_VDO0_DSC_WRAP0C1_SOF 531 +#define CMDQ_EVENT_VDO0_VPP_MERGE0_SOF 532 +#define CMDQ_EVENT_VDO0_DP_INTF0_SOF 533 +#define CMDQ_EVENT_VDO0_DISP_DPI0_SOF 534 +#define CMDQ_EVENT_VDO0_DISP_DPI1_SOF 535 +#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_SOF 536 +#define CMDQ_EVENT_VDO0_MDP_WROT0_SOF 537 +#define CMDQ_EVENT_VDO0_DISP_RSZ0_SOF 538 +#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY0_SOF 539 +#define CMDQ_EVENT_VDO0_VPP1_DL_RELAY1_SOF 540 +#define CMDQ_EVENT_VDO0_VDO1_DL_RELAY2_SOF 541 +#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY3_SOF 542 +#define CMDQ_EVENT_VDO0_VDO0_DL_RELAY4_SOF 543 +#define CMDQ_EVENT_VDO0_DISP_PWM0_SOF 544 +#define CMDQ_EVENT_VDO0_DISP_PWM1_SOF 545 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_FRAME_DONE 546 +#define CMDQ_EVENT_VDO0_DISP_WDMA0_FRAME_DONE 547 +#define CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE 548 +#define CMDQ_EVENT_VDO0_DISP_COLOR0_O_FRAME_DONE 549 +#define CMDQ_EVENT_VDO0_DISP_CCORR0_O_FRAME_DONE 550 +#define CMDQ_EVENT_VDO0_DISP_AAL0_O_FRAME_DONE 551 +#define CMDQ_EVENT_VDO0_DISP_GAMMA0_O_FRAME_DONE 552 +#define CMDQ_EVENT_VDO0_DISP_DITHER0_O_FRAME_DONE 553 +#define CMDQ_EVENT_VDO0_DSI0_FRAME_DONE 554 +#define CMDQ_EVENT_VDO0_DSC_WRAP0_O_FRAME_DONE_0 555 +#define CMDQ_EVENT_VDO0_DISP_OVL1_O_FRAME_DONE 556 +#define CMDQ_EVENT_VDO0_DISP_WDMA1_O_FRAME_DONE 557 +#define CMDQ_EVENT_VDO0_DISP_RDMA1_O_FRAME_DONE 558 +#define CMDQ_EVENT_VDO0_DISP_COLOR1_O_FRAME_DONE 559 +#define CMDQ_EVENT_VDO0_DISP_CCORR1_O_FRAME_DONE 560 +#define CMDQ_EVENT_VDO0_DISP_AAL1_O_FRAME_DONE 561 +#define CMDQ_EVENT_VDO0_DISP_GAMMA1_O_FRAME_DONE 562 +#define CMDQ_EVENT_VDO0_DISP_DITHER1_O_FRAME_DONE 563 +#define CMDQ_EVENT_VDO0_DSI1_FRAME_DONE 564 +#define CMDQ_EVENT_VDO0_DSC_WRAP0_O_FRAME_DONE_1 565 +#define CMDQ_EVENT_VDO0_DP_INTF0_FRAME_DONE 567 +#define CMDQ_EVENT_VDO0_DISP_DPI0_O_FRAME_DONE 568 +#define CMDQ_EVENT_VDO0_DISP_DPI1_O_FRAME_DONE 569 +#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_O_FRAME_DONE 570 +#define CMDQ_EVENT_VDO0_MDP_WROT0_O_FRAME_DONE 571 +#define CMDQ_EVENT_VDO0_DISP_RSZ0_O_FRAME_DONE 572 +#define 
CMDQ_EVENT_VDO0_DISP_STREAM_DONE_0 574 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_1 575 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_2 576 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_3 577 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_4 578 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_5 579 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_6 580 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_7 581 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_8 582 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_9 583 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_10 584 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_11 585 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_12 586 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_13 587 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_14 588 +#define CMDQ_EVENT_VDO0_DISP_STREAM_DONE_15 589 +#define CMDQ_EVENT_VDO0_DISP_RDMA_0_UNDERRUN 590 +#define CMDQ_EVENT_VDO0_DISP_RDMA_1_UNDERRUN 591 +#define CMDQ_EVENT_VDO0_U_MERGE4_UNDERRUN 592 +#define CMDQ_EVENT_VDO0_DSI_0_UNDERRUN 595 +#define CMDQ_EVENT_VDO0_DSI_1_UNDERRUN 596 +#define CMDQ_EVENT_VDO0_DP_INTF_0 597 +#define CMDQ_EVENT_VDO0_DP_INTF_1 598 +#define CMDQ_EVENT_VDO0_DPI_0 599 +#define CMDQ_EVENT_VDO0_DPI_1 600 +#define CMDQ_EVENT_VDO0_DISP_SMIASSERT_ENG_EVENT 606 +#define CMDQ_EVENT_VDO0_DSI0_O_DSI_IRQ_EVENT_MM 607 +#define CMDQ_EVENT_VDO0_DSI0_TE_ENG_EVENT_MM 608 +#define CMDQ_EVENT_VDO0_DSI0_O_DSI_DONE_EVENT_MM 609 +#define CMDQ_EVENT_VDO0_DSI0_O_DSI_VACTL_EVENT_MM 610 +#define CMDQ_EVENT_VDO0_DSI1_O_DSI_IRQ_EVENT_MM 611 +#define CMDQ_EVENT_VDO0_DSI1_TE_ENG_EVENT_MM 612 +#define CMDQ_EVENT_VDO0_DSI1_O_DSI_DONE_EVENT_MM 613 +#define CMDQ_EVENT_VDO0_DSI1_O_DSI_VACTL_EVENT_MM 614 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VSYNC_START_EVENT_MM 615 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VSYNC_END_EVENT_MM 616 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VDE_START_EVENT_MM 617 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_DP_VDE_END_EVENT_MM 618 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_VACT_TARGET_LINE_EVENT_MM 619 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_LAST_SAFE_BLANK_EVENT_MM 620 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_LAST_LINE_EVENT_MM 621 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_TRIGGER_LOOP_CLEAR_EVENT_MM 622 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_TARGET_LINE_0_EVENT_MM 623 +#define CMDQ_EVENT_VDO0_DP_INTF0_O_TARGET_LINE_1_EVENT_MM 624 +#define CMDQ_EVENT_VDO0_DISP_POSTMASK0_O_FRAME_RESET_DONE_PULSE 625 +#define CMDQ_EVENT_VDO0_VPP_MERGE0_O_VPP_MERGE_EVENT 626 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_FRAME_RESET_DONE_PULSE 627 +#define CMDQ_EVENT_VDO0_DISP_RDMA0_O_DISP_RDMA_TARGET_LINE_EVENT 628 +#define CMDQ_EVENT_VDO0_DISP_WDMA0_O_WDMA_TARGET_LINE_EVENT 629 +#define CMDQ_EVENT_VDO0_DISP_WDMA0_O_SW_RST_DONE 630 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_0 631 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_1 632 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_2 633 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_3 634 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_4 635 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_5 636 +#define CMDQ_EVENT_VDO0_DISP_OVL0_O_TARGET_MATCH_EVENT_6 637 +#define CMDQ_EVENT_VDO0_MDP_WROT0_O_SW_RST_DONE 638 +#define CMDQ_EVENT_VDO0_RESERVED 639 +#define CMDQ_EVENT_VDO1_MDP_RDMA0_SOF 640 +#define CMDQ_EVENT_VDO1_MDP_RDMA1_SOF 641 +#define CMDQ_EVENT_VDO1_MDP_RDMA2_SOF 642 +#define CMDQ_EVENT_VDO1_MDP_RDMA3_SOF 643 +#define CMDQ_EVENT_VDO1_MDP_RDMA4_SOF 644 +#define CMDQ_EVENT_VDO1_MDP_RDMA5_SOF 645 +#define CMDQ_EVENT_VDO1_MDP_RDMA6_SOF 646 +#define CMDQ_EVENT_VDO1_MDP_RDMA7_SOF 647 +#define CMDQ_EVENT_VDO1_DISP_PADDING0_SOF 648 
+#define CMDQ_EVENT_VDO1_DISP_PADDING1_SOF 649 +#define CMDQ_EVENT_VDO1_DISP_PADDING2_SOF 650 +#define CMDQ_EVENT_VDO1_DISP_PADDING3_SOF 651 +#define CMDQ_EVENT_VDO1_DISP_PADDING4_SOF 652 +#define CMDQ_EVENT_VDO1_DISP_PADDING5_SOF 653 +#define CMDQ_EVENT_VDO1_DISP_PADDING6_SOF 654 +#define CMDQ_EVENT_VDO1_DISP_PADDING7_SOF 655 +#define CMDQ_EVENT_VDO1_DISP_RSZ0_SOF 656 +#define CMDQ_EVENT_VDO1_DISP_RSZ1_SOF 657 +#define CMDQ_EVENT_VDO1_DISP_RSZ2_SOF 658 +#define CMDQ_EVENT_VDO1_DISP_RSZ3_SOF 659 +#define CMDQ_EVENT_VDO1_VPP_MERGE0_SOF 660 +#define CMDQ_EVENT_VDO1_VPP_MERGE1_SOF 661 +#define CMDQ_EVENT_VDO1_VPP_MERGE2_SOF 662 +#define CMDQ_EVENT_VDO1_VPP_MERGE3_SOF 663 +#define CMDQ_EVENT_VDO1_VPP_MERGE4_SOF 664 +#define CMDQ_EVENT_VDO1_VPP2_DL_RELAY_SOF 665 +#define CMDQ_EVENT_VDO1_VPP3_DL_RELAY_SOF 666 +#define CMDQ_EVENT_VDO0_DSC_DL_ASYNC_SOF 667 +#define CMDQ_EVENT_VDO0_MERGE_DL_ASYNC_SOF 668 +#define CMDQ_EVENT_VDO1_OUT_DL_RELAY_SOF 669 +#define CMDQ_EVENT_VDO1_DISP_MIXER_SOF 670 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE0_SOF 671 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_SOF 672 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_SOF 673 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_SOF 674 +#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_SOF 675 +#define CMDQ_EVENT_VDO1_HDR_MLOAD_SOF 676 +#define CMDQ_EVENT_VDO1_DPI0_EXT_SOF 677 +#define CMDQ_EVENT_VDO1_DPI1_EXT_SOF 678 +#define CMDQ_EVENT_VDO1_DP_INTF_EXT_EXT_SOF 679 +#define CMDQ_EVENT_VDO1_MDP_RDMA0_FRAME_DONE 680 +#define CMDQ_EVENT_VDO1_MDP_RDMA1_FRAME_DONE 681 +#define CMDQ_EVENT_VDO1_MDP_RDMA2_FRAME_DONE 682 +#define CMDQ_EVENT_VDO1_MDP_RDMA3_FRAME_DONE 683 +#define CMDQ_EVENT_VDO1_MDP_RDMA4_FRAME_DONE 684 +#define CMDQ_EVENT_VDO1_MDP_RDMA5_FRAME_DONE 685 +#define CMDQ_EVENT_VDO1_MDP_RDMA6_FRAME_DONE 686 +#define CMDQ_EVENT_VDO1_MDP_RDMA7_FRAME_DONE 687 +#define CMDQ_EVENT_VDO1_DISP_PADDING0_FRAME_DONE 688 +#define CMDQ_EVENT_VDO1_DISP_PADDING1_FRAME_DONE 689 +#define CMDQ_EVENT_VDO1_DISP_PADDING2_FRAME_DONE 690 +#define CMDQ_EVENT_VDO1_DISP_PADDING3_FRAME_DONE 691 +#define CMDQ_EVENT_VDO1_DISP_PADDING4_FRAME_DONE 692 +#define CMDQ_EVENT_VDO1_DISP_PADDING5_FRAME_DONE 693 +#define CMDQ_EVENT_VDO1_DISP_PADDING6_FRAME_DONE 694 +#define CMDQ_EVENT_VDO1_DISP_PADDING7_FRAME_DONE 695 +#define CMDQ_EVENT_VDO1_DISP_RSZ0_FRAME_DONE 696 +#define CMDQ_EVENT_VDO1_DISP_RSZ1_FRAME_DONE 697 +#define CMDQ_EVENT_VDO1_DISP_RSZ2_FRAME_DONE 698 +#define CMDQ_EVENT_VDO1_DISP_RSZ3_FRAME_DONE 699 +#define CMDQ_EVENT_VDO1_VPP_MERGE0_FRAME_DONE 700 +#define CMDQ_EVENT_VDO1_VPP_MERGE1_FRAME_DONE 701 +#define CMDQ_EVENT_VDO1_VPP_MERGE2_FRAME_DONE 702 +#define CMDQ_EVENT_VDO1_VPP_MERGE3_FRAME_DONE 703 +#define CMDQ_EVENT_VDO1_VPP_MERGE4_FRAME_DONE 704 +#define CMDQ_EVENT_VDO1_DPI0_FRAME_DONE 705 +#define CMDQ_EVENT_VDO1_DPI1_FRAME_DONE 706 +#define CMDQ_EVENT_VDO1_DP_INTF0_FRAME_DONE 707 +#define CMDQ_EVENT_VDO1_DISP_MIXER_FRAME_DONE_MM 708 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_0 709 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_1 710 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_2 711 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_3 712 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_4 713 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_5 714 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_6 715 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_7 716 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_8 717 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_9 718 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_10 719 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_11 720 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_12 721 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_13 722 +#define 
CMDQ_EVENT_VDO1_STREAM_DONE_ENG_14 723 +#define CMDQ_EVENT_VDO1_STREAM_DONE_ENG_15 724 +#define CMDQ_EVENT_VDO1_DISP_RDMA_0_UNDERRUN 725 +#define CMDQ_EVENT_VDO1_DISP_RDMA_1_UNDERRUN 726 +#define CMDQ_EVENT_VDO1_U_MERGE4_UNDERRUN 727 +#define CMDQ_EVENT_VDO1_U_VPP_SPLIT_VIDEO_0_OVERFLOW 728 +#define CMDQ_EVENT_VDO1_U_VPP_SPLIT_VIDEO_1_OVERFLOW 729 +#define CMDQ_EVENT_VDO1_DSI_0_UNDERRUN 730 +#define CMDQ_EVENT_VDO1_DSI_1_UNDERRUN 731 +#define CMDQ_EVENT_VDO1_DP_INTF_0 732 +#define CMDQ_EVENT_VDO1_DP_INTF_1 733 +#define CMDQ_EVENT_VDO1_DPI_0 734 +#define CMDQ_EVENT_VDO1_DPI_1 735 +#define CMDQ_EVENT_VDO1_MDP_RDMA0_SW_RST_DONE 741 +#define CMDQ_EVENT_VDO1_MDP_RDMA1_SW_RST_DONE 742 +#define CMDQ_EVENT_VDO1_MDP_RDMA2_SW_RST_DONE 743 +#define CMDQ_EVENT_VDO1_MDP_RDMA3_SW_RST_DONE 744 +#define CMDQ_EVENT_VDO1_MDP_RDMA4_SW_RST_DONE 745 +#define CMDQ_EVENT_VDO1_MDP_RDMA5_SW_RST_DONE 746 +#define CMDQ_EVENT_VDO1_MDP_RDMA6_SW_RST_DONE 747 +#define CMDQ_EVENT_VDO1_MDP_RDMA7_SW_RST_DONE 748 +#define CMDQ_EVENT_VDO1_DP0_VDE_END_ENG_EVENT_MM 749 +#define CMDQ_EVENT_VDO1_DP0_VDE_START_ENG_EVENT_MM 750 +#define CMDQ_EVENT_VDO1_DP0_VSYNC_END_ENG_EVENT_MM 751 +#define CMDQ_EVENT_VDO1_DP0_VSYNC_START_ENG_EVENT_MM 752 +#define CMDQ_EVENT_VDO1_DP0_TARGET_LINE_ENG_EVENT_MM 753 +#define CMDQ_EVENT_VDO1_VPP_MERGE0_EVENT 754 +#define CMDQ_EVENT_VDO1_VPP_MERGE1_EVENT 755 +#define CMDQ_EVENT_VDO1_VPP_MERGE2_EVENT 756 +#define CMDQ_EVENT_VDO1_VPP_MERGE3_EVENT 757 +#define CMDQ_EVENT_VDO1_VPP_MERGE4_EVENT 758 +#define CMDQ_EVENT_VDO1_HDMITX_EVENT 759 +#define CMDQ_EVENT_VDO1_HDR_VDO_BE0_ADL_TRIG_EVENT_MM 760 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_THDR_ADL_TRIG_EVENT_MM 761 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE1_DM_ADL_TRIG_EVENT_MM 762 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_THDR_ADL_TRIG_EVENT_MM 763 +#define CMDQ_EVENT_VDO1_HDR_GFX_FE0_DM_ADL_TRIG_EVENT_MM 764 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_ADL_TRIG_EVENT_MM 765 +#define CMDQ_EVENT_VDO1_HDR_VDO_FE1_AD0_TRIG_EVENT_MM 766 +#define CMDQ_EVENT_VDO1_DPI0_TARGET_LINE_1_EVENT_MM 767 +#define CMDQ_EVENT_HANDSHAKE_0 768 +#define CMDQ_EVENT_HANDSHAKE_1 769 +#define CMDQ_EVENT_HANDSHAKE_2 770 +#define CMDQ_EVENT_HANDSHAKE_3 771 +#define CMDQ_EVENT_HANDSHAKE_4 772 +#define CMDQ_EVENT_HANDSHAKE_5 773 +#define CMDQ_EVENT_HANDSHAKE_6 774 +#define CMDQ_EVENT_HANDSHAKE_7 775 +#define CMDQ_EVENT_HANDSHAKE_8 776 +#define CMDQ_EVENT_HANDSHAKE_9 777 +#define CMDQ_EVENT_HANDSHAKE_10 778 +#define CMDQ_EVENT_HANDSHAKE_11 779 +#define CMDQ_EVENT_HANDSHAKE_12 780 +#define CMDQ_EVENT_HANDSHAKE_13 781 +#define CMDQ_EVENT_HANDSHAKE_14 782 +#define CMDQ_EVENT_HANDSHAKE_15 783 +#define CMDQ_EVENT_VDEC_SOC_EVENT_0 800 +#define CMDQ_EVENT_VDEC_SOC_EVENT_1 801 +#define CMDQ_EVENT_VDEC_SOC_EVENT_2 802 +#define CMDQ_EVENT_VDEC_SOC_EVENT_3 803 +#define CMDQ_EVENT_VDEC_SOC_EVENT_4 804 +#define CMDQ_EVENT_VDEC_SOC_EVENT_5 805 +#define CMDQ_EVENT_VDEC_SOC_EVENT_6 806 +#define CMDQ_EVENT_VDEC_SOC_EVENT_7 807 +#define CMDQ_EVENT_VDEC_SOC_EVENT_8 808 +#define CMDQ_EVENT_VDEC_SOC_EVENT_9 809 +#define CMDQ_EVENT_VDEC_SOC_EVENT_10 810 +#define CMDQ_EVENT_VDEC_SOC_EVENT_11 811 +#define CMDQ_EVENT_VDEC_SOC_EVENT_12 812 +#define CMDQ_EVENT_VDEC_SOC_EVENT_13 813 +#define CMDQ_EVENT_VDEC_SOC_EVENT_14 814 +#define CMDQ_EVENT_VDEC_SOC_EVENT_15 815 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_0 832 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_1 833 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_2 834 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_3 835 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_4 836 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_5 837 +#define 
CMDQ_EVENT_VDEC_CORE0_EVENT_6 838 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_7 839 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_8 840 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_9 841 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_10 842 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_11 843 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_12 844 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_13 845 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_14 846 +#define CMDQ_EVENT_VDEC_CORE0_EVENT_15 847 +#define CMDQ_EVENT_VENC_TOP_VENC_FRAME_DONE 865 +#define CMDQ_EVENT_VENC_TOP_VENC_PAUSE_DONE 866 +#define CMDQ_EVENT_VENC_TOP_JPGENC_DONE 867 +#define CMDQ_EVENT_VENC_TOP_VENC_MB_DONE 868 +#define CMDQ_EVENT_VENC_TOP_VENC_128BYTE_DONE 869 +#define CMDQ_EVENT_VENC_TOP_JPGDEC_DONE 870 +#define CMDQ_EVENT_VENC_TOP_VENC_SLICE_DONE 871 +#define CMDQ_EVENT_VENC_TOP_JPGDEC_INSUFF_DONE 872 +#define CMDQ_EVENT_VENC_TOP_WP_2ND_STAGE_DONE 874 +#define CMDQ_EVENT_VENC_TOP_WP_3RD_STAGE_DONE 875 +#define CMDQ_EVENT_VENC_TOP_PPS_HEADER_DONE 876 +#define CMDQ_EVENT_VENC_TOP_SPS_HEADER_DONE 877 +#define CMDQ_EVENT_VENC_TOP_VPS_HEADER_DONE 878 +#define CMDQ_EVENT_WPE_VPP0_WPE_GCE_FRAME_DONE 882 +#define CMDQ_EVENT_WPE_VPP0_WPE_DONE_SYNC_OUT 883 +#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_2 896 +#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_3 897 +#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_4 898 +#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_5 899 +#define CMDQ_EVENT_SVPP1_MDP_OVL_NEW_EVENT_6 900 +#define CMDQ_EVENT_VDO1_DPI0_TARGET_LINE_0_EVENT_MM 928 +#define CMDQ_EVENT_VDO1_DPI0_TRIGGER_LOOP_CLEAR_EVENT_MM 929 +#define CMDQ_EVENT_VDO1_DPI0_LAST_LINE_EVENT_MM 930 +#define CMDQ_EVENT_VDO1_DPI0_LAST_SAFE_BLANK_EVENT_MM 931 +#define CMDQ_EVENT_VDO1_DPI0_VSYNC_START_EVENT_MM 932 +#define CMDQ_EVENT_VDO1_DPI1_TARGET_LINE_1_EVENT_MM 933 +#define CMDQ_EVENT_VDO1_DPI1_TARGET_LINE_0_EVENT_MM 934 +#define CMDQ_EVENT_VDO1_DPI1_TRIGGER_LOOP_CLEAR_EVENT_MM 935 +#define CMDQ_EVENT_VDO1_DPI1_LAST_LINE_EVENT_MM 936 +#define CMDQ_EVENT_VDO1_DPI1_LAST_SAFE_BLANK_EVENT_MM 937 +#define CMDQ_EVENT_VDO1_DPI1_VSYNC_START_EVENT_MM 938 +#define CMDQ_EVENT_VDO1_DP_INTF_TARGET_LINE_1_EVENT_MM 939 +#define CMDQ_EVENT_VDO1_DP_INTF_TARGET_LINE_0_EVENT_MM 940 +#define CMDQ_EVENT_VDO1_DP_INTF_TRIGGER_LOOP_CLEAR_EVENT_MM 941 +#define CMDQ_EVENT_VDO1_DP_INTF_LAST_LINE_EVENT_MM 942 +#define CMDQ_EVENT_VDO1_DP_INTF_LAST_SAFE_BLANK_EVENT_MM 943 +#define CMDQ_EVENT_VBLANK_FALLING 946 +#define CMDQ_EVENT_VSC_FINISH 947 +#define CMDQ_EVENT_TPR_0 962 +#define CMDQ_EVENT_TPR_1 963 +#define CMDQ_EVENT_TPR_2 964 +#define CMDQ_EVENT_TPR_3 965 +#define CMDQ_EVENT_TPR_4 966 +#define CMDQ_EVENT_TPR_5 967 +#define CMDQ_EVENT_TPR_6 968 +#define CMDQ_EVENT_TPR_7 969 +#define CMDQ_EVENT_TPR_8 970 +#define CMDQ_EVENT_TPR_9 971 +#define CMDQ_EVENT_TPR_10 972 +#define CMDQ_EVENT_TPR_11 973 +#define CMDQ_EVENT_TPR_12 974 +#define CMDQ_EVENT_TPR_13 975 +#define CMDQ_EVENT_TPR_14 976 +#define CMDQ_EVENT_TPR_15 977 +#define CMDQ_EVENT_TPR_16 978 +#define CMDQ_EVENT_TPR_17 979 +#define CMDQ_EVENT_TPR_18 980 +#define CMDQ_EVENT_TPR_19 981 +#define CMDQ_EVENT_TPR_20 982 +#define CMDQ_EVENT_TPR_21 983 +#define CMDQ_EVENT_TPR_22 984 +#define CMDQ_EVENT_TPR_23 985 +#define CMDQ_EVENT_TPR_24 986 +#define CMDQ_EVENT_TPR_25 987 +#define CMDQ_EVENT_TPR_26 988 +#define CMDQ_EVENT_TPR_27 989 +#define CMDQ_EVENT_TPR_28 990 +#define CMDQ_EVENT_TPR_29 991 +#define CMDQ_EVENT_TPR_30 992 +#define CMDQ_EVENT_TPR_31 993 +#define CMDQ_EVENT_TPR_TIMEOUT_0 994 +#define CMDQ_EVENT_TPR_TIMEOUT_1 995 +#define CMDQ_EVENT_TPR_TIMEOUT_2 996 +#define CMDQ_EVENT_TPR_TIMEOUT_3 997 
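[Editorial aside: the CMDQ_EVENT_* hardware event IDs above (the last few timeout and OUTPIN IDs follow just below) are consumed in mainline through the MediaTek CMDQ mailbox helpers, where a GCE thread program can park until a hardware engine raises the event. A minimal sketch, assuming an already-initialized cmdq client; "cl" is hypothetical, the exact dt-bindings header path is not shown in this diff, and submission/teardown are omitted — this is not the actual driver code.]

	#include <linux/soc/mediatek/mtk-cmdq.h>
	/* CMDQ_EVENT_* IDs come from the GCE binding header above. */

	/* Park one GCE thread until display RDMA0 signals frame completion. */
	static int wait_rdma0_frame_done(struct cmdq_client *cl)
	{
		struct cmdq_pkt *pkt = cmdq_pkt_create(cl, PAGE_SIZE);

		if (IS_ERR(pkt))
			return PTR_ERR(pkt);

		/* Wait for the hardware event, then clear it for reuse. */
		cmdq_pkt_wfe(pkt, CMDQ_EVENT_VDO0_DISP_RDMA0_FRAME_DONE, true);
		cmdq_pkt_finalize(pkt);
		/* Submission (cmdq_pkt_flush_async) and teardown omitted. */
		return 0;
	}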
+#define CMDQ_EVENT_TPR_TIMEOUT_4 998 +#define CMDQ_EVENT_TPR_TIMEOUT_5 999 +#define CMDQ_EVENT_TPR_TIMEOUT_6 1000 +#define CMDQ_EVENT_TPR_TIMEOUT_7 1001 +#define CMDQ_EVENT_TPR_TIMEOUT_8 1002 +#define CMDQ_EVENT_TPR_TIMEOUT_9 1003 +#define CMDQ_EVENT_TPR_TIMEOUT_10 1004 +#define CMDQ_EVENT_TPR_TIMEOUT_11 1005 +#define CMDQ_EVENT_TPR_TIMEOUT_12 1006 +#define CMDQ_EVENT_TPR_TIMEOUT_13 1007 +#define CMDQ_EVENT_TPR_TIMEOUT_14 1008 +#define CMDQ_EVENT_TPR_TIMEOUT_15 1009 +#define CMDQ_EVENT_OUTPIN_0 1018 +#define CMDQ_EVENT_OUTPIN_1 1019 + +#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_EIS 124 +#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_TNR 125 +#define CMDQ_SYNC_TOKEN_IMGSYS_WPE_LITE 126 +#define CMDQ_SYNC_TOKEN_IMGSYS_TRAW 127 +#define CMDQ_SYNC_TOKEN_IMGSYS_LTRAW 128 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_1 223 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_2 224 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_3 225 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_4 226 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_5 227 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_6 228 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_7 229 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_8 230 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_9 231 +#define CMDQ_SYNC_TOKEN_CAMSYS_POOL_10 232 +#define CMDQ_SYNC_TOKEN_IMGSYS_XTRAW 233 +#define CMDQ_SYNC_TOKEN_IMGSYS_DIP 234 +#define CMDQ_SYNC_TOKEN_IMGSYS_PQDIP_A 235 +#define CMDQ_SYNC_TOKEN_IMGSYS_PQDIP_B 236 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_1 237 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_2 238 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_3 239 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_4 240 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_5 241 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_6 242 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_7 243 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_8 244 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_9 245 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_10 246 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_11 247 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_12 248 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_13 249 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_14 250 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_15 251 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_16 252 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_17 253 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_18 254 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_19 255 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_20 276 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_21 277 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_22 278 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_23 279 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_24 280 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_25 281 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_26 282 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_27 283 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_28 284 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_29 285 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_30 286 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_31 287 +#define CMDQ_SYNC_TOKEN_IPESYS_ME 300 +#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_TRAW 301 +#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_LTRAW 302 +#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_XTRAW 303 +#define CMDQ_SYNC_TOKEN_IMGSYS_VSS_DIP 304 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_32 308 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_33 309 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_34 310 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_35 311 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_36 312 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_37 313 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_38 314 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_39 315 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_40 316 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_41 370 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_42 371 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_43 372 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_44 373 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_45 374 
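[Editorial aside: the CMDQ_SYNC_TOKEN_* IDs above (the pool token list resumes just below) differ from the hardware events in that nothing in silicon raises them; they are software semaphores that one GCE thread sets and another consumes. A hedged sketch of that pairing, with hypothetical packet names and all creation/finalize/flush omitted:]

	/* "producer" and "consumer" are hypothetical packets bound to two
	 * different GCE threads; setup and submission are omitted.
	 */
	static void chain_imgsys_stages(struct cmdq_pkt *producer,
					struct cmdq_pkt *consumer)
	{
		/* Thread A raises the token when its stage is done... */
		cmdq_pkt_set_event(producer, CMDQ_SYNC_TOKEN_IMGSYS_TRAW);
		/* ...thread B parks until it sees it, then clears it. */
		cmdq_pkt_wfe(consumer, CMDQ_SYNC_TOKEN_IMGSYS_TRAW, true);
	}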
+#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_46 375 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_47 376 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_48 377 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_49 378 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_50 379 +#define CMDQ_SYNC_TOKEN_TZMP_ISP_WAIT 380 +#define CMDQ_SYNC_TOKEN_TZMP_ISP_SET 381 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_51 790 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_52 791 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_53 792 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_54 793 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_55 794 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_56 795 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_57 796 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_58 797 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_59 798 +#define CMDQ_SYNC_TOKEN_IMGSYS_POOL_60 799 +#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_WAIT 816 +#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_SET 817 +#define CMDQ_SYNC_TOKEN_PREBUILT_MDP_LOCK 818 +#define CMDQ_SYNC_TOKEN_PREBUILT_MML_WAIT 819 +#define CMDQ_SYNC_TOKEN_PREBUILT_MML_SET 820 +#define CMDQ_SYNC_TOKEN_PREBUILT_MML_LOCK 821 +#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_WAIT 822 +#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_SET 823 +#define CMDQ_SYNC_TOKEN_PREBUILT_VFMT_LOCK 824 +#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_WAIT 825 +#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_SET 826 +#define CMDQ_SYNC_TOKEN_PREBUILT_DISP_LOCK 827 +#define CMDQ_SYNC_TOKEN_CONFIG_DIRTY 848 +#define CMDQ_SYNC_TOKEN_STREAM_EOF 849 +#define CMDQ_SYNC_TOKEN_ESD_EOF 850 +#define CMDQ_SYNC_TOKEN_STREAM_BLOCK 851 +#define CMDQ_SYNC_TOKEN_CABC_EOF 852 +#define CMDQ_SYNC_TOKEN_VENC_INPUT_READY 853 +#define CMDQ_SYNC_TOKEN_VENC_EOF 854 +#define CMDQ_SYNC_TOKEN_SECURE_THR_EOF 855 +#define CMDQ_SYNC_TOKEN_USER_0 856 +#define CMDQ_SYNC_TOKEN_USER_1 857 +#define CMDQ_SYNC_TOKEN_POLL_MONITOR 858 +#define CMDQ_TOKEN_TPR_LOCK 859 +#define CMDQ_SYNC_TOKEN_MSS 860 +#define CMDQ_SYNC_TOKEN_MSF 861 +#define CMDQ_SYNC_TOKEN_GPR_SET_0 884 +#define CMDQ_SYNC_TOKEN_GPR_SET_1 885 +#define CMDQ_SYNC_TOKEN_GPR_SET_2 886 +#define CMDQ_SYNC_TOKEN_GPR_SET_3 887 +#define CMDQ_SYNC_TOKEN_GPR_SET_4 888 +#define CMDQ_SYNC_RESOURCE_WROT0 889 +#define CMDQ_SYNC_RESOURCE_WROT1 890 +#define CMDQ_SYNC_TOKEN_DISP_VA_START 1012 +#define CMDQ_SYNC_TOKEN_DISP_VA_END 1013 + +#endif diff --git a/include/dt-bindings/memory/mediatek,mt8365-larb-port.h b/include/dt-bindings/memory/mediatek,mt8365-larb-port.h new file mode 100644 index 000000000000..56d5a5dd519e --- /dev/null +++ b/include/dt-bindings/memory/mediatek,mt8365-larb-port.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022 MediaTek Inc. 
+ * Author: Yong Wu <yong.wu@mediatek.com> + */ +#ifndef _DT_BINDINGS_MEMORY_MT8365_LARB_PORT_H_ +#define _DT_BINDINGS_MEMORY_MT8365_LARB_PORT_H_ + +#include <dt-bindings/memory/mtk-memory-port.h> + +#define M4U_LARB0_ID 0 +#define M4U_LARB1_ID 1 +#define M4U_LARB2_ID 2 +#define M4U_LARB3_ID 3 + +/* larb0 */ +#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0) +#define M4U_PORT_DISP_OVL0_2L MTK_M4U_ID(M4U_LARB0_ID, 1) +#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2) +#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 3) +#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 4) +#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5) +#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB0_ID, 6) +#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB0_ID, 7) +#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB0_ID, 8) +#define M4U_PORT_DISP_FAKE0 MTK_M4U_ID(M4U_LARB0_ID, 9) +#define M4U_PORT_APU_READ MTK_M4U_ID(M4U_LARB0_ID, 10) +#define M4U_PORT_APU_WRITE MTK_M4U_ID(M4U_LARB0_ID, 11) + +/* larb1 */ +#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB1_ID, 0) +#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB1_ID, 1) +#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 2) +#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB1_ID, 3) +#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB1_ID, 4) +#define M4U_PORT_VENC_NBM_RDMA MTK_M4U_ID(M4U_LARB1_ID, 5) +#define M4U_PORT_VENC_NBM_RDMA_LITE MTK_M4U_ID(M4U_LARB1_ID, 6) +#define M4U_PORT_JPGENC_Y_RDMA MTK_M4U_ID(M4U_LARB1_ID, 7) +#define M4U_PORT_JPGENC_C_RDMA MTK_M4U_ID(M4U_LARB1_ID, 8) +#define M4U_PORT_JPGENC_Q_TABLE MTK_M4U_ID(M4U_LARB1_ID, 9) +#define M4U_PORT_JPGENC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 10) +#define M4U_PORT_JPGDEC_WDMA MTK_M4U_ID(M4U_LARB1_ID, 11) +#define M4U_PORT_JPGDEC_BSDMA MTK_M4U_ID(M4U_LARB1_ID, 12) +#define M4U_PORT_VENC_NBM_WDMA MTK_M4U_ID(M4U_LARB1_ID, 13) +#define M4U_PORT_VENC_NBM_WDMA_LITE MTK_M4U_ID(M4U_LARB1_ID, 14) +#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB1_ID, 15) +#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 16) +#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB1_ID, 17) +#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB1_ID, 18) + +/* larb2 */ +#define M4U_PORT_CAM_IMGO MTK_M4U_ID(M4U_LARB2_ID, 0) +#define M4U_PORT_CAM_RRZO MTK_M4U_ID(M4U_LARB2_ID, 1) +#define M4U_PORT_CAM_AAO MTK_M4U_ID(M4U_LARB2_ID, 2) +#define M4U_PORT_CAM_LCS MTK_M4U_ID(M4U_LARB2_ID, 3) +#define M4U_PORT_CAM_ESFKO MTK_M4U_ID(M4U_LARB2_ID, 4) +#define M4U_PORT_CAM_CAM_SV0 MTK_M4U_ID(M4U_LARB2_ID, 5) +#define M4U_PORT_CAM_CAM_SV1 MTK_M4U_ID(M4U_LARB2_ID, 6) +#define M4U_PORT_CAM_LSCI MTK_M4U_ID(M4U_LARB2_ID, 7) +#define M4U_PORT_CAM_LSCI_D MTK_M4U_ID(M4U_LARB2_ID, 8) +#define M4U_PORT_CAM_AFO MTK_M4U_ID(M4U_LARB2_ID, 9) +#define M4U_PORT_CAM_SPARE MTK_M4U_ID(M4U_LARB2_ID, 10) +#define M4U_PORT_CAM_BPCI MTK_M4U_ID(M4U_LARB2_ID, 11) +#define M4U_PORT_CAM_BPCI_D MTK_M4U_ID(M4U_LARB2_ID, 12) +#define M4U_PORT_CAM_UFDI MTK_M4U_ID(M4U_LARB2_ID, 13) +#define M4U_PORT_CAM_IMGI MTK_M4U_ID(M4U_LARB2_ID, 14) +#define M4U_PORT_CAM_IMG2O MTK_M4U_ID(M4U_LARB2_ID, 15) +#define M4U_PORT_CAM_IMG3O MTK_M4U_ID(M4U_LARB2_ID, 16) +#define M4U_PORT_CAM_WPE0_I MTK_M4U_ID(M4U_LARB2_ID, 17) +#define M4U_PORT_CAM_WPE1_I MTK_M4U_ID(M4U_LARB2_ID, 18) +#define M4U_PORT_CAM_WPE_O MTK_M4U_ID(M4U_LARB2_ID, 19) +#define M4U_PORT_CAM_FD0_I MTK_M4U_ID(M4U_LARB2_ID, 20) +#define M4U_PORT_CAM_FD1_I MTK_M4U_ID(M4U_LARB2_ID, 21) +#define M4U_PORT_CAM_FD0_O MTK_M4U_ID(M4U_LARB2_ID, 22) +#define M4U_PORT_CAM_FD1_O MTK_M4U_ID(M4U_LARB2_ID, 23) + +/* larb3 */ +#define 
M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB3_ID, 0) +#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB3_ID, 1) +#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB3_ID, 2) +#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB3_ID, 3) +#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB3_ID, 4) +#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB3_ID, 5) +#define M4U_PORT_HW_VDEC_TILE_EXT MTK_M4U_ID(M4U_LARB3_ID, 6) +#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB3_ID, 7) +#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB3_ID, 8) +#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB3_ID, 9) +#define M4U_PORT_HW_VDEC_RG_CTRL_DMA_EXT MTK_M4U_ID(M4U_LARB3_ID, 10) + +#endif diff --git a/include/dt-bindings/phy/phy-qcom-qmp.h b/include/dt-bindings/phy/phy-qcom-qmp.h new file mode 100644 index 000000000000..4edec4c5b224 --- /dev/null +++ b/include/dt-bindings/phy/phy-qcom-qmp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* + * Qualcomm QMP PHY constants + * + * Copyright (C) 2022 Linaro Limited + */ + +#ifndef _DT_BINDINGS_PHY_QMP +#define _DT_BINDINGS_PHY_QMP + +/* QMP USB4-USB3-DP clocks */ +#define QMP_USB43DP_USB3_PIPE_CLK 0 +#define QMP_USB43DP_DP_LINK_CLK 1 +#define QMP_USB43DP_DP_VCO_DIV_CLK 2 + +/* QMP USB4-USB3-DP PHYs */ +#define QMP_USB43DP_USB3_PHY 0 +#define QMP_USB43DP_DP_PHY 1 + +#endif /* _DT_BINDINGS_PHY_QMP */ diff --git a/include/dt-bindings/power/xlnx-zynqmp-power.h b/include/dt-bindings/power/xlnx-zynqmp-power.h index 0d9a412fd5e0..618024cbb20d 100644 --- a/include/dt-bindings/power/xlnx-zynqmp-power.h +++ b/include/dt-bindings/power/xlnx-zynqmp-power.h @@ -6,6 +6,12 @@ #ifndef _DT_BINDINGS_ZYNQMP_POWER_H #define _DT_BINDINGS_ZYNQMP_POWER_H +#define PD_RPU_0 7 +#define PD_RPU_1 8 +#define PD_R5_0_ATCM 15 +#define PD_R5_0_BTCM 16 +#define PD_R5_1_ATCM 17 +#define PD_R5_1_BTCM 18 #define PD_USB_0 22 #define PD_USB_1 23 #define PD_TTC_0 24 diff --git a/include/dt-bindings/reset/mt8188-resets.h b/include/dt-bindings/reset/mt8188-resets.h new file mode 100644 index 000000000000..377cdfda82a9 --- /dev/null +++ b/include/dt-bindings/reset/mt8188-resets.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)*/ +/* + * Copyright (c) 2022 MediaTek Inc. 
+ * Author: Runyang Chen <runyang.chen@mediatek.com> + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8188 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8188 + +#define MT8188_TOPRGU_CONN_MCU_SW_RST 0 +#define MT8188_TOPRGU_INFRA_GRST_SW_RST 1 +#define MT8188_TOPRGU_IPU0_SW_RST 2 +#define MT8188_TOPRGU_IPU1_SW_RST 3 +#define MT8188_TOPRGU_IPU2_SW_RST 4 +#define MT8188_TOPRGU_AUD_ASRC_SW_RST 5 +#define MT8188_TOPRGU_INFRA_SW_RST 6 +#define MT8188_TOPRGU_MMSYS_SW_RST 7 +#define MT8188_TOPRGU_MFG_SW_RST 8 +#define MT8188_TOPRGU_VENC_SW_RST 9 +#define MT8188_TOPRGU_VDEC_SW_RST 10 +#define MT8188_TOPRGU_CAM_VCORE_SW_RST 11 +#define MT8188_TOPRGU_SCP_SW_RST 12 +#define MT8188_TOPRGU_APMIXEDSYS_SW_RST 13 +#define MT8188_TOPRGU_AUDIO_SW_RST 14 +#define MT8188_TOPRGU_CAMSYS_SW_RST 15 +#define MT8188_TOPRGU_MJC_SW_RST 16 +#define MT8188_TOPRGU_PERI_SW_RST 17 +#define MT8188_TOPRGU_PERI_AO_SW_RST 18 +#define MT8188_TOPRGU_PCIE_SW_RST 19 +#define MT8188_TOPRGU_ADSPSYS_SW_RST 21 +#define MT8188_TOPRGU_DPTX_SW_RST 22 +#define MT8188_TOPRGU_SPMI_MST_SW_RST 23 + +#define MT8188_TOPRGU_SW_RST_NUM 24 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8188 */ diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index c0b868ce6a8f..628775334d5e 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -11,7 +11,6 @@ #include <asm/perf_event.h> #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) -#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1) #ifdef CONFIG_HW_PERF_EVENTS @@ -29,7 +28,6 @@ struct kvm_pmu { struct irq_work overflow_work; struct kvm_pmu_events events; struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS]; - DECLARE_BITMAP(chained, ARMV8_PMU_MAX_COUNTER_PAIRS); int irq_num; bool created; bool irq_level; @@ -91,6 +89,14 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); vcpu->arch.pmu.events = *kvm_get_pmu_events(); \ } while (0) +/* + * Evaluates as true when emulating PMUv3p5, and false otherwise. + */ +#define kvm_pmu_is_3p5(vcpu) \ + (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5) + +u8 kvm_arm_pmu_get_pmuver_limit(void); + #else struct kvm_pmu { }; @@ -153,9 +159,14 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) } #define kvm_vcpu_has_pmu(vcpu) ({ false; }) +#define kvm_pmu_is_3p5(vcpu) ({ false; }) static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} +static inline u8 kvm_arm_pmu_get_pmuver_limit(void) +{ + return 0; +} #endif diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4df9e73a8bb5..9270cd87da3f 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -263,6 +263,7 @@ struct vgic_dist { struct vgic_io_device dist_iodev; bool has_its; + bool save_its_tables_in_progress; /* * Contains the attributes and gpa of the LPI configuration table. 
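[Editorial aside: the arm_pmu.h hunk above follows a common kernel idiom worth calling out: each helper added under CONFIG_HW_PERF_EVENTS (kvm_pmu_is_3p5(), kvm_arm_pmu_get_pmuver_limit()) gains a trivial stub in the #else branch, so call sites compile unchanged whether or not PMU support is built in. A standalone, compilable illustration of the idiom — the names and the PMUVer value 6 for v3p5 are stand-ins, not kernel code:]

	#include <stdio.h>

	#define CONFIG_HW_PERF_EVENTS 1	/* flip to 0 to exercise the stubs */

	struct vcpu { int pmuver; };

	#if CONFIG_HW_PERF_EVENTS
	#define pmu_is_3p5(vcpu)	((vcpu)->pmuver >= 6)
	static unsigned char pmu_version_limit(void) { return 6; }
	#else
	#define pmu_is_3p5(vcpu)	({ (void)(vcpu); 0; })		/* stub */
	static unsigned char pmu_version_limit(void) { return 0; }	/* stub */
	#endif

	int main(void)
	{
		struct vcpu v = { .pmuver = 6 };

		/* The same call site builds in both configurations. */
		printf("3p5=%d limit=%u\n", pmu_is_3p5(&v), pmu_version_limit());
		return 0;
	}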
diff --git a/include/linux/bio.h b/include/linux/bio.h index b231a665682a..22078a28d7cb 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -782,8 +782,7 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) static inline void bio_clear_polled(struct bio *bio) { - /* can't support alloc cache if we turn off polling */ - bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE); + bio->bi_opf &= ~REQ_POLLED; } struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index e3a0be2c90ad..3aa3640f8c18 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -77,4 +77,13 @@ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) + +/* + * Compile time check that field has an expected offset + */ +#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \ + BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \ + "Offset of " #field " in " #type " has changed.") + + #endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 973a1bfd7ef5..947a60b801db 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -236,6 +236,7 @@ static inline void *offset_to_ptr(const int *off) * bool and also pointer types. */ #define is_signed_type(type) (((type)(-1)) < (__force type)1) +#define is_unsigned_type(type) (!is_signed_type(type)) /* * This is needed in functions which generate the stack canary, see diff --git a/include/linux/container_of.h b/include/linux/container_of.h index 2f4944b791b8..1d898f9158b4 100644 --- a/include/linux/container_of.h +++ b/include/linux/container_of.h @@ -13,6 +13,7 @@ * @type: the type of the container struct this is embedded in. * @member: the name of the member within the struct. * + * WARNING: any const qualifier of @ptr is lost. */ #define container_of(ptr, type, member) ({ \ void *__mptr = (void *)(ptr); \ @@ -22,19 +23,16 @@ ((type *)(__mptr - offsetof(type, member))); }) /** - * container_of_safe - cast a member of a structure out to the containing structure - * @ptr: the pointer to the member. - * @type: the type of the container struct this is embedded in. - * @member: the name of the member within the struct. - * - * If IS_ERR_OR_NULL(ptr), ptr is returned unchanged. + * container_of_const - cast a member of a structure out to the containing + * structure and preserve the const-ness of the pointer + * @ptr: the pointer to the member + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. */ -#define container_of_safe(ptr, type, member) ({ \ - void *__mptr = (void *)(ptr); \ - static_assert(__same_type(*(ptr), ((type *)0)->member) || \ - __same_type(*(ptr), void), \ - "pointer type mismatch in container_of_safe()"); \ - IS_ERR_OR_NULL(__mptr) ? 
ERR_CAST(__mptr) : \ - ((type *)(__mptr - offsetof(type, member))); }) +#define container_of_const(ptr, type, member) \ + _Generic(ptr, \ + const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)),\ + default: ((type *)container_of(ptr, type, member)) \ + ) #endif /* _LINUX_CONTAINER_OF_H */ diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 2324ab6f1846..5d1e961f810e 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -714,11 +714,6 @@ static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags) tfm->crt_flags &= ~flags; } -static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm) -{ - return tfm->__crt_ctx; -} - static inline unsigned int crypto_tfm_ctx_alignment(void) { struct crypto_tfm *tfm; diff --git a/include/linux/device.h b/include/linux/device.h index c90a444be1c4..44e3acae7b36 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -197,9 +197,9 @@ void devres_remove_group(struct device *dev, void *id); int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ -void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc; +void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __alloc_size(2); void *devm_krealloc(struct device *dev, void *ptr, size_t size, - gfp_t gfp) __must_check; + gfp_t gfp) __must_check __realloc_size(3); __printf(3, 0) char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, va_list ap) __malloc; __printf(3, 4) char *devm_kasprintf(struct device *dev, gfp_t gfp, @@ -226,7 +226,8 @@ static inline void *devm_kcalloc(struct device *dev, void devm_kfree(struct device *dev, const void *p); char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc; const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp); -void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp); +void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) + __realloc_size(3); unsigned long devm_get_free_pages(struct device *dev, gfp_t gfp_mask, unsigned int order); @@ -678,10 +679,7 @@ struct device_link { bool supplier_preactivated; /* Owned by consumer probe. 
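[Editorial aside: container_of_const() above is the interesting piece of this run of hunks: _Generic selects a const-qualified return type whenever the member pointer is itself const, which is what lets kobj_to_dev() just below become a macro without silently stripping const from its argument. A standalone sketch of the dispatch, with a simplified container_of (the kernel version also type-checks the member); builds with C11 plus typeof, e.g. gcc -std=gnu11:]

	#include <stddef.h>
	#include <stdio.h>

	struct kobj { int refcount; };
	struct dev { int id; struct kobj kobj; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	#define container_of_const(ptr, type, member)			\
		_Generic(ptr,						\
			const typeof(*(ptr)) *:				\
				((const type *)container_of(ptr, type, member)), \
			default: ((type *)container_of(ptr, type, member)))

	int main(void)
	{
		struct dev d = { .id = 42 };
		const struct kobj *k = &d.kobj;

		/* A const member pointer yields a const container pointer. */
		const struct dev *back = container_of_const(k, struct dev, kobj);

		printf("%d\n", back->id);
		return 0;
	}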
*/ }; -static inline struct device *kobj_to_dev(struct kobject *kobj) -{ - return container_of(kobj, struct device, kobj); -} +#define kobj_to_dev(__kobj) container_of_const(__kobj, struct device, kobj) /** * device_iommu_mapped - Returns true when the device DMA is translated @@ -1044,12 +1042,8 @@ static inline void device_remove_group(struct device *dev, int __must_check devm_device_add_groups(struct device *dev, const struct attribute_group **groups); -void devm_device_remove_groups(struct device *dev, - const struct attribute_group **groups); int __must_check devm_device_add_group(struct device *dev, const struct attribute_group *grp); -void devm_device_remove_group(struct device *dev, - const struct attribute_group *grp); /* * Platform "fixup" functions - allow the platform to have their say diff --git a/include/linux/device/class.h b/include/linux/device/class.h index e61ec5502019..42cc3fb44a84 100644 --- a/include/linux/device/class.h +++ b/include/linux/device/class.h @@ -59,8 +59,8 @@ struct class { const struct attribute_group **dev_groups; struct kobject *dev_kobj; - int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); - char *(*devnode)(struct device *dev, umode_t *mode); + int (*dev_uevent)(const struct device *dev, struct kobj_uevent_env *env); + char *(*devnode)(const struct device *dev, umode_t *mode); void (*class_release)(struct class *class); void (*dev_release)(struct device *dev); @@ -68,9 +68,9 @@ struct class { int (*shutdown_pre)(struct device *dev); const struct kobj_ns_type_operations *ns_type; - const void *(*namespace)(struct device *dev); + const void *(*namespace)(const struct device *dev); - void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid); + void (*get_ownership)(const struct device *dev, kuid_t *uid, kgid_t *gid); const struct dev_pm_ops *pm; diff --git a/include/linux/filter.h b/include/linux/filter.h index bf701976056e..ccc4a4a58c72 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -860,8 +860,7 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp) static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { set_vm_flush_reset_perms(hdr); - set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT); - set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT); + set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT); } int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index fac37680ffe7..b986e267d149 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -12,6 +12,7 @@ #ifndef __FIRMWARE_ZYNQMP_H__ #define __FIRMWARE_ZYNQMP_H__ +#include <linux/types.h> #include <linux/err.h> @@ -87,6 +88,8 @@ enum pm_api_cb_id { enum pm_api_id { PM_GET_API_VERSION = 1, PM_REGISTER_NOTIFIER = 5, + PM_FORCE_POWERDOWN = 8, + PM_REQUEST_WAKEUP = 10, PM_SYSTEM_SHUTDOWN = 12, PM_REQUEST_NODE = 13, PM_RELEASE_NODE = 14, @@ -135,6 +138,10 @@ enum pm_ret_status { }; enum pm_ioctl_id { + IOCTL_GET_RPU_OPER_MODE = 0, + IOCTL_SET_RPU_OPER_MODE = 1, + IOCTL_RPU_BOOT_ADDR_CONFIG = 2, + IOCTL_TCM_COMB_CONFIG = 3, IOCTL_SET_TAPDELAY_BYPASS = 4, IOCTL_SD_DLL_RESET = 6, IOCTL_SET_SD_TAPDELAY = 7, @@ -176,6 +183,21 @@ enum pm_query_id { PM_QID_CLOCK_GET_MAX_DIVISOR = 13, }; +enum rpu_oper_mode { + PM_RPU_MODE_LOCKSTEP = 0, + PM_RPU_MODE_SPLIT = 1, +}; + +enum rpu_boot_mem { + PM_RPU_BOOTMEM_LOVEC = 0, + PM_RPU_BOOTMEM_HIVEC = 1, +}; + +enum rpu_tcm_comb { + 
PM_RPU_TCM_SPLIT = 0, + PM_RPU_TCM_COMB = 1, +}; + enum zynqmp_pm_reset_action { PM_RESET_ACTION_RELEASE = 0, PM_RESET_ACTION_ASSERT = 1, @@ -516,6 +538,15 @@ int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id); int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value); int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload); int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset); +int zynqmp_pm_force_pwrdwn(const u32 target, + const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_request_wake(const u32 node, + const bool set_addr, + const u64 address, + const enum zynqmp_pm_request_ack ack); +int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode); +int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1); +int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1); int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value); int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config, u32 value); @@ -795,6 +826,35 @@ static inline int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) return -ENODEV; } +static inline int zynqmp_pm_force_pwrdwn(const u32 target, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_request_wake(const u32 node, + const bool set_addr, + const u64 address, + const enum zynqmp_pm_request_ack ack) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1) +{ + return -ENODEV; +} + +static inline int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1) +{ + return -ENODEV; +} + static inline int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value) diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 1067a8450826..7cad8bb031e9 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -18,7 +18,7 @@ void __write_overflow_field(size_t avail, size_t wanted) __compiletime_warning(" #define __compiletime_strlen(p) \ ({ \ - unsigned char *__p = (unsigned char *)(p); \ + char *__p = (char *)(p); \ size_t __ret = SIZE_MAX; \ size_t __p_size = __member_size(p); \ if (__p_size != SIZE_MAX && \ @@ -119,13 +119,13 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size) * Instead, please choose an alternative, so that the expectation * of @p's contents is unambiguous: * - * +--------------------+-----------------+------------+ - * | @p needs to be: | padded to @size | not padded | - * +====================+=================+============+ - * | NUL-terminated | strscpy_pad() | strscpy() | - * +--------------------+-----------------+------------+ - * | not NUL-terminated | strtomem_pad() | strtomem() | - * +--------------------+-----------------+------------+ + * +--------------------+--------------------+------------+ + * | **p** needs to be: | padded to **size** | not padded | + * +====================+====================+============+ + * | NUL-terminated | strscpy_pad() | strscpy() | + * +--------------------+--------------------+------------+ + * | not NUL-terminated | strtomem_pad() | strtomem() | + * +--------------------+--------------------+------------+ * * Note strscpy*()'s differing return values for detecting truncation, * and strtomem*()'s expectation that the destination is marked with @@ -144,6 +144,21 @@ char *strncpy(char * const POS p, const char *q, __kernel_size_t size) return __underlying_strncpy(p, 
q, size); } +/** + * strcat - Append a string to an existing string + * + * @p: pointer to NUL-terminated string to append to + * @q: pointer to NUL-terminated source string to append from + * + * Do not use this function. While FORTIFY_SOURCE tries to avoid + * read and write overflows, this is only possible when the + * destination buffer size is known to the compiler. Prefer + * building the string with formatting, via scnprintf() or similar. + * At the very least, use strncat(). + * + * Returns @p. + * + */ __FORTIFY_INLINE __diagnose_as(__builtin_strcat, 1, 2) char *strcat(char * const POS p, const char *q) { @@ -157,6 +172,16 @@ char *strcat(char * const POS p, const char *q) } extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen); +/** + * strnlen - Return bounded count of characters in a NUL-terminated string + * + * @p: pointer to NUL-terminated string to count. + * @maxlen: maximum number of characters to count. + * + * Returns number of characters in @p (NOT including the final NUL), or + * @maxlen, if no NUL has been found up to there. + * + */ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size_t maxlen) { size_t p_size = __member_size(p); @@ -182,6 +207,19 @@ __FORTIFY_INLINE __kernel_size_t strnlen(const char * const POS p, __kernel_size * possible for strlen() to be used on compile-time strings for use in * static initializers (i.e. as a constant expression). */ +/** + * strlen - Return count of characters in a NUL-terminated string + * + * @p: pointer to NUL-terminated string to count. + * + * Do not use this function unless the string length is known at + * compile-time. When @p is unterminated, this function may crash + * or return unexpected counts that could lead to memory content + * exposures. Prefer strnlen(). + * + * Returns number of characters in @p (NOT including the final NUL). + * + */ #define strlen(p) \ __builtin_choose_expr(__is_constexpr(__builtin_strlen(p)), \ __builtin_strlen(p), __fortify_strlen(p)) @@ -200,8 +238,26 @@ __kernel_size_t __fortify_strlen(const char * const POS p) return ret; } -/* defined after fortified strlen to reuse it */ +/* Defined after fortified strlen() to reuse it. */ extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy); +/** + * strlcpy - Copy a string into another string buffer + * + * @p: pointer to destination of copy + * @q: pointer to NUL-terminated source string to copy + * @size: maximum number of bytes to write at @p + * + * If strlen(@q) >= @size, the copy of @q will be truncated at + * @size - 1 bytes. @p will always be NUL-terminated. + * + * Do not use this function. While FORTIFY_SOURCE tries to avoid + * over-reads when calculating strlen(@q), it is still possible. + * Prefer strscpy(), though note its different return values for + * detecting truncation. + * + * Returns total number of bytes written to @p, including terminating NUL. + * + */ __FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, size_t size) { size_t p_size = __member_size(p); @@ -227,8 +283,32 @@ __FORTIFY_INLINE size_t strlcpy(char * const POS p, const char * const POS q, si return q_len; } -/* defined after fortified strnlen to reuse it */ +/* Defined after fortified strnlen() to reuse it. 
*/ extern ssize_t __real_strscpy(char *, const char *, size_t) __RENAME(strscpy); +/** + * strscpy - Copy a C-string into a sized buffer + * + * @p: Where to copy the string to + * @q: Where to copy the string from + * @size: Size of destination buffer + * + * Copy the source string @p, or as much of it as fits, into the destination + * @q buffer. The behavior is undefined if the string buffers overlap. The + * destination @p buffer is always NUL terminated, unless it's zero-sized. + * + * Preferred to strlcpy() since the API doesn't require reading memory + * from the source @q string beyond the specified @size bytes, and since + * the return value is easier to error-check than strlcpy()'s. + * In addition, the implementation is robust to the string changing out + * from underneath it, unlike the current strlcpy() implementation. + * + * Preferred to strncpy() since it always returns a valid string, and + * doesn't unnecessarily force the tail of the destination buffer to be + * zero padded. If padding is desired please use strscpy_pad(). + * + * Returns the number of characters copied in @p (not including the + * trailing %NUL) or -E2BIG if @size is 0 or the copy of @q was truncated. + */ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, size_t size) { size_t len; @@ -247,6 +327,16 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s if (__compiletime_lessthan(p_size, size)) __write_overflow(); + /* Short-circuit for compile-time known-safe lengths. */ + if (__compiletime_lessthan(p_size, SIZE_MAX)) { + len = __compiletime_strlen(q); + + if (len < SIZE_MAX && __compiletime_lessthan(len, size)) { + __underlying_memcpy(p, q, len + 1); + return len; + } + } + /* * This call protects from read overflow, because len will default to q * length if it smaller than size. @@ -274,7 +364,26 @@ __FORTIFY_INLINE ssize_t strscpy(char * const POS p, const char * const POS q, s return __real_strscpy(p, q, len); } -/* defined after fortified strlen and strnlen to reuse them */ +/** + * strncat - Append a string to an existing string + * + * @p: pointer to NUL-terminated string to append to + * @q: pointer to source string to append from + * @count: Maximum bytes to read from @q + * + * Appends at most @count bytes from @q (stopping at the first + * NUL byte) after the NUL-terminated string at @p. @p will be + * NUL-terminated. + * + * Do not use this function. While FORTIFY_SOURCE tries to avoid + * read and write overflows, this is only possible when the sizes + * of @p and @q are known to the compiler. Prefer building the + * string with formatting, via scnprintf() or similar. + * + * Returns @p. + * + */ +/* Defined after fortified strlen() and strnlen() to reuse them. 
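[Editorial aside: the new kernel-doc above repeatedly points at the "differing return values" of the copy routines; concretely, strlcpy() reports strlen of the source so truncation is inferred by comparison against the buffer size, while strscpy() reports -E2BIG directly. A short kernel-style sketch of the preferred call pattern; the helper name is made up:]

	#include <linux/errno.h>
	#include <linux/printk.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy a NUL-terminated name, warn on truncation. */
	static void copy_name(char *dst, size_t dst_size, const char *name)
	{
		ssize_t len = strscpy(dst, name, dst_size);

		if (len == -E2BIG)	/* also returned when dst_size == 0 */
			pr_warn("name truncated (buf %zu)\n", dst_size);
	}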
*/ __FORTIFY_INLINE __diagnose_as(__builtin_strncat, 1, 2, 3) char *strncat(char * const POS p, const char * const POS q, __kernel_size_t count) { @@ -573,7 +682,8 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) return __real_memchr_inv(p, c, size); } -extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup); +extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup) + __realloc_size(2); __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp) { size_t p_size = __struct_size(p); @@ -585,6 +695,20 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp return __real_kmemdup(p, size, gfp); } +/** + * strcpy - Copy a string into another string buffer + * + * @p: pointer to destination of copy + * @q: pointer to NUL-terminated source string to copy + * + * Do not use this function. While FORTIFY_SOURCE tries to avoid + * overflows, this is only possible when the sizes of @q and @p are + * known to the compiler. Prefer strscpy(), though note its different + * return values for detecting truncation. + * + * Returns @p. + * + */ /* Defined after fortified strlen to reuse it. */ __FORTIFY_INLINE __diagnose_as(__builtin_strcpy, 1, 2) char *strcpy(char * const POS p, const char * const POS q) diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 36460ced060b..45da8f137fe5 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -581,27 +581,6 @@ struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev, flags, label); } -static inline -struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, - const char *con_id, int index, - struct fwnode_handle *child, - enum gpiod_flags flags, - const char *label) -{ - return devm_fwnode_gpiod_get_index(dev, child, con_id, index, - flags, label); -} - -static inline -struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, - const char *con_id, - struct fwnode_handle *child, - enum gpiod_flags flags, - const char *label) -{ - return devm_fwnode_gpiod_get_index(dev, child, con_id, 0, flags, label); -} - #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO) struct device_node; diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h new file mode 100644 index 000000000000..6c75c8bd44a0 --- /dev/null +++ b/include/linux/gpio/property.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0+ +#ifndef __LINUX_GPIO_PROPERTY_H +#define __LINUX_GPIO_PROPERTY_H + +#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */ +#include <linux/property.h> + +#define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \ + PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_) + +#endif /* __LINUX_GPIO_PROPERTY_H */ diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index e230c7c46110..be3aedaa96dc 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -384,14 +384,14 @@ struct hisi_qp { static inline int q_num_set(const char *val, const struct kernel_param *kp, unsigned int device) { - struct pci_dev *pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, - device, NULL); + struct pci_dev *pdev; u32 n, q_num; int ret; if (!val) return -EINVAL; + pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); if (!pdev) { q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); pr_info("No device found currently, suppose queue number is %u\n", @@ -401,6 +401,8 @@ static inline int q_num_set(const char *val, 
const struct kernel_param *kp, q_num = QM_QNUM_V1; else q_num = QM_QNUM_V2; + + pci_dev_put(pdev); } ret = kstrtou32(val, 10, &n); @@ -469,11 +471,11 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen); int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs); void hisi_qm_dev_err_init(struct hisi_qm *qm); void hisi_qm_dev_err_uninit(struct hisi_qm *qm); -int hisi_qm_diff_regs_init(struct hisi_qm *qm, - struct dfx_diff_registers *dregs, int reg_len); -void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len); +int hisi_qm_regs_debugfs_init(struct hisi_qm *qm, + struct dfx_diff_registers *dregs, u32 reg_len); +void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len); void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s, - struct dfx_diff_registers *dregs, int regs_len); + struct dfx_diff_registers *dregs, u32 regs_len); pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state); diff --git a/include/linux/hpet.h b/include/linux/hpet.h index 8604564b985d..21e69eaf7a36 100644 --- a/include/linux/hpet.h +++ b/include/linux/hpet.h @@ -30,7 +30,7 @@ struct hpet { unsigned long _hpet_compare; } _u1; u64 hpet_fsb[2]; /* FSB route */ - } hpet_timers[1]; + } hpet_timers[]; }; #define hpet_mc _u0._hpet_mc diff --git a/include/linux/htcpld.h b/include/linux/htcpld.h deleted file mode 100644 index 5f8ac9b1d724..000000000000 --- a/include/linux/htcpld.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_HTCPLD_H -#define __LINUX_HTCPLD_H - -struct htcpld_chip_platform_data { - unsigned int addr; - unsigned int reset; - unsigned int num_gpios; - unsigned int gpio_out_base; - unsigned int gpio_in_base; - unsigned int irq_base; - unsigned int num_irqs; -}; - -struct htcpld_core_platform_data { - unsigned int i2c_adapter_id; - - struct htcpld_chip_platform_data *chip; - unsigned int num_chip; -}; - -#endif /* __LINUX_HTCPLD_H */ - diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index 77c2885c4c13..8a3115516a1b 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -34,7 +34,7 @@ * @priv: Private data, for use by the RNG driver. * @quality: Estimation of true entropy in RNG's bitstream * (in bits of entropy per 1024 bits of input; - * valid values: 1 to 1024, or 0 for unknown). + * valid values: 1 to 1024, or 0 for maximum). */ struct hwrng { const char *name; diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h index 8242e13e7b0b..1c997abe868c 100644 --- a/include/linux/i3c/device.h +++ b/include/linux/i3c/device.h @@ -287,12 +287,15 @@ static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv, #define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \ module_driver(__i3cdrv, \ i3c_i2c_driver_register, \ - i3c_i2c_driver_unregister) + i3c_i2c_driver_unregister, \ + __i2cdrv) int i3c_device_do_priv_xfers(struct i3c_device *dev, struct i3c_priv_xfer *xfers, int nxfers); +int i3c_device_do_setdasa(struct i3c_device *dev); + void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info); struct i3c_ibi_payload { diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h index e2ca8ea23e19..89c3fd7c29ca 100644 --- a/include/linux/iio/buffer_impl.h +++ b/include/linux/iio/buffer_impl.h @@ -123,7 +123,7 @@ struct iio_buffer { struct attribute_group buffer_group; /* @attrs: Standard attributes of the buffer. 
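The q_num_set() fix above defers the pci_get_device() lookup until after the argument check and balances it with pci_dev_put(), since the lookup returns a refcounted struct pci_dev. A minimal sketch of that get/put pattern, with a hypothetical helper and device ID:

#include <linux/pci.h>

#define MY_ACCEL_DEV_ID 0x1234	/* hypothetical PCI device ID */

static bool accel_device_present(void)
{
	struct pci_dev *pdev;

	pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, MY_ACCEL_DEV_ID, NULL);
	if (!pdev)
		return false;

	/* ... inspect pdev (revision, BARs, ...) while holding the ref ... */

	pci_dev_put(pdev);	/* balance the reference taken by the lookup */
	return true;
}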
*/ - const struct attribute **attrs; + const struct iio_dev_attr **attrs; /* @demux_bounce: Buffer for doing gather from incoming scan. */ void *demux_bounce; diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index db4a1b260348..f5f3ee57bc70 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -224,8 +224,6 @@ struct st_sensor_settings { * @mount_matrix: The mounting matrix of the sensor. * @sensor_settings: Pointer to the specific sensor settings in use. * @current_fullscale: Maximum range of measure by the sensor. - * @vdd: Pointer to sensor's Vdd power supply - * @vdd_io: Pointer to sensor's Vdd-IO power supply * @regmap: Pointer to specific sensor regmap configuration. * @enabled: Status of the sensor (false->off, true->on). * @odr: Output data rate of the sensor [Hz]. @@ -244,8 +242,6 @@ struct st_sensor_data { struct iio_mount_matrix mount_matrix; struct st_sensor_settings *sensor_settings; struct st_sensor_fullscale_avl *current_fullscale; - struct regulator *vdd; - struct regulator *vdd_io; struct regmap *regmap; bool enabled; diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h index a602fe7b84fa..74b6d1cadc86 100644 --- a/include/linux/iio/gyro/itg3200.h +++ b/include/linux/iio/gyro/itg3200.h @@ -102,6 +102,8 @@ struct itg3200 { struct i2c_client *i2c; struct iio_trigger *trig; struct iio_mount_matrix orientation; + /* lock to protect against multiple access to the device */ + struct mutex lock; }; enum ITG3200_SCAN_INDEX { diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index d1f8b30a7c8b..5aec3945555b 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -11,6 +11,7 @@ * checked by device drivers but should be considered * read-only as this is a core internal bit * @driver_module: used to make it harder to undercut users + * @mlock: lock used to prevent simultaneous device state changes * @mlock_key: lockdep class for iio_dev lock * @info_exist_lock: lock to prevent use during removal * @trig_readonly: mark the current trigger immutable @@ -43,6 +44,7 @@ struct iio_dev_opaque { int currentmode; int id; struct module *driver_module; + struct mutex mlock; struct lock_class_key mlock_key; struct mutex info_exist_lock; bool trig_readonly; diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index f0ec8a5e5a7a..8e0afaaa3f75 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -548,8 +548,6 @@ struct iio_buffer_setup_ops { * and owner * @buffer: [DRIVER] any buffer present * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux - * @mlock: [INTERN] lock used to prevent simultaneous device state - * changes * @available_scan_masks: [DRIVER] optional array of allowed bitmasks * @masklength: [INTERN] the length of the mask established from * channels @@ -574,7 +572,6 @@ struct iio_dev { struct iio_buffer *buffer; int scan_bytes; - struct mutex mlock; const unsigned long *available_scan_masks; unsigned masklength; @@ -629,6 +626,8 @@ int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp); int iio_device_claim_direct_mode(struct iio_dev *indio_dev); void iio_device_release_direct_mode(struct iio_dev *indio_dev); +int iio_device_claim_buffer_mode(struct iio_dev *indio_dev); +void iio_device_release_buffer_mode(struct iio_dev *indio_dev); extern struct bus_type iio_bus_type; diff --git 
a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index 515ca09764fe..dc9ea299e088 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -402,28 +402,27 @@ static inline int adis_update_bits_base(struct adis *adis, unsigned int reg, __adis_update_bits_base(adis, reg, mask, val, sizeof(val)); \ }) -int adis_enable_irq(struct adis *adis, bool enable); int __adis_check_status(struct adis *adis); int __adis_initial_startup(struct adis *adis); +int __adis_enable_irq(struct adis *adis, bool enable); -static inline int adis_check_status(struct adis *adis) +static inline int adis_enable_irq(struct adis *adis, bool enable) { int ret; mutex_lock(&adis->state_lock); - ret = __adis_check_status(adis); + ret = __adis_enable_irq(adis, enable); mutex_unlock(&adis->state_lock); return ret; } -/* locked version of __adis_initial_startup() */ -static inline int adis_initial_startup(struct adis *adis) +static inline int adis_check_status(struct adis *adis) { int ret; mutex_lock(&adis->state_lock); - ret = __adis_initial_startup(adis); + ret = __adis_check_status(adis); mutex_unlock(&adis->state_lock); return ret; diff --git a/include/linux/iio/kfifo_buf.h b/include/linux/iio/kfifo_buf.h index 8a83fb58232d..22874da0c8be 100644 --- a/include/linux/iio/kfifo_buf.h +++ b/include/linux/iio/kfifo_buf.h @@ -5,6 +5,7 @@ struct iio_buffer; struct iio_buffer_setup_ops; struct iio_dev; +struct iio_dev_attr; struct device; struct iio_buffer *iio_kfifo_allocate(void); @@ -13,7 +14,7 @@ void iio_kfifo_free(struct iio_buffer *r); int devm_iio_kfifo_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, const struct iio_buffer_setup_ops *setup_ops, - const struct attribute **buffer_attrs); + const struct iio_dev_attr **buffer_attrs); #define devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops) \ devm_iio_kfifo_buffer_setup_ext((dev), (indio_dev), (setup_ops), NULL) diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h index e51fba66de4b..de5bb125815c 100644 --- a/include/linux/iio/sysfs.h +++ b/include/linux/iio/sysfs.h @@ -97,6 +97,17 @@ struct iio_const_attr { = { .string = _string, \ .dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)} +#define IIO_STATIC_CONST_DEVICE_ATTR(_name, _string) \ + static ssize_t iio_const_dev_attr_show_##_name( \ + struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ + { \ + return sysfs_emit(buf, "%s\n", _string); \ + } \ + static IIO_DEVICE_ATTR(_name, 0444, \ + iio_const_dev_attr_show_##_name, NULL, 0) + /* Generic attributes of onetype or another */ /** diff --git a/include/linux/iio/triggered_buffer.h b/include/linux/iio/triggered_buffer.h index 7490b05fc5b2..29e1fe146879 100644 --- a/include/linux/iio/triggered_buffer.h +++ b/include/linux/iio/triggered_buffer.h @@ -5,8 +5,8 @@ #include <linux/iio/buffer.h> #include <linux/interrupt.h> -struct attribute; struct iio_dev; +struct iio_dev_attr; struct iio_buffer_setup_ops; int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, @@ -14,7 +14,7 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev, irqreturn_t (*thread)(int irq, void *p), enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *setup_ops, - const struct attribute **buffer_attrs); + const struct iio_dev_attr **buffer_attrs); void iio_triggered_buffer_cleanup(struct iio_dev *indio_dev); #define iio_triggered_buffer_setup(indio_dev, h, thread, setup_ops) \ @@ -28,7 +28,7 @@ int devm_iio_triggered_buffer_setup_ext(struct device *dev, irqreturn_t (*thread)(int 
irq, void *p), enum iio_buffer_direction direction, const struct iio_buffer_setup_ops *ops, - const struct attribute **buffer_attrs); + const struct iio_dev_attr **buffer_attrs); #define devm_iio_triggered_buffer_setup(dev, indio_dev, h, thread, setup_ops) \ devm_iio_triggered_buffer_setup_ext((dev), (indio_dev), (h), (thread), \ diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 207ef06ba3e1..f9a0d44f6fdb 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -13,17 +13,4 @@ #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) #define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5) -/* - * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only - * for access to kernel addresses. No IOTLB flushes are automatically done - * for kernel mappings; it is valid only for access to the kernel's static - * 1:1 mapping of physical memory — not to vmalloc or even module mappings. - * A future API addition may permit the use of such ranges, by means of an - * explicit IOTLB flush call (akin to the DMA API's unmap method). - * - * It is unlikely that we will ever hook into flush_tlb_kernel_range() to - * do such IOTLB flushes automatically. - */ -#define SVM_FLAG_SUPERVISOR_MODE BIT(0) - #endif /* __INTEL_SVM_H__ */ diff --git a/include/linux/interval_tree.h b/include/linux/interval_tree.h index 288c26f50732..2b8026a39906 100644 --- a/include/linux/interval_tree.h +++ b/include/linux/interval_tree.h @@ -27,4 +27,62 @@ extern struct interval_tree_node * interval_tree_iter_next(struct interval_tree_node *node, unsigned long start, unsigned long last); +/** + * struct interval_tree_span_iter - Find used and unused spans. + * @start_hole: Start of an interval for a hole when is_hole == 1 + * @last_hole: Inclusive end of an interval for a hole when is_hole == 1 + * @start_used: Start of a used interval when is_hole == 0 + * @last_used: Inclusive end of a used interval when is_hole == 0 + * @is_hole: 0 == used, 1 == is_hole, -1 == done iteration + * + * This iterator travels over spans in an interval tree. It does not return + * nodes but classifies each span as either a hole, where no nodes intersect, or + * used, where the span is fully covered by nodes. Each iteration step toggles between + * hole and used until the entire range is covered. The returned spans always + * fully cover the requested range. + * + * The iterator is greedy: it always returns the largest hole or used span possible, + * consolidating all consecutive nodes. + * + * Use interval_tree_span_iter_done() to detect end of iteration.
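To make the hole/used classification concrete, a minimal sketch of scanning for a free range with the new span iterator; find_free_span() and its size argument are hypothetical and assume size >= 1:

#include <linux/errno.h>
#include <linux/interval_tree.h>
#include <linux/limits.h>

/* Return in @start the first hole that can hold @size consecutive indexes. */
static int find_free_span(struct rb_root_cached *itree, unsigned long size,
			  unsigned long *start)
{
	struct interval_tree_span_iter span;

	interval_tree_for_each_span(&span, itree, 0, ULONG_MAX) {
		if (!span.is_hole)
			continue;
		if (span.last_hole - span.start_hole >= size - 1) {
			*start = span.start_hole;
			return 0;
		}
	}
	return -ENOSPC;
}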
+ */ +struct interval_tree_span_iter { + /* private: not for use by the caller */ + struct interval_tree_node *nodes[2]; + unsigned long first_index; + unsigned long last_index; + + /* public: */ + union { + unsigned long start_hole; + unsigned long start_used; + }; + union { + unsigned long last_hole; + unsigned long last_used; + }; + int is_hole; +}; + +void interval_tree_span_iter_first(struct interval_tree_span_iter *state, + struct rb_root_cached *itree, + unsigned long first_index, + unsigned long last_index); +void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter, + struct rb_root_cached *itree, + unsigned long new_index); +void interval_tree_span_iter_next(struct interval_tree_span_iter *state); + +static inline bool +interval_tree_span_iter_done(struct interval_tree_span_iter *state) +{ + return state->is_hole == -1; +} + +#define interval_tree_for_each_span(span, itree, first_index, last_index) \ + for (interval_tree_span_iter_first(span, itree, \ + first_index, last_index); \ + !interval_tree_span_iter_done(span); \ + interval_tree_span_iter_next(span)) + #endif /* _LINUX_INTERVAL_TREE_H */ diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 1f068dfdb140..1b7a44b35616 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -150,9 +150,7 @@ struct io_pgtable_cfg { /** * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. * - * @map: Map a physically contiguous memory region. * @map_pages: Map a physically contiguous range of pages of the same size. - * @unmap: Unmap a physically contiguous memory region. * @unmap_pages: Unmap a range of virtually contiguous pages of the same size. * @iova_to_phys: Translate iova to physical address. * @@ -160,13 +158,9 @@ struct io_pgtable_cfg { * the same names. */ struct io_pgtable_ops { - int (*map)(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp); int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped); - size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, - size_t size, struct iommu_iotlb_gather *gather); size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather); diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 238a03087e17..0983dfc9a203 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -49,26 +49,35 @@ struct vm_fault; * * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of * buffer heads for this mapping. + * + * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent + * rather than a file data extent. */ -#define IOMAP_F_NEW 0x01 -#define IOMAP_F_DIRTY 0x02 -#define IOMAP_F_SHARED 0x04 -#define IOMAP_F_MERGED 0x08 -#define IOMAP_F_BUFFER_HEAD 0x10 -#define IOMAP_F_ZONE_APPEND 0x20 +#define IOMAP_F_NEW (1U << 0) +#define IOMAP_F_DIRTY (1U << 1) +#define IOMAP_F_SHARED (1U << 2) +#define IOMAP_F_MERGED (1U << 3) +#define IOMAP_F_BUFFER_HEAD (1U << 4) +#define IOMAP_F_ZONE_APPEND (1U << 5) +#define IOMAP_F_XATTR (1U << 6) /* * Flags set by the core iomap code during operations: * * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size * has changed as the result of this write operation. 
+ * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file + * range it covers needs to be remapped by the high level before the operation + * can proceed. */ -#define IOMAP_F_SIZE_CHANGED 0x100 +#define IOMAP_F_SIZE_CHANGED (1U << 8) +#define IOMAP_F_STALE (1U << 9) /* * Flags from 0x1000 up are for file system specific usage: */ -#define IOMAP_F_PRIVATE 0x1000 +#define IOMAP_F_PRIVATE (1U << 12) /* @@ -89,6 +98,7 @@ struct iomap { void *inline_data; void *private; /* filesystem private */ const struct iomap_page_ops *page_ops; + u64 validity_cookie; /* used with .iomap_valid() */ }; static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos) @@ -128,6 +138,23 @@ struct iomap_page_ops { int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len); void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, struct page *page); + + /* + * Check that the cached iomap still maps correctly to the filesystem's + * internal extent map. FS internal extent maps can change while iomap + * is iterating a cached iomap, so this hook allows iomap to detect that + * the iomap needs to be refreshed during a long running write + * operation. + * + * The filesystem can store internal state (e.g. a sequence number) in + * iomap->validity_cookie when the iomap is first mapped to be able to + * detect changes between mapping time and whenever .iomap_valid() is + * called. + * + * This is called with the folio over the specified file position held + * locked by the iomap code. + */ + bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap); }; /* @@ -226,6 +253,10 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); +int iomap_file_buffered_write_punch_delalloc(struct inode *inode, + struct iomap *iomap, loff_t pos, loff_t length, ssize_t written, + int (*punch)(struct inode *inode, loff_t pos, loff_t length)); + int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 6f53ad74fa0d..46e1347bfa22 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -64,6 +64,8 @@ struct iommu_domain_geometry { #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ #define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ +#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */ + /* * These are the possible domain-types * @@ -77,6 +79,8 @@ struct iommu_domain_geometry { * certain optimizations for these domains * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB * invalidation. + * IOMMU_DOMAIN_SVA - DMA addresses are shared process addresses + * represented by mm_struct's.
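One plausible shape for the ->iomap_valid() hook documented earlier in this hunk, treating iomap->validity_cookie as a snapshot of a per-inode extent-map sequence counter. The myfs_* names and the i_extseq field are hypothetical, not taken from any filesystem in this patch:

#include <linux/fs.h>
#include <linux/iomap.h>

struct myfs_inode {
	struct inode	vfs_inode;
	u32		i_extseq;	/* bumped on every extent-map change */
};

static bool myfs_iomap_valid(struct inode *inode, const struct iomap *iomap)
{
	struct myfs_inode *mi = container_of(inode, struct myfs_inode, vfs_inode);

	/* Compare the cookie stashed at mapping time with the live counter. */
	return iomap->validity_cookie == READ_ONCE(mi->i_extseq);
}

static const struct iomap_page_ops myfs_page_ops = {
	.iomap_valid	= myfs_iomap_valid,
};

/*
 * ->iomap_begin() pairs with this by stashing the current counter:
 *	iomap->validity_cookie = READ_ONCE(mi->i_extseq);
 *	iomap->page_ops = &myfs_page_ops;
 */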
*/ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) @@ -86,15 +90,27 @@ struct iommu_domain_geometry { #define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \ __IOMMU_DOMAIN_DMA_API | \ __IOMMU_DOMAIN_DMA_FQ) +#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA) struct iommu_domain { unsigned type; const struct iommu_domain_ops *ops; unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ - iommu_fault_handler_t handler; - void *handler_token; struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; + enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault, + void *data); + void *fault_data; + union { + struct { + iommu_fault_handler_t handler; + void *handler_token; + }; + struct { /* IOMMU_DOMAIN_SVA */ + struct mm_struct *mm; + int users; + }; + }; }; static inline bool iommu_is_dma_domain(struct iommu_domain *domain) @@ -108,6 +124,11 @@ enum iommu_cap { IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ IOMMU_CAP_PRE_BOOT_PROTECTION, /* Firmware says it used the IOMMU for DMA protection and we should too */ + /* + * Per-device flag indicating if enforce_cache_coherency() will work on + * this device. + */ + IOMMU_CAP_ENFORCE_CACHE_COHERENCY, }; /* These are the possible reserved region types */ @@ -214,15 +235,15 @@ struct iommu_iotlb_gather { * driver init to device driver init (default no) * @dev_enable/disable_feat: per device entries to enable/disable * iommu specific features. - * @sva_bind: Bind process address space to device - * @sva_unbind: Unbind process address space from device - * @sva_get_pasid: Get PASID associated to a SVA handle * @page_response: handle page request response * @def_domain_type: device default domain type, return value: * - IOMMU_DOMAIN_IDENTITY: must use an identity domain * - IOMMU_DOMAIN_DMA: must use a dma domain * - 0: use the default setting * @default_domain_ops: the default ops for domains + * @remove_dev_pasid: Remove any translation configurations of a specific + * pasid, so that any DMA transactions with this pasid + * will be blocked by the hardware. * @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops */ @@ -247,16 +268,12 @@ struct iommu_ops { int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f); int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f); - struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm, - void *drvdata); - void (*sva_unbind)(struct iommu_sva *handle); - u32 (*sva_get_pasid)(struct iommu_sva *handle); - int (*page_response)(struct device *dev, struct iommu_fault_event *evt, struct iommu_page_response *msg); int (*def_domain_type)(struct device *dev); + void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid); const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; @@ -266,7 +283,20 @@ struct iommu_ops { /** * struct iommu_domain_ops - domain specific operations * @attach_dev: attach an iommu domain to a device + * Return: + * * 0 - success + * * EINVAL - can indicate that device and domain are incompatible due to + * some previous configuration of the domain, in which case the + * driver shouldn't log an error, since it is legitimate for a + * caller to test reuse of existing domains. 
Otherwise, it may + * still represent some other fundamental problem + * * ENOMEM - out of memory + * * ENOSPC - non-ENOMEM type of resource allocation failures + * * EBUSY - device is attached to a domain and cannot be changed + * * ENODEV - device specific errors, not able to be attached + * * <others> - treated as ENODEV by the caller. Use is discouraged * @detach_dev: detach an iommu domain from a device + * @set_dev_pasid: set an iommu domain to a pasid of device * @map: map a physically contiguous memory region to an iommu domain * @map_pages: map a physically contiguous set of pages of the same size to * an iommu domain. @@ -287,6 +317,8 @@ struct iommu_ops { struct iommu_domain_ops { int (*attach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev); + int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev, + ioasid_t pasid); int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); @@ -322,12 +354,14 @@ struct iommu_domain_ops { * @list: Used by the iommu-core to keep a list of registered iommus * @ops: iommu-ops for talking to this iommu * @dev: struct device for sysfs handling + * @max_pasids: number of supported PASIDs */ struct iommu_device { struct list_head list; const struct iommu_ops *ops; struct fwnode_handle *fwnode; struct device *dev; + u32 max_pasids; }; /** @@ -366,6 +400,7 @@ struct iommu_fault_param { * @fwspec: IOMMU fwspec data * @iommu_dev: IOMMU device this device is linked to * @priv: IOMMU Driver private data + * @max_pasids: number of PASIDs this device can consume * * TODO: migrate other per device data pointers under iommu_dev_data, e.g. * struct iommu_group *iommu_group; @@ -377,6 +412,7 @@ struct dev_iommu { struct iommu_fwspec *fwspec; struct iommu_device *iommu_dev; void *priv; + u32 max_pasids; }; int iommu_device_register(struct iommu_device *iommu, @@ -626,6 +662,7 @@ struct iommu_fwspec { */ struct iommu_sva { struct device *dev; + struct iommu_domain *domain; }; int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, @@ -667,12 +704,6 @@ void iommu_release_device(struct device *dev); int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); -struct iommu_sva *iommu_sva_bind_device(struct device *dev, - struct mm_struct *mm, - void *drvdata); -void iommu_sva_unbind_device(struct iommu_sva *handle); -u32 iommu_sva_get_pasid(struct iommu_sva *handle); - int iommu_device_use_default_domain(struct device *dev); void iommu_device_unuse_default_domain(struct device *dev); @@ -680,6 +711,18 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner); void iommu_group_release_dma_owner(struct iommu_group *group); bool iommu_group_dma_owner_claimed(struct iommu_group *group); +int iommu_device_claim_dma_owner(struct device *dev, void *owner); +void iommu_device_release_dma_owner(struct device *dev); + +struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, + struct mm_struct *mm); +int iommu_attach_device_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid); +void iommu_detach_device_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid); +struct iommu_domain * +iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, + unsigned int type); #else /* CONFIG_IOMMU_API */ struct iommu_ops {}; @@ -999,21 +1042,6 @@ 
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) return -ENODEV; } -static inline struct iommu_sva * -iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) -{ - return NULL; -} - -static inline void iommu_sva_unbind_device(struct iommu_sva *handle) -{ -} - -static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) -{ - return IOMMU_PASID_INVALID; -} - static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) { return NULL; @@ -1042,6 +1070,39 @@ static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group) { return false; } + +static inline void iommu_device_release_dma_owner(struct device *dev) +{ +} + +static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner) +{ + return -ENODEV; +} + +static inline struct iommu_domain * +iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm) +{ + return NULL; +} + +static inline int iommu_attach_device_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) +{ + return -ENODEV; +} + +static inline void iommu_detach_device_pasid(struct iommu_domain *domain, + struct device *dev, ioasid_t pasid) +{ +} + +static inline struct iommu_domain * +iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, + unsigned int type) +{ + return NULL; +} #endif /* CONFIG_IOMMU_API */ /** @@ -1124,4 +1185,26 @@ static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream return false; } +#ifdef CONFIG_IOMMU_SVA +struct iommu_sva *iommu_sva_bind_device(struct device *dev, + struct mm_struct *mm); +void iommu_sva_unbind_device(struct iommu_sva *handle); +u32 iommu_sva_get_pasid(struct iommu_sva *handle); +#else +static inline struct iommu_sva * +iommu_sva_bind_device(struct device *dev, struct mm_struct *mm) +{ + return NULL; +} + +static inline void iommu_sva_unbind_device(struct iommu_sva *handle) +{ +} + +static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle) +{ + return IOMMU_PASID_INVALID; +} +#endif /* CONFIG_IOMMU_SVA */ + #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h new file mode 100644 index 000000000000..650d45629647 --- /dev/null +++ b/include/linux/iommufd.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Intel Corporation + * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES + */ +#ifndef __LINUX_IOMMUFD_H +#define __LINUX_IOMMUFD_H + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/err.h> + +struct device; +struct iommufd_device; +struct page; +struct iommufd_ctx; +struct iommufd_access; +struct file; + +struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, + struct device *dev, u32 *id); +void iommufd_device_unbind(struct iommufd_device *idev); + +int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id); +void iommufd_device_detach(struct iommufd_device *idev); + +struct iommufd_access_ops { + u8 needs_pin_pages : 1; + void (*unmap)(void *data, unsigned long iova, unsigned long length); +}; + +enum { + IOMMUFD_ACCESS_RW_READ = 0, + IOMMUFD_ACCESS_RW_WRITE = 1 << 0, + /* Set if the caller is in a kthread; rw will then use kthread_use_mm() */ + IOMMUFD_ACCESS_RW_KTHREAD = 1 << 1, + + /* Only for use by selftest */ + __IOMMUFD_ACCESS_RW_SLOW_PATH = 1 << 2, +}; + +struct iommufd_access * +iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id, + const struct iommufd_access_ops *ops, void *data); +void iommufd_access_destroy(struct iommufd_access
*access); + +void iommufd_ctx_get(struct iommufd_ctx *ictx); + +#if IS_ENABLED(CONFIG_IOMMUFD) +struct iommufd_ctx *iommufd_ctx_from_file(struct file *file); +void iommufd_ctx_put(struct iommufd_ctx *ictx); + +int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova, + unsigned long length, struct page **out_pages, + unsigned int flags); +void iommufd_access_unpin_pages(struct iommufd_access *access, + unsigned long iova, unsigned long length); +int iommufd_access_rw(struct iommufd_access *access, unsigned long iova, + void *data, size_t len, unsigned int flags); +int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id); +#else /* !CONFIG_IOMMUFD */ +static inline struct iommufd_ctx *iommufd_ctx_from_file(struct file *file) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline void iommufd_ctx_put(struct iommufd_ctx *ictx) +{ +} + +static inline int iommufd_access_pin_pages(struct iommufd_access *access, + unsigned long iova, + unsigned long length, + struct page **out_pages, + unsigned int flags) +{ + return -EOPNOTSUPP; +} + +static inline void iommufd_access_unpin_pages(struct iommufd_access *access, + unsigned long iova, + unsigned long length) +{ +} + +static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova, + void *data, size_t len, unsigned int flags) +{ + return -EOPNOTSUPP; +} + +static inline int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, + u32 *out_ioas_id) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_IOMMUFD */ +#endif diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 4ae3c541ea6f..25d768d48970 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -155,7 +155,7 @@ enum { /* helpers to define resources */ #define DEFINE_RES_NAMED(_start, _size, _name, _flags) \ - { \ +(struct resource) { \ .start = (_start), \ .end = (_start) + (_size) - 1, \ .name = (_name), \ diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h index eee1877a354e..859f4b0c1b2b 100644 --- a/include/linux/kernel-page-flags.h +++ b/include/linux/kernel-page-flags.h @@ -18,5 +18,6 @@ #define KPF_UNCACHED 39 #define KPF_SOFTDIRTY 40 #define KPF_ARCH_2 41 +#define KPF_ARCH_3 42 #endif /* LINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 57fb972fea05..58a5b75612e3 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -112,18 +112,18 @@ extern struct kobject * __must_check kobject_get_unless_zero( struct kobject *kobj); extern void kobject_put(struct kobject *kobj); -extern const void *kobject_namespace(struct kobject *kobj); -extern void kobject_get_ownership(struct kobject *kobj, +extern const void *kobject_namespace(const struct kobject *kobj); +extern void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid); -extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); +extern char *kobject_get_path(const struct kobject *kobj, gfp_t flag); struct kobj_type { void (*release)(struct kobject *kobj); const struct sysfs_ops *sysfs_ops; const struct attribute_group **default_groups; - const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); - const void *(*namespace)(struct kobject *kobj); - void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid); + const struct kobj_ns_type_operations *(*child_ns_type)(const struct kobject *kobj); + const void *(*namespace)(const struct kobject *kobj); + void (*get_ownership)(const struct kobject *kobj, kuid_t *uid, 
kgid_t *gid); }; struct kobj_uevent_env { @@ -135,8 +135,8 @@ struct kobj_uevent_env { }; struct kset_uevent_ops { - int (* const filter)(struct kobject *kobj); - const char *(* const name)(struct kobject *kobj); + int (* const filter)(const struct kobject *kobj); + const char *(* const name)(const struct kobject *kobj); int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env); }; @@ -198,7 +198,7 @@ static inline void kset_put(struct kset *k) kobject_put(&k->kobj); } -static inline const struct kobj_type *get_ktype(struct kobject *kobj) +static inline const struct kobj_type *get_ktype(const struct kobject *kobj) { return kobj->ktype; } diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index 2b5b64256cf4..be707748e7ce 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h @@ -47,8 +47,8 @@ struct kobj_ns_type_operations { int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); int kobj_ns_type_registered(enum kobj_ns_type type); -const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); -const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); +const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *parent); +const struct kobj_ns_type_operations *kobj_ns_ops(const struct kobject *kobj); bool kobj_ns_current_may_mount(enum kobj_ns_type type); void *kobj_ns_grab_current(enum kobj_ns_type type); diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h index 906f899813dc..4862c98d80d3 100644 --- a/include/linux/kvm_dirty_ring.h +++ b/include/linux/kvm_dirty_ring.h @@ -37,6 +37,11 @@ static inline u32 kvm_dirty_ring_get_rsvd_entries(void) return 0; } +static inline bool kvm_use_dirty_bitmap(struct kvm *kvm) +{ + return true; +} + static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size) { @@ -49,7 +54,7 @@ static inline int kvm_dirty_ring_reset(struct kvm *kvm, return 0; } -static inline void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, +static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset) { } @@ -64,13 +69,11 @@ static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring) { } -static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring) -{ - return true; -} - #else /* CONFIG_HAVE_KVM_DIRTY_RING */ +int kvm_cpu_dirty_log_size(void); +bool kvm_use_dirty_bitmap(struct kvm *kvm); +bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm); u32 kvm_dirty_ring_get_rsvd_entries(void); int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size); @@ -84,13 +87,14 @@ int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring); * returns =0: successfully pushed * <0: unable to push, need to wait */ -void kvm_dirty_ring_push(struct kvm_dirty_ring *ring, u32 slot, u64 offset); +void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset); + +bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu); /* for use in vm_operations_struct */ struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset); void kvm_dirty_ring_free(struct kvm_dirty_ring *ring); -bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring); #endif /* CONFIG_HAVE_KVM_DIRTY_RING */ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 915142abdf76..4f26b244f6d0 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -50,8 +50,8 @@ #endif /* - * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used - * in kvm, 
other bits are visible for userspace which are defined in + * The bit 16 ~ bit 31 of kvm_userspace_memory_region::flags are internally + * used in kvm, other bits are visible for userspace which are defined in * include/linux/kvm.h. */ #define KVM_MEMSLOT_INVALID (1UL << 16) @@ -96,6 +96,7 @@ #define KVM_PFN_ERR_FAULT (KVM_PFN_ERR_MASK) #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) +#define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3) /* * error pfns indicate that the gfn is in slot but failed to @@ -107,6 +108,15 @@ static inline bool is_error_pfn(kvm_pfn_t pfn) } /* + * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted + * by a pending signal. Note, the signal may or may not be fatal. + */ +static inline bool is_sigpending_pfn(kvm_pfn_t pfn) +{ + return pfn == KVM_PFN_ERR_SIGPENDING; +} + +/* * error_noslot pfns indicate that the gfn can not be * translated to pfn - it is not in slot or failed to * translate it to pfn. @@ -153,10 +163,11 @@ static inline bool is_error_page(struct page *page) * Architecture-independent vcpu->requests bit members * Bits 3-7 are reserved for more arch-independent bits. */ -#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_UNBLOCK 2 -#define KVM_REQUEST_ARCH_BASE 8 +#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_UNBLOCK 2 +#define KVM_REQ_DIRTY_RING_SOFT_FULL 3 +#define KVM_REQUEST_ARCH_BASE 8 /* * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to @@ -655,6 +666,8 @@ struct kvm_irq_routing_table { }; #endif +bool kvm_arch_irqchip_in_kernel(struct kvm *kvm); + #ifndef KVM_INTERNAL_MEM_SLOTS #define KVM_INTERNAL_MEM_SLOTS 0 #endif @@ -710,6 +723,11 @@ struct kvm { /* The current active memslot set for each address space */ struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct xarray vcpu_array; + /* + * Protected by slots_lock, but can be read outside if an + * incorrect answer is acceptable. + */ + atomic_t nr_memslots_dirty_logging; /* Used to wait for completion of MMU notifiers. */ spinlock_t mn_invalidate_lock; @@ -779,6 +797,7 @@ struct kvm { bool override_halt_poll_ns; unsigned int max_halt_poll_ns; u32 dirty_ring_size; + bool dirty_ring_with_bitmap; bool vm_bugged; bool vm_dead; @@ -1141,8 +1160,8 @@ kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, - bool atomic, bool *async, bool write_fault, - bool *writable, hva_t *hva); + bool atomic, bool interruptible, bool *async, + bool write_fault, bool *writable, hva_t *hva); void kvm_release_pfn_clean(kvm_pfn_t pfn); void kvm_release_pfn_dirty(kvm_pfn_t pfn); @@ -1244,18 +1263,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn); * kvm_gpc_init - initialize gfn_to_pfn_cache. * * @gpc: struct gfn_to_pfn_cache object. - * - * This sets up a gfn_to_pfn_cache by initializing locks. Note, the cache must - * be zero-allocated (or zeroed by the caller before init). - */
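A condensed sketch of the reworked gfn_to_pfn_cache lifecycle, matching the new prototypes in this hunk: the kvm/vcpu/usage tuple is bound once at init time, and activate/check/refresh operate on the cache alone. map_guest_page() is hypothetical, assumes a page-aligned @gpa, and trims the error handling real callers need:

#include <linux/kvm_host.h>

static int map_guest_page(struct kvm *kvm, struct kvm_vcpu *vcpu,
			  struct gfn_to_pfn_cache *gpc, gpa_t gpa)
{
	int ret;

	kvm_gpc_init(gpc, kvm, vcpu, KVM_GUEST_USES_PFN);

	ret = kvm_gpc_activate(gpc, gpa, PAGE_SIZE);
	if (ret)
		return ret;

	read_lock(&gpc->lock);
	while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
		read_unlock(&gpc->lock);

		ret = kvm_gpc_refresh(gpc, PAGE_SIZE);
		if (ret) {
			kvm_gpc_deactivate(gpc);
			return ret;
		}

		read_lock(&gpc->lock);
	}

	/* ... access the mapping through gpc->khva under gpc->lock ... */

	read_unlock(&gpc->lock);
	return 0;
}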
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc); - -/** - * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest - * physical address. - * * @kvm: pointer to kvm instance. - * @gpc: struct gfn_to_pfn_cache object. * @vcpu: vCPU to be used for marking pages dirty and to be woken on * invalidation. * @usage: indicates if the resulting host physical PFN is used while @@ -1264,28 +1272,36 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc); * changes!---will also force @vcpu to exit the guest and * refresh the cache); and/or if the PFN used directly * by KVM (and thus needs a kernel virtual mapping). + * + * This sets up a gfn_to_pfn_cache by initializing locks and assigning the + * immutable attributes. Note, the cache must be zero-allocated (or zeroed by + * the caller before init). + */ +void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm, + struct kvm_vcpu *vcpu, enum pfn_cache_usage usage); + +/** + * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest + * physical address. + * + * @gpc: struct gfn_to_pfn_cache object. + * @gpa: guest physical address to map. + * @len: sanity check; the range being accessed must fit a single page. + * + * @return: 0 for success. + * -EINVAL for a mapping which would cross a page boundary. - * -EFAULT for an untranslatable guest physical address. + * -EFAULT for an untranslatable guest physical address. * - * This primes a gfn_to_pfn_cache and links it into the @kvm's list for - * invalidations to be processed. Callers are required to use - * kvm_gfn_to_pfn_cache_check() to ensure that the cache is valid before - * accessing the target page. + * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for + * invalidations to be processed. Callers are required to use kvm_gpc_check() + * to ensure that the cache is valid before accessing the target page. */ -int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - struct kvm_vcpu *vcpu, enum pfn_cache_usage usage, - gpa_t gpa, unsigned long len); +int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len); /** - * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache. + * kvm_gpc_check - check validity of a gfn_to_pfn_cache. * - * @kvm: pointer to kvm instance. * @gpc: struct gfn_to_pfn_cache object. - * @gpa: current guest physical address to map. * @len: sanity check; the range being accessed must fit a single page. * * @return: %true if the cache is still valid and the address matches. @@ -1298,52 +1314,35 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, * Callers in IN_GUEST_MODE may do so without locking, although they should * still hold a read lock on kvm->srcu for the memslot checks. */ -bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - gpa_t gpa, unsigned long len); +bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len); /** - * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache. + * kvm_gpc_refresh - update a previously initialized cache. * - * @kvm: pointer to kvm instance. * @gpc: struct gfn_to_pfn_cache object. - * @gpa: updated guest physical address to map. * @len: sanity check; the range being accessed must fit a single page. * * @return: 0 for success. * -EINVAL for a mapping which would cross a page boundary. - * -EFAULT for an untranslatable guest physical address. + * -EFAULT for an untranslatable guest physical address. * * This will attempt to refresh a gfn_to_pfn_cache.
Note that a successful - * returm from this function does not mean the page can be immediately + * return from this function does not mean the page can be immediately * accessed because it may have raced with an invalidation. Callers must * still lock and check the cache status, as this function does not return * with the lock still held to permit access. */ -int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc, - gpa_t gpa, unsigned long len); - -/** - * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache. - * - * @kvm: pointer to kvm instance. - * @gpc: struct gfn_to_pfn_cache object. - * - * This unmaps the referenced page. The cache is left in the invalid state - * but at least the mapping from GPA to userspace HVA will remain cached - * and can be reused on a subsequent refresh. - */ -void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); +int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len); /** * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache. * - * @kvm: pointer to kvm instance. * @gpc: struct gfn_to_pfn_cache object. * - * This removes a cache from the @kvm's list to be processed on MMU notifier + * This removes a cache from the VM's list to be processed on MMU notifier * invocation. */ -void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc); +void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc); void kvm_sigset_activate(struct kvm_vcpu *vcpu); void kvm_sigset_deactivate(struct kvm_vcpu *vcpu); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 3ca3db020e0e..76de36e56cdf 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -67,6 +67,7 @@ struct gfn_to_pfn_cache { gpa_t gpa; unsigned long uhva; struct kvm_memory_slot *memslot; + struct kvm *kvm; struct kvm_vcpu *vcpu; struct list_head list; rwlock_t lock; diff --git a/include/linux/linkage.h b/include/linux/linkage.h index 1feab6136b5b..5c8865bb59d9 100644 --- a/include/linux/linkage.h +++ b/include/linux/linkage.h @@ -69,8 +69,8 @@ #endif #ifndef __ALIGN -#define __ALIGN .align 4,0x90 -#define __ALIGN_STR ".align 4,0x90" +#define __ALIGN .balign CONFIG_FUNCTION_ALIGNMENT +#define __ALIGN_STR __stringify(__ALIGN) #endif #ifdef __ASSEMBLY__ diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index 7dd1f01ec4f9..7aab4a769736 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -586,7 +586,7 @@ extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *, void *); extern void mISDN_unregister_clock(struct mISDNclock *); -static inline struct mISDNdevice *dev_to_mISDN(struct device *dev) +static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev) { if (dev) return dev_get_drvdata(dev); diff --git a/include/linux/mfd/dm355evm_msp.h b/include/linux/mfd/dm355evm_msp.h deleted file mode 100644 index 372470350fab..000000000000 --- a/include/linux/mfd/dm355evm_msp.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * dm355evm_msp.h - support MSP430 microcontroller on DM355EVM board - */ -#ifndef __LINUX_I2C_DM355EVM_MSP -#define __LINUX_I2C_DM355EVM_MSP - -/* - * Written against Spectrum's writeup for the A4 firmware revision, - * and tweaked to match source and rev D2 schematics by removing CPLD - * and NOR flash hooks (which were last appropriate in rev B boards). - * - * Note that the firmware supports a flavor of write posting ... to be - * sure a write completes, issue another read or write. 
- */ - -/* utilities to access "registers" emulated by msp430 firmware */ -extern int dm355evm_msp_write(u8 value, u8 reg); -extern int dm355evm_msp_read(u8 reg); - - -/* command/control registers */ -#define DM355EVM_MSP_COMMAND 0x00 -# define MSP_COMMAND_NULL 0 -# define MSP_COMMAND_RESET_COLD 1 -# define MSP_COMMAND_RESET_WARM 2 -# define MSP_COMMAND_RESET_WARM_I 3 -# define MSP_COMMAND_POWEROFF 4 -# define MSP_COMMAND_IR_REINIT 5 -#define DM355EVM_MSP_STATUS 0x01 -# define MSP_STATUS_BAD_OFFSET BIT(0) -# define MSP_STATUS_BAD_COMMAND BIT(1) -# define MSP_STATUS_POWER_ERROR BIT(2) -# define MSP_STATUS_RXBUF_OVERRUN BIT(3) -#define DM355EVM_MSP_RESET 0x02 /* 0 bits == in reset */ -# define MSP_RESET_DC5 BIT(0) -# define MSP_RESET_TVP5154 BIT(2) -# define MSP_RESET_IMAGER BIT(3) -# define MSP_RESET_ETHERNET BIT(4) -# define MSP_RESET_SYS BIT(5) -# define MSP_RESET_AIC33 BIT(7) - -/* GPIO registers ... bit patterns mostly match the source MSP ports */ -#define DM355EVM_MSP_LED 0x03 /* active low (MSP P4) */ -#define DM355EVM_MSP_SWITCH1 0x04 /* (MSP P5, masked) */ -# define MSP_SWITCH1_SW6_1 BIT(0) -# define MSP_SWITCH1_SW6_2 BIT(1) -# define MSP_SWITCH1_SW6_3 BIT(2) -# define MSP_SWITCH1_SW6_4 BIT(3) -# define MSP_SWITCH1_J1 BIT(4) /* NTSC/PAL */ -# define MSP_SWITCH1_MSP_INT BIT(5) /* active low */ -#define DM355EVM_MSP_SWITCH2 0x05 /* (MSP P6, masked) */ -# define MSP_SWITCH2_SW10 BIT(3) -# define MSP_SWITCH2_SW11 BIT(4) -# define MSP_SWITCH2_SW12 BIT(5) -# define MSP_SWITCH2_SW13 BIT(6) -# define MSP_SWITCH2_SW14 BIT(7) -#define DM355EVM_MSP_SDMMC 0x06 /* (MSP P2, masked) */ -# define MSP_SDMMC_0_WP BIT(1) -# define MSP_SDMMC_0_CD BIT(2) /* active low */ -# define MSP_SDMMC_1_WP BIT(3) -# define MSP_SDMMC_1_CD BIT(4) /* active low */ -#define DM355EVM_MSP_FIRMREV 0x07 /* not a GPIO (out of order) */ -#define DM355EVM_MSP_VIDEO_IN 0x08 /* (MSP P3, masked) */ -# define MSP_VIDEO_IMAGER BIT(7) /* low == tvp5146 */ - -/* power supply registers are currently omitted */ - -/* RTC registers */ -#define DM355EVM_MSP_RTC_0 0x12 /* LSB */ -#define DM355EVM_MSP_RTC_1 0x13 -#define DM355EVM_MSP_RTC_2 0x14 -#define DM355EVM_MSP_RTC_3 0x15 /* MSB */ - -/* input event queue registers; code == ((HIGH << 8) | LOW) */ -#define DM355EVM_MSP_INPUT_COUNT 0x16 /* decrement by reading LOW */ -#define DM355EVM_MSP_INPUT_HIGH 0x17 -#define DM355EVM_MSP_INPUT_LOW 0x18 - -#endif /* __LINUX_I2C_DM355EVM_MSP */ diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 1e61c7e9f50d..117d02708439 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -16,7 +16,6 @@ #include <linux/regmap.h> #include <linux/regulator/driver.h> #include <linux/extcon-provider.h> -#include <linux/of_gpio.h> #include <linux/usb/phy_companion.h> #define PALMAS_NUM_CLIENTS 3 diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 3f752dc62a6c..539f27f8bd89 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h @@ -13,6 +13,7 @@ #include <linux/workqueue.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> +#include <linux/pm.h> #include <linux/power_supply.h> #include <linux/mfd/pcf50633/backlight.h> @@ -226,9 +227,6 @@ static inline struct pcf50633 *dev_to_pcf50633(struct device *dev) int pcf50633_irq_init(struct pcf50633 *pcf, int irq); void pcf50633_irq_free(struct pcf50633 *pcf); -#ifdef CONFIG_PM -int pcf50633_irq_suspend(struct pcf50633 *pcf); -int pcf50633_irq_resume(struct pcf50633 *pcf); -#endif +extern 
const struct dev_pm_ops pcf50633_pm; #endif diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h index 8aa0bda1af4f..aacb6d51e99c 100644 --- a/include/linux/mfd/rn5t618.h +++ b/include/linux/mfd/rn5t618.h @@ -227,6 +227,15 @@ #define RN5T618_WATCHDOG_WDOGTIM_S 0 #define RN5T618_PWRIRQ_IR_WDOG BIT(6) +#define RN5T618_POFFHIS_PWRON BIT(0) +#define RN5T618_POFFHIS_TSHUT BIT(1) +#define RN5T618_POFFHIS_VINDET BIT(2) +#define RN5T618_POFFHIS_IODET BIT(3) +#define RN5T618_POFFHIS_CPU BIT(4) +#define RN5T618_POFFHIS_WDG BIT(5) +#define RN5T618_POFFHIS_DCLIM BIT(6) +#define RN5T618_POFFHIS_N_OE BIT(7) + enum { RN5T618_DCDC1, RN5T618_DCDC2, diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h index 744dce63946e..967a2e486800 100644 --- a/include/linux/mfd/stmfx.h +++ b/include/linux/mfd/stmfx.h @@ -113,10 +113,8 @@ struct stmfx { struct irq_domain *irq_domain; struct mutex lock; /* IRQ bus lock */ u8 irq_src; -#ifdef CONFIG_PM u8 bkp_sysctrl; u8 bkp_irqoutpin; -#endif }; int stmfx_function_enable(struct stmfx *stmfx, u32 func); diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h new file mode 100644 index 000000000000..e6826e34e2a6 --- /dev/null +++ b/include/linux/mfd/tps65219.h @@ -0,0 +1,345 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Functions to access TPS65219 Power Management IC. + * + * Copyright (C) 2022 BayLibre Incorporated - https://www.baylibre.com/ + */ + +#ifndef MFD_TPS65219_H +#define MFD_TPS65219_H + +#include <linux/bitops.h> +#include <linux/notifier.h> +#include <linux/regulator/driver.h> + +struct regmap; +struct regmap_irq_chip_data; + +#define TPS65219_1V35 1350000 +#define TPS65219_1V8 1800000 + +/* TPS chip id list */ +#define TPS65219 0xF0 + +/* I2C ID for TPS65219 part */ +#define TPS65219_I2C_ID 0x24 + +/* All register addresses */ +#define TPS65219_REG_TI_DEV_ID 0x00 +#define TPS65219_REG_NVM_ID 0x01 +#define TPS65219_REG_ENABLE_CTRL 0x02 +#define TPS65219_REG_BUCKS_CONFIG 0x03 +#define TPS65219_REG_LDO4_VOUT 0x04 +#define TPS65219_REG_LDO3_VOUT 0x05 +#define TPS65219_REG_LDO2_VOUT 0x06 +#define TPS65219_REG_LDO1_VOUT 0x07 +#define TPS65219_REG_BUCK3_VOUT 0x8 +#define TPS65219_REG_BUCK2_VOUT 0x9 +#define TPS65219_REG_BUCK1_VOUT 0xA +#define TPS65219_REG_LDO4_SEQUENCE_SLOT 0xB +#define TPS65219_REG_LDO3_SEQUENCE_SLOT 0xC +#define TPS65219_REG_LDO2_SEQUENCE_SLOT 0xD +#define TPS65219_REG_LDO1_SEQUENCE_SLOT 0xE +#define TPS65219_REG_BUCK3_SEQUENCE_SLOT 0xF +#define TPS65219_REG_BUCK2_SEQUENCE_SLOT 0x10 +#define TPS65219_REG_BUCK1_SEQUENCE_SLOT 0x11 +#define TPS65219_REG_nRST_SEQUENCE_SLOT 0x12 +#define TPS65219_REG_GPIO_SEQUENCE_SLOT 0x13 +#define TPS65219_REG_GPO2_SEQUENCE_SLOT 0x14 +#define TPS65219_REG_GPO1_SEQUENCE_SLOT 0x15 +#define TPS65219_REG_POWER_UP_SLOT_DURATION_1 0x16 +#define TPS65219_REG_POWER_UP_SLOT_DURATION_2 0x17 +#define TPS65219_REG_POWER_UP_SLOT_DURATION_3 0x18 +#define TPS65219_REG_POWER_UP_SLOT_DURATION_4 0x19 +#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_1 0x1A +#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_2 0x1B +#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_3 0x1C +#define TPS65219_REG_POWER_DOWN_SLOT_DURATION_4 0x1D +#define TPS65219_REG_GENERAL_CONFIG 0x1E +#define TPS65219_REG_MFP_1_CONFIG 0x1F +#define TPS65219_REG_MFP_2_CONFIG 0x20 +#define TPS65219_REG_STBY_1_CONFIG 0x21 +#define TPS65219_REG_STBY_2_CONFIG 0x22 +#define TPS65219_REG_OC_DEGL_CONFIG 0x23 +/* 'sub irq' MASK registers */ +#define TPS65219_REG_INT_MASK_UV 0x24 +#define TPS65219_REG_MASK_CONFIG 0x25 + +#define 
TPS65219_REG_I2C_ADDRESS_REG 0x26 +#define TPS65219_REG_USER_GENERAL_NVM_STORAGE 0x27 +#define TPS65219_REG_MANUFACTURING_VER 0x28 +#define TPS65219_REG_MFP_CTRL 0x29 +#define TPS65219_REG_DISCHARGE_CONFIG 0x2A +/* main irq registers */ +#define TPS65219_REG_INT_SOURCE 0x2B +/* 'sub irq' registers */ +#define TPS65219_REG_INT_LDO_3_4 0x2C +#define TPS65219_REG_INT_LDO_1_2 0x2D +#define TPS65219_REG_INT_BUCK_3 0x2E +#define TPS65219_REG_INT_BUCK_1_2 0x2F +#define TPS65219_REG_INT_SYSTEM 0x30 +#define TPS65219_REG_INT_RV 0x31 +#define TPS65219_REG_INT_TIMEOUT_RV_SD 0x32 +#define TPS65219_REG_INT_PB 0x33 + +#define TPS65219_REG_INT_LDO_3_4_POS 0 +#define TPS65219_REG_INT_LDO_1_2_POS 1 +#define TPS65219_REG_INT_BUCK_3_POS 2 +#define TPS65219_REG_INT_BUCK_1_2_POS 3 +#define TPS65219_REG_INT_SYS_POS 4 +#define TPS65219_REG_INT_RV_POS 5 +#define TPS65219_REG_INT_TO_RV_POS 6 +#define TPS65219_REG_INT_PB_POS 7 + +#define TPS65219_REG_USER_NVM_CMD 0x34 +#define TPS65219_REG_POWER_UP_STATUS 0x35 +#define TPS65219_REG_SPARE_2 0x36 +#define TPS65219_REG_SPARE_3 0x37 +#define TPS65219_REG_FACTORY_CONFIG_2 0x41 + +/* Register field definitions */ +#define TPS65219_DEVID_REV_MASK GENMASK(7, 0) +#define TPS65219_BUCKS_LDOS_VOUT_VSET_MASK GENMASK(5, 0) +#define TPS65219_BUCKS_UV_THR_SEL_MASK BIT(6) +#define TPS65219_BUCKS_BW_SEL_MASK BIT(7) +#define LDO_BYP_SHIFT 6 +#define TPS65219_LDOS_BYP_CONFIG_MASK BIT(LDO_BYP_SHIFT) +#define TPS65219_LDOS_LSW_CONFIG_MASK BIT(7) +/* Regulators enable control */ +#define TPS65219_ENABLE_BUCK1_EN_MASK BIT(0) +#define TPS65219_ENABLE_BUCK2_EN_MASK BIT(1) +#define TPS65219_ENABLE_BUCK3_EN_MASK BIT(2) +#define TPS65219_ENABLE_LDO1_EN_MASK BIT(3) +#define TPS65219_ENABLE_LDO2_EN_MASK BIT(4) +#define TPS65219_ENABLE_LDO3_EN_MASK BIT(5) +#define TPS65219_ENABLE_LDO4_EN_MASK BIT(6) +/* power ON-OFF sequence slot */ +#define TPS65219_BUCKS_LDOS_SEQUENCE_OFF_SLOT_MASK GENMASK(3, 0) +#define TPS65219_BUCKS_LDOS_SEQUENCE_ON_SLOT_MASK GENMASK(7, 4) +/* TODO: Not needed, same mapping as TPS65219_ENABLE_REGNAME_EN, factorize */ +#define TPS65219_STBY1_BUCK1_STBY_EN_MASK BIT(0) +#define TPS65219_STBY1_BUCK2_STBY_EN_MASK BIT(1) +#define TPS65219_STBY1_BUCK3_STBY_EN_MASK BIT(2) +#define TPS65219_STBY1_LDO1_STBY_EN_MASK BIT(3) +#define TPS65219_STBY1_LDO2_STBY_EN_MASK BIT(4) +#define TPS65219_STBY1_LDO3_STBY_EN_MASK BIT(5) +#define TPS65219_STBY1_LDO4_STBY_EN_MASK BIT(6) +/* STBY_2 config */ +#define TPS65219_STBY2_GPO1_STBY_EN_MASK BIT(0) +#define TPS65219_STBY2_GPO2_STBY_EN_MASK BIT(1) +#define TPS65219_STBY2_GPIO_STBY_EN_MASK BIT(2) +/* MFP Control */ +#define TPS65219_MFP_I2C_OFF_REQ_MASK BIT(0) +#define TPS65219_MFP_STBY_I2C_CTRL_MASK BIT(1) +#define TPS65219_MFP_COLD_RESET_I2C_CTRL_MASK BIT(2) +#define TPS65219_MFP_WARM_RESET_I2C_CTRL_MASK BIT(3) +#define TPS65219_MFP_GPIO_STATUS_MASK BIT(4) +/* MFP_1 Config */ +#define TPS65219_MFP_1_VSEL_DDR_SEL_MASK BIT(0) +#define TPS65219_MFP_1_VSEL_SD_POL_MASK BIT(1) +#define TPS65219_MFP_1_VSEL_RAIL_MASK BIT(2) +/* MFP_2 Config */ +#define TPS65219_MFP_2_MODE_STBY_MASK GENMASK(1, 0) +#define TPS65219_MFP_2_MODE_RESET_MASK BIT(2) +#define TPS65219_MFP_2_EN_PB_VSENSE_DEGL_MASK BIT(3) +#define TPS65219_MFP_2_EN_PB_VSENSE_MASK GENMASK(5, 4) +#define TPS65219_MFP_2_WARM_COLD_RESET_MASK BIT(6) +#define TPS65219_MFP_2_PU_ON_FSD_MASK BIT(7) +#define TPS65219_MFP_2_EN 0 +#define TPS65219_MFP_2_PB BIT(4) +#define TPS65219_MFP_2_VSENSE BIT(5) +/* MASK_UV Config */ +#define TPS65219_REG_MASK_UV_LDO1_UV_MASK BIT(0) +#define 
TPS65219_REG_MASK_UV_LDO2_UV_MASK BIT(1) +#define TPS65219_REG_MASK_UV_LDO3_UV_MASK BIT(2) +#define TPS65219_REG_MASK_UV_LDO4_UV_MASK BIT(3) +#define TPS65219_REG_MASK_UV_BUCK1_UV_MASK BIT(4) +#define TPS65219_REG_MASK_UV_BUCK2_UV_MASK BIT(5) +#define TPS65219_REG_MASK_UV_BUCK3_UV_MASK BIT(6) +#define TPS65219_REG_MASK_UV_RETRY_MASK BIT(7) +/* MASK Config */ +// SENSOR_N_WARM_MASK already defined in Thermal +#define TPS65219_REG_MASK_INT_FOR_RV_MASK BIT(4) +#define TPS65219_REG_MASK_EFFECT_MASK GENMASK(2, 1) +#define TPS65219_REG_MASK_INT_FOR_PB_MASK BIT(7) +/* UnderVoltage - Short to GND - OverCurrent*/ +/* LDO3-4 */ +#define TPS65219_INT_LDO3_SCG_MASK BIT(0) +#define TPS65219_INT_LDO3_OC_MASK BIT(1) +#define TPS65219_INT_LDO3_UV_MASK BIT(2) +#define TPS65219_INT_LDO4_SCG_MASK BIT(3) +#define TPS65219_INT_LDO4_OC_MASK BIT(4) +#define TPS65219_INT_LDO4_UV_MASK BIT(5) +/* LDO1-2 */ +#define TPS65219_INT_LDO1_SCG_MASK BIT(0) +#define TPS65219_INT_LDO1_OC_MASK BIT(1) +#define TPS65219_INT_LDO1_UV_MASK BIT(2) +#define TPS65219_INT_LDO2_SCG_MASK BIT(3) +#define TPS65219_INT_LDO2_OC_MASK BIT(4) +#define TPS65219_INT_LDO2_UV_MASK BIT(5) +/* BUCK3 */ +#define TPS65219_INT_BUCK3_SCG_MASK BIT(0) +#define TPS65219_INT_BUCK3_OC_MASK BIT(1) +#define TPS65219_INT_BUCK3_NEG_OC_MASK BIT(2) +#define TPS65219_INT_BUCK3_UV_MASK BIT(3) +/* BUCK1-2 */ +#define TPS65219_INT_BUCK1_SCG_MASK BIT(0) +#define TPS65219_INT_BUCK1_OC_MASK BIT(1) +#define TPS65219_INT_BUCK1_NEG_OC_MASK BIT(2) +#define TPS65219_INT_BUCK1_UV_MASK BIT(3) +#define TPS65219_INT_BUCK2_SCG_MASK BIT(4) +#define TPS65219_INT_BUCK2_OC_MASK BIT(5) +#define TPS65219_INT_BUCK2_NEG_OC_MASK BIT(6) +#define TPS65219_INT_BUCK2_UV_MASK BIT(7) +/* Thermal Sensor */ +#define TPS65219_INT_SENSOR_3_WARM_MASK BIT(0) +#define TPS65219_INT_SENSOR_2_WARM_MASK BIT(1) +#define TPS65219_INT_SENSOR_1_WARM_MASK BIT(2) +#define TPS65219_INT_SENSOR_0_WARM_MASK BIT(3) +#define TPS65219_INT_SENSOR_3_HOT_MASK BIT(4) +#define TPS65219_INT_SENSOR_2_HOT_MASK BIT(5) +#define TPS65219_INT_SENSOR_1_HOT_MASK BIT(6) +#define TPS65219_INT_SENSOR_0_HOT_MASK BIT(7) +/* Residual Voltage */ +#define TPS65219_INT_BUCK1_RV_MASK BIT(0) +#define TPS65219_INT_BUCK2_RV_MASK BIT(1) +#define TPS65219_INT_BUCK3_RV_MASK BIT(2) +#define TPS65219_INT_LDO1_RV_MASK BIT(3) +#define TPS65219_INT_LDO2_RV_MASK BIT(4) +#define TPS65219_INT_LDO3_RV_MASK BIT(5) +#define TPS65219_INT_LDO4_RV_MASK BIT(6) +/* Residual Voltage ShutDown */ +#define TPS65219_INT_BUCK1_RV_SD_MASK BIT(0) +#define TPS65219_INT_BUCK2_RV_SD_MASK BIT(1) +#define TPS65219_INT_BUCK3_RV_SD_MASK BIT(2) +#define TPS65219_INT_LDO1_RV_SD_MASK BIT(3) +#define TPS65219_INT_LDO2_RV_SD_MASK BIT(4) +#define TPS65219_INT_LDO3_RV_SD_MASK BIT(5) +#define TPS65219_INT_LDO4_RV_SD_MASK BIT(6) +#define TPS65219_INT_TIMEOUT_MASK BIT(7) +/* Power Button */ +#define TPS65219_INT_PB_FALLING_EDGE_DETECT_MASK BIT(0) +#define TPS65219_INT_PB_RISING_EDGE_DETECT_MASK BIT(1) +#define TPS65219_INT_PB_REAL_TIME_STATUS_MASK BIT(2) + +#define TPS65219_PB_POS 7 +#define TPS65219_TO_RV_POS 6 +#define TPS65219_RV_POS 5 +#define TPS65219_SYS_POS 4 +#define TPS65219_BUCK_1_2_POS 3 +#define TPS65219_BUCK_3_POS 2 +#define TPS65219_LDO_1_2_POS 1 +#define TPS65219_LDO_3_4_POS 0 + +/* IRQs */ +enum { + /* LDO3-4 register IRQs */ + TPS65219_INT_LDO3_SCG, + TPS65219_INT_LDO3_OC, + TPS65219_INT_LDO3_UV, + TPS65219_INT_LDO4_SCG, + TPS65219_INT_LDO4_OC, + TPS65219_INT_LDO4_UV, + /* LDO1-2 */ + TPS65219_INT_LDO1_SCG, + TPS65219_INT_LDO1_OC, + TPS65219_INT_LDO1_UV, + 
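A hedged sketch of how a TPS65219 sub-driver might combine these field definitions with the shared regmap (struct tps65219 is declared further down in this header): switching BUCK1 on, then unmasking its under-voltage interrupt. That a set bit in INT_MASK_UV masks the event is an assumption read off the register name.

#include <linux/regmap.h>

/* Sketch only: enable BUCK1 and unmask its UV event. */
static int tps65219_buck1_on(struct tps65219 *tps)
{
	int ret;

	ret = regmap_set_bits(tps->regmap, TPS65219_REG_ENABLE_CTRL,
			      TPS65219_ENABLE_BUCK1_EN_MASK);
	if (ret)
		return ret;

	/* Assumes set == masked for INT_MASK_UV bits. */
	return regmap_clear_bits(tps->regmap, TPS65219_REG_INT_MASK_UV,
				 TPS65219_REG_MASK_UV_BUCK1_UV_MASK);
}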
TPS65219_INT_LDO2_SCG, + TPS65219_INT_LDO2_OC, + TPS65219_INT_LDO2_UV, + /* BUCK3 */ + TPS65219_INT_BUCK3_SCG, + TPS65219_INT_BUCK3_OC, + TPS65219_INT_BUCK3_NEG_OC, + TPS65219_INT_BUCK3_UV, + /* BUCK1-2 */ + TPS65219_INT_BUCK1_SCG, + TPS65219_INT_BUCK1_OC, + TPS65219_INT_BUCK1_NEG_OC, + TPS65219_INT_BUCK1_UV, + TPS65219_INT_BUCK2_SCG, + TPS65219_INT_BUCK2_OC, + TPS65219_INT_BUCK2_NEG_OC, + TPS65219_INT_BUCK2_UV, + /* Thermal Sensor */ + TPS65219_INT_SENSOR_3_WARM, + TPS65219_INT_SENSOR_2_WARM, + TPS65219_INT_SENSOR_1_WARM, + TPS65219_INT_SENSOR_0_WARM, + TPS65219_INT_SENSOR_3_HOT, + TPS65219_INT_SENSOR_2_HOT, + TPS65219_INT_SENSOR_1_HOT, + TPS65219_INT_SENSOR_0_HOT, + /* Residual Voltage */ + TPS65219_INT_BUCK1_RV, + TPS65219_INT_BUCK2_RV, + TPS65219_INT_BUCK3_RV, + TPS65219_INT_LDO1_RV, + TPS65219_INT_LDO2_RV, + TPS65219_INT_LDO3_RV, + TPS65219_INT_LDO4_RV, + /* Residual Voltage ShutDown */ + TPS65219_INT_BUCK1_RV_SD, + TPS65219_INT_BUCK2_RV_SD, + TPS65219_INT_BUCK3_RV_SD, + TPS65219_INT_LDO1_RV_SD, + TPS65219_INT_LDO2_RV_SD, + TPS65219_INT_LDO3_RV_SD, + TPS65219_INT_LDO4_RV_SD, + TPS65219_INT_TIMEOUT, + /* Power Button */ + TPS65219_INT_PB_FALLING_EDGE_DETECT, + TPS65219_INT_PB_RISING_EDGE_DETECT, +}; + +enum tps65219_regulator_id { + /* DCDC's */ + TPS65219_BUCK_1, + TPS65219_BUCK_2, + TPS65219_BUCK_3, + /* LDOs */ + TPS65219_LDO_1, + TPS65219_LDO_2, + TPS65219_LDO_3, + TPS65219_LDO_4, +}; + +/* Number of step-down converters available */ +#define TPS65219_NUM_DCDC 3 +/* Number of LDO voltage regulators available */ +#define TPS65219_NUM_LDO 4 +/* Number of total regulators available */ +#define TPS65219_NUM_REGULATOR (TPS65219_NUM_DCDC + TPS65219_NUM_LDO) + +/* Define the TPS65219 IRQ numbers */ +enum tps65219_irqs { + /* INT source registers */ + TPS65219_TO_RV_SD_SET_IRQ, + TPS65219_RV_SET_IRQ, + TPS65219_SYS_SET_IRQ, + TPS65219_BUCK_1_2_SET_IRQ, + TPS65219_BUCK_3_SET_IRQ, + TPS65219_LDO_1_2_SET_IRQ, + TPS65219_LDO_3_4_SET_IRQ, + TPS65219_PB_SET_IRQ, +}; + +/** + * struct tps65219 - tps65219 sub-driver chip access routines + * + * Device data may be used to access the TPS65219 chip + * + * @dev: MFD device + * @regmap: Regmap for accessing the device registers + * @irq_data: Regmap irq data used for the irq chip + * @nb: notifier block for the restart handler + */ +struct tps65219 { + struct device *dev; + struct regmap *regmap; + + struct regmap_irq_chip_data *irq_data; + struct notifier_block nb; +}; + +#endif /* MFD_TPS65219_H */ diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h index 1fc7450bd8ab..286a724e379a 100644 --- a/include/linux/mfd/twl6040.h +++ b/include/linux/mfd/twl6040.h @@ -174,35 +174,7 @@ #define TWL6040_GPO_MAX 3 -/* TODO: All platform data struct can be removed */ -struct twl6040_codec_data { - u16 hs_left_step; - u16 hs_right_step; - u16 hf_left_step; - u16 hf_right_step; -}; - -struct twl6040_vibra_data { - unsigned int vibldrv_res; /* left driver resistance */ - unsigned int vibrdrv_res; /* right driver resistance */ - unsigned int viblmotor_res; /* left motor resistance */ - unsigned int vibrmotor_res; /* right motor resistance */ - int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */ - int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */ -}; - -struct twl6040_gpo_data { - int gpio_base; -}; - -struct twl6040_platform_data { - int audpwron_gpio; /* audio power-on gpio */ - - struct twl6040_codec_data *codec; - struct twl6040_vibra_data *vibra; - struct twl6040_gpo_data *gpo; -}; - +struct gpio_desc; struct regmap; struct 
regmap_irq_chips_data; @@ -218,7 +190,7 @@ struct twl6040 { struct mfd_cell cells[TWL6040_CELLS]; struct completion ready; - int audpwron; + struct gpio_desc *audpwron; int power_count; int rev; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 152d2d7f8743..f3d1c62c98dd 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1891,7 +1891,12 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 max_reformat_remove_size[0x8]; u8 max_reformat_remove_offset[0x8]; - u8 reserved_at_c0[0xe0]; + u8 reserved_at_c0[0x8]; + u8 migration_multi_load[0x1]; + u8 migration_tracking_state[0x1]; + u8 reserved_at_ca[0x16]; + + u8 reserved_at_e0[0xc0]; u8 reserved_at_1a0[0xb]; u8 log_min_mkey_entity_size[0x5]; @@ -12033,7 +12038,8 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x10]; + u8 incremental[0x1]; + u8 reserved_at_41[0xf]; u8 vhca_id[0x10]; u8 reserved_at_60[0x20]; @@ -12059,7 +12065,9 @@ struct mlx5_ifc_save_vhca_state_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x10]; + u8 incremental[0x1]; + u8 set_track[0x1]; + u8 reserved_at_42[0xe]; u8 vhca_id[0x10]; u8 reserved_at_60[0x20]; diff --git a/include/linux/mm.h b/include/linux/mm.h index 8178fe894e2e..f3f196e4d66d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3088,6 +3088,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address, #define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */ #define FOLL_FAST_ONLY 0x80000 /* gup_fast: prevent fall-back to slow gup */ #define FOLL_PCI_P2PDMA 0x100000 /* allow returning PCI P2PDMA pages */ +#define FOLL_INTERRUPTIBLE 0x200000 /* allow interrupts from generic signals */ /* * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h index 092c52aa6c2c..0ccca33a7a6d 100644 --- a/include/linux/mnt_idmapping.h +++ b/include/linux/mnt_idmapping.h @@ -96,26 +96,6 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid) return vfsgid_valid(vfsgid) && __vfsgid_val(vfsgid) == __kgid_val(kgid); } -static inline bool vfsuid_gt_kuid(vfsuid_t vfsuid, kuid_t kuid) -{ - return __vfsuid_val(vfsuid) > __kuid_val(kuid); -} - -static inline bool vfsgid_gt_kgid(vfsgid_t vfsgid, kgid_t kgid) -{ - return __vfsgid_val(vfsgid) > __kgid_val(kgid); -} - -static inline bool vfsuid_lt_kuid(vfsuid_t vfsuid, kuid_t kuid) -{ - return __vfsuid_val(vfsuid) < __kuid_val(kuid); -} - -static inline bool vfsgid_lt_kgid(vfsgid_t vfsgid, kgid_t kgid) -{ - return __vfsgid_val(vfsgid) < __kgid_val(kgid); -} - /* * vfs{g,u}ids are created from k{g,u}ids. * We don't allow them to be created from regular {u,g}id. diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index 9e09d11ffe5b..7b4587a19189 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -13,6 +13,9 @@ * must be implemented by each architecture. */ +/* arch may override to do additional checking of ELF header architecture */ +bool module_elf_check_arch(Elf_Ehdr *hdr); + /* Adjust arch-specific sections. Return 0 on success. 
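The new module_elf_check_arch() hook lets an architecture veto a module's ELF header beyond the generic e_machine check; the core presumably pairs it with a weak default that accepts everything. A hedged sketch of what an arch override might look like; EF_FOO_ABI_V2 is invented for illustration.

#include <linux/elf.h>
#include <linux/moduleloader.h>

#define EF_FOO_ABI_V2	0x1	/* made-up e_flags bit, illustration only */

/* Hypothetical arch override: refuse modules built for the wrong ABI. */
bool module_elf_check_arch(Elf_Ehdr *hdr)
{
	return (hdr->e_flags & EF_FOO_ABI_V2) != 0;
}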
*/ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 1a803e4335d3..ab7d557d541d 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -35,7 +35,7 @@ extern const void *of_device_get_match_data(const struct device *dev); extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len); extern int of_device_request_module(struct device *dev); -extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env); +extern void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env); extern int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env); static inline struct device_node *of_cpu_device_node_get(int cpu) @@ -64,7 +64,7 @@ static inline int of_driver_match_device(struct device *dev, return 0; } -static inline void of_device_uevent(struct device *dev, +static inline void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { } static inline const void *of_device_get_match_data(const struct device *dev) diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index a5166eb93437..6db627257a7b 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -34,7 +34,7 @@ enum of_gpio_flags { #ifdef CONFIG_OF_GPIO -#include <linux/kernel.h> +#include <linux/container_of.h> /* * OF GPIO chip for memory mapped banks diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 1d3be1a2204c..0e33b5cbdb9f 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -128,6 +128,53 @@ static inline bool __must_check __must_check_overflow(bool overflow) (*_d >> _to_shift) != _a); \ })) +#define __overflows_type_constexpr(x, T) ( \ + is_unsigned_type(typeof(x)) ? \ + (x) > type_max(typeof(T)) : \ + is_unsigned_type(typeof(T)) ? \ + (x) < 0 || (x) > type_max(typeof(T)) : \ + (x) < type_min(typeof(T)) || (x) > type_max(typeof(T))) + +#define __overflows_type(x, T) ({ \ + typeof(T) v = 0; \ + check_add_overflow((x), v, &v); \ +}) + +/** + * overflows_type - helper for checking the overflows between value, variables, + * or data type + * + * @n: source constant value or variable to be checked + * @T: destination variable or data type proposed to store @x + * + * Compares the @x expression for whether or not it can safely fit in + * the storage of the type in @T. @x and @T can have different types. + * If @x is a constant expression, this will also resolve to a constant + * expression. + * + * Returns: true if overflow can occur, false otherwise. + */ +#define overflows_type(n, T) \ + __builtin_choose_expr(__is_constexpr(n), \ + __overflows_type_constexpr(n, T), \ + __overflows_type(n, T)) + +/** + * castable_to_type - like __same_type(), but also allows for casted literals + * + * @n: variable or constant value + * @T: variable or data type + * + * Unlike the __same_type() macro, this allows a constant value as the + * first argument. If this value would not overflow into an assignment + * of the second argument's type, it returns true. Otherwise, this falls + * back to __same_type(). 
+ */ +#define castable_to_type(n, T) \ + __builtin_choose_expr(__is_constexpr(n), \ + !__overflows_type_constexpr(n, T), \ + __same_type(n, T)) + /** * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX * @factor1: first factor diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 9aec9fd8c50b..69e93a0c1277 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -132,8 +132,9 @@ enum pageflags { PG_young, PG_idle, #endif -#ifdef CONFIG_64BIT +#ifdef CONFIG_ARCH_USES_PG_ARCH_X PG_arch_2, + PG_arch_3, #endif #ifdef CONFIG_KASAN_HW_TAGS PG_skip_kasan_poison, diff --git a/include/linux/panic.h b/include/linux/panic.h index c7759b3f2045..979b776e3bcb 100644 --- a/include/linux/panic.h +++ b/include/linux/panic.h @@ -11,6 +11,7 @@ extern long (*panic_blink)(int state); __printf(1, 2) void panic(const char *fmt, ...) __noreturn __cold; void nmi_panic(struct pt_regs *regs, const char *msg); +void check_panic_on_warn(const char *origin); extern void oops_enter(void); extern void oops_exit(void); extern bool oops_may_print(void); diff --git a/include/linux/pci.h b/include/linux/pci.h index c0d939f3169c..adffd65e84b4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1760,6 +1760,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) { return 0; } #endif int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); +void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent); #endif /* Some architectures require additional setup to direct VGA traffic */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index dfabd549d2e7..1159b25b0542 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -309,24 +309,28 @@ static inline void ptep_clear(struct mm_struct *mm, unsigned long addr, ptep_get_and_clear(mm, addr, ptep); } -#ifndef __HAVE_ARCH_PTEP_GET +#ifndef ptep_get static inline pte_t ptep_get(pte_t *ptep) { return READ_ONCE(*ptep); } #endif -#ifdef CONFIG_GUP_GET_PTE_LOW_HIGH +#ifndef pmdp_get +static inline pmd_t pmdp_get(pmd_t *pmdp) +{ + return READ_ONCE(*pmdp); +} +#endif + +#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH /* - * WARNING: only to be used in the get_user_pages_fast() implementation. - * - * With get_user_pages_fast(), we walk down the pagetables without taking any - * locks. For this we would like to load the pointers atomically, but sometimes - * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What - * we do have is the guarantee that a PTE will only either go from not present - * to present, or present to not present or both -- it will not switch to a - * completely different present page without a TLB flush in between; something - * that we are blocking by holding interrupts off. + * For walking the pagetables without holding any locks. Some architectures + * (e.g. x86-32 PAE) cannot load the entries atomically without using expensive + * instructions. We are guaranteed that a PTE will only either go from not + * present to present, or present to not present -- it will not switch to a + * completely different present page without a TLB flush in between, which we + * block by holding interrupts off. 
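To make the semantics of overflows_type() and castable_to_type() concrete, a few self-contained checks; the values are picked purely for illustration.

#include <linux/build_bug.h>
#include <linux/overflow.h>
#include <linux/printk.h>
#include <linux/types.h>

static void overflow_helpers_demo(u32 runtime_val)
{
	/* Constant expressions resolve at compile time. */
	BUILD_BUG_ON(!overflows_type(300, u8));		/* 300 > 255: overflows */
	BUILD_BUG_ON(overflows_type(-1, s8));		/* -1 fits in s8: no overflow */
	BUILD_BUG_ON(!castable_to_type(200, u8));	/* literal 200 fits in u8 */
	BUILD_BUG_ON(castable_to_type(1024, u8));	/* 1024 does not */

	/* Non-constants fall back to a runtime check_add_overflow() probe. */
	if (overflows_type(runtime_val, u16))
		pr_warn("0x%x would not fit in a u16\n", runtime_val);
}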
* * Setting ptes from not present to present goes: * @@ -361,15 +365,42 @@ static inline pte_t ptep_get_lockless(pte_t *ptep) return pte; } -#else /* CONFIG_GUP_GET_PTE_LOW_HIGH */ +#define ptep_get_lockless ptep_get_lockless + +#if CONFIG_PGTABLE_LEVELS > 2 +static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) +{ + pmd_t pmd; + + do { + pmd.pmd_low = pmdp->pmd_low; + smp_rmb(); + pmd.pmd_high = pmdp->pmd_high; + smp_rmb(); + } while (unlikely(pmd.pmd_low != pmdp->pmd_low)); + + return pmd; +} +#define pmdp_get_lockless pmdp_get_lockless +#endif /* CONFIG_PGTABLE_LEVELS > 2 */ +#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */ + /* * We require that the PTE can be read atomically. */ +#ifndef ptep_get_lockless static inline pte_t ptep_get_lockless(pte_t *ptep) { return ptep_get(ptep); } -#endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */ +#endif + +#ifndef pmdp_get_lockless +static inline pmd_t pmdp_get_lockless(pmd_t *pmdp) +{ + return pmdp_get(pmdp); +} +#endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR @@ -1313,18 +1344,6 @@ static inline int pud_trans_unstable(pud_t *pud) #endif } -#ifndef pmd_read_atomic -static inline pmd_t pmd_read_atomic(pmd_t *pmdp) -{ - /* - * Depend on compiler for an atomic pmd read. NOTE: this is - * only going to work, if the pmdval_t isn't larger than - * an unsigned long. - */ - return *pmdp; -} -#endif - #ifndef arch_needs_pgtable_deposit #define arch_needs_pgtable_deposit() (false) #endif @@ -1351,13 +1370,13 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp) */ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { - pmd_t pmdval = pmd_read_atomic(pmd); + pmd_t pmdval = pmdp_get_lockless(pmd); /* * The barrier will stabilize the pmdval in a register or on * the stack so that it will stop changing under the code. * * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, - * pmd_read_atomic is allowed to return a not atomic pmdval + * pmdp_get_lockless is allowed to return a not atomic pmdval * (for example pointing to an hugepage that has never been * mapped in the pmd). The below checks will only care about * the low part of the pmd with 32bit PAE x86 anyway, with the diff --git a/include/linux/prandom.h b/include/linux/prandom.h index c94c02ba065c..f2ed5b72b3d6 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -24,12 +24,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state); #define prandom_init_once(pcpu_state) \ DO_ONCE(prandom_seed_full_state, (pcpu_state)) -/* Deprecated: use get_random_u32_below() instead. 
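The net effect of the pgtable.h rework above is that pmdp_get_lockless() replaces pmd_read_atomic() as the single way to snapshot a pmd without the page-table lock, with the 32-bit PAE low/high retry loop hidden behind it. A hedged sketch of a lockless-walker call site, under the same contract as gup_fast (the caller has interrupts disabled):

#include <linux/pgtable.h>

/* Sketch only: snapshot a pmd entry outside the page-table lock. */
static bool walker_pmd_is_thp(pmd_t *pmdp)
{
	pmd_t pmd = pmdp_get_lockless(pmdp);

	/* The snapshot is tear-free even on 32-bit PAE, so both
	 * predicates see one consistent value. */
	return pmd_present(pmd) && pmd_trans_huge(pmd);
}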
*/ -static inline u32 prandom_u32_max(u32 ep_ro) -{ - return get_random_u32_below(ep_ro); -} - /* * Handle minimum values for seeds */ diff --git a/include/linux/property.h b/include/linux/property.h index 67371c963134..37179e3abad5 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -12,6 +12,7 @@ #include <linux/bits.h> #include <linux/fwnode.h> +#include <linux/stddef.h> #include <linux/types.h> struct device; @@ -32,7 +33,12 @@ enum dev_dma_attr { DEV_DMA_COHERENT, }; -struct fwnode_handle *dev_fwnode(const struct device *dev); +const struct fwnode_handle *__dev_fwnode_const(const struct device *dev); +struct fwnode_handle *__dev_fwnode(struct device *dev); +#define dev_fwnode(dev) \ + _Generic((dev), \ + const struct device *: __dev_fwnode_const, \ + struct device *: __dev_fwnode)(dev) bool device_property_present(struct device *dev, const char *propname); int device_property_read_u8_array(struct device *dev, const char *propname, @@ -117,16 +123,16 @@ struct fwnode_handle *fwnode_get_next_available_child_node( for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ child = fwnode_get_next_available_child_node(fwnode, child)) -struct fwnode_handle *device_get_next_child_node( - struct device *dev, struct fwnode_handle *child); +struct fwnode_handle *device_get_next_child_node(const struct device *dev, + struct fwnode_handle *child); #define device_for_each_child_node(dev, child) \ for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) -struct fwnode_handle *fwnode_get_named_child_node( - const struct fwnode_handle *fwnode, const char *childname); -struct fwnode_handle *device_get_named_child_node(struct device *dev, +struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname); +struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname); struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); @@ -135,7 +141,7 @@ void fwnode_handle_put(struct fwnode_handle *fwnode); int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); -unsigned int device_get_child_node_count(struct device *dev); +unsigned int device_get_child_node_count(const struct device *dev); static inline bool device_property_read_bool(struct device *dev, const char *propname) @@ -306,24 +312,14 @@ struct property_entry { * crafted to avoid gcc-4.4.4's problems with initialization of anon unions * and structs. 
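The dev_fwnode() change above is an instance of the const-preserving accessor pattern: _Generic() dispatches on the argument's constness, so a const struct device yields a const fwnode while a mutable one stays mutable. A standalone miniature of the same trick, with invented names:

struct device;
struct widget;

const struct widget *widget_peek_const(const struct device *dev);
struct widget *widget_peek(struct device *dev);

/* Callers get back exactly the constness they passed in. */
#define widget_of(dev)						\
	_Generic((dev),						\
		 const struct device *: widget_peek_const,	\
		 struct device *: widget_peek)(dev)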
*/ - -#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \ - sizeof(((struct property_entry *)NULL)->value._elem_[0]) - -#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \ - _val_, _len_) \ -(struct property_entry) { \ - .name = _name_, \ - .length = (_len_) * (_elsize_), \ - .type = DEV_PROP_##_Type_, \ - { .pointer = _val_ }, \ +#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_) \ +(struct property_entry) { \ + .name = _name_, \ + .length = (_len_) * sizeof_field(struct property_entry, value._elem_[0]), \ + .type = DEV_PROP_##_Type_, \ + { .pointer = _val_ }, \ } -#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\ - __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ - __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ - _Type_, _val_, _len_) - #define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_) #define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \ @@ -334,10 +330,14 @@ struct property_entry { __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_) #define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \ __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_) + #define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \ - __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \ - sizeof(struct software_node_ref_args), \ - REF, _val_, _len_) +(struct property_entry) { \ + .name = _name_, \ + .length = (_len_) * sizeof(struct software_node_ref_args), \ + .type = DEV_PROP_REF, \ + { .pointer = _val_ }, \ +} #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) @@ -349,13 +349,13 @@ struct property_entry { PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) -#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \ +#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \ PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_)) #define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \ (struct property_entry) { \ .name = _name_, \ - .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \ + .length = sizeof_field(struct property_entry, value._elem_[0]), \ .is_inline = true, \ .type = DEV_PROP_##_Type_, \ { .value = { ._elem_[0] = _val_ } }, \ @@ -372,12 +372,6 @@ struct property_entry { #define PROPERTY_ENTRY_STRING(_name_, _val_) \ __PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_) -#define PROPERTY_ENTRY_BOOL(_name_) \ -(struct property_entry) { \ - .name = _name_, \ - .is_inline = true, \ -} - #define PROPERTY_ENTRY_REF(_name_, _ref_, ...) 
\ (struct property_entry) { \ .name = _name_, \ @@ -386,14 +380,18 @@ struct property_entry { { .pointer = &SOFTWARE_NODE_REFERENCE(_ref_, ##__VA_ARGS__), }, \ } +#define PROPERTY_ENTRY_BOOL(_name_) \ +(struct property_entry) { \ + .name = _name_, \ + .is_inline = true, \ +} + struct property_entry * property_entries_dup(const struct property_entry *properties); - void property_entries_free(const struct property_entry *properties); -bool device_dma_supported(struct device *dev); - -enum dev_dma_attr device_get_dma_attr(struct device *dev); +bool device_dma_supported(const struct device *dev); +enum dev_dma_attr device_get_dma_attr(const struct device *dev); const void *device_get_match_data(const struct device *dev); @@ -413,7 +411,7 @@ struct fwnode_handle *fwnode_graph_get_remote_port( struct fwnode_handle *fwnode_graph_get_remote_endpoint( const struct fwnode_handle *fwnode); -static inline bool fwnode_graph_is_endpoint(struct fwnode_handle *fwnode) +static inline bool fwnode_graph_is_endpoint(const struct fwnode_handle *fwnode) { return fwnode_property_present(fwnode, "remote-endpoint"); } @@ -445,21 +443,21 @@ unsigned int fwnode_graph_get_endpoint_count(struct fwnode_handle *fwnode, int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint); -typedef void *(*devcon_match_fn_t)(struct fwnode_handle *fwnode, const char *id, +typedef void *(*devcon_match_fn_t)(const struct fwnode_handle *fwnode, const char *id, void *data); -void *fwnode_connection_find_match(struct fwnode_handle *fwnode, +void *fwnode_connection_find_match(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match); -static inline void *device_connection_find_match(struct device *dev, +static inline void *device_connection_find_match(const struct device *dev, const char *con_id, void *data, devcon_match_fn_t match) { return fwnode_connection_find_match(dev_fwnode(dev), con_id, data, match); } -int fwnode_connection_find_matches(struct fwnode_handle *fwnode, +int fwnode_connection_find_matches(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match, void **matches, unsigned int matches_len); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index bba492eea96c..161e91167b9c 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -276,8 +276,8 @@ struct pwm_ops { struct pwm_capture *result, unsigned long timeout); int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state); - void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, - struct pwm_state *state); + int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, + struct pwm_state *state); struct module *owner; }; diff --git a/include/linux/random.h b/include/linux/random.h index 4a2a1de423cd..b0a940af4fff 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -152,8 +152,6 @@ declare_get_random_var_wait(long, unsigned long) */ #include <linux/prandom.h> -#include <asm/archrandom.h> - #ifdef CONFIG_SMP int random_prepare_cpu(unsigned int cpu); int random_online_cpu(unsigned int cpu); diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 3c7d295746f6..782e14f62201 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -113,8 +113,7 @@ void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val); struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length); -int ring_buffer_unlock_commit(struct 
trace_buffer *buffer, - struct ring_buffer_event *event); +int ring_buffer_unlock_commit(struct trace_buffer *buffer); int ring_buffer_write(struct trace_buffer *buffer, unsigned long length, void *data); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index d6c48163c6de..357e0068497c 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -65,6 +65,7 @@ extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); void __noreturn make_task_dead(int signr); +extern void mm_cache_init(void); extern void proc_caches_init(void); extern void fork_init(void); @@ -90,7 +91,6 @@ extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); struct task_struct *fork_idle(int); -struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index f054d0360a75..4cc52698e214 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -25,7 +25,7 @@ struct user_struct { #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \ defined(CONFIG_NET) || defined(CONFIG_IO_URING) || \ - defined(CONFIG_VFIO_PCI_ZDEV_KVM) + defined(CONFIG_VFIO_PCI_ZDEV_KVM) || IS_ENABLED(CONFIG_IOMMUFD) atomic_long_t locked_vm; #endif #ifdef CONFIG_WATCH_QUEUE diff --git a/include/linux/serdev.h b/include/linux/serdev.h index 66f624fc618c..5f6bfe4f6d95 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -7,6 +7,7 @@ #include <linux/types.h> #include <linux/device.h> +#include <linux/iopoll.h> #include <linux/uaccess.h> #include <linux/termios.h> #include <linux/delay.h> @@ -279,18 +280,10 @@ static inline bool serdev_device_get_cts(struct serdev_device *serdev) static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms) { - unsigned long timeout; bool signal; - timeout = jiffies + msecs_to_jiffies(timeout_ms); - while (time_is_after_jiffies(timeout)) { - signal = serdev_device_get_cts(serdev); - if (signal == state) - return 0; - usleep_range(1000, 2000); - } - - return -ETIMEDOUT; + return readx_poll_timeout(serdev_device_get_cts, serdev, signal, signal == state, + 2000, timeout_ms * 1000); } static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable) diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 91871464b99d..fd59f600094a 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -664,6 +664,86 @@ struct uart_driver { void uart_write_wakeup(struct uart_port *port); +#define __uart_port_tx(uport, ch, tx_ready, put_char, tx_done, for_test, \ + for_post) \ +({ \ + struct uart_port *__port = (uport); \ + struct circ_buf *xmit = &__port->state->xmit; \ + unsigned int pending; \ + \ + for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \ + if (__port->x_char) { \ + (ch) = __port->x_char; \ + (put_char); \ + __port->x_char = 0; \ + continue; \ + } \ + \ + if (uart_circ_empty(xmit) || uart_tx_stopped(__port)) \ + break; \ + \ + (ch) = xmit->buf[xmit->tail]; \ + (put_char); \ + xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; \ + } \ + \ + (tx_done); \ + \ + pending = uart_circ_chars_pending(xmit); \ + if 
(pending < WAKEUP_CHARS) { \ + uart_write_wakeup(__port); \ + \ + if (pending == 0) \ + __port->ops->stop_tx(__port); \ + } \ + \ + pending; \ +}) + +/** + * uart_port_tx_limited -- transmit helper for uart_port with count limiting + * @port: uart port + * @ch: variable to store a character to be written to the HW + * @count: a limit of characters to send + * @tx_ready: can HW accept more data function + * @put_char: function to write a character + * @tx_done: function to call after the loop is done + * + * This helper transmits characters from the xmit buffer to the hardware using + * @put_char(). It does so until @count characters are sent and while @tx_ready + * evaluates to true. + * + * Returns: the number of characters in the xmit buffer when done. + * + * The expression in macro parameters shall be designed as follows: + * * **tx_ready:** should evaluate to true if the HW can accept more data to + * be sent. This parameter can be %true, which means the HW is always ready. + * * **put_char:** shall write @ch to the device of @port. + * * **tx_done:** when the write loop is done, this can perform arbitrary + * action before potential invocation of ops->stop_tx() happens. If the + * driver does not need to do anything, use e.g. ({}). + * + * For all of them, @port->lock is held, interrupts are locally disabled and + * the expressions must not sleep. + */ +#define uart_port_tx_limited(port, ch, count, tx_ready, put_char, tx_done) ({ \ + unsigned int __count = (count); \ + __uart_port_tx(port, ch, tx_ready, put_char, tx_done, __count, \ + __count--); \ +}) + +/** + * uart_port_tx -- transmit helper for uart_port + * @port: uart port + * @ch: variable to store a character to be written to the HW + * @tx_ready: can HW accept more data function + * @put_char: function to write a character + * + * See uart_port_tx_limited() for more details. + */ +#define uart_port_tx(port, ch, tx_ready, put_char) \ + __uart_port_tx(port, ch, tx_ready, put_char, ({}), true, ({})) + /* * Baud rate helpers. */ diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 369769ce7399..95ac8398ee72 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -14,6 +14,16 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif +#ifndef set_memory_rox +static inline int set_memory_rox(unsigned long addr, int numpages) +{ + int ret = set_memory_ro(addr, numpages); + if (ret) + return ret; + return set_memory_x(addr, numpages); +} +#endif + #ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP static inline int set_direct_map_invalid_noflush(struct page *page) { diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index d2f581feed67..91f0dc564fe5 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -233,6 +233,7 @@ struct sdw_intel_ctx { * struct sdw_intel_res - Soundwire Intel global resource structure, * typically populated by the DSP driver * + * @hw_ops: abstraction for platform ops * @count: link count * @mmio_base: mmio base of SoundWire registers * @irq: interrupt number @@ -249,6 +250,7 @@ struct sdw_intel_ctx { * @alh_base: sdw alh base. 
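Following the contract in the kernel-doc above, a driver's TX path collapses to one macro invocation. In this hedged sketch the FOO_* register layout is invented; only the uart_port_tx() usage pattern is the point.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/serial_core.h>

/* Invented register layout, illustration only. */
#define FOO_TX_FIFO	0x00
#define FOO_STAT	0x04
#define FOO_STAT_TX_RDY	BIT(0)

static void foo_uart_tx_chars(struct uart_port *port)
{
	unsigned char ch;

	/* port->lock is held with interrupts off, per the doc above. */
	uart_port_tx(port, ch,
		readb(port->membase + FOO_STAT) & FOO_STAT_TX_RDY,
		writeb(ch, port->membase + FOO_TX_FIFO));
}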
 */ struct sdw_intel_res { + const struct sdw_intel_hw_ops *hw_ops; int count; void __iomem *mmio_base; int irq; @@ -290,4 +292,46 @@ irqreturn_t sdw_intel_thread(int irq, void *dev_id); #define SDW_INTEL_QUIRK_MASK_BUS_DISABLE BIT(1) +struct sdw_intel; + +/* struct sdw_intel_hw_ops - SoundWire ops for Intel platforms. + * @debugfs_init: initialize all debugfs capabilities + * @debugfs_exit: close and cleanup debugfs capabilities + * @register_dai: read all PDI information and register DAIs + * @check_clock_stop: throw error message if clock is not stopped. + * @start_bus: normal start + * @start_bus_after_reset: start after reset + * @start_bus_after_clock_stop: start after mode0 clock stop + * @stop_bus: stop all bus + * @link_power_up: power-up using chip-specific helpers + * @link_power_down: power-down with chip-specific helpers + * @shim_check_wake: check if a wake was received + * @shim_wake: enable/disable in-band wake management + * @pre_bank_switch: helper for bus management + * @post_bank_switch: helper for bus management + */ +struct sdw_intel_hw_ops { + void (*debugfs_init)(struct sdw_intel *sdw); + void (*debugfs_exit)(struct sdw_intel *sdw); + + int (*register_dai)(struct sdw_intel *sdw); + + void (*check_clock_stop)(struct sdw_intel *sdw); + int (*start_bus)(struct sdw_intel *sdw); + int (*start_bus_after_reset)(struct sdw_intel *sdw); + int (*start_bus_after_clock_stop)(struct sdw_intel *sdw); + int (*stop_bus)(struct sdw_intel *sdw, bool clock_stop); + + int (*link_power_up)(struct sdw_intel *sdw); + int (*link_power_down)(struct sdw_intel *sdw); + + int (*shim_check_wake)(struct sdw_intel *sdw); + void (*shim_wake)(struct sdw_intel *sdw, bool wake_enable); + + int (*pre_bank_switch)(struct sdw_intel *sdw); + int (*post_bank_switch)(struct sdw_intel *sdw); +}; + +extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops; + #endif diff --git a/include/linux/static_call.h b/include/linux/static_call.h index df53bed9d71f..141e6b176a1b 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -162,6 +162,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool extern int __init static_call_init(void); +extern void static_call_force_reinit(void); + struct static_call_mod { struct static_call_mod *next; struct module *mod; /* for vmlinux, mod == NULL */ diff --git a/include/linux/string.h b/include/linux/string.h index cf7607b32102..db28802ab0a6 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -176,7 +176,7 @@ extern void kfree_const(const void *x); extern char *kstrdup(const char *s, gfp_t gfp) __malloc; extern const char *kstrdup_const(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); -extern void *kmemdup(const void *src, size_t len, gfp_t gfp); +extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); extern char **argv_split(gfp_t gfp, const char *str, int *argcp); diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 20749bd9db71..4342e996bcdb 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -136,7 +136,6 @@ struct trace_event_functions { struct trace_event { struct hlist_node node; - struct list_head list; int type; struct trace_event_functions *funcs; }; @@ -235,7 +234,8 @@ void tracing_record_taskinfo_sched_switch(struct task_struct *prev, void tracing_record_cmdline(struct task_struct *task); void tracing_record_tgid(struct 
task_struct *task); -int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...); +int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) + __printf(3, 4); struct event_filter; diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 5a2c650d9e1c..0c4c7587d6c3 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -97,7 +97,8 @@ extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str, const void *buf, size_t len, bool ascii); #else /* CONFIG_TRACING */ -static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) +static inline __printf(2, 3) +void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) { } static inline void diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h index 1796648c2907..6ceb2789e6c8 100644 --- a/include/linux/tty_buffer.h +++ b/include/linux/tty_buffer.h @@ -17,14 +17,11 @@ struct tty_buffer { int commit; int lookahead; /* Lazy update on recv, can become less than "read" */ int read; - int flags; + bool flags; /* Data points here */ unsigned long data[]; }; -/* Values for .flags field of tty_buffer */ -#define TTYB_NORMAL 1 /* buffer has no flags buffer */ - static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) { return ((unsigned char *)b->data) + ofs; diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 483d41cbcbb7..bfaaeee61a05 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -25,9 +25,9 @@ static inline int tty_insert_flip_char(struct tty_port *port, struct tty_buffer *tb = port->buf.tail; int change; - change = (tb->flags & TTYB_NORMAL) && (flag != TTY_NORMAL); + change = !tb->flags && (flag != TTY_NORMAL); if (!change && tb->used < tb->size) { - if (~tb->flags & TTYB_NORMAL) + if (tb->flags) *flag_buf_ptr(tb, tb->used) = flag; *char_buf_ptr(tb, tb->used++) = ch; return 1; diff --git a/include/linux/usb.h b/include/linux/usb.h index d2d2f41052c0..7d5325d47c45 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -258,13 +258,26 @@ struct usb_interface { struct device *usb_dev; struct work_struct reset_ws; /* for resets in atomic context */ }; -#define to_usb_interface(d) container_of(d, struct usb_interface, dev) + +#define to_usb_interface(__dev) container_of_const(__dev, struct usb_interface, dev) static inline void *usb_get_intfdata(struct usb_interface *intf) { return dev_get_drvdata(&intf->dev); } +/** + * usb_set_intfdata() - associate driver-specific data with the interface + * @intf: the usb interface + * @data: pointer to the device priv structure or %NULL + * + * Drivers should use this function in their probe() to associate their + * driver-specific data with the usb interface. + * + * When disconnecting, the core will take care of setting @intf back to %NULL, + * so no actions are needed on the driver side. The interface should not be set + * to %NULL before all actions have completed (e.g. no outstanding URB remaining). 
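The probe/disconnect pairing this documentation implies, as a hedged sketch; foo_priv and the callbacks are invented:

#include <linux/slab.h>
#include <linux/usb.h>

struct foo_priv { int state; };	/* placeholder private data */

static int foo_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	usb_set_intfdata(intf, priv);
	return 0;
}

static void foo_disconnect(struct usb_interface *intf)
{
	struct foo_priv *priv = usb_get_intfdata(intf);

	/* Outstanding URBs must be killed before this returns; the core
	 * then resets intfdata to NULL itself, so the driver does not. */
	kfree(priv);
}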
+ */ static inline void usb_set_intfdata(struct usb_interface *intf, void *data) { dev_set_drvdata(&intf->dev, data); @@ -709,12 +722,22 @@ struct usb_device { u16 hub_delay; unsigned use_generic_driver:1; }; -#define to_usb_device(d) container_of(d, struct usb_device, dev) -static inline struct usb_device *interface_to_usbdev(struct usb_interface *intf) +#define to_usb_device(__dev) container_of_const(__dev, struct usb_device, dev) + +static inline struct usb_device *__intf_to_usbdev(struct usb_interface *intf) { return to_usb_device(intf->dev.parent); } +static inline const struct usb_device *__intf_to_usbdev_const(const struct usb_interface *intf) +{ + return to_usb_device((const struct device *)intf->dev.parent); +} + +#define interface_to_usbdev(intf) \ + _Generic((intf), \ + const struct usb_interface *: __intf_to_usbdev_const, \ + struct usb_interface *: __intf_to_usbdev)(intf) extern struct usb_device *usb_get_dev(struct usb_device *dev); extern void usb_put_dev(struct usb_device *dev); @@ -1272,7 +1295,7 @@ struct usb_device_driver { */ struct usb_class_driver { char *name; - char *(*devnode)(struct device *dev, umode_t *mode); + char *(*devnode)(const struct device *dev, umode_t *mode); const struct file_operations *fops; int minor_base; }; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 78cd566ee238..b51c07111729 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -269,6 +269,9 @@ struct hc_driver { /* called after entering D0 (etc), before resuming the hub */ int (*pci_resume)(struct usb_hcd *hcd, bool hibernated); + /* called just before hibernate final D3 state, allows host to poweroff parts */ + int (*pci_poweroff_late)(struct usb_hcd *hcd, bool do_wakeup); + /* cleanly make HCD stop writing memory and doing I/O */ void (*stop) (struct usb_hcd *hcd); diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 7751bedcae5d..8fa781207970 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -23,6 +23,7 @@ struct fwnode_handle; struct device; struct usb_power_delivery; +struct usb_power_delivery_desc; enum typec_port_type { TYPEC_PORT_SRC, @@ -327,6 +328,9 @@ void typec_partner_set_svdm_version(struct typec_partner *partner, enum usb_pd_svdm_ver svdm_version); int typec_get_negotiated_svdm_version(struct typec_port *port); +struct usb_power_delivery *typec_partner_usb_power_delivery_register(struct typec_partner *partner, + struct usb_power_delivery_desc *desc); + int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_delivery *pd); int typec_partner_set_usb_power_delivery(struct typec_partner *partner, struct usb_power_delivery *pd); diff --git a/include/linux/vfio.h b/include/linux/vfio.h index fdd393f70b19..35be78e9ae57 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -17,6 +17,9 @@ #include <linux/iova_bitmap.h> struct kvm; +struct iommufd_ctx; +struct iommufd_device; +struct iommufd_access; /* * VFIO devices can be placed in a set, this allows all devices to share this @@ -54,6 +57,12 @@ struct vfio_device { struct completion comp; struct list_head group_next; struct list_head iommu_entry; + struct iommufd_access *iommufd_access; +#if IS_ENABLED(CONFIG_IOMMUFD) + struct iommufd_device *iommufd_device; + struct iommufd_ctx *iommufd_ictx; + bool iommufd_attached; +#endif }; /** @@ -80,6 +89,10 @@ struct vfio_device_ops { char *name; int (*init)(struct vfio_device *vdev); void (*release)(struct vfio_device *vdev); + int (*bind_iommufd)(struct vfio_device *vdev, + 
struct iommufd_ctx *ictx, u32 *out_device_id); + void (*unbind_iommufd)(struct vfio_device *vdev); + int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id); int (*open_device)(struct vfio_device *vdev); void (*close_device)(struct vfio_device *vdev); ssize_t (*read)(struct vfio_device *vdev, char __user *buf, @@ -96,6 +109,32 @@ struct vfio_device_ops { void __user *arg, size_t argsz); }; +#if IS_ENABLED(CONFIG_IOMMUFD) +int vfio_iommufd_physical_bind(struct vfio_device *vdev, + struct iommufd_ctx *ictx, u32 *out_device_id); +void vfio_iommufd_physical_unbind(struct vfio_device *vdev); +int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id); +int vfio_iommufd_emulated_bind(struct vfio_device *vdev, + struct iommufd_ctx *ictx, u32 *out_device_id); +void vfio_iommufd_emulated_unbind(struct vfio_device *vdev); +int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id); +#else +#define vfio_iommufd_physical_bind \ + ((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \ + u32 *out_device_id)) NULL) +#define vfio_iommufd_physical_unbind \ + ((void (*)(struct vfio_device *vdev)) NULL) +#define vfio_iommufd_physical_attach_ioas \ + ((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL) +#define vfio_iommufd_emulated_bind \ + ((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \ + u32 *out_device_id)) NULL) +#define vfio_iommufd_emulated_unbind \ + ((void (*)(struct vfio_device *vdev)) NULL) +#define vfio_iommufd_emulated_attach_ioas \ + ((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL) +#endif + /** * @migration_set_state: Optional callback to change the migration state for * devices that support migration. It's mandatory for @@ -107,6 +146,9 @@ struct vfio_device_ops { * @migration_get_state: Optional callback to get the migration state for * devices that support migration. It's mandatory for * VFIO_DEVICE_FEATURE_MIGRATION migration support. + * @migration_get_data_size: Optional callback to get the estimated data + * length that will be required to complete stop copy. It's mandatory for + * VFIO_DEVICE_FEATURE_MIGRATION migration support. 
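In practice a physical VFIO driver wires the three helpers above straight into its ops table; with CONFIG_IOMMUFD disabled, the cast-macro fallbacks make the same initializers evaluate to NULL. A sketch with placeholder foo_* callbacks:

static int foo_open_device(struct vfio_device *vdev);
static void foo_close_device(struct vfio_device *vdev);

static const struct vfio_device_ops foo_vfio_ops = {
	.name		= "foo-vfio",
	.open_device	= foo_open_device,
	.close_device	= foo_close_device,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};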
*/ struct vfio_migration_ops { struct file *(*migration_set_state)( @@ -114,6 +156,8 @@ struct vfio_migration_ops { enum vfio_device_mig_state new_state); int (*migration_get_state)(struct vfio_device *device, enum vfio_device_mig_state *curr_state); + int (*migration_get_data_size)(struct vfio_device *device, + unsigned long *stop_copy_length); }; /** @@ -176,9 +220,6 @@ struct vfio_device *_vfio_alloc_device(size_t size, struct device *dev, dev, ops), \ struct dev_struct, member) -int vfio_init_device(struct vfio_device *device, struct device *dev, - const struct vfio_device_ops *ops); -void vfio_free_device(struct vfio_device *device); static inline void vfio_put_device(struct vfio_device *device) { put_device(&device->device); @@ -232,29 +273,6 @@ int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs, int max_irq_type, size_t *data_size); -struct pci_dev; -#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH) -void vfio_spapr_pci_eeh_open(struct pci_dev *pdev); -void vfio_spapr_pci_eeh_release(struct pci_dev *pdev); -long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, unsigned int cmd, - unsigned long arg); -#else -static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev) -{ -} - -static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev) -{ -} - -static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group, - unsigned int cmd, - unsigned long arg) -{ - return -ENOTTY; -} -#endif /* CONFIG_VFIO_SPAPR_EEH */ - /* * IRQfd - generic */ diff --git a/include/linux/zstd_lib.h b/include/linux/zstd_lib.h index b8c7dbf98390..79d55465d5c1 100644 --- a/include/linux/zstd_lib.h +++ b/include/linux/zstd_lib.h @@ -17,8 +17,16 @@ /* ===== ZSTDLIB_API : control library symbols visibility ===== */ -#define ZSTDLIB_VISIBILITY -#define ZSTDLIB_API ZSTDLIB_VISIBILITY +#ifndef ZSTDLIB_VISIBLE +# if (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default"))) +# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) +# else +# define ZSTDLIB_VISIBLE +# define ZSTDLIB_HIDDEN +# endif +#endif +#define ZSTDLIB_API ZSTDLIB_VISIBLE /* ***************************************************************************** @@ -56,8 +64,8 @@ /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 -#define ZSTD_VERSION_MINOR 4 -#define ZSTD_VERSION_RELEASE 10 +#define ZSTD_VERSION_MINOR 5 +#define ZSTD_VERSION_RELEASE 2 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /*! ZSTD_versionNumber() : @@ -94,7 +102,6 @@ ZSTDLIB_API const char* ZSTD_versionString(void); #define ZSTD_BLOCKSIZE_MAX (1<<ZSTD_BLOCKSIZELOG_MAX) - /* ************************************* * Simple API ***************************************/ @@ -151,7 +158,7 @@ ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t * @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */ ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize); -/*! ZSTD_findFrameCompressedSize() : +/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+ * `src` should point to the start of a ZSTD frame or skippable frame. 
* `srcSize` must be >= first frame size * @return : the compressed size of the first frame starting at `src`, @@ -165,8 +172,9 @@ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize) ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */ ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */ ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */ -ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed */ +ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */ ZSTDLIB_API int ZSTD_maxCLevel(void); /*!< maximum compression level available */ +ZSTDLIB_API int ZSTD_defaultCLevel(void); /*!< default compression level, specified by ZSTD_CLEVEL_DEFAULT, requires v1.5.0+ */ /* ************************************* @@ -219,9 +227,9 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, const void* src, size_t srcSize); -/* ************************************* -* Advanced compression API -***************************************/ +/* ******************************************* +* Advanced compression API (Requires v1.4.0+) +**********************************************/ /* API design : * Parameters are pushed one by one into an existing context, @@ -232,7 +240,7 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, * * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset(). * - * This API supercedes all other "advanced" API entry points in the experimental section. + * This API supersedes all other "advanced" API entry points in the experimental section. * In the future, we expect to remove from experimental API entry points which are redundant with this API. */ @@ -251,7 +259,6 @@ typedef enum { ZSTD_fast=1, Only the order (from fast to strong) is guaranteed */ } ZSTD_strategy; - typedef enum { /* compression parameters @@ -317,7 +324,6 @@ typedef enum { * The higher the value of selected strategy, the more complex it is, * resulting in stronger and slower compression. * Special: value 0 means "use default strategy". */ - /* LDM mode parameters */ ZSTD_c_enableLongDistanceMatching=160, /* Enable long distance matching. * This parameter is designed to improve compression ratio @@ -374,7 +380,7 @@ typedef enum { ZSTD_c_jobSize=401, /* Size of a compression job. This value is enforced only when nbWorkers >= 1. * Each compression job is completed in parallel, so this value can indirectly impact the nb of active threads. * 0 means default, which is dynamically determined based on compression parameters. - * Job size must be a minimum of overlap size, or 1 MB, whichever is largest. + * Job size must be a minimum of overlap size, or ZSTDMT_JOBSIZE_MIN (= 512 KB), whichever is largest. * The minimum size is automatically and transparently enforced. */ ZSTD_c_overlapLog=402, /* Control the overlap size, as a fraction of window size. * The overlap size is an amount of data reloaded from previous job at the beginning of a new job. @@ -404,6 +410,8 @@ typedef enum { * ZSTD_c_stableOutBuffer * ZSTD_c_blockDelimiters * ZSTD_c_validateSequences + * ZSTD_c_useBlockSplitter + * ZSTD_c_useRowMatchFinder * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? 
names directly; * also, the enums values themselves are unstable and can still change. @@ -419,7 +427,10 @@ typedef enum { ZSTD_c_experimentalParam9=1006, ZSTD_c_experimentalParam10=1007, ZSTD_c_experimentalParam11=1008, - ZSTD_c_experimentalParam12=1009 + ZSTD_c_experimentalParam12=1009, + ZSTD_c_experimentalParam13=1010, + ZSTD_c_experimentalParam14=1011, + ZSTD_c_experimentalParam15=1012 } ZSTD_cParameter; typedef struct { @@ -504,9 +515,9 @@ ZSTDLIB_API size_t ZSTD_compress2( ZSTD_CCtx* cctx, const void* src, size_t srcSize); -/* ************************************* -* Advanced decompression API -***************************************/ +/* ********************************************* +* Advanced decompression API (Requires v1.4.0+) +************************************************/ /* The advanced API pushes parameters one by one into an existing DCtx context. * Parameters are sticky, and remain valid for all following frames @@ -668,7 +679,7 @@ typedef enum { : note : multithreaded compression will block to flush as much output as possible. */ } ZSTD_EndDirective; -/*! ZSTD_compressStream2() : +/*! ZSTD_compressStream2() : Requires v1.4.0+ * Behaves about the same as ZSTD_compressStream, with additional control on end directive. * - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*() * - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode) @@ -714,11 +725,11 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /*< recommended size for output /* ***************************************************************************** - * This following is a legacy streaming API. + * This following is a legacy streaming API, available since v1.0+ . * It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2(). * It is redundant, but remains fully supported. - * Advanced parameters and dictionary compression can only be used through the - * new API. + * Streaming in combination with advanced parameters and dictionary compression + * can only be used through the new API. ******************************************************************************/ /*! @@ -796,7 +807,7 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void); /*!< recommended size for output /*! ZSTD_compress_usingDict() : * Compression at an explicit compression level using a Dictionary. * A dictionary can be any arbitrary data segment (also called a prefix), - * or a buffer with specified information (see dictBuilder/zdict.h). + * or a buffer with specified information (see zdict.h). * Note : This function loads the dictionary, resulting in significant startup delay. * It's intended for a dictionary used only once. * Note 2 : When `dict == NULL || dictSize < 8` no dictionary is used. */ @@ -879,19 +890,25 @@ ZSTDLIB_API size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, * Dictionary helper functions *******************************/ -/*! ZSTD_getDictID_fromDict() : +/*! ZSTD_getDictID_fromDict() : Requires v1.4.0+ * Provides the dictID stored within dictionary. * if @return == 0, the dictionary is not conformant with Zstandard specification. * It can still be loaded, but as a content-only dictionary. */ ZSTDLIB_API unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize); -/*! ZSTD_getDictID_fromDDict() : +/*! ZSTD_getDictID_fromCDict() : Requires v1.5.0+ + * Provides the dictID of the dictionary loaded into `cdict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. 
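/*
 * [Illustrative sketch, not part of this patch] The v1.4.0+ advanced API
 * described above: parameters are pushed one by one into the CCtx, stay
 * sticky until ZSTD_CCtx_reset(), and ZSTD_compressStream2() is driven with
 * an end directive until it returns 0. Names and values are hypothetical.
 */
static size_t compress_one_shot_streaming(ZSTD_CCtx *cctx,
					  void *dst, size_t dstCapacity,
					  const void *src, size_t srcSize)
{
	ZSTD_inBuffer in = { src, srcSize, 0 };
	ZSTD_outBuffer out = { dst, dstCapacity, 0 };
	size_t remaining;

	ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 1);
	do {
		remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
		if (ZSTD_isError(remaining))
			return remaining;
	} while (remaining != 0);	/* 0 => frame fully flushed */
	return out.pos;			/* compressed size written to dst */
}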
+ * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict); + +/*! ZSTD_getDictID_fromDDict() : Requires v1.4.0+ * Provides the dictID of the dictionary loaded into `ddict`. * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict); -/*! ZSTD_getDictID_fromFrame() : +/*! ZSTD_getDictID_fromFrame() : Requires v1.4.0+ * Provides the dictID required to decompress the frame stored within `src`. * If @return == 0, the dictID could not be decoded. * This could be for one of the following reasons : @@ -905,16 +922,16 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); /* ***************************************************************************** - * Advanced dictionary and prefix API + * Advanced dictionary and prefix API (Requires v1.4.0+) * * This API allows dictionaries to be used with ZSTD_compress2(), - * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and + * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and * only reset when the context is reset with ZSTD_reset_parameters or * ZSTD_reset_session_and_parameters. Prefixes are single-use. ******************************************************************************/ -/*! ZSTD_CCtx_loadDictionary() : +/*! ZSTD_CCtx_loadDictionary() : Requires v1.4.0+ * Create an internal CDict from `dict` buffer. * Decompression will have to use same dictionary. * @result : 0, or an error code (which can be tested with ZSTD_isError()). @@ -933,7 +950,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * to precisely select how dictionary content must be interpreted. */ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); -/*! ZSTD_CCtx_refCDict() : +/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+ * Reference a prepared dictionary, to be used for all next compressed frames. * Note that compression parameters are enforced from within CDict, * and supersede any compression parameter previously set within CCtx. @@ -947,7 +964,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, s * Note 2 : CDict is just referenced, its lifetime must outlive its usage within CCtx. */ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); -/*! ZSTD_CCtx_refPrefix() : +/*! ZSTD_CCtx_refPrefix() : Requires v1.4.0+ * Reference a prefix (single-usage dictionary) for next compressed frame. * A prefix is **only used once**. Tables are discarded at end of frame (ZSTD_e_end). * Decompression will need same prefix to properly regenerate data. @@ -968,7 +985,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize); -/*! ZSTD_DCtx_loadDictionary() : +/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+ * Create an internal DDict from dict buffer, * to be used to decompress next frames. * The dictionary remains valid for all future frames, until explicitly invalidated. @@ -985,7 +1002,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, */ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
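/*
 * [Illustrative sketch, not part of this patch] The sticky-dictionary flow
 * summarized above: load the same dictionary into both contexts, then use the
 * regular v1.4.0+ entry points. Context and buffer names are hypothetical.
 */
static size_t dict_roundtrip(ZSTD_CCtx *cctx, ZSTD_DCtx *dctx,
			     const void *dict, size_t dictSize,
			     void *dst, size_t dstCapacity,
			     const void *src, size_t srcSize)
{
	size_t csize;

	ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
	csize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
	if (ZSTD_isError(csize))
		return csize;
	/* decompression must see the same dictionary */
	ZSTD_DCtx_loadDictionary(dctx, dict, dictSize);
	return csize;	/* follow with ZSTD_decompressDCtx(dctx, ...) */
}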
-/*! ZSTD_DCtx_refDDict() : +/*! ZSTD_DCtx_refDDict() : Requires v1.4.0+ * Reference a prepared dictionary, to be used to decompress next frames. * The dictionary remains active for decompression of future frames using same DCtx. * @@ -1003,7 +1020,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s */ ZSTDLIB_API size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); -/*! ZSTD_DCtx_refPrefix() : +/*! ZSTD_DCtx_refPrefix() : Requires v1.4.0+ * Reference a prefix (single-usage dictionary) to decompress next frame. * This is the reverse operation of ZSTD_CCtx_refPrefix(), * and must use the same prefix as the one used during compression. @@ -1024,7 +1041,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, /* === Memory management === */ -/*! ZSTD_sizeof_*() : +/*! ZSTD_sizeof_*() : Requires v1.4.0+ * These functions give the _current_ memory usage of selected object. * Note that object memory usage can evolve (increase or decrease) over time. */ ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx); @@ -1049,6 +1066,29 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #if !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY +/* This can be overridden externally to hide static symbols. */ +#ifndef ZSTDLIB_STATIC_API +#define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE +#endif + +/* Deprecation warnings : + * Should these warnings be a problem, it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. + * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS. + */ +#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */ +#else +# if (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message))) +# elif (__GNUC__ >= 3) +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated)) +# else +# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API +# endif +#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ + /* ************************************************************************************** * experimental API (static linking only) **************************************************************************************** @@ -1111,9 +1151,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_SRCSIZEHINT_MIN 0 #define ZSTD_SRCSIZEHINT_MAX INT_MAX -/* internal */ -#define ZSTD_HASHLOG3_MAX 17 - /* --- Advanced types --- */ @@ -1255,6 +1292,15 @@ typedef enum { ZSTD_lcm_uncompressed = 2 /*< Always emit uncompressed literals. */ } ZSTD_literalCompressionMode_e; +typedef enum { + /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final + * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable + * or ZSTD_ps_disable allows the feature to be force-enabled or force-disabled.
+ */ + ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */ + ZSTD_ps_enable = 1, /* Force-enable the feature */ + ZSTD_ps_disable = 2 /* Do not use the feature */ +} ZSTD_paramSwitch_e; /* ************************************* * Frame size functions @@ -1281,7 +1327,7 @@ typedef enum { * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to * read each contained frame header. This is fast as most of the data is skipped, * however it does mean that all frame data must be present and valid. */ -ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); /*! ZSTD_decompressBound() : * `src` should point to the start of a series of ZSTD encoded and/or skippable frames @@ -1296,13 +1342,13 @@ ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by: * upper-bound = # blocks * min(128 KB, Window_Size) */ -ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); /*! ZSTD_frameHeaderSize() : * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ -ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); typedef enum { ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ @@ -1325,12 +1371,12 @@ typedef enum { * @return : number of sequences generated */ -ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, +ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize); /*! ZSTD_mergeBlockDelimiters() : * Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals - * by merging them into into the literals of the next sequence. + * by merging them into the literals of the next sequence. * * As such, the final generated result has no explicit representation of block boundaries, * and the final last literals segment is not represented in the sequences. @@ -1339,7 +1385,7 @@ ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters * @return : number of sequences left after merging */ -ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); +ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); /*! ZSTD_compressSequences() : * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. @@ -1369,7 +1415,7 @@ ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t se * and cannot emit an RLE block that disagrees with the repcode history * @return : final compressed size or a ZSTD error. 
*/ -ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, +ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, const ZSTD_Sequence* inSeqs, size_t inSeqsSize, const void* src, size_t srcSize); @@ -1377,7 +1423,7 @@ ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size /*! ZSTD_writeSkippableFrame() : * Generates a zstd skippable frame containing data given by src, and writes it to dst buffer. * - * Skippable frames begin with a a 4-byte magic number. There are 16 possible choices of magic number, + * Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number, * ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15. * As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so * the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant. @@ -1387,9 +1433,29 @@ ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size * * @return : number of bytes written or a ZSTD error. */ -ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, +ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned magicVariant); +/*! ZSTD_readSkippableFrame() : + * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. + * + * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + * in the magicVariant. + * + * Returns an error if destination buffer is not large enough, or if the frame is not skippable. + * + * @return : number of bytes written or a ZSTD error. + */ +ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, + const void* src, size_t srcSize); + +/*! ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + */ +ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); + + /* ************************************* * Memory management @@ -1418,10 +1484,10 @@ ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, * Note 2 : only single-threaded compression is supported. * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. */ -ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void); /*! ZSTD_estimateCStreamSize() : * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. @@ -1436,20 +1502,20 @@ ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), * an internal ?Dict will be created, which additional size is not estimated here. 
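/*
 * [Illustrative sketch, not part of this patch] Round trip for the skippable
 * frame helpers added above: write metadata as a skippable frame, verify the
 * magic, and read it back. The variant value 7 is an arbitrary example.
 */
static size_t skippable_roundtrip(void *frame, size_t frameCapacity,
				  void *scratch, size_t scratchCapacity,
				  const void *meta, size_t metaSize)
{
	unsigned variant;
	size_t written = ZSTD_writeSkippableFrame(frame, frameCapacity,
						  meta, metaSize, 7);

	if (ZSTD_isError(written) || !ZSTD_isSkippableFrame(frame, written))
		return written;
	/* variant receives 7 back; pass NULL if it is not needed */
	return ZSTD_readSkippableFrame(scratch, scratchCapacity, &variant,
				       frame, written);
}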
* In this case, get total size by adding ZSTD_estimate?DictSize */ -ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); /*! ZSTD_estimate?DictSize() : * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. */ -ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); -ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); /*! ZSTD_initStatic*() : * Initialize an object using a pre-allocated fixed-size buffer. @@ -1472,20 +1538,20 @@ ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e * Limitation 2 : static cctx currently not compatible with multi-threading. * Limitation 3 : static dctx is incompatible with legacy support. 
*/ -ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */ +ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticCCtx() */ -ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */ +ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /*< same as ZSTD_initStaticDCtx() */ -ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict( +ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams); -ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict( +ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, @@ -1504,44 +1570,44 @@ static __attribute__((__unused__)) ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /*< this constant defers to stdlib's functions */ -ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem); -/* ! Thread pool : - * These prototypes make it possible to share a thread pool among multiple compression contexts. - * This can limit resources for applications with multiple threads where each one uses - * a threaded compression mode (via ZSTD_c_nbWorkers parameter). - * ZSTD_createThreadPool creates a new thread pool with a given number of threads. - * Note that the lifetime of such pool must exist while being used. - * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value - * to use an internal thread pool). - * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. +/*! Thread pool : + * These prototypes make it possible to share a thread pool among multiple compression contexts. + * This can limit resources for applications with multiple threads where each one uses + * a threaded compression mode (via ZSTD_c_nbWorkers parameter). + * ZSTD_createThreadPool creates a new thread pool with a given number of threads. 
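/*
 * [Illustrative sketch, not part of this patch] The ZSTD_initStatic*()
 * pattern above: size a workspace with ZSTD_estimateCCtxSize() and carve a
 * CCtx out of it; no further allocation occurs. The workspace must be
 * 8-bytes aligned and must outlive the returned context.
 */
static ZSTD_CCtx *cctx_from_workspace(void *workspace, size_t workspaceSize,
				      int compressionLevel)
{
	if (workspaceSize < ZSTD_estimateCCtxSize(compressionLevel))
		return NULL;	/* caller-provided buffer too small */
	return ZSTD_initStaticCCtx(workspace, workspaceSize);
}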
+ * Note that the lifetime of such pool must exist while being used. + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value + * to use an internal thread pool). + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. */ typedef struct POOL_ctx_s ZSTD_threadPool; -ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads); -ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */ -ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool); +ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads); +ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool); /* * This API is temporary and is expected to change or disappear in the future! */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2( +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced2( const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, const ZSTD_CCtx_params* cctxParams, ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced( +ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced( const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, @@ -1558,28 +1624,22 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced( * As a consequence, `dictBuffer` **must** outlive CDict, * and its content must remain unmodified throughout the lifetime of CDict. * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); - -/*! ZSTD_getDictID_fromCDict() : - * Provides the dictID of the dictionary loaded into `cdict`. - * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. - * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ -ZSTDLIB_API unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict); +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. * `estimatedSrcSize` value is optional, select 0 if not known */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_getParams() : * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */ -ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_checkCParams() : * Ensure param values remain within authorized range. 
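/*
 * [Illustrative sketch, not part of this patch] Thread-pool sharing as
 * documented above, relevant to multithreaded user-space builds of the
 * library (the kernel build is single-threaded): one pool serves several
 * contexts, and it must outlive all of them.
 */
static ZSTD_threadPool *share_pool(ZSTD_CCtx *a, ZSTD_CCtx *b)
{
	ZSTD_threadPool *pool = ZSTD_createThreadPool(4); /* 4 threads, arbitrary */

	if (!pool)
		return NULL;
	ZSTD_CCtx_refThreadPool(a, pool);
	ZSTD_CCtx_refThreadPool(b, pool);
	/* each context still needs ZSTD_c_nbWorkers >= 1 to go multithreaded */
	return pool;	/* free with ZSTD_freeThreadPool() after both contexts */
}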
* @return 0 on success, or an error code (can be checked with ZSTD_isError()) */ -ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); +ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); /*! ZSTD_adjustCParams() : * optimize params for a given `srcSize` and `dictSize`. @@ -1587,23 +1647,25 @@ ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); * `dictSize` must be `0` when there is no dictionary. * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. * This function never fails (wide contract) */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); /*! ZSTD_compress_advanced() : * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters. - * This prototype will be marked as deprecated and generate compilation warning on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, + * This prototype will generate compilation warnings. */ +ZSTD_DEPRECATED("use ZSTD_compress2") +size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict,size_t dictSize, ZSTD_parameters params); /*! ZSTD_compress_usingCDict_advanced() : - * Note : this function is now REDUNDANT. + * Note : this function is now DEPRECATED. * It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters. - * This prototype will be marked as deprecated and generate compilation warning in some future version */ -ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, + * This prototype will generate compilation warnings. */ +ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary") +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const ZSTD_CDict* cdict, @@ -1613,18 +1675,18 @@ ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, /*! ZSTD_CCtx_loadDictionary_byReference() : * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*! ZSTD_CCtx_loadDictionary_advanced() : * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_CCtx_refPrefix_advanced() : * Same as ZSTD_CCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? 
force raw mode (default) ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /* === experimental parameters === */ /* these parameters can be used with ZSTD_setParameter() @@ -1663,9 +1725,15 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre * See the comments on that enum for an explanation of the feature. */ #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 -/* Controls how the literals are compressed (default is auto). - * The value must be of type ZSTD_literalCompressionMode_e. - * See ZSTD_literalCompressionMode_t enum definition for details. +/* Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never compress literals. + * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals + * may still be emitted if huffman is not beneficial to use.) + * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * literals compression based on the compression parameters - specifically, + * negative compression levels do not use literal compression. */ #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 @@ -1728,7 +1796,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre * * Note that this means that the CDict tables can no longer be copied into the * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be - * useable. The dictionary can only be attached or reloaded. + * usable. The dictionary can only be attached or reloaded. * * In general, you should expect compression to be faster--sometimes very much * so--and CDict creation to be slightly slower. Eventually, we will probably @@ -1817,12 +1885,55 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre */ #define ZSTD_c_validateSequences ZSTD_c_experimentalParam12 +/* ZSTD_c_useBlockSplitter + * Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use block splitter. + * Set to ZSTD_ps_enable to always use block splitter. + * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * block splitting based on the compression parameters. + */ +#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13 + +/* ZSTD_c_useRowMatchFinder + * Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use row-based matchfinder. + * Set to ZSTD_ps_enable to force usage of row-based matchfinder. + * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * the row-based matchfinder based on support for SIMD instructions and the window log. + * Note that this only pertains to compression strategies: greedy, lazy, and lazy2 + */ +#define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14 + +/* ZSTD_c_deterministicRefPrefix + * Default is 0 == disabled. Set to 1 to enable. + * + * Zstd produces different results for prefix compression when the prefix is + * directly adjacent to the data about to be compressed vs. when it isn't. + * This is because zstd detects that the two buffers are contiguous and it can + * use a more efficient match finding algorithm. 
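/*
 * [Illustrative sketch, not part of this patch] The replacement that the
 * ZSTD_compress_advanced() / ZSTD_compress_usingCDict_advanced() deprecation
 * messages above point to: ZSTD_compress2() plus parameter setters. The
 * function name is hypothetical.
 */
static size_t compress_advanced_replacement(ZSTD_CCtx *cctx,
					    void *dst, size_t dstCapacity,
					    const void *src, size_t srcSize,
					    const void *dict, size_t dictSize,
					    int level)
{
	ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
	ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
	return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
}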
However, this produces different + * results than when the two buffers are non-contiguous. This flag forces zstd + * to always load the prefix in non-contiguous mode, even if it happens to be + * adjacent to the data, to guarantee determinism. + * + * If you really care about determinism when using a dictionary or prefix, + * like when doing delta compression, you should select this option. It comes + * at a speed penalty of about ~2.5% if the dictionary and data happened to be + * contiguous, and is free if they weren't contiguous. We don't expect that + * intentionally making the dictionary and data contiguous will be worth the + * cost to memcpy() the data. + */ +#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15 + /*! ZSTD_CCtx_getParameter() : * Get the requested compression parameter value, selected by enum ZSTD_cParameter, * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_params : @@ -1842,27 +1953,27 @@ ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() * for static allocation of CCtx for single-threaded compression. */ -ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); -ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */ +ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); +ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */ /*! ZSTD_CCtxParams_reset() : * Reset params to default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); /*! ZSTD_CCtxParams_init() : * Initializes the compression parameters of cctxParams according to * compression level. All other parameters are reset to their default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); /*! ZSTD_CCtxParams_init_advanced() : * Initializes the compression and frame parameters of cctxParams according to * params. All other parameters are reset to their default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); -/*! ZSTD_CCtxParams_setParameter() : +/*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+ * Similar to ZSTD_CCtx_setParameter. * Set one compression parameter, selected by enum ZSTD_cParameter. * Parameters must be applied to a ZSTD_CCtx using @@ -1870,14 +1981,14 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, Z * @result : a code representing success or failure (which can be tested with * ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); /*! ZSTD_CCtxParams_getParameter() : * Similar to ZSTD_CCtx_getParameter. 
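/*
 * [Illustrative sketch, not part of this patch] Driving the new
 * ZSTD_paramSwitch_e knobs defined above (experimental, so only under
 * ZSTD_STATIC_LINKING_ONLY); the chosen values are arbitrary examples.
 */
static void tune_experimental_switches(ZSTD_CCtx *cctx)
{
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable);
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_auto);
	ZSTD_CCtx_setParameter(cctx, ZSTD_c_deterministicRefPrefix, 1);
}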
* Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. * @result : 0, or an error code (which can be tested with ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_setParametersUsingCCtxParams() : * Apply a set of ZSTD_CCtx_params to the compression context. @@ -1886,7 +1997,7 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, * if nbWorkers>=1, new parameters will be picked up at next job, * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). */ -ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); /*! ZSTD_compressStream2_simpleArgs() : @@ -1895,7 +2006,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( * This variant might be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. */ -ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( +ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, @@ -1911,33 +2022,33 @@ ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ -ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size); +ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size); /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, ready to start decompression operation without startup delay. * Dictionary content is referenced, and therefore stays in dictBuffer. * It is important that dictBuffer outlives DDict, * it must remain read accessible throughout the lifetime of DDict */ -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_byReference() : * Same as ZSTD_DCtx_loadDictionary(), * but references `dict` content instead of copying it into `dctx`. * This saves memory if `dict` remains around., * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_advanced() : * Same as ZSTD_DCtx_loadDictionary(), * but gives direct control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?). 
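/*
 * [Illustrative sketch, not part of this patch] The ZSTD_CCtx_params
 * workflow described above: build a parameter set once, then apply it to a
 * context in a single call.
 */
static size_t apply_param_set(ZSTD_CCtx *cctx, int level)
{
	ZSTD_CCtx_params *params = ZSTD_createCCtxParams();
	size_t ret;

	if (!params)
		return (size_t)-1;	/* illustrative failure value */
	ZSTD_CCtxParams_init(params, level);
	ZSTD_CCtxParams_setParameter(params, ZSTD_c_checksumFlag, 1);
	ret = ZSTD_CCtx_setParametersUsingCCtxParams(cctx, params);
	ZSTD_freeCCtxParams(params);
	return ret;
}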
*/ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_refPrefix_advanced() : * Same as ZSTD_DCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_setMaxWindowSize() : * Refuses allocating internal buffers for frames requiring a window size larger than provided limit. @@ -1946,14 +2057,14 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* pre * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); /*! ZSTD_DCtx_getParameter() : * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value); /* ZSTD_d_format * experimental parameter, @@ -2028,11 +2139,13 @@ ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param /*! ZSTD_DCtx_setFormat() : + * This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter(). * Instruct the decoder context about what kind of data to decode next. * This instruction is mandatory to decode data without a fully-formed header, * such ZSTD_f_zstd1_magicless for example. * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); +ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead") +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); /*! ZSTD_decompressStream_simpleArgs() : * Same as ZSTD_decompressStream(), @@ -2040,7 +2153,7 @@ ZSTDLIB_API size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); * This can be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. */ -ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( +ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos); @@ -2056,7 +2169,7 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( /*===== Advanced Streaming compression functions =====*/ /*! 
ZSTD_initCStream_srcSize() : - * This function is deprecated, and equivalent to: + * This function is DEPRECATED, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any) * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); @@ -2065,15 +2178,15 @@ ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( * pledgedSrcSize must be correct. If it is not known at init time, use * ZSTD_CONTENTSIZE_UNKNOWN. Note that, for compatibility with older programs, * "0" also disables frame content size field. It may be enabled in the future. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t -ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); /*! ZSTD_initCStream_usingDict() : - * This function is deprecated, and is equivalent to: + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel); * ZSTD_CCtx_loadDictionary(zcs, dict, dictSize); @@ -2082,15 +2195,15 @@ ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, * dict == NULL or dictSize < 8, in which case no dict is used. * Note: dict is loaded with ZSTD_dct_auto (treated as a full zstd dictionary if * it begins with ZSTD_MAGIC_DICTIONARY, else as raw content) and ZSTD_dlm_byCopy. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t -ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /*! ZSTD_initCStream_advanced() : - * This function is deprecated, and is approximately equivalent to: + * This function is DEPRECATED, and is approximately equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * // Pseudocode: Set each zstd parameter and leave the rest as-is. * for ((param, value) : params) { @@ -2102,23 +2215,24 @@ ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, * dict is loaded with ZSTD_dct_auto and ZSTD_dlm_byCopy. * pledgedSrcSize must be correct. * If srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t -ZSTD_initCStream_advanced(ZSTD_CStream* zcs, +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*! 
ZSTD_initCStream_usingCDict() : - * This function is deprecated, and equivalent to: + * This function is DEPRECATED, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, cdict); * * note : cdict will just be referenced, and must outlive compression session - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); +ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); /*! ZSTD_initCStream_usingCDict_advanced() : * This function is DEPRECATED, and is approximately equivalent to: @@ -2133,18 +2247,21 @@ ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDi * same as ZSTD_initCStream_usingCDict(), with control over frame parameters. * pledgedSrcSize must be correct. If srcSize is not known at init time, use * value ZSTD_CONTENTSIZE_UNKNOWN. - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t -ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, +ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions") +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize); /*! ZSTD_resetCStream() : - * This function is deprecated, and is equivalent to: + * This function is DEPRECATED, and is equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize); + * Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but + * ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be + * explicitly specified. * * start a new frame, using same parameters from previous frame. * This is typically useful to skip dictionary loading stage, since it will re-use it in-place. @@ -2154,9 +2271,10 @@ ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, * For the time being, pledgedSrcSize==0 is interpreted as "srcSize unknown" for compatibility with older programs, * but it will change to mean "empty" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead. * @return : 0, or an error code (which can be tested using ZSTD_isError()) - * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x + * This prototype will generate compilation warnings. */ -ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); +ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions") +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize); typedef struct { @@ -2174,7 +2292,7 @@ typedef struct { * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed. * Aggregates progression inside active worker threads. */ -ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); /*! ZSTD_toFlushNow() : * Tell how many bytes are ready to be flushed immediately. 
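/*
 * [Illustrative sketch, not part of this patch] The migration implied by the
 * note above: ZSTD_CCtx_setPledgedSrcSize() does not treat 0 as "unknown",
 * so a drop-in replacement for ZSTD_resetCStream() must map 0 explicitly.
 */
static size_t reset_cstream_replacement(ZSTD_CCtx *zcs,
					unsigned long long pledgedSrcSize)
{
	size_t ret = ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);

	if (ZSTD_isError(ret))
		return ret;
	if (pledgedSrcSize == 0)
		pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* legacy meaning */
	return ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
}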
@@ -2189,7 +2307,7 @@ ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx * therefore flush speed is limited by production speed of oldest job * irrespective of the speed of concurrent (and newer) jobs. */ -ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); /*===== Advanced Streaming decompression functions =====*/ @@ -2203,7 +2321,7 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); * note: no dictionary will be used if dict == NULL or dictSize < 8 * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /*! * This function is deprecated, and is equivalent to: @@ -2214,7 +2332,7 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dic * note : ddict is referenced, it must outlive decompression session * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /*! * This function is deprecated, and is equivalent to: @@ -2224,7 +2342,7 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDi * re-use decompression parameters from previous init; saves dictionary loading * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); +ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /* ******************************************************************* @@ -2243,8 +2361,7 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); ZSTD_CCtx object can be re-used multiple times within successive compression operations. Start by initializing a context. - Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression, - or ZSTD_compressBegin_advanced(), for finer parameter control. + Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression. It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx() Then, consume your input using ZSTD_compressContinue(). @@ -2267,17 +2384,19 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); */ /*===== Buffer-less streaming compression functions =====*/ -ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */ -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. 
pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ - -ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); - - +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /*< note: fails if cdict==NULL */ +ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /*< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ + +ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); + +/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */ +ZSTD_DEPRECATED("use advanced API to access custom parameters") +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /*< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTD_DEPRECATED("use advanced API to access custom parameters") +size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */ /* Buffer-less streaming decompression (synchronous mode) @@ -2368,24 +2487,24 @@ typedef struct { * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ -ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /*< doesn't consume input */ /*! 
ZSTD_getFrameHeader_advanced() : * same as ZSTD_getFrameHeader(), * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); -ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); +ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /*< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); -ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ -ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); +ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; -ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); @@ -2422,10 +2541,10 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); */ /*===== Raw zstd block functions =====*/ -ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); -ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ +ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /*< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. 
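/*
 * [Illustrative sketch, not part of this patch] The buffer-less decompression
 * loop built from the prototypes above: ask the DCtx how many bytes it wants,
 * feed exactly that, and stop when it asks for 0.
 */
static size_t bufferless_decompress(ZSTD_DCtx *dctx,
				    void *dst, size_t dstCapacity,
				    const void *src, size_t srcSize)
{
	const unsigned char *ip = src;
	unsigned char *op = dst;
	size_t srcPos = 0, dstPos = 0;

	ZSTD_decompressBegin(dctx);
	for (;;) {
		size_t need = ZSTD_nextSrcSizeToDecompress(dctx);
		size_t got;

		if (need == 0)
			break;			/* frame fully decoded */
		if (need > srcSize - srcPos)
			return (size_t)-1;	/* truncated input, illustrative */
		got = ZSTD_decompressContinue(dctx, op + dstPos,
					      dstCapacity - dstPos,
					      ip + srcPos, need);
		if (ZSTD_isError(got))
			return got;
		srcPos += need;
		dstPos += got;	/* may be 0 while decoding headers */
	}
	return dstPos;
}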
*/ #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */ diff --git a/include/misc/cxl.h b/include/misc/cxl.h index 0410412de16b..d8044299d654 100644 --- a/include/misc/cxl.h +++ b/include/misc/cxl.h @@ -30,7 +30,7 @@ unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev); /* * Context lifetime overview: * - * An AFU context may be inited and then started and stoppped multiple times + * An AFU context may be inited and then started and stopped multiple times * before it's released. ie. * - cxl_dev_context_init() * - cxl_start_context() diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h index 13abe013af21..429adf6be29c 100644 --- a/include/net/9p/9p.h +++ b/include/net/9p/9p.h @@ -531,6 +531,7 @@ struct p9_rstatfs { * @offset: used by marshalling routines to track current position in buffer * @capacity: used by marshalling routines to track total malloc'd capacity * @sdata: payload + * @zc: whether zero-copy is used * * &p9_fcall represents the structure for all 9P RPC * transactions. Requests are packaged into fcalls, and reponses @@ -549,6 +550,7 @@ struct p9_fcall { struct kmem_cache *cache; u8 *sdata; + bool zc; }; int p9_errstr2errno(char *errstr, int len); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index d80c78506f19..b3ba04615caa 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -65,8 +65,6 @@ enum { GDMA_DEVICE_MANA = 2, }; -typedef u64 gdma_obj_handle_t; - struct gdma_resource { /* Protect the bitmap */ spinlock_t lock; @@ -200,7 +198,7 @@ struct gdma_mem_info { u64 length; /* Allocated by the PF driver */ - gdma_obj_handle_t dma_region_handle; + u64 dma_region_handle; }; #define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8 @@ -632,7 +630,7 @@ struct gdma_create_queue_req { u32 reserved1; u32 pdid; u32 doolbell_id; - gdma_obj_handle_t gdma_region; + u64 gdma_region; u32 reserved2; u32 queue_size; u32 log2_throttle_limit; @@ -707,14 +705,14 @@ struct gdma_create_dma_region_req { struct gdma_create_dma_region_resp { struct gdma_resp_hdr hdr; - gdma_obj_handle_t dma_region_handle; + u64 dma_region_handle; }; /* HW DATA */ /* GDMA_DMA_REGION_ADD_PAGES */ struct gdma_dma_region_add_pages_req { struct gdma_req_hdr hdr; - gdma_obj_handle_t dma_region_handle; + u64 dma_region_handle; u32 page_addr_list_len; u32 reserved3; @@ -726,7 +724,7 @@ struct gdma_dma_region_add_pages_req { struct gdma_destroy_dma_region_req { struct gdma_req_hdr hdr; - gdma_obj_handle_t dma_region_handle; + u64 dma_region_handle; }; /* HW DATA */ enum gdma_pd_flags { @@ -741,14 +739,14 @@ struct gdma_create_pd_req { struct gdma_create_pd_resp { struct gdma_resp_hdr hdr; - gdma_obj_handle_t pd_handle; + u64 pd_handle; u32 pd_id; u32 reserved; };/* HW DATA */ struct gdma_destroy_pd_req { struct gdma_req_hdr hdr; - gdma_obj_handle_t pd_handle; + u64 pd_handle; };/* HW DATA */ struct gdma_destory_pd_resp { @@ -764,11 +762,11 @@ enum gdma_mr_type { }; struct gdma_create_mr_params { - gdma_obj_handle_t pd_handle; + u64 pd_handle; enum gdma_mr_type mr_type; union { struct { - gdma_obj_handle_t dma_region_handle; + u64 dma_region_handle; u64 virtual_address; enum gdma_mr_access_flags access_flags; } gva; @@ -777,13 +775,13 @@ struct gdma_create_mr_params { struct gdma_create_mr_request { struct gdma_req_hdr hdr; - gdma_obj_handle_t pd_handle; + u64 pd_handle; enum gdma_mr_type mr_type; u32 reserved_1; union { struct { - gdma_obj_handle_t dma_region_handle; + u64 dma_region_handle; u64 virtual_address; enum gdma_mr_access_flags access_flags; } gva; @@ -794,14 +792,14 @@ struct gdma_create_mr_request 
{ struct gdma_create_mr_response { struct gdma_resp_hdr hdr; - gdma_obj_handle_t mr_handle; + u64 mr_handle; u32 lkey; u32 rkey; };/* HW DATA */ struct gdma_destroy_mr_request { struct gdma_req_hdr hdr; - gdma_obj_handle_t mr_handle; + u64 mr_handle; };/* HW DATA */ struct gdma_destroy_mr_response { @@ -835,7 +833,6 @@ void mana_gd_free_memory(struct gdma_mem_info *gmi); int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, u32 resp_len, void *resp); -int mana_gd_destroy_dma_region(struct gdma_context *gc, - gdma_obj_handle_t dma_region_handle); +int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle); #endif /* _GDMA_H */ diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 575ea36ce606..3bb579962a14 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -412,6 +412,9 @@ int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); extern const struct ethtool_ops mana_ethtool_ops; +/* A CQ can be created not associated with any EQ */ +#define GDMA_CQ_NO_EQ 0xffff + struct mana_obj_spec { u32 queue_index; u64 gdma_region; diff --git a/include/net/sock.h b/include/net/sock.h index ecea3dcc2217..dcd72e6285b2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -318,6 +318,9 @@ struct sk_filter; * @sk_stamp: time stamp of last packet received * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only * @sk_tsflags: SO_TIMESTAMPING flags + * @sk_use_task_frag: allow sk_page_frag() to use current->task_frag. + * Sockets that can be used under memory reclaim should + * set this to false. * @sk_bind_phc: SO_TIMESTAMPING bind PHC index of PTP virtual clock * for timestamping * @sk_tskey: counter to disambiguate concurrent tstamp requests @@ -512,6 +515,7 @@ struct sock { u8 sk_txtime_deadline_mode : 1, sk_txtime_report_errors : 1, sk_txtime_unused : 6; + bool sk_use_task_frag; struct socket *sk_socket; void *sk_user_data; @@ -2560,16 +2564,14 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) * Both direct reclaim and page faults can nest inside other * socket operations and end up recursing into sk_page_frag() * while it's already in use: explicitly avoid task page_frag - * usage if the caller is potentially doing any of them. - * This assumes that page fault handlers use the GFP_NOFS flags. + * when users disable sk_use_task_frag. * * Return: a per task page_frag if context allows that, * otherwise a per socket one. 
*/ static inline struct page_frag *sk_page_frag(struct sock *sk) { - if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) == - (__GFP_DIRECT_RECLAIM | __GFP_FS)) + if (sk->sk_use_task_frag) return &current->task_frag; return &sk->sk_frag; diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h index a9162f25beaf..b8c56d7dc35d 100644 --- a/include/rdma/ib_pack.h +++ b/include/rdma/ib_pack.h @@ -84,6 +84,8 @@ enum { /* opcode 0x15 is reserved */ IB_OPCODE_SEND_LAST_WITH_INVALIDATE = 0x16, IB_OPCODE_SEND_ONLY_WITH_INVALIDATE = 0x17, + IB_OPCODE_FLUSH = 0x1C, + IB_OPCODE_ATOMIC_WRITE = 0x1D, /* real constants follow -- see comment about above IB_OPCODE() macro for more details */ @@ -112,6 +114,8 @@ enum { IB_OPCODE(RC, FETCH_ADD), IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE), IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE), + IB_OPCODE(RC, FLUSH), + IB_OPCODE(RC, ATOMIC_WRITE), /* UC */ IB_OPCODE(UC, SEND_FIRST), @@ -149,6 +153,7 @@ enum { IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE), IB_OPCODE(RD, COMPARE_SWAP), IB_OPCODE(RD, FETCH_ADD), + IB_OPCODE(RD, FLUSH), /* UD */ IB_OPCODE(UD, SEND_ONLY), diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 975d6e9efbcb..a9a429172c0a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -270,6 +270,10 @@ enum ib_device_cap_flags { /* The device supports padding incoming writes to cacheline. */ IB_DEVICE_PCI_WRITE_END_PADDING = IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING, + /* Placement type attributes */ + IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL, + IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT, + IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE, }; enum ib_kernel_cap_flags { @@ -982,9 +986,11 @@ enum ib_wc_opcode { IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW, IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV, IB_WC_LSO = IB_UVERBS_WC_TSO, + IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE, IB_WC_REG_MR, IB_WC_MASKED_COMP_SWAP, IB_WC_MASKED_FETCH_ADD, + IB_WC_FLUSH = IB_UVERBS_WC_FLUSH, /* * Set value of IB_WC_RECV so consumers can test if a completion is a * receive by testing (opcode & IB_WC_RECV).
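An aside on the sk_page_frag()/sk_use_task_frag change in include/net/sock.h above: instead of inferring reclaim safety from sk_allocation GFP bits, callers that transmit from memory-reclaim context are now expected to opt out explicitly. A minimal sketch of what that opt-out could look like in a driver's socket setup (the function name is hypothetical and not part of this patch; only the sk_use_task_frag field comes from it):

static void example_prepare_reclaim_socket(struct sock *sk)
{
	lock_sock(sk);
	/* Allocations on this socket must not recurse into FS/IO reclaim. */
	sk->sk_allocation = GFP_NOIO;
	/* New opt-out: make sk_page_frag() fall back to the per-socket sk_frag. */
	sk->sk_use_task_frag = false;
	release_sock(sk);
}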
@@ -1325,6 +1331,8 @@ enum ib_wr_opcode { IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP, IB_WR_MASKED_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD, + IB_WR_FLUSH = IB_UVERBS_WR_FLUSH, + IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE, /* These are kernel only and can not be issued by userspace */ IB_WR_REG_MR = 0x20, @@ -1458,10 +1466,12 @@ enum ib_access_flags { IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND, IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB, IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING, + IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL, + IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT, IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE, IB_ACCESS_SUPPORTED = - ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL, + ((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL, }; /* @@ -2203,6 +2213,7 @@ struct ib_port_data { struct ib_port_cache cache; struct net_device __rcu *netdev; + netdevice_tracker netdev_tracker; struct hlist_node ndev_hash_link; struct rdma_port_counter port_counter; struct ib_port *sysfs; @@ -4321,6 +4332,8 @@ int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata); static inline int ib_check_mr_access(struct ib_device *ib_dev, unsigned int flags) { + u64 device_cap = ib_dev->attrs.device_cap_flags; + /* * Local write permission is required if remote write or * remote atomic permission is also requested. @@ -4334,7 +4347,14 @@ static inline int ib_check_mr_access(struct ib_device *ib_dev, if (flags & IB_ACCESS_ON_DEMAND && !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING)) - return -EINVAL; + return -EOPNOTSUPP; + + if ((flags & IB_ACCESS_FLUSH_GLOBAL && + !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) || + (flags & IB_ACCESS_FLUSH_PERSISTENT && + !(device_cap & IB_DEVICE_FLUSH_PERSISTENT))) + return -EOPNOTSUPP; + return 0; } diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h index f3d5377b217a..d297f084001a 100644 --- a/include/rdma/opa_vnic.h +++ b/include/rdma/opa_vnic.h @@ -51,7 +51,7 @@ static inline void *opa_vnic_dev_priv(const struct net_device *dev) return oparn->dev_priv; } -/* opa_vnic skb meta data structrue */ +/* opa_vnic skb meta data structure */ struct opa_vnic_skb_mdata { u8 vl; u8 entropy; diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 654cc3918c94..695eebc6f2c8 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -393,7 +393,7 @@ extern int iscsi_eh_recover_target(struct scsi_cmnd *sc); extern int iscsi_eh_session_reset(struct scsi_cmnd *sc); extern int iscsi_eh_device_reset(struct scsi_cmnd *sc); extern int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc); -extern enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc); +extern enum scsi_timeout_action iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc); /* * iSCSI host helpers. 
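The ib_check_mr_access() change above now rejects unsupported on-demand paging and FLUSH access flags with -EOPNOTSUPP instead of -EINVAL, so a ULP should gate the new flags on the matching device capability bits. A minimal sketch, with a hypothetical helper name; only the flag and capability names come from this patch:

static int example_mr_access_flags(struct ib_device *dev, unsigned int *access)
{
	*access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;

	/* Request persistent-placement flush only when the device supports it. */
	if (dev->attrs.device_cap_flags & IB_DEVICE_FLUSH_PERSISTENT)
		*access |= IB_ACCESS_FLUSH_PERSISTENT;

	/* Fails with -EOPNOTSUPP if an unsupported flag slipped through. */
	return ib_check_mr_access(dev, *access);
}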
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 2dbead74a2af..1aee3d0ebbb2 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -639,15 +639,29 @@ struct sas_task_slow { #define SAS_TASK_STATE_ABORTED 4 #define SAS_TASK_NEED_DEV_RESET 8 -extern struct sas_task *sas_alloc_task(gfp_t flags); -extern struct sas_task *sas_alloc_slow_task(gfp_t flags); -extern void sas_free_task(struct sas_task *task); - static inline bool sas_is_internal_abort(struct sas_task *task) { return task->task_proto == SAS_PROTOCOL_INTERNAL_ABORT; } +static inline struct request *sas_task_find_rq(struct sas_task *task) +{ + struct scsi_cmnd *scmd; + + if (task->task_proto & SAS_PROTOCOL_STP_ALL) { + struct ata_queued_cmd *qc = task->uldd_task; + + scmd = qc ? qc->scsicmd : NULL; + } else { + scmd = task->uldd_task; + } + + if (!scmd) + return NULL; + + return scsi_cmd_to_rq(scmd); +} + struct sas_domain_function_template { /* The class calls these to notify the LLDD of an event. */ void (*lldd_port_formed)(struct asd_sas_phy *); @@ -750,6 +764,8 @@ int sas_clear_task_set(struct domain_device *dev, u8 *lun); int sas_lu_reset(struct domain_device *dev, u8 *lun); int sas_query_task(struct sas_task *task, u16 tag); int sas_abort_task(struct sas_task *task, u16 tag); +int sas_find_attached_phy_id(struct expander_device *ex_dev, + struct domain_device *dev); void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event, gfp_t gfp_flags); diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index a1df4f9d57a3..9c927d46f136 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h @@ -32,9 +32,10 @@ void sas_probe_sata(struct asd_sas_port *port); void sas_suspend_sata(struct asd_sas_port *port); void sas_resume_sata(struct asd_sas_port *port); void sas_ata_end_eh(struct ata_port *ap); +void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset); int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id); -int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline); +int smp_ata_check_ready_type(struct ata_link *link); #else @@ -87,16 +88,20 @@ static inline void sas_ata_end_eh(struct ata_port *ap) { } +static inline void sas_ata_device_link_abort(struct domain_device *dev, + bool force_reset) +{ +} + static inline int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id) { return 0; } -static inline int sas_ata_wait_after_reset(struct domain_device *dev, - unsigned long deadline) +static inline int smp_ata_check_ready_type(struct ata_link *link) { - return -ETIMEDOUT; + return 0; } #endif diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 3e46859774c8..ec093594ba53 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h @@ -121,6 +121,7 @@ enum scsi_disposition { * msg_byte (unused) * host_byte = set by low-level driver to indicate status. 
*/ +#define status_byte(result) (result & 0xff) #define host_byte(result) (((result) >> 16) & 0xff) #define sense_class(sense) (((sense) >> 4) & 0x7) diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 7d3622db38ed..c2cb5f69635c 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h @@ -52,8 +52,9 @@ struct scsi_pointer { #define SCMD_TAGGED (1 << 0) #define SCMD_INITIALIZED (1 << 1) #define SCMD_LAST (1 << 2) +#define SCMD_FAIL_IF_RECOVERING (1 << 4) /* flags preserved across unprep / reprep */ -#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED) +#define SCMD_PRESERVED_FLAGS (SCMD_INITIALIZED | SCMD_FAIL_IF_RECOVERING) /* for scmd->state */ #define SCMD_STATE_COMPLETE 0 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index c36656d8ac6c..3642b8e3928b 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -184,6 +184,7 @@ struct scsi_device { unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */ unsigned no_write_same:1; /* no WRITE SAME command */ unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */ + unsigned use_16_for_sync:1; /* Use sync (16) over sync (10) */ unsigned skip_ms_page_8:1; /* do not use MODE SENSE page 0x08 */ unsigned skip_ms_page_3f:1; /* do not use MODE SENSE page 0x3f */ unsigned skip_vpd_pages:1; /* do not read VPD pages */ @@ -236,7 +237,6 @@ struct scsi_device { struct device sdev_gendev, sdev_dev; - struct execute_work ew; /* used to get process context on put */ struct work_struct requeue_work; struct scsi_device_handler *handler; diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index fcf25f1642a3..587cc767bb67 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h @@ -27,6 +27,18 @@ struct scsi_transport_template; #define MODE_INITIATOR 0x01 #define MODE_TARGET 0x02 +/** + * enum scsi_timeout_action - How to handle a command that timed out. + * @SCSI_EH_DONE: The command has already been completed. + * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion. + * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command. + */ +enum scsi_timeout_action { + SCSI_EH_DONE, + SCSI_EH_RESET_TIMER, + SCSI_EH_NOT_HANDLED, +}; + struct scsi_host_template { /* * Put fields referenced in IO submission path together in @@ -331,7 +343,7 @@ struct scsi_host_template { * * Status: OPTIONAL */ - enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *); + enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); /* * Optional routine that allows the transport to decide if a cmd * is retryable. Return true if the transport is in a state the @@ -358,12 +370,6 @@ struct scsi_host_template { const char *proc_name; /* - * Used to store the procfs directory if a driver implements the - * show_info method. - */ - struct proc_dir_entry *proc_dir; - - /* * This determines if we will use a non-interrupt driven * or an interrupt driven scheme. It is set to the maximum number * of simultaneous commands a single hw queue in HBA will accept. @@ -423,12 +429,6 @@ struct scsi_host_template { */ short cmd_per_lun; - /* - * present contains counter indicating how many boards of this - * type were found when we did the scan. 
- */ - unsigned char present; - /* If use block layer to manage tags, this is tag allocation policy */ int tag_alloc_policy; @@ -751,6 +751,12 @@ extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int); extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, struct device *, struct device *); +#if defined(CONFIG_SCSI_PROC_FS) +struct proc_dir_entry * +scsi_template_proc_dir(const struct scsi_host_template *sht); +#else +#define scsi_template_proc_dir(sht) NULL +#endif extern void scsi_scan_host(struct Scsi_Host *); extern void scsi_rescan_device(struct device *); extern void scsi_remove_host(struct Scsi_Host *); diff --git a/include/scsi/scsi_proto.h b/include/scsi/scsi_proto.h index 919ed4137f9a..fbe5bdfe4d6e 100644 --- a/include/scsi/scsi_proto.h +++ b/include/scsi/scsi_proto.h @@ -342,4 +342,14 @@ enum scsi_version_descriptor { SCSI_VERSION_DESCRIPTOR_SRP = 0x0940 }; +enum scsi_support_opcode { + SCSI_SUPPORT_NO_INFO = 0, + SCSI_SUPPORT_NOT_SUPPORTED = 1, + SCSI_SUPPORT_FULL = 3, + SCSI_SUPPORT_VENDOR = 5, +}; + +#define SCSI_CONTROL_MASK 0 +#define SCSI_GROUP_NUMBER_MASK 0 + #endif /* _SCSI_PROTO_H_ */ diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index e80a7c542c88..3dcda19d3520 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -862,7 +862,7 @@ struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel, int fc_vport_terminate(struct fc_vport *vport); int fc_block_rport(struct fc_rport *rport); int fc_block_scsi_eh(struct scsi_cmnd *cmnd); -enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd); +enum scsi_timeout_action fc_eh_timed_out(struct scsi_cmnd *scmd); bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd); static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job) diff --git a/include/scsi/scsi_transport_srp.h b/include/scsi/scsi_transport_srp.h index d22df12584f9..dfc78aa112ad 100644 --- a/include/scsi/scsi_transport_srp.h +++ b/include/scsi/scsi_transport_srp.h @@ -118,7 +118,7 @@ extern int srp_reconnect_rport(struct srp_rport *rport); extern void srp_start_tl_fail_timers(struct srp_rport *rport); extern void srp_remove_host(struct Scsi_Host *); extern void srp_stop_rport_timers(struct srp_rport *rport); -enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd); +enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd); /** * srp_chkready() - evaluate the transport layer state before I/O diff --git a/include/scsi/sg.h b/include/scsi/sg.h index 068e35d36557..af31cecd9012 100644 --- a/include/scsi/sg.h +++ b/include/scsi/sg.h @@ -159,7 +159,7 @@ struct compat_sg_io_hdr { #define TASK_ABORTED 0x20 /* Obsolete status_byte() declaration */ -#define status_byte(result) (((result) >> 1) & 0x7f) +#define sg_status_byte(result) (((result) >> 1) & 0x7f) typedef struct sg_scsi_id { /* used by SG_GET_SCSI_ID ioctl() */ int host_no; /* as in "scsi<n>" where 'n' is one of 0, 1, 2 etc */ diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 25ec8c181688..eba23daf2c29 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -258,6 +258,7 @@ struct hda_codec { unsigned int link_down_at_suspend:1; /* link down at runtime suspend */ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */ unsigned int forced_resume:1; /* forced resume for jack */ + unsigned int no_stream_clean_at_suspend:1; /* do not clean streams at suspend */ #ifdef CONFIG_PM unsigned long power_on_acct; diff --git 
a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h index 68ab89211de2..511211f4a2b6 100644 --- a/include/sound/hdaudio_ext.h +++ b/include/sound/hdaudio_ext.h @@ -75,6 +75,8 @@ struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus, struct snd_pcm_substream *substream, int type); void snd_hdac_ext_stream_release(struct hdac_ext_stream *hext_stream, int type); +struct hdac_ext_stream *snd_hdac_ext_cstream_assign(struct hdac_bus *bus, + struct snd_compr_stream *cstream); void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus, struct hdac_ext_stream *hext_stream, bool decouple); void snd_hdac_ext_stream_decouple(struct hdac_bus *bus, diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 8c920456edd9..12c9ba16217e 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -91,6 +91,8 @@ #define DA_EMULATE_ALUA 0 /* Emulate SCSI2 RESERVE/RELEASE and Persistent Reservations by default */ #define DA_EMULATE_PR 1 +/* Emulation for REPORT SUPPORTED OPERATION CODES */ +#define DA_EMULATE_RSOC 1 /* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ #define DA_ENFORCE_PR_ISIDS 1 /* Force SPC-3 PR Activate Persistence across Target Power Loss */ @@ -690,6 +692,7 @@ struct se_dev_attrib { bool emulate_caw; bool emulate_3pc; bool emulate_pr; + bool emulate_rsoc; enum target_prot_type pi_prot_type; enum target_prot_type hw_pi_prot_type; bool pi_prot_verify; @@ -709,7 +712,6 @@ struct se_dev_attrib { u32 unmap_granularity; u32 unmap_granularity_alignment; u32 max_write_same_len; - u32 max_bytes_per_io; struct se_device *da_dev; struct config_group da_group; }; @@ -867,6 +869,21 @@ struct se_device { struct se_device_queue *queues; }; +struct target_opcode_descriptor { + u8 support:3; + u8 serv_action_valid:1; + u8 opcode; + u16 service_action; + u32 cdb_size; + u8 specific_timeout; + u16 nominal_timeout; + u16 recommended_timeout; + bool (*enabled)(struct se_cmd *cmd); + void (*update_usage_bits)(u8 *usage_bits, + struct se_device *dev); + u8 usage_bits[]; +}; + struct se_hba { u16 hba_tpgt; u32 hba_id; diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index 6a13220d2d27..155c495b89ea 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -21,6 +21,9 @@ #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) +#undef __get_cpumask +#define __get_cpumask(field) (char *)__get_dynamic_array(field) + #undef __get_sockaddr #define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) @@ -40,6 +43,9 @@ #undef __get_rel_bitmask #define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field) +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field) + #undef __get_rel_sockaddr #define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field)) diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h index c6b372401c27..31d994e6b4ca 100644 --- a/include/trace/events/f2fs.h +++ b/include/trace/events/f2fs.h @@ -48,6 +48,8 @@ TRACE_DEFINE_ENUM(CP_DISCARD); TRACE_DEFINE_ENUM(CP_TRIMMED); TRACE_DEFINE_ENUM(CP_PAUSE); TRACE_DEFINE_ENUM(CP_RESIZE); +TRACE_DEFINE_ENUM(EX_READ); +TRACE_DEFINE_ENUM(EX_BLOCK_AGE); #define show_block_type(type) \ __print_symbolic(type, \ @@ -154,6 +156,11 @@ TRACE_DEFINE_ENUM(CP_RESIZE); { COMPRESS_ZSTD, "ZSTD" }, \ { COMPRESS_LZORLE, "LZO-RLE" }) +#define show_extent_type(type) \ + __print_symbolic(type, \ + { EX_READ, "Read" 
}, \ + { EX_BLOCK_AGE, "Block Age" }) + struct f2fs_sb_info; struct f2fs_io_info; struct extent_info; @@ -322,7 +329,7 @@ TRACE_EVENT(f2fs_unlink_enter, __field(ino_t, ino) __field(loff_t, size) __field(blkcnt_t, blocks) - __field(const char *, name) + __string(name, dentry->d_name.name) ), TP_fast_assign( @@ -330,7 +337,7 @@ TRACE_EVENT(f2fs_unlink_enter, __entry->ino = dir->i_ino; __entry->size = dir->i_size; __entry->blocks = dir->i_blocks; - __entry->name = dentry->d_name.name; + __assign_str(name, dentry->d_name.name); ), TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, " @@ -338,7 +345,7 @@ TRACE_EVENT(f2fs_unlink_enter, show_dev_ino(__entry), __entry->size, (unsigned long long)__entry->blocks, - __entry->name) + __get_str(name)) ); DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, @@ -940,25 +947,29 @@ TRACE_EVENT(f2fs_direct_IO_enter, TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) - __field(struct kiocb *, iocb) + __field(loff_t, ki_pos) + __field(int, ki_flags) + __field(u16, ki_ioprio) __field(unsigned long, len) __field(int, rw) ), TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->ino = inode->i_ino; - __entry->iocb = iocb; - __entry->len = len; - __entry->rw = rw; + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->ki_pos = iocb->ki_pos; + __entry->ki_flags = iocb->ki_flags; + __entry->ki_ioprio = iocb->ki_ioprio; + __entry->len = len; + __entry->rw = rw; ), TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d", show_dev_ino(__entry), - __entry->iocb->ki_pos, + __entry->ki_pos, __entry->len, - __entry->iocb->ki_flags, - __entry->iocb->ki_ioprio, + __entry->ki_flags, + __entry->ki_ioprio, __entry->rw) ); @@ -1400,26 +1411,26 @@ TRACE_EVENT(f2fs_readpages, TRACE_EVENT(f2fs_write_checkpoint, - TP_PROTO(struct super_block *sb, int reason, char *msg), + TP_PROTO(struct super_block *sb, int reason, const char *msg), TP_ARGS(sb, reason, msg), TP_STRUCT__entry( __field(dev_t, dev) __field(int, reason) - __field(char *, msg) + __string(dest_msg, msg) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->reason = reason; - __entry->msg = msg; + __assign_str(dest_msg, msg); ), TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", show_dev(__entry->dev), show_cpreason(__entry->reason), - __entry->msg) + __get_str(dest_msg)) ); DECLARE_EVENT_CLASS(f2fs_discard, @@ -1518,28 +1529,31 @@ TRACE_EVENT(f2fs_issue_flush, TRACE_EVENT(f2fs_lookup_extent_tree_start, - TP_PROTO(struct inode *inode, unsigned int pgofs), + TP_PROTO(struct inode *inode, unsigned int pgofs, enum extent_type type), - TP_ARGS(inode, pgofs), + TP_ARGS(inode, pgofs, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) + __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; + __entry->type = type; ), - TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u", + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, type = %s", show_dev_ino(__entry), - __entry->pgofs) + __entry->pgofs, + show_extent_type(__entry->type)) ); -TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, +TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end, TP_PROTO(struct inode *inode, unsigned int pgofs, struct extent_info *ei), @@ -1553,8 +1567,8 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, __field(ino_t, ino) __field(unsigned int, pgofs) __field(unsigned int, fofs) - __field(u32, blk) __field(unsigned int, len) + __field(u32, blk) 
), TP_fast_assign( @@ -1562,26 +1576,65 @@ TRACE_EVENT_CONDITION(f2fs_lookup_extent_tree_end, __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->fofs = ei->fofs; + __entry->len = ei->len; __entry->blk = ei->blk; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " + "read_ext_info(fofs: %u, len: %u, blk: %u)", + show_dev_ino(__entry), + __entry->pgofs, + __entry->fofs, + __entry->len, + __entry->blk) +); + +TRACE_EVENT_CONDITION(f2fs_lookup_age_extent_tree_end, + + TP_PROTO(struct inode *inode, unsigned int pgofs, + struct extent_info *ei), + + TP_ARGS(inode, pgofs, ei), + + TP_CONDITION(ei), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, pgofs) + __field(unsigned int, fofs) + __field(unsigned int, len) + __field(unsigned long long, age) + __field(unsigned long long, blocks) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pgofs = pgofs; + __entry->fofs = ei->fofs; __entry->len = ei->len; + __entry->age = ei->age; + __entry->blocks = ei->last_blocks; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " - "ext_info(fofs: %u, blk: %u, len: %u)", + "age_ext_info(fofs: %u, len: %u, age: %llu, blocks: %llu)", show_dev_ino(__entry), __entry->pgofs, __entry->fofs, - __entry->blk, - __entry->len) + __entry->len, + __entry->age, + __entry->blocks) ); -TRACE_EVENT(f2fs_update_extent_tree_range, +TRACE_EVENT(f2fs_update_read_extent_tree_range, - TP_PROTO(struct inode *inode, unsigned int pgofs, block_t blkaddr, - unsigned int len, + TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len, + block_t blkaddr, unsigned int c_len), - TP_ARGS(inode, pgofs, blkaddr, len, c_len), + TP_ARGS(inode, pgofs, len, blkaddr, c_len), TP_STRUCT__entry( __field(dev_t, dev) @@ -1596,67 +1649,108 @@ TRACE_EVENT(f2fs_update_extent_tree_range, __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; - __entry->blk = blkaddr; __entry->len = len; + __entry->blk = blkaddr; __entry->c_len = c_len; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " - "blkaddr = %u, len = %u, " - "c_len = %u", + "len = %u, blkaddr = %u, c_len = %u", show_dev_ino(__entry), __entry->pgofs, - __entry->blk, __entry->len, + __entry->blk, __entry->c_len) ); +TRACE_EVENT(f2fs_update_age_extent_tree_range, + + TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len, + unsigned long long age, + unsigned long long last_blks), + + TP_ARGS(inode, pgofs, len, age, last_blks), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, pgofs) + __field(unsigned int, len) + __field(unsigned long long, age) + __field(unsigned long long, blocks) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->pgofs = pgofs; + __entry->len = len; + __entry->age = age; + __entry->blocks = last_blks; + ), + + TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " + "len = %u, age = %llu, blocks = %llu", + show_dev_ino(__entry), + __entry->pgofs, + __entry->len, + __entry->age, + __entry->blocks) +); + TRACE_EVENT(f2fs_shrink_extent_tree, TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt, - unsigned int tree_cnt), + unsigned int tree_cnt, enum extent_type type), - TP_ARGS(sbi, node_cnt, tree_cnt), + TP_ARGS(sbi, node_cnt, tree_cnt, type), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, node_cnt) __field(unsigned int, tree_cnt) + __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = sbi->sb->s_dev; 
__entry->node_cnt = node_cnt; __entry->tree_cnt = tree_cnt; + __entry->type = type; ), - TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u", + TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u, type = %s", show_dev(__entry->dev), __entry->node_cnt, - __entry->tree_cnt) + __entry->tree_cnt, + show_extent_type(__entry->type)) ); TRACE_EVENT(f2fs_destroy_extent_tree, - TP_PROTO(struct inode *inode, unsigned int node_cnt), + TP_PROTO(struct inode *inode, unsigned int node_cnt, + enum extent_type type), - TP_ARGS(inode, node_cnt), + TP_ARGS(inode, node_cnt, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, node_cnt) + __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->node_cnt = node_cnt; + __entry->type = type; ), - TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u", + TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u, type = %s", show_dev_ino(__entry), - __entry->node_cnt) + __entry->node_cnt, + show_extent_type(__entry->type)) ); DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h index 59363a083ecb..d92691c78cff 100644 --- a/include/trace/events/ib_mad.h +++ b/include/trace/events/ib_mad.h @@ -49,7 +49,6 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, __field(int, retries_left) __field(int, max_retries) __field(int, retry) - __field(u16, pkey) ), TP_fast_assign( @@ -89,7 +88,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \ "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \ "attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d "\ - "pkey 0x%x rpqn 0x%x rqpkey 0x%x", + "rpqn 0x%x rqpkey 0x%x", __entry->dev_index, __entry->port_num, __entry->qp_num, __entry->agent_priv, be64_to_cpu(__entry->wrtid), __entry->retries_left, __entry->max_retries, @@ -100,7 +99,7 @@ DECLARE_EVENT_CLASS(ib_mad_send_template, be16_to_cpu(__entry->class_specific), be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id), be32_to_cpu(__entry->attr_mod), - be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey, + be32_to_cpu(__entry->dlid), __entry->sl, __entry->rqpn, __entry->rqkey ) ); @@ -204,7 +203,6 @@ TRACE_EVENT(ib_mad_recv_done_handler, __field(u16, wc_status) __field(u32, slid) __field(u32, dev_index) - __field(u16, pkey) ), TP_fast_assign( @@ -224,9 +222,6 @@ TRACE_EVENT(ib_mad_recv_done_handler, __entry->slid = wc->slid; __entry->src_qp = wc->src_qp; __entry->sl = wc->sl; - ib_query_pkey(qp_info->port_priv->device, - qp_info->port_priv->port_num, - wc->pkey_index, &__entry->pkey); __entry->wc_status = wc->status; ), @@ -234,7 +229,7 @@ TRACE_EVENT(ib_mad_recv_done_handler, "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \ "method 0x%02x status 0x%04x class_specific 0x%04x " \ "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \ - "slid 0x%08x src QP%d, sl %d pkey 0x%04x", + "slid 0x%08x src QP%d, sl %d", __entry->dev_index, __entry->port_num, __entry->qp_num, __entry->wc_status, __entry->length, @@ -244,7 +239,7 @@ TRACE_EVENT(ib_mad_recv_done_handler, be16_to_cpu(__entry->class_specific), be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id), be32_to_cpu(__entry->attr_mod), - __entry->slid, __entry->src_qp, __entry->sl, __entry->pkey + __entry->slid, __entry->src_qp, __entry->sl ) ); diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index e87cb2b80ed3..412b5a46374c 100644 --- a/include/trace/events/mmflags.h 
+++ b/include/trace/events/mmflags.h @@ -91,10 +91,10 @@ #define IF_HAVE_PG_IDLE(flag,string) #endif -#ifdef CONFIG_64BIT -#define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string} +#ifdef CONFIG_ARCH_USES_PG_ARCH_X +#define IF_HAVE_PG_ARCH_X(flag,string) ,{1UL << flag, string} #else -#define IF_HAVE_PG_ARCH_2(flag,string) +#define IF_HAVE_PG_ARCH_X(flag,string) #endif #ifdef CONFIG_KASAN_HW_TAGS @@ -130,7 +130,8 @@ IF_HAVE_PG_UNCACHED(PG_uncached, "uncached" ) \ IF_HAVE_PG_HWPOISON(PG_hwpoison, "hwpoison" ) \ IF_HAVE_PG_IDLE(PG_young, "young" ) \ IF_HAVE_PG_IDLE(PG_idle, "idle" ) \ -IF_HAVE_PG_ARCH_2(PG_arch_2, "arch_2" ) \ +IF_HAVE_PG_ARCH_X(PG_arch_2, "arch_2" ) \ +IF_HAVE_PG_ARCH_X(PG_arch_3, "arch_3" ) \ IF_HAVE_PG_SKIP_KASAN_POISON(PG_skip_kasan_poison, "skip_kasan_poison") #define show_page_flags(flags) \ diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h index cf243de41cc8..12b35e4ff917 100644 --- a/include/trace/events/pwm.h +++ b/include/trace/events/pwm.h @@ -10,9 +10,9 @@ DECLARE_EVENT_CLASS(pwm, - TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state), + TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err), - TP_ARGS(pwm, state), + TP_ARGS(pwm, state, err), TP_STRUCT__entry( __field(struct pwm_device *, pwm) @@ -20,6 +20,7 @@ DECLARE_EVENT_CLASS(pwm, __field(u64, duty_cycle) __field(enum pwm_polarity, polarity) __field(bool, enabled) + __field(int, err) ), TP_fast_assign( @@ -28,28 +29,27 @@ DECLARE_EVENT_CLASS(pwm, __entry->duty_cycle = state->duty_cycle; __entry->polarity = state->polarity; __entry->enabled = state->enabled; + __entry->err = err; ), - TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d", + TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d", __entry->pwm, __entry->period, __entry->duty_cycle, - __entry->polarity, __entry->enabled) + __entry->polarity, __entry->enabled, __entry->err) ); DEFINE_EVENT(pwm, pwm_apply, - TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state), - - TP_ARGS(pwm, state) + TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err), + TP_ARGS(pwm, state, err) ); DEFINE_EVENT(pwm, pwm_get, - TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state), - - TP_ARGS(pwm, state) + TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err), + TP_ARGS(pwm, state, err) ); #endif /* _TRACE_PWM_H */ diff --git a/include/trace/events/rwmmio.h b/include/trace/events/rwmmio.h index de41159216c1..a43e5dd7436b 100644 --- a/include/trace/events/rwmmio.h +++ b/include/trace/events/rwmmio.h @@ -12,12 +12,14 @@ DECLARE_EVENT_CLASS(rwmmio_rw_template, - TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + volatile void __iomem *addr), - TP_ARGS(caller, val, width, addr), + TP_ARGS(caller, caller0, val, width, addr), TP_STRUCT__entry( __field(unsigned long, caller) + __field(unsigned long, caller0) __field(unsigned long, addr) __field(u64, val) __field(u8, width) @@ -25,56 +27,64 @@ DECLARE_EVENT_CLASS(rwmmio_rw_template, TP_fast_assign( __entry->caller = caller; + __entry->caller0 = caller0; __entry->val = val; __entry->addr = (unsigned long)addr; __entry->width = width; ), - TP_printk("%pS width=%d val=%#llx addr=%#lx", - (void *)__entry->caller, __entry->width, + TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->val, __entry->addr) ); 
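With the second caller recorded, the rwmmio events can print a short call chain ("%pS -> %pS"). A sketch of how an accessor wrapper could feed both values, assuming the usual _THIS_IP_/_RET_IP_ convention of the generic accessors; the wrapper name is hypothetical, and in-tree users normally go through the log_*_mmio() helpers rather than calling the tracepoints directly:

static inline void example_traced_write32(u32 val, volatile void __iomem *addr)
{
	/*
	 * _THIS_IP_ resolves inside the function this wrapper is inlined
	 * into; _RET_IP_ is that function's caller, producing the
	 * "caller0 -> caller" chain in the trace output.
	 */
	trace_rwmmio_write(_THIS_IP_, _RET_IP_, val, 32, addr);
	writel_relaxed(val, addr);
	trace_rwmmio_post_write(_THIS_IP_, _RET_IP_, val, 32, addr);
}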
DEFINE_EVENT(rwmmio_rw_template, rwmmio_write, - TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), - TP_ARGS(caller, val, width, addr) + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + volatile void __iomem *addr), + TP_ARGS(caller, caller0, val, width, addr) ); DEFINE_EVENT(rwmmio_rw_template, rwmmio_post_write, - TP_PROTO(unsigned long caller, u64 val, u8 width, volatile void __iomem *addr), - TP_ARGS(caller, val, width, addr) + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + volatile void __iomem *addr), + TP_ARGS(caller, caller0, val, width, addr) ); TRACE_EVENT(rwmmio_read, - TP_PROTO(unsigned long caller, u8 width, const volatile void __iomem *addr), + TP_PROTO(unsigned long caller, unsigned long caller0, u8 width, + const volatile void __iomem *addr), - TP_ARGS(caller, width, addr), + TP_ARGS(caller, caller0, width, addr), TP_STRUCT__entry( __field(unsigned long, caller) + __field(unsigned long, caller0) __field(unsigned long, addr) __field(u8, width) ), TP_fast_assign( __entry->caller = caller; + __entry->caller0 = caller0; __entry->addr = (unsigned long)addr; __entry->width = width; ), - TP_printk("%pS width=%d addr=%#lx", - (void *)__entry->caller, __entry->width, __entry->addr) + TP_printk("%pS -> %pS width=%d addr=%#lx", + (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->addr) ); TRACE_EVENT(rwmmio_post_read, - TP_PROTO(unsigned long caller, u64 val, u8 width, const volatile void __iomem *addr), + TP_PROTO(unsigned long caller, unsigned long caller0, u64 val, u8 width, + const volatile void __iomem *addr), - TP_ARGS(caller, val, width, addr), + TP_ARGS(caller, caller0, val, width, addr), TP_STRUCT__entry( __field(unsigned long, caller) + __field(unsigned long, caller0) __field(unsigned long, addr) __field(u64, val) __field(u8, width) @@ -82,13 +92,14 @@ TRACE_EVENT(rwmmio_post_read, TP_fast_assign( __entry->caller = caller; + __entry->caller0 = caller0; __entry->val = val; __entry->addr = (unsigned long)addr; __entry->width = width; ), - TP_printk("%pS width=%d val=%#llx addr=%#lx", - (void *)__entry->caller, __entry->width, + TP_printk("%pS -> %pS width=%d val=%#llx addr=%#lx", + (void *)__entry->caller0, (void *)__entry->caller, __entry->width, __entry->val, __entry->addr) ); diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index 049b52e7aa6a..c6cfed00d0c6 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -471,7 +471,7 @@ TRACE_EVENT(rxrpc_peer, TP_STRUCT__entry( __field(unsigned int, peer ) __field(int, ref ) - __field(int, why ) + __field(enum rxrpc_peer_trace, why ) ), TP_fast_assign( diff --git a/include/trace/perf.h b/include/trace/perf.h index 5800d13146c3..8f3bf1e17707 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -21,6 +21,9 @@ #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) +#undef __get_cpumask +#define __get_cpumask(field) (char *)__get_dynamic_array(field) + #undef __get_sockaddr #define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) @@ -41,6 +44,9 @@ #undef __get_rel_bitmask #define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field) +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field) + #undef __get_rel_sockaddr #define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field)) diff --git a/include/trace/stages/stage1_struct_define.h 
b/include/trace/stages/stage1_struct_define.h index 1b7bab60434c..69e0dae453bf 100644 --- a/include/trace/stages/stage1_struct_define.h +++ b/include/trace/stages/stage1_struct_define.h @@ -32,6 +32,9 @@ #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) +#undef __cpumask +#define __cpumask(item) __dynamic_array(char, item, -1) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -47,6 +50,9 @@ #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_dynamic_array(char, item, -1) + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h index 1b7a8f764fdd..469b6a64293d 100644 --- a/include/trace/stages/stage2_data_offsets.h +++ b/include/trace/stages/stage2_data_offsets.h @@ -38,6 +38,9 @@ #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) +#undef __cpumask +#define __cpumask(item) __dynamic_array(unsigned long, item, -1) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -53,5 +56,8 @@ #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_dynamic_array(unsigned long, item, -1) + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h index e3b183e9d18e..66374df61ed3 100644 --- a/include/trace/stages/stage3_trace_output.h +++ b/include/trace/stages/stage3_trace_output.h @@ -42,6 +42,9 @@ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ }) +#undef __get_cpumask +#define __get_cpumask(field) __get_bitmask(field) + #undef __get_rel_bitmask #define __get_rel_bitmask(field) \ ({ \ @@ -51,6 +54,9 @@ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ }) +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) __get_rel_bitmask(field) + #undef __get_sockaddr #define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h index a8fb25f39a99..affd541fd25e 100644 --- a/include/trace/stages/stage4_event_fields.h +++ b/include/trace/stages/stage4_event_fields.h @@ -46,6 +46,12 @@ #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) +#undef __cpumask +#define __cpumask(item) { \ + .type = "__data_loc cpumask_t", .name = #item, \ + .size = 4, .align = 4, \ + .is_signed = 0, .filter_type = FILTER_OTHER }, + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -64,5 +70,11 @@ #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) +#undef __rel_cpumask +#define __rel_cpumask(item) { \ + .type = "__rel_loc cpumask_t", .name = #item, \ + .size = 4, .align = 4, \ + .is_signed = 0, .filter_type = FILTER_OTHER }, + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h index fba4c24ed9e6..ac5c24d3beeb 100644 --- a/include/trace/stages/stage5_get_offsets.h +++ b/include/trace/stages/stage5_get_offsets.h @@ -82,10 +82,16 @@ #define 
__bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \ __bitmask_size_in_longs(nr_bits)) +#undef __cpumask +#define __cpumask(item) __bitmask(item, nr_cpumask_bits) + #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \ __bitmask_size_in_longs(nr_bits)) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_bitmask(item, nr_cpumask_bits) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h index 3c554a585320..49c32394b53f 100644 --- a/include/trace/stages/stage6_event_callback.h +++ b/include/trace/stages/stage6_event_callback.h @@ -57,6 +57,16 @@ #define __assign_bitmask(dst, src, nr_bits) \ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) +#undef __cpumask +#define __cpumask(item) __dynamic_array(unsigned long, item, -1) + +#undef __get_cpumask +#define __get_cpumask(field) (char *)__get_dynamic_array(field) + +#undef __assign_cpumask +#define __assign_cpumask(dst, src) \ + memcpy(__get_cpumask(dst), (src), __bitmask_size_in_bytes(nr_cpumask_bits)) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -98,6 +108,16 @@ #define __assign_rel_bitmask(dst, src, nr_bits) \ memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_dynamic_array(unsigned long, item, -1) + +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field) + +#undef __assign_rel_cpumask +#define __assign_rel_cpumask(dst, src) \ + memcpy(__get_rel_cpumask(dst), (src), __bitmask_size_in_bytes(nr_cpumask_bits)) + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h index 8a7ec24c246d..8795429f388b 100644 --- a/include/trace/stages/stage7_class_define.h +++ b/include/trace/stages/stage7_class_define.h @@ -13,11 +13,13 @@ #undef __get_dynamic_array_len #undef __get_str #undef __get_bitmask +#undef __get_cpumask #undef __get_sockaddr #undef __get_rel_dynamic_array #undef __get_rel_dynamic_array_len #undef __get_rel_str #undef __get_rel_bitmask +#undef __get_rel_cpumask #undef __get_rel_sockaddr #undef __print_array #undef __print_hex_dump diff --git a/include/uapi/asm-generic/types.h b/include/uapi/asm-generic/types.h index dfaa50d99d8f..7ad4dd01b8bf 100644 --- a/include/uapi/asm-generic/types.h +++ b/include/uapi/asm-generic/types.h @@ -1,9 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -#ifndef _ASM_GENERIC_TYPES_H -#define _ASM_GENERIC_TYPES_H +#ifndef _UAPI_ASM_GENERIC_TYPES_H +#define _UAPI_ASM_GENERIC_TYPES_H /* * int-ll64 is used everywhere now. */ #include <asm-generic/int-ll64.h> -#endif /* _ASM_GENERIC_TYPES_H */ +#endif /* _UAPI_ASM_GENERIC_TYPES_H */ diff --git a/include/uapi/linux/acrn.h b/include/uapi/linux/acrn.h index ccf47ed92500..7b714c1902eb 100644 --- a/include/uapi/linux/acrn.h +++ b/include/uapi/linux/acrn.h @@ -12,7 +12,6 @@ #define _UAPI_ACRN_H #include <linux/types.h> -#include <linux/uuid.h> #define ACRN_IO_REQUEST_MAX 16 @@ -186,7 +185,7 @@ struct acrn_ioreq_notify { * @reserved0: Reserved and must be 0 * @vcpu_num: Number of vCPU in the VM. Return from hypervisor. * @reserved1: Reserved and must be 0 - * @uuid: UUID of the VM. Pass to hypervisor directly. 
+ * @uuid: Empty space never to be used again (used to be UUID of the VM) * @vm_flag: Flag of the VM creating. Pass to hypervisor directly. * @ioreq_buf: Service VM GPA of I/O request buffer. Pass to * hypervisor directly. @@ -198,7 +197,7 @@ struct acrn_vm_creation { __u16 reserved0; __u16 vcpu_num; __u16 reserved1; - guid_t uuid; + __u8 uuid[16]; __u64 vm_flag; __u64 ioreq_buf; __u64 cpu_affinity; diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h index 3121d127d5aa..955d440be104 100644 --- a/include/uapi/linux/f2fs.h +++ b/include/uapi/linux/f2fs.h @@ -42,6 +42,7 @@ struct f2fs_comp_option) #define F2FS_IOC_DECOMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 23) #define F2FS_IOC_COMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 24) +#define F2FS_IOC_START_ATOMIC_REPLACE _IO(F2FS_IOCTL_MAGIC, 25) /* * should be same as XFS_IOC_GOINGDOWN. diff --git a/include/uapi/linux/hsi/cs-protocol.h b/include/uapi/linux/hsi/cs-protocol.h index c7f6e7672cb5..07c3bfb67463 100644 --- a/include/uapi/linux/hsi/cs-protocol.h +++ b/include/uapi/linux/hsi/cs-protocol.h @@ -6,20 +6,6 @@ * * Contact: Kai Vehmanen <kai.vehmanen@nokia.com> * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA */ #ifndef _CS_PROTOCOL_H diff --git a/include/uapi/linux/hsi/hsi_char.h b/include/uapi/linux/hsi/hsi_char.h index 91623b0398b1..5ef72f0daf94 100644 --- a/include/uapi/linux/hsi/hsi_char.h +++ b/include/uapi/linux/hsi/hsi_char.h @@ -5,20 +5,6 @@ * Copyright (C) 2010 Nokia Corporation. All rights reserved. * * Contact: Andras Domokos <andras.domokos at nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA */ #ifndef __HSI_CHAR_H diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h index 2b9e7feba3f3..1d553bedbdb5 100644 --- a/include/uapi/linux/idxd.h +++ b/include/uapi/linux/idxd.h @@ -295,7 +295,7 @@ struct dsa_completion_record { }; uint32_t delta_rec_size; - uint32_t crc_val; + uint64_t crc_val; /* DIF check & strip */ struct { diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h index 578b18aab821..0824fbc026a1 100644 --- a/include/uapi/linux/if_alg.h +++ b/include/uapi/linux/if_alg.h @@ -52,6 +52,7 @@ struct af_alg_iv { #define ALG_SET_AEAD_ASSOCLEN 4 #define ALG_SET_AEAD_AUTHSIZE 5 #define ALG_SET_DRBG_ENTROPY 6 +#define ALG_SET_KEY_BY_KEY_SERIAL 7 /* Operations */ #define ALG_OP_DECRYPT 0 diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h new file mode 100644 index 000000000000..98ebba80cfa1 --- /dev/null +++ b/include/uapi/linux/iommufd.h @@ -0,0 +1,347 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. + */ +#ifndef _UAPI_IOMMUFD_H +#define _UAPI_IOMMUFD_H + +#include <linux/types.h> +#include <linux/ioctl.h> + +#define IOMMUFD_TYPE (';') + +/** + * DOC: General ioctl format + * + * The ioctl interface follows a general format to allow for extensibility. Each + * ioctl is passed in a structure pointer as the argument providing the size of + * the structure in the first u32. The kernel checks that any structure space + * beyond what it understands is 0. This allows userspace to use the backward + * compatible portion while consistently using the newer, larger, structures. + * + * ioctls use a standard meaning for common errnos: + * + * - ENOTTY: The IOCTL number itself is not supported at all + * - E2BIG: The IOCTL number is supported, but the provided structure has + * non-zero in a part the kernel does not understand. + * - EOPNOTSUPP: The IOCTL number is supported, and the structure is + * understood, however a known field has a value the kernel does not + * understand or support. + * - EINVAL: Everything about the IOCTL was understood, but a field is not + * correct. + * - ENOENT: An ID or IOVA provided does not exist. + * - ENOMEM: Out of memory. + * - EOVERFLOW: Mathematics overflowed. + * + * As well as additional errnos, within specific ioctls. + */ +enum { + IOMMUFD_CMD_BASE = 0x80, + IOMMUFD_CMD_DESTROY = IOMMUFD_CMD_BASE, + IOMMUFD_CMD_IOAS_ALLOC, + IOMMUFD_CMD_IOAS_ALLOW_IOVAS, + IOMMUFD_CMD_IOAS_COPY, + IOMMUFD_CMD_IOAS_IOVA_RANGES, + IOMMUFD_CMD_IOAS_MAP, + IOMMUFD_CMD_IOAS_UNMAP, + IOMMUFD_CMD_OPTION, + IOMMUFD_CMD_VFIO_IOAS, +}; + +/** + * struct iommu_destroy - ioctl(IOMMU_DESTROY) + * @size: sizeof(struct iommu_destroy) + * @id: iommufd object ID to destroy. Can be any destroyable object type. + * + * Destroy any object held within iommufd. + */ +struct iommu_destroy { + __u32 size; + __u32 id; +}; +#define IOMMU_DESTROY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_DESTROY) + +/** + * struct iommu_ioas_alloc - ioctl(IOMMU_IOAS_ALLOC) + * @size: sizeof(struct iommu_ioas_alloc) + * @flags: Must be 0 + * @out_ioas_id: Output IOAS ID for the allocated object + * + * Allocate an IO Address Space (IOAS) which holds an IO Virtual Address (IOVA) + * to memory mapping. 
+ */ +struct iommu_ioas_alloc { + __u32 size; + __u32 flags; + __u32 out_ioas_id; +}; +#define IOMMU_IOAS_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOC) + +/** + * struct iommu_iova_range - ioctl(IOMMU_IOVA_RANGE) + * @start: First IOVA + * @last: Inclusive last IOVA + * + * An interval in IOVA space. + */ +struct iommu_iova_range { + __aligned_u64 start; + __aligned_u64 last; +}; + +/** + * struct iommu_ioas_iova_ranges - ioctl(IOMMU_IOAS_IOVA_RANGES) + * @size: sizeof(struct iommu_ioas_iova_ranges) + * @ioas_id: IOAS ID to read ranges from + * @num_iovas: Input/Output total number of ranges in the IOAS + * @__reserved: Must be 0 + * @allowed_iovas: Pointer to the output array of struct iommu_iova_range + * @out_iova_alignment: Minimum alignment required for mapping IOVA + * + * Query an IOAS for ranges of allowed IOVAs. Mapping IOVA outside these ranges + * is not allowed. num_iovas will be set to the total number of iovas and + * the allowed_iovas[] will be filled in as space permits. + * + * The allowed ranges are dependent on the HW path the DMA operation takes, and + * can change during the lifetime of the IOAS. A fresh empty IOAS will have a + * full range, and each attached device will narrow the ranges based on that + * device's HW restrictions. Detaching a device can widen the ranges. Userspace + * should query ranges after every attach/detach to know what IOVAs are valid + * for mapping. + * + * On input num_iovas is the length of the allowed_iovas array. On output it is + * the total number of iovas filled in. The ioctl will return -EMSGSIZE and set + * num_iovas to the required value if num_iovas is too small. In this case the + * caller should allocate a larger output array and re-issue the ioctl. + * + * out_iova_alignment returns the minimum IOVA alignment that can be given + * to IOMMU_IOAS_MAP/COPY. IOVA's must satisfy:: + * + * starting_iova % out_iova_alignment == 0 + * (starting_iova + length) % out_iova_alignment == 0 + * + * out_iova_alignment can be 1 indicating any IOVA is allowed. It cannot + * be higher than the system PAGE_SIZE. + */ +struct iommu_ioas_iova_ranges { + __u32 size; + __u32 ioas_id; + __u32 num_iovas; + __u32 __reserved; + __aligned_u64 allowed_iovas; + __aligned_u64 out_iova_alignment; +}; +#define IOMMU_IOAS_IOVA_RANGES _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_IOVA_RANGES) + +/** + * struct iommu_ioas_allow_iovas - ioctl(IOMMU_IOAS_ALLOW_IOVAS) + * @size: sizeof(struct iommu_ioas_allow_iovas) + * @ioas_id: IOAS ID to allow IOVAs from + * @num_iovas: Input/Output total number of ranges in the IOAS + * @__reserved: Must be 0 + * @allowed_iovas: Pointer to array of struct iommu_iova_range + * + * Ensure a range of IOVAs are always available for allocation. If this call + * succeeds then IOMMU_IOAS_IOVA_RANGES will never return a list of IOVA ranges + * that are narrower than the ranges provided here. This call will fail if + * IOMMU_IOAS_IOVA_RANGES is currently narrower than the given ranges. + * + * When an IOAS is first created the IOVA_RANGES will be maximally sized, and as + * devices are attached the IOVA will narrow based on the device restrictions. + * When an allowed range is specified any narrowing will be refused, ie device + * attachment can fail if the device requires limiting within the allowed range. + * + * Automatic IOVA allocation is also impacted by this call. MAP will only + * allocate within the allowed IOVAs if they are present. + * + * This call replaces the entire allowed list with the given list. 
+ */ +struct iommu_ioas_allow_iovas { + __u32 size; + __u32 ioas_id; + __u32 num_iovas; + __u32 __reserved; + __aligned_u64 allowed_iovas; +}; +#define IOMMU_IOAS_ALLOW_IOVAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_ALLOW_IOVAS) + +/** + * enum iommufd_ioas_map_flags - Flags for map and copy + * @IOMMU_IOAS_MAP_FIXED_IOVA: If clear the kernel will compute an appropriate + * IOVA to place the mapping at + * @IOMMU_IOAS_MAP_WRITEABLE: DMA is allowed to write to this mapping + * @IOMMU_IOAS_MAP_READABLE: DMA is allowed to read from this mapping + */ +enum iommufd_ioas_map_flags { + IOMMU_IOAS_MAP_FIXED_IOVA = 1 << 0, + IOMMU_IOAS_MAP_WRITEABLE = 1 << 1, + IOMMU_IOAS_MAP_READABLE = 1 << 2, +}; + +/** + * struct iommu_ioas_map - ioctl(IOMMU_IOAS_MAP) + * @size: sizeof(struct iommu_ioas_map) + * @flags: Combination of enum iommufd_ioas_map_flags + * @ioas_id: IOAS ID to change the mapping of + * @__reserved: Must be 0 + * @user_va: Userspace pointer to start mapping from + * @length: Number of bytes to map + * @iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is set + * then this must be provided as input. + * + * Set an IOVA mapping from a user pointer. If FIXED_IOVA is specified then the + * mapping will be established at iova, otherwise a suitable location based on + * the reserved and allowed lists will be automatically selected and returned in + * iova. + * + * If IOMMU_IOAS_MAP_FIXED_IOVA is specified then the iova range must currently + * be unused, existing IOVA cannot be replaced. + */ +struct iommu_ioas_map { + __u32 size; + __u32 flags; + __u32 ioas_id; + __u32 __reserved; + __aligned_u64 user_va; + __aligned_u64 length; + __aligned_u64 iova; +}; +#define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP) + +/** + * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY) + * @size: sizeof(struct iommu_ioas_copy) + * @flags: Combination of enum iommufd_ioas_map_flags + * @dst_ioas_id: IOAS ID to change the mapping of + * @src_ioas_id: IOAS ID to copy from + * @length: Number of bytes to copy and map + * @dst_iova: IOVA the mapping was placed at. If IOMMU_IOAS_MAP_FIXED_IOVA is + * set then this must be provided as input. + * @src_iova: IOVA to start the copy + * + * Copy an already existing mapping from src_ioas_id and establish it in + * dst_ioas_id. The src iova/length must exactly match a range used with + * IOMMU_IOAS_MAP. + * + * This may be used to efficiently clone a subset of an IOAS to another, or as a + * kind of 'cache' to speed up mapping. Copy has an efficiency advantage over + * establishing equivalent new mappings, as internal resources are shared, and + * the kernel will pin the user memory only once. + */ +struct iommu_ioas_copy { + __u32 size; + __u32 flags; + __u32 dst_ioas_id; + __u32 src_ioas_id; + __aligned_u64 length; + __aligned_u64 dst_iova; + __aligned_u64 src_iova; +}; +#define IOMMU_IOAS_COPY _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_COPY) + +/** + * struct iommu_ioas_unmap - ioctl(IOMMU_IOAS_UNMAP) + * @size: sizeof(struct iommu_ioas_unmap) + * @ioas_id: IOAS ID to change the mapping of + * @iova: IOVA to start the unmapping at + * @length: Number of bytes to unmap, and return back the bytes unmapped + * + * Unmap an IOVA range. The iova/length must be a superset of a previously + * mapped range used with IOMMU_IOAS_MAP or IOMMU_IOAS_COPY. Splitting or + * truncating ranges is not allowed. The values 0 to U64_MAX will unmap + * everything. 
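+ *
+ * For example, unmapping the entire IOAS (a sketch; UINT64_MAX from
+ * <stdint.h> is the userspace spelling of U64_MAX):
+ *
+ *	struct iommu_ioas_unmap cmd = {
+ *		.size = sizeof(cmd),
+ *		.ioas_id = ioas_id,
+ *		.iova = 0,
+ *		.length = UINT64_MAX,
+ *	};
+ *
+ *	ioctl(iommufd, IOMMU_IOAS_UNMAP, &cmd);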
+ */ +struct iommu_ioas_unmap { + __u32 size; + __u32 ioas_id; + __aligned_u64 iova; + __aligned_u64 length; +}; +#define IOMMU_IOAS_UNMAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_UNMAP) + +/** + * enum iommufd_option - ioctl(IOMMU_OPTION_RLIMIT_MODE) and + * ioctl(IOMMU_OPTION_HUGE_PAGES) + * @IOMMU_OPTION_RLIMIT_MODE: + * Change how RLIMIT_MEMLOCK accounting works. The caller must have privilege + * to invoke this. Value 0 (default) is user based accounting, 1 uses process + * based accounting. Global option, object_id must be 0 + * @IOMMU_OPTION_HUGE_PAGES: + * Value 1 (default) allows contiguous pages to be combined when generating + * iommu mappings. Value 0 disables combining, everything is mapped to + * PAGE_SIZE. This can be useful for benchmarking. This is a per-IOAS + * option, the object_id must be the IOAS ID. + */ +enum iommufd_option { + IOMMU_OPTION_RLIMIT_MODE = 0, + IOMMU_OPTION_HUGE_PAGES = 1, +}; + +/** + * enum iommufd_option_ops - ioctl(IOMMU_OPTION_OP_SET) and + * ioctl(IOMMU_OPTION_OP_GET) + * @IOMMU_OPTION_OP_SET: Set the option's value + * @IOMMU_OPTION_OP_GET: Get the option's value + */ +enum iommufd_option_ops { + IOMMU_OPTION_OP_SET = 0, + IOMMU_OPTION_OP_GET = 1, +}; + +/** + * struct iommu_option - iommu option multiplexer + * @size: sizeof(struct iommu_option) + * @option_id: One of enum iommufd_option + * @op: One of enum iommufd_option_ops + * @__reserved: Must be 0 + * @object_id: ID of the object if required + * @val64: Option value to set or value returned on get + * + * Change a simple option value. This multiplexer allows controlling options + * on objects. IOMMU_OPTION_OP_SET will load an option and IOMMU_OPTION_OP_GET + * will return the current value. + */ +struct iommu_option { + __u32 size; + __u32 option_id; + __u16 op; + __u16 __reserved; + __u32 object_id; + __aligned_u64 val64; +}; +#define IOMMU_OPTION _IO(IOMMUFD_TYPE, IOMMUFD_CMD_OPTION) + +/** + * enum iommufd_vfio_ioas_op - IOMMU_VFIO_IOAS_* ioctls + * @IOMMU_VFIO_IOAS_GET: Get the current compatibility IOAS + * @IOMMU_VFIO_IOAS_SET: Change the current compatibility IOAS + * @IOMMU_VFIO_IOAS_CLEAR: Disable VFIO compatibility + */ +enum iommufd_vfio_ioas_op { + IOMMU_VFIO_IOAS_GET = 0, + IOMMU_VFIO_IOAS_SET = 1, + IOMMU_VFIO_IOAS_CLEAR = 2, +}; + +/** + * struct iommu_vfio_ioas - ioctl(IOMMU_VFIO_IOAS) + * @size: sizeof(struct iommu_vfio_ioas) + * @ioas_id: For IOMMU_VFIO_IOAS_SET the input IOAS ID to set + * For IOMMU_VFIO_IOAS_GET will output the IOAS ID + * @op: One of enum iommufd_vfio_ioas_op + * @__reserved: Must be 0 + * + * The VFIO compatibility support uses a single ioas because VFIO APIs do not + * support the ID field. Set or Get the IOAS that VFIO compatibility will use. + * When VFIO_GROUP_SET_CONTAINER is used on an iommufd it will get the + * compatibility ioas, either by taking what is already set, or auto creating + * one. From then on VFIO will continue to use that ioas and is not affected by + * this ioctl. SET or CLEAR does not destroy any auto-created IOAS.
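+ *
+ * A sketch of reading back the compatibility IOAS, assuming one has
+ * already been set or auto-created:
+ *
+ *	struct iommu_vfio_ioas cmd = {
+ *		.size = sizeof(cmd),
+ *		.op = IOMMU_VFIO_IOAS_GET,
+ *	};
+ *
+ *	if (ioctl(iommufd, IOMMU_VFIO_IOAS, &cmd) == 0)
+ *		compat_ioas_id = cmd.ioas_id;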
+ */ +struct iommu_vfio_ioas { + __u32 size; + __u32 ioas_id; + __u16 op; + __u16 __reserved; +}; +#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS) +#endif diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 0d5d4419139a..55155e262646 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -86,14 +86,6 @@ struct kvm_debug_guest { /* *** End of deprecated interfaces *** */ -/* for KVM_CREATE_MEMORY_REGION */ -struct kvm_memory_region { - __u32 slot; - __u32 flags; - __u64 guest_phys_addr; - __u64 memory_size; /* bytes */ -}; - /* for KVM_SET_USER_MEMORY_REGION */ struct kvm_userspace_memory_region { __u32 slot; @@ -104,9 +96,9 @@ struct kvm_userspace_memory_region { }; /* - * The bit 0 ~ bit 15 of kvm_memory_region::flags are visible for userspace, - * other bits are reserved for kvm internal use which are defined in - * include/linux/kvm_host.h. + * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for + * userspace, other bits are reserved for kvm internal use which are defined + * in include/linux/kvm_host.h. */ #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) #define KVM_MEM_READONLY (1UL << 1) @@ -485,6 +477,9 @@ struct kvm_run { #define KVM_MSR_EXIT_REASON_INVAL (1 << 0) #define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1) #define KVM_MSR_EXIT_REASON_FILTER (1 << 2) +#define KVM_MSR_EXIT_REASON_VALID_MASK (KVM_MSR_EXIT_REASON_INVAL | \ + KVM_MSR_EXIT_REASON_UNKNOWN | \ + KVM_MSR_EXIT_REASON_FILTER) __u32 reason; /* kernel -> user */ __u32 index; /* kernel -> user */ __u64 data; /* kernel <-> user */ @@ -1178,6 +1173,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_S390_ZPCI_OP 221 #define KVM_CAP_S390_CPU_TOPOLOGY 222 #define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223 +#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224 +#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225 #ifdef KVM_CAP_IRQ_ROUTING @@ -1267,6 +1264,7 @@ struct kvm_x86_mce { #define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3) #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4) #define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5) +#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6) struct kvm_xen_hvm_config { __u32 flags; @@ -1438,17 +1436,11 @@ struct kvm_vfio_spapr_tce { }; /* - * ioctls for VM fds - */ -#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) -/* * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns * a vcpu fd. 
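 *
 * For example (a sketch; vm_fd is assumed to be a VM file descriptor
 * obtained from KVM_CREATE_VM):
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);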
*/ #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) -/* KVM_SET_MEMORY_ALIAS is obsolete: */ -#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \ @@ -1740,6 +1732,8 @@ enum pv_cmd_id { KVM_PV_UNSHARE_ALL, KVM_PV_INFO, KVM_PV_DUMP, + KVM_PV_ASYNC_CLEANUP_PREPARE, + KVM_PV_ASYNC_CLEANUP_PERFORM, }; struct kvm_pv_cmd { @@ -1770,8 +1764,10 @@ struct kvm_xen_hvm_attr { union { __u8 long_mode; __u8 vector; + __u8 runstate_update_flag; struct { __u64 gfn; +#define KVM_XEN_INVALID_GFN ((__u64)-1) } shared_info; struct { __u32 send_port; @@ -1803,6 +1799,7 @@ struct kvm_xen_hvm_attr { } u; }; + /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */ #define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0 #define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1 @@ -1810,6 +1807,8 @@ struct kvm_xen_hvm_attr { /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */ #define KVM_XEN_ATTR_TYPE_EVTCHN 0x3 #define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4 +/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */ +#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5 /* Per-vCPU Xen attributes */ #define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr) @@ -1826,6 +1825,7 @@ struct kvm_xen_vcpu_attr { __u16 pad[3]; union { __u64 gpa; +#define KVM_XEN_INVALID_GPA ((__u64)-1) __u64 pad[8]; struct { __u64 state; diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 82a03ea954af..85ab1278811e 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -1058,6 +1058,7 @@ /* Precision Time Measurement */ #define PCI_PTM_CAP 0x04 /* PTM Capability */ #define PCI_PTM_CAP_REQ 0x00000001 /* Requester capable */ +#define PCI_PTM_CAP_RES 0x00000002 /* Responder capable */ #define PCI_PTM_CAP_ROOT 0x00000004 /* Root capable */ #define PCI_PTM_GRANULARITY_MASK 0x0000FF00 /* Clock granularity */ #define PCI_PTM_CTRL 0x08 /* PTM Control */ diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h index ccc78cbf1221..d8126415966f 100644 --- a/include/uapi/linux/pr.h +++ b/include/uapi/linux/pr.h @@ -4,6 +4,23 @@ #include <linux/types.h> +enum pr_status { + PR_STS_SUCCESS = 0x0, + /* + * The following error codes are based on SCSI, because the interface + * was originally created for it and has existing users. + */ + /* Generic device failure. */ + PR_STS_IOERR = 0x2, + PR_STS_RESERVATION_CONFLICT = 0x18, + /* Temporary path failure that can be retried. */ + PR_STS_RETRY_PATH_FAILURE = 0xe0000, + /* The request was failed due to a fast failure timer. */ + PR_STS_PATH_FAST_FAILED = 0xf0000, + /* The path cannot be reached and has been marked as failed. */ + PR_STS_PATH_FAILED = 0x10000, +}; + enum pr_type { PR_WRITE_EXCLUSIVE = 1, PR_EXCLUSIVE_ACCESS = 2, diff --git a/include/uapi/linux/serial.h b/include/uapi/linux/serial.h index cea06924b295..53bc1af67a41 100644 --- a/include/uapi/linux/serial.h +++ b/include/uapi/linux/serial.h @@ -107,33 +107,50 @@ struct serial_icounter_struct { int reserved[9]; }; -/* +/** + * struct serial_rs485 - serial interface for controlling RS485 settings. + * @flags: RS485 feature flags. + * @delay_rts_before_send: Delay before send (milliseconds). + * @delay_rts_after_send: Delay after send (milliseconds). 
+ * @addr_recv: Receive filter for RS485 addressing mode + * (used only when %SER_RS485_ADDR_RECV is set). + * @addr_dest: Destination address for RS485 addressing mode + * (used only when %SER_RS485_ADDR_DEST is set). + * @padding0: Padding (set to zero). + * @padding1: Padding (set to zero). + * @padding: Deprecated, use @padding0 and @padding1 instead. + * Do not use with @addr_recv and @addr_dest (due to + * overlap). + * + * Serial interface for controlling RS485 settings on chips with suitable + * support. Set with TIOCSRS485 and get with TIOCGRS485 if supported by your + * platform. The set function returns the new state, with any unsupported bits + * reverted appropriately. + * + * The flag bits are: + * + * * %SER_RS485_ENABLED - RS485 enabled. + * * %SER_RS485_RTS_ON_SEND - Logical level for RTS pin when sending. + * * %SER_RS485_RTS_AFTER_SEND - Logical level for RTS pin after transmission. + * * %SER_RS485_RX_DURING_TX - Full-duplex RS485 line. + * * %SER_RS485_TERMINATE_BUS - Enable bus termination (if supported). + * * %SER_RS485_ADDRB - Enable RS485 addressing mode. + * * %SER_RS485_ADDR_RECV - Receive address filter (enables @addr_recv). Requires %SER_RS485_ADDRB. + * * %SER_RS485_ADDR_DEST - Destination address (enables @addr_dest). Requires %SER_RS485_ADDRB. */ - struct serial_rs485 { - __u32 flags; /* RS485 feature flags */ -#define SER_RS485_ENABLED (1 << 0) /* If enabled */ -#define SER_RS485_RTS_ON_SEND (1 << 1) /* Logical level for - RTS pin when - sending */ -#define SER_RS485_RTS_AFTER_SEND (1 << 2) /* Logical level for - RTS pin after sent*/ + __u32 flags; +#define SER_RS485_ENABLED (1 << 0) +#define SER_RS485_RTS_ON_SEND (1 << 1) +#define SER_RS485_RTS_AFTER_SEND (1 << 2) #define SER_RS485_RX_DURING_TX (1 << 4) -#define SER_RS485_TERMINATE_BUS (1 << 5) /* Enable bus - termination - (if supported) */ - -/* RS-485 addressing mode */ -#define SER_RS485_ADDRB (1 << 6) /* Enable addressing mode */ -#define SER_RS485_ADDR_RECV (1 << 7) /* Receive address filter */ -#define SER_RS485_ADDR_DEST (1 << 8) /* Destination address */ +#define SER_RS485_TERMINATE_BUS (1 << 5) +#define SER_RS485_ADDRB (1 << 6) +#define SER_RS485_ADDR_RECV (1 << 7) +#define SER_RS485_ADDR_DEST (1 << 8) - __u32 delay_rts_before_send; /* Delay before send (milliseconds) */ - __u32 delay_rts_after_send; /* Delay after send (milliseconds) */ + __u32 delay_rts_before_send; + __u32 delay_rts_after_send; /* The fields below are defined by flags */ union { diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h index 0723a9cce747..01717181339e 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -3,7 +3,7 @@ #define _UAPI_LINUX_SWAB_H #include <linux/types.h> -#include <linux/compiler.h> +#include <linux/stddef.h> #include <asm/bitsperlong.h> #include <asm/swab.h> diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h index 652f169a019e..8d7824dde1b2 100644 --- a/include/uapi/linux/usb/g_uvc.h +++ b/include/uapi/linux/usb/g_uvc.h @@ -21,6 +21,9 @@ #define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5) #define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5) +#define UVC_STRING_CONTROL_IDX 0 +#define UVC_STRING_STREAMING_IDX 1 + struct uvc_request_data { __s32 length; __u8 data[60]; diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h index bfdae12cdacf..6e8e572c2980 100644 --- a/include/uapi/linux/usb/video.h +++ b/include/uapi/linux/usb/video.h @@ -466,7 +466,7 @@ struct uvc_format_uncompressed { __u8 bDefaultFrameIndex; __u8 bAspectRatioX; __u8
bAspectRatioY; - __u8 bmInterfaceFlags; + __u8 bmInterlaceFlags; __u8 bCopyProtect; } __attribute__((__packed__)); @@ -522,7 +522,7 @@ struct uvc_format_mjpeg { __u8 bDefaultFrameIndex; __u8 bAspectRatioX; __u8 bAspectRatioY; - __u8 bmInterfaceFlags; + __u8 bmInterlaceFlags; __u8 bCopyProtect; } __attribute__((__packed__)); diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index d7d8e0922376..23105eb036fa 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -819,12 +819,20 @@ struct vfio_device_feature { * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P means that RUNNING_P2P * is supported in addition to the STOP_COPY states. + * + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_PRE_COPY means that + * PRE_COPY is supported in addition to the STOP_COPY states. + * + * VFIO_MIGRATION_STOP_COPY | VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY + * means that RUNNING_P2P, PRE_COPY and PRE_COPY_P2P are supported + * in addition to the STOP_COPY states. + * * Other combinations of flags have behavior to be defined in the future. */ struct vfio_device_feature_migration { __aligned_u64 flags; #define VFIO_MIGRATION_STOP_COPY (1 << 0) #define VFIO_MIGRATION_P2P (1 << 1) +#define VFIO_MIGRATION_PRE_COPY (1 << 2) }; #define VFIO_DEVICE_FEATURE_MIGRATION 1 @@ -875,8 +883,13 @@ struct vfio_device_feature_mig_state { * RESUMING - The device is stopped and is loading a new internal state * ERROR - The device has failed and must be reset * - * And 1 optional state to support VFIO_MIGRATION_P2P: + * And optional states to support VFIO_MIGRATION_P2P: * RUNNING_P2P - RUNNING, except the device cannot do peer to peer DMA + * And VFIO_MIGRATION_PRE_COPY: + * PRE_COPY - The device is running normally but tracking internal state + * changes + * And VFIO_MIGRATION_P2P | VFIO_MIGRATION_PRE_COPY: + * PRE_COPY_P2P - PRE_COPY, except the device cannot do peer to peer DMA * * The FSM takes actions on the arcs between FSM states. The driver implements * the following behavior for the FSM arcs: @@ -908,20 +921,48 @@ struct vfio_device_feature_mig_state { * * To abort a RESUMING session the device must be reset. * + * PRE_COPY -> RUNNING * RUNNING_P2P -> RUNNING * While in RUNNING the device is fully operational, the device may generate * interrupts, DMA, respond to MMIO, all vfio device regions are functional, * and the device may advance its internal state. * + * The PRE_COPY arc will terminate a data transfer session. + * + * PRE_COPY_P2P -> RUNNING_P2P * RUNNING -> RUNNING_P2P * STOP -> RUNNING_P2P * While in RUNNING_P2P the device is partially running in the P2P quiescent * state defined below. * + * The PRE_COPY_P2P arc will terminate a data transfer session. + * + * RUNNING -> PRE_COPY + * RUNNING_P2P -> PRE_COPY_P2P * STOP -> STOP_COPY - * This arc begin the process of saving the device state and will return a - * new data_fd. + * PRE_COPY, PRE_COPY_P2P and STOP_COPY form the "saving group" of states + * which share a data transfer session. Moving between these states alters + * what is streamed in the session, but does not terminate or otherwise affect + * the associated fd. + * + * These arcs begin the process of saving the device state and will return a + * new data_fd. The migration driver may perform actions such as enabling + * dirty logging of device state when entering PRE_COPY or PRE_COPY_P2P. + * + * Each arc does not change the device operation, the device remains + * RUNNING, P2P quiesced or in STOP. The STOP_COPY state is described below + * in PRE_COPY_P2P -> STOP_COPY.
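+ *
+ * As a hedged illustration (not itself part of this header), userspace
+ * would request one of these arcs with the VFIO_DEVICE_FEATURE ioctl
+ * using the VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE feature defined
+ * elsewhere in this file, e.g.:
+ *
+ *	struct {
+ *		struct vfio_device_feature feature;
+ *		struct vfio_device_feature_mig_state state;
+ *	} cmd = {
+ *		.feature.argsz = sizeof(cmd),
+ *		.feature.flags = VFIO_DEVICE_FEATURE_SET |
+ *				 VFIO_DEVICE_FEATURE_MIG_DEVICE_STATE,
+ *		.state.device_state = VFIO_DEVICE_STATE_PRE_COPY,
+ *	};
+ *
+ *	ioctl(device_fd, VFIO_DEVICE_FEATURE, &cmd);
+ *
+ * On success for a saving group arc, cmd.state.data_fd returns the new
+ * data transfer session's FD.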
* + * PRE_COPY -> PRE_COPY_P2P + * Entering PRE_COPY_P2P continues all the behaviors of PRE_COPY above. + * However, while in the PRE_COPY_P2P state, the device is partially running + * in the P2P quiescent state defined below, like RUNNING_P2P. + * + * PRE_COPY_P2P -> PRE_COPY + * This arc allows returning the device to a full RUNNING behavior while + * continuing all the behaviors of PRE_COPY. + * + * PRE_COPY_P2P -> STOP_COPY * While in the STOP_COPY state the device has the same behavior as STOP * with the addition that the data transfer session continues to stream the * migration state. End of stream on the FD indicates the entire device @@ -939,6 +980,13 @@ struct vfio_device_feature_mig_state { * device state for this arc if required to prepare the device to receive the * migration data. * + * STOP_COPY -> PRE_COPY + * STOP_COPY -> PRE_COPY_P2P + * These arcs are not permitted and return an error if requested. Future + * revisions of this API may define behaviors for these arcs, in which case + * support will be discoverable by a new flag in + * VFIO_DEVICE_FEATURE_MIGRATION. + * * any -> ERROR * ERROR cannot be specified as a device state, however any transition request * can be failed with an errno return and may then move the device_state into @@ -950,7 +998,7 @@ struct vfio_device_feature_mig_state { * The optional peer to peer (P2P) quiescent state is intended to be a quiescent * state for the device for the purposes of managing multiple devices within a * user context where peer-to-peer DMA between devices may be active. The - * RUNNING_P2P states must prevent the device from initiating + * RUNNING_P2P and PRE_COPY_P2P states must prevent the device from initiating * any new P2P DMA transactions. If the device can identify P2P transactions * then it can stop only P2P DMA, otherwise it must stop all DMA. The migration * driver must complete any such outstanding operations prior to completing the @@ -963,6 +1011,8 @@ struct vfio_device_feature_mig_state { * above FSM arcs. As there are multiple paths through the FSM arcs the path * should be selected based on the following rules: * - Select the shortest path. + * - The path cannot have saving group states as interior arcs, only + * starting/end states. * Refer to vfio_mig_get_next_state() for the result of the algorithm. * * The automatic transit through the FSM arcs that make up the combination @@ -976,6 +1026,9 @@ struct vfio_device_feature_mig_state { * support them. The user can discover if these states are supported by using * VFIO_DEVICE_FEATURE_MIGRATION. By using combination transitions the user can * avoid knowing about these optional states if the kernel driver supports them. + * + * Arcs touching PRE_COPY and PRE_COPY_P2P are removed if support for PRE_COPY + * is not present. */ enum vfio_device_mig_state { VFIO_DEVICE_STATE_ERROR = 0, @@ -984,8 +1037,70 @@ enum vfio_device_mig_state { VFIO_DEVICE_STATE_STOP_COPY = 3, VFIO_DEVICE_STATE_RESUMING = 4, VFIO_DEVICE_STATE_RUNNING_P2P = 5, + VFIO_DEVICE_STATE_PRE_COPY = 6, + VFIO_DEVICE_STATE_PRE_COPY_P2P = 7, +}; + +/** + * VFIO_MIG_GET_PRECOPY_INFO - _IO(VFIO_TYPE, VFIO_BASE + 21) + * + * This ioctl is used on the migration data FD in the precopy phase of the + * migration data transfer. It returns an estimate of the current data sizes + * remaining to be transferred. It allows the user to judge when it is + * appropriate to leave PRE_COPY for STOP_COPY.
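+ *
+ * A polling sketch (data_fd is assumed to be the migration data FD
+ * returned when entering a PRE_COPY state; error handling omitted):
+ *
+ *	struct vfio_precopy_info info = { .argsz = sizeof(info) };
+ *
+ *	ioctl(data_fd, VFIO_MIG_GET_PRECOPY_INFO, &info);
+ *	if (info.initial_bytes == 0)
+ *		request_stop_copy();	(a hypothetical helper)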
+ * + * This ioctl is valid only in PRE_COPY states and the kernel driver should + * return -EINVAL from any other migration state. + * + * The vfio_precopy_info data structure returned by this ioctl provides + * estimates of data available from the device during the PRE_COPY states. + * This estimate is split into two categories, initial_bytes and + * dirty_bytes. + * + * The initial_bytes field indicates the amount of initial precopy + * data available from the device. This field should have a non-zero initial + * value and decrease as migration data is read from the device. + * It is recommended to leave PRE_COPY for STOP_COPY only after this field + * reaches zero. Leaving PRE_COPY earlier might make things slower. + * + * The dirty_bytes field tracks device state changes relative to data + * previously retrieved. This field starts at zero and may increase as + * the internal device state is modified or decrease as that modified + * state is read from the device. + * + * Userspace may use the combination of these fields to estimate the + * potential data size available during the PRE_COPY phases, as well as + * trends relative to the rate the device is dirtying its internal + * state, but these fields are not required to have any bearing relative + * to the data size available during the STOP_COPY phase. + * + * Drivers have a lot of flexibility in when and what they transfer during the + * PRE_COPY phase, and how they report this from VFIO_MIG_GET_PRECOPY_INFO. + * + * During pre-copy the migration data FD has a temporary "end of stream" that is + * reached when both initial_bytes and dirty_bytes are zero. For instance, this + * may indicate that the device is idle and not currently dirtying any internal + * state. When read() is done on this temporary end of stream the kernel driver + * should return ENOMSG from read(). Userspace can wait for more data (which may + * never come) by using poll(). + * + * Once in STOP_COPY the migration data FD has a permanent end of stream + * signaled in the usual way by read() always returning 0 and poll always + * returning readable. ENOMSG may not be returned in STOP_COPY. + * Support for this ioctl is mandatory if a driver claims to support + * VFIO_MIGRATION_PRE_COPY. + * + * Return: 0 on success, -1 and errno set on failure. + */ +struct vfio_precopy_info { + __u32 argsz; + __u32 flags; + __aligned_u64 initial_bytes; + __aligned_u64 dirty_bytes; }; +#define VFIO_MIG_GET_PRECOPY_INFO _IO(VFIO_TYPE, VFIO_BASE + 21) + /* * Upon VFIO_DEVICE_FEATURE_SET, allow the device to be moved into a low power * state with the platform-based power management. Device use of lower power @@ -1128,6 +1243,19 @@ struct vfio_device_feature_dma_logging_report { #define VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT 8 +/* + * Upon VFIO_DEVICE_FEATURE_GET read back the estimated data length that will + * be required to complete stop copy. + * + * Note: Can be called in any device state.
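+ *
+ * A sketch of querying the estimate (VFIO_DEVICE_FEATURE_GET is the read
+ * direction of the VFIO_DEVICE_FEATURE ioctl; error handling omitted):
+ *
+ *	struct {
+ *		struct vfio_device_feature feature;
+ *		struct vfio_device_feature_mig_data_size size;
+ *	} cmd = {
+ *		.feature.argsz = sizeof(cmd),
+ *		.feature.flags = VFIO_DEVICE_FEATURE_GET |
+ *				 VFIO_DEVICE_FEATURE_MIG_DATA_SIZE,
+ *	};
+ *
+ *	if (ioctl(device_fd, VFIO_DEVICE_FEATURE, &cmd) == 0)
+ *		planned_bytes = cmd.size.stop_copy_length;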
+ */ + +struct vfio_device_feature_mig_data_size { + __aligned_u64 stop_copy_length; +}; + +#define VFIO_DEVICE_FEATURE_MIG_DATA_SIZE 9 + /* -------- API for Type1 VFIO IOMMU -------- */ /** diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h index 5e29f2cfa42d..f33d914d8f46 100644 --- a/include/uapi/misc/fastrpc.h +++ b/include/uapi/misc/fastrpc.h @@ -13,6 +13,7 @@ #define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap) #define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap) #define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8) +#define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static) #define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map) #define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap) #define FASTRPC_IOCTL_GET_DSP_INFO _IOWR('R', 13, struct fastrpc_ioctl_capability) @@ -87,6 +88,12 @@ struct fastrpc_init_create { __u64 file; /* pointer to elf file */ }; +struct fastrpc_init_create_static { + __u32 namelen; /* length of pd process name */ + __u32 memlen; + __u64 name; /* pd process name */ +}; + struct fastrpc_alloc_dma_buf { __s32 fd; /* fd */ __u32 flags; /* flags to map with */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index e00ebe05097d..3b995e841eb8 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -597,6 +597,10 @@ enum gaudi2_engine_id { GAUDI2_ENGINE_ID_NIC10_1, GAUDI2_ENGINE_ID_NIC11_0, GAUDI2_ENGINE_ID_NIC11_1, + GAUDI2_ENGINE_ID_PCIE, + GAUDI2_ENGINE_ID_PSOC, + GAUDI2_ENGINE_ID_ARC_FARM, + GAUDI2_ENGINE_ID_KDMA, GAUDI2_ENGINE_ID_SIZE }; @@ -717,6 +721,8 @@ enum hl_server_type { * HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE - Indicates device is unavailable * HL_NOTIFIER_EVENT_USER_ENGINE_ERR - Indicates device engine in error state * HL_NOTIFIER_EVENT_GENERAL_HW_ERR - Indicates device HW error + * HL_NOTIFIER_EVENT_RAZWI - Indicates razwi happened + * HL_NOTIFIER_EVENT_PAGE_FAULT - Indicates page fault happened */ #define HL_NOTIFIER_EVENT_TPC_ASSERT (1ULL << 0) #define HL_NOTIFIER_EVENT_UNDEFINED_OPCODE (1ULL << 1) @@ -725,6 +731,8 @@ enum hl_server_type { #define HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE (1ULL << 4) #define HL_NOTIFIER_EVENT_USER_ENGINE_ERR (1ULL << 5) #define HL_NOTIFIER_EVENT_GENERAL_HW_ERR (1ULL << 6) +#define HL_NOTIFIER_EVENT_RAZWI (1ULL << 7) +#define HL_NOTIFIER_EVENT_PAGE_FAULT (1ULL << 8) /* Opcode for management ioctl * @@ -778,6 +786,9 @@ enum hl_server_type { * HL_INFO_UNREGISTER_EVENTFD - Unregister eventfd * HL_INFO_GET_EVENTS - Retrieve the last occurred events * HL_INFO_UNDEFINED_OPCODE_EVENT - Retrieve last undefined opcode error information. + * HL_INFO_ENGINE_STATUS - Retrieve the status of all the h/w engines in the asic. + * HL_INFO_PAGE_FAULT_EVENT - Retrieve parameters of captured page fault. + * HL_INFO_USER_MAPPINGS - Retrieve user mappings, captured after page fault event. */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -809,6 +820,8 @@ enum hl_server_type { #define HL_INFO_GET_EVENTS 30 #define HL_INFO_UNDEFINED_OPCODE_EVENT 31 #define HL_INFO_ENGINE_STATUS 32 +#define HL_INFO_PAGE_FAULT_EVENT 33 +#define HL_INFO_USER_MAPPINGS 34 #define HL_INFO_VERSION_MAX_LEN 128 #define HL_INFO_CARD_NAME_MAX_LEN 16 @@ -859,6 +872,7 @@ enum hl_server_type { * @number_of_user_interrupts: The number of interrupts that are available to the userspace * application to use. Relevant for Gaudi2 and later. 
* @device_mem_alloc_default_page_size: default page size used in device memory allocation. + * @revision_id: PCI revision ID of the ASIC. */ struct hl_info_hw_ip_info { __u64 sram_base_address; @@ -889,6 +903,12 @@ struct hl_info_hw_ip_info { __u16 pad2; __u64 reserved4; __u64 device_mem_alloc_default_page_size; + __u64 reserved5; + __u64 reserved6; + __u32 reserved7; + __u8 reserved8; + __u8 revision_id; + __u8 pad[2]; }; struct hl_info_dram_usage { @@ -896,7 +916,7 @@ struct hl_info_dram_usage { __u64 ctx_dram_mem; }; -#define HL_BUSY_ENGINES_MASK_EXT_SIZE 2 +#define HL_BUSY_ENGINES_MASK_EXT_SIZE 4 struct hl_info_hw_idle { __u32 is_idle; @@ -1071,31 +1091,44 @@ struct hl_info_cs_timeout_event { __u64 seq; }; -#define HL_RAZWI_PAGE_FAULT 0 -#define HL_RAZWI_MMU_ACCESS_ERROR 1 +#define HL_RAZWI_NA_ENG_ID U16_MAX +#define HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR 128 +#define HL_RAZWI_READ BIT(0) +#define HL_RAZWI_WRITE BIT(1) +#define HL_RAZWI_LBW BIT(2) +#define HL_RAZWI_HBW BIT(3) +#define HL_RAZWI_RR BIT(4) +#define HL_RAZWI_ADDR_DEC BIT(5) /** * struct hl_info_razwi_event - razwi information. * @timestamp: timestamp of razwi. * @addr: address which accessing it caused razwi. - * @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does not - * have engine id it will be set to U16_MAX. - * @engine_id_2: second engine id of razwi initiator. Might happen that razwi have 2 possible - * engines which one them caused the razwi. In that case, it will contain the - * second possible engine id, otherwise it will be set to U16_MAX. - * @no_engine_id: if razwi initiator does not have engine id, this field will be set to 1, - * otherwise 0. - * @error_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX. - * @pad: padding to 64 bit. + * @engine_id: engine id of the razwi initiator; if it was initiated by an engine that has no + * engine id, it will be set to HL_RAZWI_NA_ENG_ID. If there are several possible + * engines which caused the razwi, it will hold all of them. + * @num_of_possible_engines: contains the number of possible engine ids. On some asics the razwi + * indication may be common to several engines with no way to determine + * the exact one; in that case the engine_id array will be filled with + * all the engines that could have caused this razwi. On gaudi there may + * be no indication of a specific engine at all, in which case the value + * of this parameter will be zero. + * @flags: bitmask for additional data: HL_RAZWI_READ - razwi caused by read operation + * HL_RAZWI_WRITE - razwi caused by write operation + * HL_RAZWI_LBW - razwi caused by lbw fabric transaction + * HL_RAZWI_HBW - razwi caused by hbw fabric transaction + * HL_RAZWI_RR - razwi caused by range register + * HL_RAZWI_ADDR_DEC - razwi caused by address decode error + * Note: this data is not supported by all asics, in that case the relevant bits will not + * be set. */ struct hl_info_razwi_event { __s64 timestamp; __u64 addr; - __u16 engine_id_1; - __u16 engine_id_2; - __u8 no_engine_id; - __u8 error_type; - __u8 pad[2]; + __u16 engine_id[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR]; + __u16 num_of_possible_engines; + __u8 flags; + __u8 pad[5]; }; #define MAX_QMAN_STREAMS_INFO 4 @@ -1174,6 +1207,29 @@ struct hl_info_sec_attest { __u8 pad0[2]; }; +/** + * struct hl_page_fault_info - page fault information. + * @timestamp: timestamp of page fault. + * @addr: address whose access caused the page fault.
+ * @engine_id: engine id which caused the page fault, supported only in gaudi3. + */ +struct hl_page_fault_info { + __s64 timestamp; + __u64 addr; + __u16 engine_id; + __u8 pad[6]; +}; + +/** + * struct hl_user_mapping - user mapping information. + * @dev_va: device virtual address. + * @size: virtual address mapping size. + */ +struct hl_user_mapping { + __u64 dev_va; + __u64 size; +}; + enum gaudi_dcores { HL_GAUDI_WS_DCORE, HL_GAUDI_WN_DCORE, @@ -1200,6 +1256,8 @@ enum gaudi_dcores { * needed, hence updating this variable so user will know the exact amount * of bytes copied by the kernel to the buffer. * @sec_attest_nonce: Nonce number used for attestation report. + * @array_size: Number of array members copied to user buffer. + * Relevant for HL_INFO_USER_MAPPINGS info ioctl. * @pad: Padding to 64 bit. */ struct hl_info_args { @@ -1215,6 +1273,7 @@ struct hl_info_args { __u32 eventfd; __u32 user_buffer_actual_size; __u32 sec_attest_nonce; + __u32 array_size; }; __u32 pad; diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index f6fde06db4b4..745790ce3c26 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -85,11 +85,26 @@ struct hns_roce_ib_create_qp_resp { __aligned_u64 dwqe_mmap_key; }; +enum { + HNS_ROCE_EXSGE_FLAGS = 1 << 0, +}; + +enum { + HNS_ROCE_RSP_EXSGE_FLAGS = 1 << 0, +}; + struct hns_roce_ib_alloc_ucontext_resp { __u32 qp_tab_size; __u32 cqe_size; __u32 srq_tab_size; __u32 reserved; + __u32 config; + __u32 max_inline_data; +}; + +struct hns_roce_ib_alloc_ucontext { + __u32 config; + __u32 reserved; }; struct hns_roce_ib_alloc_pd_resp { diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 7dd56210226f..d7c5aaa32744 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -57,6 +57,8 @@ enum ib_uverbs_access_flags { IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5, IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6, IB_UVERBS_ACCESS_HUGETLB = 1 << 7, + IB_UVERBS_ACCESS_FLUSH_GLOBAL = 1 << 8, + IB_UVERBS_ACCESS_FLUSH_PERSISTENT = 1 << 9, IB_UVERBS_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_OPTIONAL_FIRST, IB_UVERBS_ACCESS_OPTIONAL_RANGE = @@ -251,6 +253,7 @@ enum rdma_driver_id { RDMA_DRIVER_EFA, RDMA_DRIVER_SIW, RDMA_DRIVER_ERDMA, + RDMA_DRIVER_MANA, }; enum ib_uverbs_gid_type { diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 43672cb1fd57..e16650f0c85d 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -105,6 +105,18 @@ enum { IB_USER_VERBS_EX_CMD_MODIFY_CQ }; +/* see IBA A19.4.1.1 Placement Types */ +enum ib_placement_type { + IB_FLUSH_GLOBAL = 1U << 0, + IB_FLUSH_PERSISTENT = 1U << 1, +}; + +/* see IBA A19.4.1.2 Selectivity Level */ +enum ib_selectivity_level { + IB_FLUSH_RANGE = 0, + IB_FLUSH_MR, +}; + /* * Make sure that all structs defined in this file remain laid out so * that they pack the same way on 32-bit and 64-bit architectures (to @@ -466,6 +478,8 @@ enum ib_uverbs_wc_opcode { IB_UVERBS_WC_BIND_MW = 5, IB_UVERBS_WC_LOCAL_INV = 6, IB_UVERBS_WC_TSO = 7, + IB_UVERBS_WC_FLUSH = 8, + IB_UVERBS_WC_ATOMIC_WRITE = 9, }; struct ib_uverbs_wc { @@ -784,6 +798,8 @@ enum ib_uverbs_wr_opcode { IB_UVERBS_WR_RDMA_READ_WITH_INV = 11, IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12, IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13, + IB_UVERBS_WR_FLUSH = 14, + IB_UVERBS_WR_ATOMIC_WRITE = 15, /* Review enum ib_wr_opcode before modifying this */ }; @@ -1331,6 +1347,11 @@ enum 
ib_uverbs_device_cap_flags { /* Deprecated. Please use IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS. */ IB_UVERBS_DEVICE_RAW_SCATTER_FCS = 1ULL << 34, IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING = 1ULL << 36, + /* Flush placement types */ + IB_UVERBS_DEVICE_FLUSH_GLOBAL = 1ULL << 38, + IB_UVERBS_DEVICE_FLUSH_PERSISTENT = 1ULL << 39, + /* Atomic write attributes */ + IB_UVERBS_DEVICE_ATOMIC_WRITE = 1ULL << 40, }; enum ib_uverbs_raw_packet_caps { diff --git a/include/uapi/rdma/mana-abi.h b/include/uapi/rdma/mana-abi.h new file mode 100644 index 000000000000..5fcb31b37fb9 --- /dev/null +++ b/include/uapi/rdma/mana-abi.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */ +/* + * Copyright (c) 2022, Microsoft Corporation. All rights reserved. + */ + +#ifndef MANA_ABI_USER_H +#define MANA_ABI_USER_H + +#include <linux/types.h> +#include <rdma/ib_user_ioctl_verbs.h> + +/* + * Increment this value if any changes that break userspace ABI + * compatibility are made. + */ + +#define MANA_IB_UVERBS_ABI_VERSION 1 + +struct mana_ib_create_cq { + __aligned_u64 buf_addr; +}; + +struct mana_ib_create_qp { + __aligned_u64 sq_buf_addr; + __u32 sq_buf_size; + __u32 port; +}; + +struct mana_ib_create_qp_resp { + __u32 sqid; + __u32 cqid; + __u32 tx_vp_offset; + __u32 reserved; +}; + +struct mana_ib_create_wq { + __aligned_u64 wq_buf_addr; + __u32 wq_buf_size; + __u32 reserved; +}; + +/* RX Hash function flags */ +enum mana_ib_rx_hash_function_flags { + MANA_IB_RX_HASH_FUNC_TOEPLITZ = 1 << 0, +}; + +struct mana_ib_create_qp_rss { + __aligned_u64 rx_hash_fields_mask; + __u8 rx_hash_function; + __u8 reserved[7]; + __u32 rx_hash_key_len; + __u8 rx_hash_key[40]; + __u32 port; +}; + +struct rss_resp_entry { + __u32 cqid; + __u32 wqid; +}; + +struct mana_ib_create_qp_rss_resp { + __aligned_u64 num_entries; + struct rss_resp_entry entries[64]; +}; + +#endif diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index 73f679dfd2df..bb092fccb813 100644 --- a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -84,6 +84,13 @@ struct rxe_send_wr { union { struct { __aligned_u64 remote_addr; + __u32 length; + __u32 rkey; + __u8 type; + __u8 level; + } flush; + struct { + __aligned_u64 remote_addr; __u32 rkey; __u32 reserved; } rdma; @@ -146,6 +153,7 @@ struct rxe_dma_info { __u32 reserved; union { __DECLARE_FLEX_ARRAY(__u8, inline_data); + __DECLARE_FLEX_ARRAY(__u8, atomic_wr); __DECLARE_FLEX_ARRAY(struct rxe_sge, sge); }; }; diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 9f28349ebcff..5cf81dff60aa 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -802,7 +802,9 @@ struct ufs_hba_monitor { * @caps: bitmask with information about UFS controller capabilities * @devfreq: frequency scaling information owned by the devfreq core * @clk_scaling: frequency scaling information owned by the UFS driver - * @is_sys_suspended: whether or not the entire system has been suspended + * @system_suspending: system suspend has been started and system resume has + * not yet finished. + * @is_sys_suspended: UFS device has been suspended because of system suspend * @urgent_bkops_lvl: keeps track of urgent bkops level for device * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for * device is known or not. 
@@ -943,6 +945,7 @@ struct ufs_hba { struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; + bool system_suspending; bool is_sys_suspended; enum bkops_status urgent_bkops_lvl; @@ -1069,12 +1072,6 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val); void ufshcd_hba_stop(struct ufs_hba *hba); void ufshcd_schedule_eh_work(struct ufs_hba *hba); -static inline void check_upiu_size(void) -{ - BUILD_BUG_ON(ALIGNED_UPIU_SIZE < - GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE); -} - /** * ufshcd_set_variant - set variant specific data to the hba * @hba: per adapter instance diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h deleted file mode 100644 index 42b77249ee14..000000000000 --- a/include/video/omap-panel-data.h +++ /dev/null @@ -1,71 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header containing platform_data structs for omap panels - * - * Copyright (C) 2013 Texas Instruments - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * Archit Taneja <archit@ti.com> - * - * Copyright (C) 2011 Texas Instruments - * Author: Mayuresh Janorkar <mayur@ti.com> - * - * Copyright (C) 2010 Canonical Ltd. - * Author: Bryan Wu <bryan.wu@canonical.com> - */ - -#ifndef __OMAP_PANEL_DATA_H -#define __OMAP_PANEL_DATA_H - -#include <video/display_timing.h> - -/** - * connector_atv platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @invert_polarity: invert signal polarity - */ -struct connector_atv_platform_data { - const char *name; - const char *source; - - bool invert_polarity; -}; - -/** - * panel_dpi platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @data_lines: number of DPI datalines - * @display_timing: timings for this panel - * @backlight_gpio: gpio to enable/disable the backlight (or -1) - * @enable_gpio: gpio to enable/disable the panel (or -1) - */ -struct panel_dpi_platform_data { - const char *name; - const char *source; - - int data_lines; - - const struct display_timing *display_timing; - - int backlight_gpio; - int enable_gpio; -}; - -/** - * panel_acx565akm platform data - * @name: name for this display entity - * @source: name of the display entity used as a video source - * @reset_gpio: gpio to reset the panel (or -1) - * @datapairs: number of SDI datapairs - */ -struct panel_acx565akm_platform_data { - const char *name; - const char *source; - - int reset_gpio; - - int datapairs; -}; - -#endif /* __OMAP_PANEL_DATA_H */